repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
Adarnof/adarnauth-esi
esi/clients.py
build_spec
python
def build_spec(base_version, http_client=None, **kwargs):
    """
    Generates the Spec used to initialize a SwaggerClient, supporting mixed resource versions.

    :param base_version: Version to base the spec on. Any resource without an explicit version will be this.
    :param http_client: :class:`bravado.requests_client.RequestsClient`
    :param kwargs: Explicit resource versions, by name (eg Character='v4')
    :return: :class:`bravado_core.spec.Spec`
    :raises AttributeError: if a requested resource is not present in its requested API revision.
    """
    base_spec = get_spec(base_version, http_client=http_client, config=SPEC_CONFIG)
    # Iterating directly is safe when kwargs is empty; no guard needed.
    for resource, resource_version in kwargs.items():
        versioned_spec = get_spec(resource_version, http_client=http_client, config=SPEC_CONFIG)
        # Spec resource keys are capitalized (eg 'Character'); compute once.
        resource_name = resource.capitalize()
        try:
            # Overwrite the base resource with the explicitly-versioned one.
            base_spec.resources[resource_name] = versioned_spec.resources[resource_name]
        except KeyError:
            raise AttributeError(
                'Resource {0} not found on API revision {1}'.format(resource, resource_version))
    return base_spec
Generates the Spec used to initialize a SwaggerClient, supporting mixed resource versions :param http_client: :class:`bravado.requests_client.RequestsClient` :param base_version: Version to base the spec on. Any resource without an explicit version will be this. :param kwargs: Explicit resource versions, by name (eg Character='v4') :return: :class:`bravado_core.spec.Spec`
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L160-L178
[ "def get_spec(name, http_client=None, config=None):\n \"\"\"\n :param name: Name of the revision of spec, eg latest or v4\n :param http_client: Requests client used for retrieving specs\n :param config: Spec configuration - see Spec.CONFIG_DEFAULTS\n :return: :class:`bravado_core.spec.Spec`\n \"\"...
from __future__ import unicode_literals from bravado.client import SwaggerClient, CONFIG_DEFAULTS from bravado import requests_client from bravado.swagger_model import Loader from bravado.http_future import HttpFuture from bravado_core.spec import Spec from esi.errors import TokenExpiredError from esi import app_settings from django.core.cache import cache from datetime import datetime from hashlib import md5 import json try: import urlparse except ImportError: # py3 from urllib import parse as urlparse SPEC_CONFIG = {'use_models': False} class CachingHttpFuture(HttpFuture): """ Used to add caching to certain HTTP requests according to "Expires" header """ def __init__(self, *args, **kwargs): super(CachingHttpFuture, self).__init__(*args, **kwargs) self.cache_key = self._build_cache_key(self.future.request) @staticmethod def _build_cache_key(request): """ Generated the key name used to cache responses :param request: request used to retrieve API response :return: formatted cache name """ str_hash = md5( (request.method + request.url + str(request.params) + str(request.data) + str(request.json)).encode( 'utf-8')).hexdigest() return 'esi_%s' % str_hash @staticmethod def _time_to_expiry(expires): """ Determines the seconds until a HTTP header "Expires" timestamp :param expires: HTTP response "Expires" header :return: seconds until "Expires" time """ try: expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z') delta = expires_dt - datetime.utcnow() return delta.seconds except ValueError: return 0 def result(self, **kwargs): if app_settings.ESI_CACHE_RESPONSE and self.future.request.method == 'GET' and self.operation is not None: """ Only cache if all are true: - settings dictate caching - it's a http get request - it's to a swagger api endpoint """ cached = cache.get(self.cache_key) if cached: result, response = cached else: _also_return_response = self.also_return_response # preserve original value self.also_return_response = True # override to always 
get the raw response for expiry header result, response = super(CachingHttpFuture, self).result(**kwargs) self.also_return_response = _also_return_response # restore original value if 'Expires' in response.headers: expires = self._time_to_expiry(response.headers['Expires']) if expires > 0: cache.set(self.cache_key, (result, response), expires) if self.also_return_response: return result, response else: return result else: return super(CachingHttpFuture, self).result(**kwargs) requests_client.HttpFuture = CachingHttpFuture class TokenAuthenticator(requests_client.Authenticator): """ Adds the authorization header containing access token, if specified. Sets ESI datasource to tranquility or singularity. """ def __init__(self, token=None, datasource=None): host = urlparse.urlsplit(app_settings.ESI_API_URL).hostname super(TokenAuthenticator, self).__init__(host) self.token = token self.datasource = datasource def apply(self, request): if self.token and self.token.expired: if self.token.can_refresh: self.token.refresh() else: raise TokenExpiredError() request.headers['Authorization'] = 'Bearer ' + self.token.access_token if self.token else None request.params['datasource'] = self.datasource or app_settings.ESI_API_DATASOURCE return request def build_cache_name(name): """ Cache key name formatter :param name: Name of the spec dict to cache, usually version :return: String name for cache key :rtype: str """ return 'esi_swaggerspec_%s' % name def cache_spec(name, spec): """ Cache the spec dict :param name: Version name :param spec: Spec dict :return: True if cached """ return cache.set(build_cache_name(name), spec, app_settings.ESI_SPEC_CACHE_DURATION) def build_spec_url(spec_version): """ Generates the URL to swagger.json for the ESI version :param spec_version: Name of the swagger spec version, like latest or v4 :return: URL to swagger.json for the requested spec version """ return urlparse.urljoin(app_settings.ESI_API_URL, spec_version + '/swagger.json') def 
get_spec(name, http_client=None, config=None): """ :param name: Name of the revision of spec, eg latest or v4 :param http_client: Requests client used for retrieving specs :param config: Spec configuration - see Spec.CONFIG_DEFAULTS :return: :class:`bravado_core.spec.Spec` """ http_client = http_client or requests_client.RequestsClient() def load_spec(): loader = Loader(http_client) return loader.load_spec(build_spec_url(name)) spec_dict = cache.get_or_set(build_cache_name(name), load_spec, app_settings.ESI_SPEC_CACHE_DURATION) config = dict(CONFIG_DEFAULTS, **(config or {})) return Spec.from_dict(spec_dict, build_spec_url(name), http_client, config) def read_spec(path, http_client=None): """ Reads in a swagger spec file used to initialize a SwaggerClient :param path: String path to local swagger spec file. :param http_client: :class:`bravado.requests_client.RequestsClient` :return: :class:`bravado_core.spec.Spec` """ with open(path, 'r') as f: spec_dict = json.loads(f.read()) return SwaggerClient.from_spec(spec_dict, http_client=http_client, config=SPEC_CONFIG) def esi_client_factory(token=None, datasource=None, spec_file=None, version=None, **kwargs): """ Generates an ESI client. :param token: :class:`esi.Token` used to access authenticated endpoints. :param datasource: Name of the ESI datasource to access. :param spec_file: Absolute path to a swagger spec file to load. :param version: Base ESI API version. Accepted values are 'legacy', 'latest', 'dev', or 'vX' where X is a number. :param kwargs: Explicit resource versions to build, in the form Character='v4'. Same values accepted as version. :return: :class:`bravado.client.SwaggerClient` If a spec_file is specified, specific versioning is not available. Meaning the version and resource version kwargs are ignored in favour of the versions available in the spec_file. 
""" client = requests_client.RequestsClient() if token or datasource: client.authenticator = TokenAuthenticator(token=token, datasource=datasource) api_version = version or app_settings.ESI_API_VERSION if spec_file: return read_spec(spec_file, http_client=client) else: spec = build_spec(api_version, http_client=client, **kwargs) return SwaggerClient(spec) def minimize_spec(spec_dict, operations=None, resources=None): """ Trims down a source spec dict to only the operations or resources indicated. :param spec_dict: The source spec dict to minimize. :type spec_dict: dict :param operations: A list of opertion IDs to retain. :type operations: list of str :param resources: A list of resource names to retain. :type resources: list of str :return: Minimized swagger spec dict :rtype: dict """ operations = operations or [] resources = resources or [] # keep the ugly overhead for now but only add paths we need minimized = {key: value for key, value in spec_dict.items() if key != 'paths'} minimized['paths'] = {} for path_name, path in spec_dict['paths'].items(): for method, data in path.items(): if data['operationId'] in operations or any(tag in resources for tag in data['tags']): if path_name not in minimized['paths']: minimized['paths'][path_name] = {} minimized['paths'][path_name][method] = data return minimized
Adarnof/adarnauth-esi
esi/clients.py
read_spec
python
def read_spec(path, http_client=None):
    """
    Reads in a local swagger spec file used to initialize a SwaggerClient.

    :param path: String path to local swagger spec file.
    :param http_client: :class:`bravado.requests_client.RequestsClient`
    :return: :class:`bravado.client.SwaggerClient`
    """
    with open(path, 'r') as f:
        # json.load streams from the file object directly; no need to read
        # the whole file into a string first.
        spec_dict = json.load(f)
    return SwaggerClient.from_spec(spec_dict, http_client=http_client, config=SPEC_CONFIG)
Reads in a swagger spec file used to initialize a SwaggerClient :param path: String path to local swagger spec file. :param http_client: :class:`bravado.requests_client.RequestsClient` :return: :class:`bravado.client.SwaggerClient`
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L181-L191
null
from __future__ import unicode_literals from bravado.client import SwaggerClient, CONFIG_DEFAULTS from bravado import requests_client from bravado.swagger_model import Loader from bravado.http_future import HttpFuture from bravado_core.spec import Spec from esi.errors import TokenExpiredError from esi import app_settings from django.core.cache import cache from datetime import datetime from hashlib import md5 import json try: import urlparse except ImportError: # py3 from urllib import parse as urlparse SPEC_CONFIG = {'use_models': False} class CachingHttpFuture(HttpFuture): """ Used to add caching to certain HTTP requests according to "Expires" header """ def __init__(self, *args, **kwargs): super(CachingHttpFuture, self).__init__(*args, **kwargs) self.cache_key = self._build_cache_key(self.future.request) @staticmethod def _build_cache_key(request): """ Generated the key name used to cache responses :param request: request used to retrieve API response :return: formatted cache name """ str_hash = md5( (request.method + request.url + str(request.params) + str(request.data) + str(request.json)).encode( 'utf-8')).hexdigest() return 'esi_%s' % str_hash @staticmethod def _time_to_expiry(expires): """ Determines the seconds until a HTTP header "Expires" timestamp :param expires: HTTP response "Expires" header :return: seconds until "Expires" time """ try: expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z') delta = expires_dt - datetime.utcnow() return delta.seconds except ValueError: return 0 def result(self, **kwargs): if app_settings.ESI_CACHE_RESPONSE and self.future.request.method == 'GET' and self.operation is not None: """ Only cache if all are true: - settings dictate caching - it's a http get request - it's to a swagger api endpoint """ cached = cache.get(self.cache_key) if cached: result, response = cached else: _also_return_response = self.also_return_response # preserve original value self.also_return_response = True # override to always 
get the raw response for expiry header result, response = super(CachingHttpFuture, self).result(**kwargs) self.also_return_response = _also_return_response # restore original value if 'Expires' in response.headers: expires = self._time_to_expiry(response.headers['Expires']) if expires > 0: cache.set(self.cache_key, (result, response), expires) if self.also_return_response: return result, response else: return result else: return super(CachingHttpFuture, self).result(**kwargs) requests_client.HttpFuture = CachingHttpFuture class TokenAuthenticator(requests_client.Authenticator): """ Adds the authorization header containing access token, if specified. Sets ESI datasource to tranquility or singularity. """ def __init__(self, token=None, datasource=None): host = urlparse.urlsplit(app_settings.ESI_API_URL).hostname super(TokenAuthenticator, self).__init__(host) self.token = token self.datasource = datasource def apply(self, request): if self.token and self.token.expired: if self.token.can_refresh: self.token.refresh() else: raise TokenExpiredError() request.headers['Authorization'] = 'Bearer ' + self.token.access_token if self.token else None request.params['datasource'] = self.datasource or app_settings.ESI_API_DATASOURCE return request def build_cache_name(name): """ Cache key name formatter :param name: Name of the spec dict to cache, usually version :return: String name for cache key :rtype: str """ return 'esi_swaggerspec_%s' % name def cache_spec(name, spec): """ Cache the spec dict :param name: Version name :param spec: Spec dict :return: True if cached """ return cache.set(build_cache_name(name), spec, app_settings.ESI_SPEC_CACHE_DURATION) def build_spec_url(spec_version): """ Generates the URL to swagger.json for the ESI version :param spec_version: Name of the swagger spec version, like latest or v4 :return: URL to swagger.json for the requested spec version """ return urlparse.urljoin(app_settings.ESI_API_URL, spec_version + '/swagger.json') def 
get_spec(name, http_client=None, config=None): """ :param name: Name of the revision of spec, eg latest or v4 :param http_client: Requests client used for retrieving specs :param config: Spec configuration - see Spec.CONFIG_DEFAULTS :return: :class:`bravado_core.spec.Spec` """ http_client = http_client or requests_client.RequestsClient() def load_spec(): loader = Loader(http_client) return loader.load_spec(build_spec_url(name)) spec_dict = cache.get_or_set(build_cache_name(name), load_spec, app_settings.ESI_SPEC_CACHE_DURATION) config = dict(CONFIG_DEFAULTS, **(config or {})) return Spec.from_dict(spec_dict, build_spec_url(name), http_client, config) def build_spec(base_version, http_client=None, **kwargs): """ Generates the Spec used to initialize a SwaggerClient, supporting mixed resource versions :param http_client: :class:`bravado.requests_client.RequestsClient` :param base_version: Version to base the spec on. Any resource without an explicit version will be this. :param kwargs: Explicit resource versions, by name (eg Character='v4') :return: :class:`bravado_core.spec.Spec` """ base_spec = get_spec(base_version, http_client=http_client, config=SPEC_CONFIG) if kwargs: for resource, resource_version in kwargs.items(): versioned_spec = get_spec(resource_version, http_client=http_client, config=SPEC_CONFIG) try: spec_resource = versioned_spec.resources[resource.capitalize()] except KeyError: raise AttributeError( 'Resource {0} not found on API revision {1}'.format(resource, resource_version)) base_spec.resources[resource.capitalize()] = spec_resource return base_spec def esi_client_factory(token=None, datasource=None, spec_file=None, version=None, **kwargs): """ Generates an ESI client. :param token: :class:`esi.Token` used to access authenticated endpoints. :param datasource: Name of the ESI datasource to access. :param spec_file: Absolute path to a swagger spec file to load. :param version: Base ESI API version. 
Accepted values are 'legacy', 'latest', 'dev', or 'vX' where X is a number. :param kwargs: Explicit resource versions to build, in the form Character='v4'. Same values accepted as version. :return: :class:`bravado.client.SwaggerClient` If a spec_file is specified, specific versioning is not available. Meaning the version and resource version kwargs are ignored in favour of the versions available in the spec_file. """ client = requests_client.RequestsClient() if token or datasource: client.authenticator = TokenAuthenticator(token=token, datasource=datasource) api_version = version or app_settings.ESI_API_VERSION if spec_file: return read_spec(spec_file, http_client=client) else: spec = build_spec(api_version, http_client=client, **kwargs) return SwaggerClient(spec) def minimize_spec(spec_dict, operations=None, resources=None): """ Trims down a source spec dict to only the operations or resources indicated. :param spec_dict: The source spec dict to minimize. :type spec_dict: dict :param operations: A list of opertion IDs to retain. :type operations: list of str :param resources: A list of resource names to retain. :type resources: list of str :return: Minimized swagger spec dict :rtype: dict """ operations = operations or [] resources = resources or [] # keep the ugly overhead for now but only add paths we need minimized = {key: value for key, value in spec_dict.items() if key != 'paths'} minimized['paths'] = {} for path_name, path in spec_dict['paths'].items(): for method, data in path.items(): if data['operationId'] in operations or any(tag in resources for tag in data['tags']): if path_name not in minimized['paths']: minimized['paths'][path_name] = {} minimized['paths'][path_name][method] = data return minimized
Adarnof/adarnauth-esi
esi/clients.py
esi_client_factory
python
def esi_client_factory(token=None, datasource=None, spec_file=None, version=None, **kwargs):
    """
    Generates an ESI client.

    :param token: :class:`esi.Token` used to access authenticated endpoints.
    :param datasource: Name of the ESI datasource to access.
    :param spec_file: Absolute path to a swagger spec file to load.
    :param version: Base ESI API version. Accepted values are 'legacy',
        'latest', 'dev', or 'vX' where X is a number.
    :param kwargs: Explicit resource versions to build, in the form
        Character='v4'. Same values accepted as version.
    :return: :class:`bravado.client.SwaggerClient`

    If a spec_file is specified, specific versioning is not available:
    the version and resource version kwargs are ignored in favour of the
    versions available in the spec_file.
    """
    client = requests_client.RequestsClient()
    # Only attach an authenticator when there is something for it to do.
    if token or datasource:
        client.authenticator = TokenAuthenticator(token=token, datasource=datasource)
    api_version = version or app_settings.ESI_API_VERSION
    if spec_file:
        # Local spec file wins; explicit versioning does not apply.
        return read_spec(spec_file, http_client=client)
    return SwaggerClient(build_spec(api_version, http_client=client, **kwargs))
Generates an ESI client. :param token: :class:`esi.Token` used to access authenticated endpoints. :param datasource: Name of the ESI datasource to access. :param spec_file: Absolute path to a swagger spec file to load. :param version: Base ESI API version. Accepted values are 'legacy', 'latest', 'dev', or 'vX' where X is a number. :param kwargs: Explicit resource versions to build, in the form Character='v4'. Same values accepted as version. :return: :class:`bravado.client.SwaggerClient` If a spec_file is specified, specific versioning is not available. Meaning the version and resource version kwargs are ignored in favour of the versions available in the spec_file.
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L194-L218
[ "def build_spec(base_version, http_client=None, **kwargs):\n \"\"\"\n Generates the Spec used to initialize a SwaggerClient, supporting mixed resource versions\n :param http_client: :class:`bravado.requests_client.RequestsClient`\n :param base_version: Version to base the spec on. Any resource without a...
from __future__ import unicode_literals from bravado.client import SwaggerClient, CONFIG_DEFAULTS from bravado import requests_client from bravado.swagger_model import Loader from bravado.http_future import HttpFuture from bravado_core.spec import Spec from esi.errors import TokenExpiredError from esi import app_settings from django.core.cache import cache from datetime import datetime from hashlib import md5 import json try: import urlparse except ImportError: # py3 from urllib import parse as urlparse SPEC_CONFIG = {'use_models': False} class CachingHttpFuture(HttpFuture): """ Used to add caching to certain HTTP requests according to "Expires" header """ def __init__(self, *args, **kwargs): super(CachingHttpFuture, self).__init__(*args, **kwargs) self.cache_key = self._build_cache_key(self.future.request) @staticmethod def _build_cache_key(request): """ Generated the key name used to cache responses :param request: request used to retrieve API response :return: formatted cache name """ str_hash = md5( (request.method + request.url + str(request.params) + str(request.data) + str(request.json)).encode( 'utf-8')).hexdigest() return 'esi_%s' % str_hash @staticmethod def _time_to_expiry(expires): """ Determines the seconds until a HTTP header "Expires" timestamp :param expires: HTTP response "Expires" header :return: seconds until "Expires" time """ try: expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z') delta = expires_dt - datetime.utcnow() return delta.seconds except ValueError: return 0 def result(self, **kwargs): if app_settings.ESI_CACHE_RESPONSE and self.future.request.method == 'GET' and self.operation is not None: """ Only cache if all are true: - settings dictate caching - it's a http get request - it's to a swagger api endpoint """ cached = cache.get(self.cache_key) if cached: result, response = cached else: _also_return_response = self.also_return_response # preserve original value self.also_return_response = True # override to always 
get the raw response for expiry header result, response = super(CachingHttpFuture, self).result(**kwargs) self.also_return_response = _also_return_response # restore original value if 'Expires' in response.headers: expires = self._time_to_expiry(response.headers['Expires']) if expires > 0: cache.set(self.cache_key, (result, response), expires) if self.also_return_response: return result, response else: return result else: return super(CachingHttpFuture, self).result(**kwargs) requests_client.HttpFuture = CachingHttpFuture class TokenAuthenticator(requests_client.Authenticator): """ Adds the authorization header containing access token, if specified. Sets ESI datasource to tranquility or singularity. """ def __init__(self, token=None, datasource=None): host = urlparse.urlsplit(app_settings.ESI_API_URL).hostname super(TokenAuthenticator, self).__init__(host) self.token = token self.datasource = datasource def apply(self, request): if self.token and self.token.expired: if self.token.can_refresh: self.token.refresh() else: raise TokenExpiredError() request.headers['Authorization'] = 'Bearer ' + self.token.access_token if self.token else None request.params['datasource'] = self.datasource or app_settings.ESI_API_DATASOURCE return request def build_cache_name(name): """ Cache key name formatter :param name: Name of the spec dict to cache, usually version :return: String name for cache key :rtype: str """ return 'esi_swaggerspec_%s' % name def cache_spec(name, spec): """ Cache the spec dict :param name: Version name :param spec: Spec dict :return: True if cached """ return cache.set(build_cache_name(name), spec, app_settings.ESI_SPEC_CACHE_DURATION) def build_spec_url(spec_version): """ Generates the URL to swagger.json for the ESI version :param spec_version: Name of the swagger spec version, like latest or v4 :return: URL to swagger.json for the requested spec version """ return urlparse.urljoin(app_settings.ESI_API_URL, spec_version + '/swagger.json') def 
get_spec(name, http_client=None, config=None): """ :param name: Name of the revision of spec, eg latest or v4 :param http_client: Requests client used for retrieving specs :param config: Spec configuration - see Spec.CONFIG_DEFAULTS :return: :class:`bravado_core.spec.Spec` """ http_client = http_client or requests_client.RequestsClient() def load_spec(): loader = Loader(http_client) return loader.load_spec(build_spec_url(name)) spec_dict = cache.get_or_set(build_cache_name(name), load_spec, app_settings.ESI_SPEC_CACHE_DURATION) config = dict(CONFIG_DEFAULTS, **(config or {})) return Spec.from_dict(spec_dict, build_spec_url(name), http_client, config) def build_spec(base_version, http_client=None, **kwargs): """ Generates the Spec used to initialize a SwaggerClient, supporting mixed resource versions :param http_client: :class:`bravado.requests_client.RequestsClient` :param base_version: Version to base the spec on. Any resource without an explicit version will be this. :param kwargs: Explicit resource versions, by name (eg Character='v4') :return: :class:`bravado_core.spec.Spec` """ base_spec = get_spec(base_version, http_client=http_client, config=SPEC_CONFIG) if kwargs: for resource, resource_version in kwargs.items(): versioned_spec = get_spec(resource_version, http_client=http_client, config=SPEC_CONFIG) try: spec_resource = versioned_spec.resources[resource.capitalize()] except KeyError: raise AttributeError( 'Resource {0} not found on API revision {1}'.format(resource, resource_version)) base_spec.resources[resource.capitalize()] = spec_resource return base_spec def read_spec(path, http_client=None): """ Reads in a swagger spec file used to initialize a SwaggerClient :param path: String path to local swagger spec file. 
:param http_client: :class:`bravado.requests_client.RequestsClient` :return: :class:`bravado_core.spec.Spec` """ with open(path, 'r') as f: spec_dict = json.loads(f.read()) return SwaggerClient.from_spec(spec_dict, http_client=http_client, config=SPEC_CONFIG) def minimize_spec(spec_dict, operations=None, resources=None): """ Trims down a source spec dict to only the operations or resources indicated. :param spec_dict: The source spec dict to minimize. :type spec_dict: dict :param operations: A list of opertion IDs to retain. :type operations: list of str :param resources: A list of resource names to retain. :type resources: list of str :return: Minimized swagger spec dict :rtype: dict """ operations = operations or [] resources = resources or [] # keep the ugly overhead for now but only add paths we need minimized = {key: value for key, value in spec_dict.items() if key != 'paths'} minimized['paths'] = {} for path_name, path in spec_dict['paths'].items(): for method, data in path.items(): if data['operationId'] in operations or any(tag in resources for tag in data['tags']): if path_name not in minimized['paths']: minimized['paths'][path_name] = {} minimized['paths'][path_name][method] = data return minimized
Adarnof/adarnauth-esi
esi/clients.py
minimize_spec
python
def minimize_spec(spec_dict, operations=None, resources=None):
    """
    Trims down a source spec dict to only the operations or resources indicated.

    :param spec_dict: The source spec dict to minimize.
    :type spec_dict: dict
    :param operations: A list of operation IDs to retain.
    :type operations: list of str
    :param resources: A list of resource names (swagger tags) to retain.
    :type resources: list of str
    :return: Minimized swagger spec dict
    :rtype: dict
    """
    operations = operations or []
    resources = resources or []
    # keep the ugly overhead for now but only add paths we need
    minimized = {key: value for key, value in spec_dict.items() if key != 'paths'}
    minimized['paths'] = {}
    for path_name, path in spec_dict['paths'].items():
        for method, data in path.items():
            # Swagger Path Item Objects may carry non-operation keys (eg a
            # shared 'parameters' list) whose values are not operation dicts;
            # skip those instead of crashing on data['operationId'].
            if not isinstance(data, dict):
                continue
            keep = (data.get('operationId') in operations
                    or any(tag in resources for tag in data.get('tags', [])))
            if keep:
                minimized['paths'].setdefault(path_name, {})[method] = data
    return minimized
Trims down a source spec dict to only the operations or resources indicated. :param spec_dict: The source spec dict to minimize. :type spec_dict: dict :param operations: A list of operation IDs to retain. :type operations: list of str :param resources: A list of resource names to retain. :type resources: list of str :return: Minimized swagger spec dict :rtype: dict
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L221-L247
null
from __future__ import unicode_literals from bravado.client import SwaggerClient, CONFIG_DEFAULTS from bravado import requests_client from bravado.swagger_model import Loader from bravado.http_future import HttpFuture from bravado_core.spec import Spec from esi.errors import TokenExpiredError from esi import app_settings from django.core.cache import cache from datetime import datetime from hashlib import md5 import json try: import urlparse except ImportError: # py3 from urllib import parse as urlparse SPEC_CONFIG = {'use_models': False} class CachingHttpFuture(HttpFuture): """ Used to add caching to certain HTTP requests according to "Expires" header """ def __init__(self, *args, **kwargs): super(CachingHttpFuture, self).__init__(*args, **kwargs) self.cache_key = self._build_cache_key(self.future.request) @staticmethod def _build_cache_key(request): """ Generated the key name used to cache responses :param request: request used to retrieve API response :return: formatted cache name """ str_hash = md5( (request.method + request.url + str(request.params) + str(request.data) + str(request.json)).encode( 'utf-8')).hexdigest() return 'esi_%s' % str_hash @staticmethod def _time_to_expiry(expires): """ Determines the seconds until a HTTP header "Expires" timestamp :param expires: HTTP response "Expires" header :return: seconds until "Expires" time """ try: expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z') delta = expires_dt - datetime.utcnow() return delta.seconds except ValueError: return 0 def result(self, **kwargs): if app_settings.ESI_CACHE_RESPONSE and self.future.request.method == 'GET' and self.operation is not None: """ Only cache if all are true: - settings dictate caching - it's a http get request - it's to a swagger api endpoint """ cached = cache.get(self.cache_key) if cached: result, response = cached else: _also_return_response = self.also_return_response # preserve original value self.also_return_response = True # override to always 
get the raw response for expiry header result, response = super(CachingHttpFuture, self).result(**kwargs) self.also_return_response = _also_return_response # restore original value if 'Expires' in response.headers: expires = self._time_to_expiry(response.headers['Expires']) if expires > 0: cache.set(self.cache_key, (result, response), expires) if self.also_return_response: return result, response else: return result else: return super(CachingHttpFuture, self).result(**kwargs) requests_client.HttpFuture = CachingHttpFuture class TokenAuthenticator(requests_client.Authenticator): """ Adds the authorization header containing access token, if specified. Sets ESI datasource to tranquility or singularity. """ def __init__(self, token=None, datasource=None): host = urlparse.urlsplit(app_settings.ESI_API_URL).hostname super(TokenAuthenticator, self).__init__(host) self.token = token self.datasource = datasource def apply(self, request): if self.token and self.token.expired: if self.token.can_refresh: self.token.refresh() else: raise TokenExpiredError() request.headers['Authorization'] = 'Bearer ' + self.token.access_token if self.token else None request.params['datasource'] = self.datasource or app_settings.ESI_API_DATASOURCE return request def build_cache_name(name): """ Cache key name formatter :param name: Name of the spec dict to cache, usually version :return: String name for cache key :rtype: str """ return 'esi_swaggerspec_%s' % name def cache_spec(name, spec): """ Cache the spec dict :param name: Version name :param spec: Spec dict :return: True if cached """ return cache.set(build_cache_name(name), spec, app_settings.ESI_SPEC_CACHE_DURATION) def build_spec_url(spec_version): """ Generates the URL to swagger.json for the ESI version :param spec_version: Name of the swagger spec version, like latest or v4 :return: URL to swagger.json for the requested spec version """ return urlparse.urljoin(app_settings.ESI_API_URL, spec_version + '/swagger.json') def 
get_spec(name, http_client=None, config=None): """ :param name: Name of the revision of spec, eg latest or v4 :param http_client: Requests client used for retrieving specs :param config: Spec configuration - see Spec.CONFIG_DEFAULTS :return: :class:`bravado_core.spec.Spec` """ http_client = http_client or requests_client.RequestsClient() def load_spec(): loader = Loader(http_client) return loader.load_spec(build_spec_url(name)) spec_dict = cache.get_or_set(build_cache_name(name), load_spec, app_settings.ESI_SPEC_CACHE_DURATION) config = dict(CONFIG_DEFAULTS, **(config or {})) return Spec.from_dict(spec_dict, build_spec_url(name), http_client, config) def build_spec(base_version, http_client=None, **kwargs): """ Generates the Spec used to initialize a SwaggerClient, supporting mixed resource versions :param http_client: :class:`bravado.requests_client.RequestsClient` :param base_version: Version to base the spec on. Any resource without an explicit version will be this. :param kwargs: Explicit resource versions, by name (eg Character='v4') :return: :class:`bravado_core.spec.Spec` """ base_spec = get_spec(base_version, http_client=http_client, config=SPEC_CONFIG) if kwargs: for resource, resource_version in kwargs.items(): versioned_spec = get_spec(resource_version, http_client=http_client, config=SPEC_CONFIG) try: spec_resource = versioned_spec.resources[resource.capitalize()] except KeyError: raise AttributeError( 'Resource {0} not found on API revision {1}'.format(resource, resource_version)) base_spec.resources[resource.capitalize()] = spec_resource return base_spec def read_spec(path, http_client=None): """ Reads in a swagger spec file used to initialize a SwaggerClient :param path: String path to local swagger spec file. 
:param http_client: :class:`bravado.requests_client.RequestsClient` :return: :class:`bravado_core.spec.Spec` """ with open(path, 'r') as f: spec_dict = json.loads(f.read()) return SwaggerClient.from_spec(spec_dict, http_client=http_client, config=SPEC_CONFIG) def esi_client_factory(token=None, datasource=None, spec_file=None, version=None, **kwargs): """ Generates an ESI client. :param token: :class:`esi.Token` used to access authenticated endpoints. :param datasource: Name of the ESI datasource to access. :param spec_file: Absolute path to a swagger spec file to load. :param version: Base ESI API version. Accepted values are 'legacy', 'latest', 'dev', or 'vX' where X is a number. :param kwargs: Explicit resource versions to build, in the form Character='v4'. Same values accepted as version. :return: :class:`bravado.client.SwaggerClient` If a spec_file is specified, specific versioning is not available. Meaning the version and resource version kwargs are ignored in favour of the versions available in the spec_file. """ client = requests_client.RequestsClient() if token or datasource: client.authenticator = TokenAuthenticator(token=token, datasource=datasource) api_version = version or app_settings.ESI_API_VERSION if spec_file: return read_spec(spec_file, http_client=client) else: spec = build_spec(api_version, http_client=client, **kwargs) return SwaggerClient(spec)
Adarnof/adarnauth-esi
esi/clients.py
CachingHttpFuture._build_cache_key
python
def _build_cache_key(request): str_hash = md5( (request.method + request.url + str(request.params) + str(request.data) + str(request.json)).encode( 'utf-8')).hexdigest() return 'esi_%s' % str_hash
Generated the key name used to cache responses :param request: request used to retrieve API response :return: formatted cache name
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L32-L41
null
class CachingHttpFuture(HttpFuture): """ Used to add caching to certain HTTP requests according to "Expires" header """ def __init__(self, *args, **kwargs): super(CachingHttpFuture, self).__init__(*args, **kwargs) self.cache_key = self._build_cache_key(self.future.request) @staticmethod @staticmethod def _time_to_expiry(expires): """ Determines the seconds until a HTTP header "Expires" timestamp :param expires: HTTP response "Expires" header :return: seconds until "Expires" time """ try: expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z') delta = expires_dt - datetime.utcnow() return delta.seconds except ValueError: return 0 def result(self, **kwargs): if app_settings.ESI_CACHE_RESPONSE and self.future.request.method == 'GET' and self.operation is not None: """ Only cache if all are true: - settings dictate caching - it's a http get request - it's to a swagger api endpoint """ cached = cache.get(self.cache_key) if cached: result, response = cached else: _also_return_response = self.also_return_response # preserve original value self.also_return_response = True # override to always get the raw response for expiry header result, response = super(CachingHttpFuture, self).result(**kwargs) self.also_return_response = _also_return_response # restore original value if 'Expires' in response.headers: expires = self._time_to_expiry(response.headers['Expires']) if expires > 0: cache.set(self.cache_key, (result, response), expires) if self.also_return_response: return result, response else: return result else: return super(CachingHttpFuture, self).result(**kwargs)
Adarnof/adarnauth-esi
esi/clients.py
CachingHttpFuture._time_to_expiry
python
def _time_to_expiry(expires): try: expires_dt = datetime.strptime(str(expires), '%a, %d %b %Y %H:%M:%S %Z') delta = expires_dt - datetime.utcnow() return delta.seconds except ValueError: return 0
Determines the seconds until a HTTP header "Expires" timestamp :param expires: HTTP response "Expires" header :return: seconds until "Expires" time
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/clients.py#L44-L55
null
class CachingHttpFuture(HttpFuture): """ Used to add caching to certain HTTP requests according to "Expires" header """ def __init__(self, *args, **kwargs): super(CachingHttpFuture, self).__init__(*args, **kwargs) self.cache_key = self._build_cache_key(self.future.request) @staticmethod def _build_cache_key(request): """ Generated the key name used to cache responses :param request: request used to retrieve API response :return: formatted cache name """ str_hash = md5( (request.method + request.url + str(request.params) + str(request.data) + str(request.json)).encode( 'utf-8')).hexdigest() return 'esi_%s' % str_hash @staticmethod def result(self, **kwargs): if app_settings.ESI_CACHE_RESPONSE and self.future.request.method == 'GET' and self.operation is not None: """ Only cache if all are true: - settings dictate caching - it's a http get request - it's to a swagger api endpoint """ cached = cache.get(self.cache_key) if cached: result, response = cached else: _also_return_response = self.also_return_response # preserve original value self.also_return_response = True # override to always get the raw response for expiry header result, response = super(CachingHttpFuture, self).result(**kwargs) self.also_return_response = _also_return_response # restore original value if 'Expires' in response.headers: expires = self._time_to_expiry(response.headers['Expires']) if expires > 0: cache.set(self.cache_key, (result, response), expires) if self.also_return_response: return result, response else: return result else: return super(CachingHttpFuture, self).result(**kwargs)
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.get_expired
python
def get_expired(self): max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION) return self.filter(created__lte=max_age)
Get all tokens which have expired. :return: All expired tokens. :rtype: :class:`esi.managers.TokenQueryset`
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L29-L36
null
class TokenQueryset(models.QuerySet): def bulk_refresh(self): """ Refreshes all refreshable tokens in the queryset. Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh. Excludes tokens for which the refresh was incomplete for other reasons. """ session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID) auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET) incomplete = [] for model in self.filter(refresh_token__isnull=False): try: model.refresh(session=session, auth=auth) logging.debug("Successfully refreshed {0}".format(repr(model))) except TokenError: logger.info("Refresh failed for {0}. Deleting.".format(repr(model))) model.delete() except IncompleteResponseError: incomplete.append(model.pk) self.filter(refresh_token__isnull=True).get_expired().delete() return self.exclude(pk__in=incomplete) def require_valid(self): """ Ensures all tokens are still valid. If expired, attempts to refresh. Deletes those which fail to refresh or cannot be refreshed. :return: All tokens which are still valid. :rtype: :class:`esi.managers.TokenQueryset` """ expired = self.get_expired() valid = self.exclude(pk__in=expired) valid_expired = expired.bulk_refresh() return valid_expired | valid def require_scopes(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with all requested scopes. 
:rtype: :class:`esi.managers.TokenQueryset` """ scopes = _process_scopes(scope_string) if not scopes: # asking for tokens with no scopes return self.filter(scopes__isnull=True) from .models import Scope scope_pks = Scope.objects.filter(name__in=scopes).values_list('pk', flat=True) if not len(scopes) == len(scope_pks): # there's a scope we don't recognize, so we can't have any tokens for it return self.none() tokens = self.all() for pk in scope_pks: tokens = tokens.filter(scopes__pk=pk) return tokens def require_scopes_exact(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with only the requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ num_scopes = len(_process_scopes(scope_string)) pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter( scopes__count=num_scopes).values('pk', 'scopes__id')] return self.filter(pk__in=pks) def equivalent_to(self, token): """ Gets all tokens which match the character and scopes of a reference token :param token: :class:`esi.models.Token` :return: :class:`esi.managers.TokenQueryset` """ return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter( models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk)
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.bulk_refresh
python
def bulk_refresh(self): session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID) auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET) incomplete = [] for model in self.filter(refresh_token__isnull=False): try: model.refresh(session=session, auth=auth) logging.debug("Successfully refreshed {0}".format(repr(model))) except TokenError: logger.info("Refresh failed for {0}. Deleting.".format(repr(model))) model.delete() except IncompleteResponseError: incomplete.append(model.pk) self.filter(refresh_token__isnull=True).get_expired().delete() return self.exclude(pk__in=incomplete)
Refreshes all refreshable tokens in the queryset. Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh. Excludes tokens for which the refresh was incomplete for other reasons.
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L38-L58
null
class TokenQueryset(models.QuerySet): def get_expired(self): """ Get all tokens which have expired. :return: All expired tokens. :rtype: :class:`esi.managers.TokenQueryset` """ max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION) return self.filter(created__lte=max_age) def require_valid(self): """ Ensures all tokens are still valid. If expired, attempts to refresh. Deletes those which fail to refresh or cannot be refreshed. :return: All tokens which are still valid. :rtype: :class:`esi.managers.TokenQueryset` """ expired = self.get_expired() valid = self.exclude(pk__in=expired) valid_expired = expired.bulk_refresh() return valid_expired | valid def require_scopes(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with all requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ scopes = _process_scopes(scope_string) if not scopes: # asking for tokens with no scopes return self.filter(scopes__isnull=True) from .models import Scope scope_pks = Scope.objects.filter(name__in=scopes).values_list('pk', flat=True) if not len(scopes) == len(scope_pks): # there's a scope we don't recognize, so we can't have any tokens for it return self.none() tokens = self.all() for pk in scope_pks: tokens = tokens.filter(scopes__pk=pk) return tokens def require_scopes_exact(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with only the requested scopes. 
:rtype: :class:`esi.managers.TokenQueryset` """ num_scopes = len(_process_scopes(scope_string)) pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter( scopes__count=num_scopes).values('pk', 'scopes__id')] return self.filter(pk__in=pks) def equivalent_to(self, token): """ Gets all tokens which match the character and scopes of a reference token :param token: :class:`esi.models.Token` :return: :class:`esi.managers.TokenQueryset` """ return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter( models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk)
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.require_valid
python
def require_valid(self): expired = self.get_expired() valid = self.exclude(pk__in=expired) valid_expired = expired.bulk_refresh() return valid_expired | valid
Ensures all tokens are still valid. If expired, attempts to refresh. Deletes those which fail to refresh or cannot be refreshed. :return: All tokens which are still valid. :rtype: :class:`esi.managers.TokenQueryset`
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L60-L70
[ "def get_expired(self):\n \"\"\"\n Get all tokens which have expired.\n :return: All expired tokens.\n :rtype: :class:`esi.managers.TokenQueryset`\n \"\"\"\n max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION)\n return self.filter(created__lte=max_age)\n" ]
class TokenQueryset(models.QuerySet): def get_expired(self): """ Get all tokens which have expired. :return: All expired tokens. :rtype: :class:`esi.managers.TokenQueryset` """ max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION) return self.filter(created__lte=max_age) def bulk_refresh(self): """ Refreshes all refreshable tokens in the queryset. Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh. Excludes tokens for which the refresh was incomplete for other reasons. """ session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID) auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET) incomplete = [] for model in self.filter(refresh_token__isnull=False): try: model.refresh(session=session, auth=auth) logging.debug("Successfully refreshed {0}".format(repr(model))) except TokenError: logger.info("Refresh failed for {0}. Deleting.".format(repr(model))) model.delete() except IncompleteResponseError: incomplete.append(model.pk) self.filter(refresh_token__isnull=True).get_expired().delete() return self.exclude(pk__in=incomplete) def require_scopes(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with all requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ scopes = _process_scopes(scope_string) if not scopes: # asking for tokens with no scopes return self.filter(scopes__isnull=True) from .models import Scope scope_pks = Scope.objects.filter(name__in=scopes).values_list('pk', flat=True) if not len(scopes) == len(scope_pks): # there's a scope we don't recognize, so we can't have any tokens for it return self.none() tokens = self.all() for pk in scope_pks: tokens = tokens.filter(scopes__pk=pk) return tokens def require_scopes_exact(self, scope_string): """ :param scope_string: The required scopes. 
:type scope_string: Union[str, list] :return: The tokens with only the requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ num_scopes = len(_process_scopes(scope_string)) pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter( scopes__count=num_scopes).values('pk', 'scopes__id')] return self.filter(pk__in=pks) def equivalent_to(self, token): """ Gets all tokens which match the character and scopes of a reference token :param token: :class:`esi.models.Token` :return: :class:`esi.managers.TokenQueryset` """ return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter( models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk)
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.require_scopes
python
def require_scopes(self, scope_string): scopes = _process_scopes(scope_string) if not scopes: # asking for tokens with no scopes return self.filter(scopes__isnull=True) from .models import Scope scope_pks = Scope.objects.filter(name__in=scopes).values_list('pk', flat=True) if not len(scopes) == len(scope_pks): # there's a scope we don't recognize, so we can't have any tokens for it return self.none() tokens = self.all() for pk in scope_pks: tokens = tokens.filter(scopes__pk=pk) return tokens
:param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with all requested scopes. :rtype: :class:`esi.managers.TokenQueryset`
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L72-L91
[ "def _process_scopes(scopes):\n if scopes is None:\n # support filtering by no scopes with None passed\n scopes = []\n if not isinstance(scopes, models.QuerySet) and len(scopes) == 1:\n # support a single space-delimited string inside a list because :users:\n scopes = scopes[0]\n ...
class TokenQueryset(models.QuerySet): def get_expired(self): """ Get all tokens which have expired. :return: All expired tokens. :rtype: :class:`esi.managers.TokenQueryset` """ max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION) return self.filter(created__lte=max_age) def bulk_refresh(self): """ Refreshes all refreshable tokens in the queryset. Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh. Excludes tokens for which the refresh was incomplete for other reasons. """ session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID) auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET) incomplete = [] for model in self.filter(refresh_token__isnull=False): try: model.refresh(session=session, auth=auth) logging.debug("Successfully refreshed {0}".format(repr(model))) except TokenError: logger.info("Refresh failed for {0}. Deleting.".format(repr(model))) model.delete() except IncompleteResponseError: incomplete.append(model.pk) self.filter(refresh_token__isnull=True).get_expired().delete() return self.exclude(pk__in=incomplete) def require_valid(self): """ Ensures all tokens are still valid. If expired, attempts to refresh. Deletes those which fail to refresh or cannot be refreshed. :return: All tokens which are still valid. :rtype: :class:`esi.managers.TokenQueryset` """ expired = self.get_expired() valid = self.exclude(pk__in=expired) valid_expired = expired.bulk_refresh() return valid_expired | valid def require_scopes_exact(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with only the requested scopes. 
:rtype: :class:`esi.managers.TokenQueryset` """ num_scopes = len(_process_scopes(scope_string)) pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter( scopes__count=num_scopes).values('pk', 'scopes__id')] return self.filter(pk__in=pks) def equivalent_to(self, token): """ Gets all tokens which match the character and scopes of a reference token :param token: :class:`esi.models.Token` :return: :class:`esi.managers.TokenQueryset` """ return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter( models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk)
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.require_scopes_exact
python
def require_scopes_exact(self, scope_string): num_scopes = len(_process_scopes(scope_string)) pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter( scopes__count=num_scopes).values('pk', 'scopes__id')] return self.filter(pk__in=pks)
:param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with only the requested scopes. :rtype: :class:`esi.managers.TokenQueryset`
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L93-L103
[ "def _process_scopes(scopes):\n if scopes is None:\n # support filtering by no scopes with None passed\n scopes = []\n if not isinstance(scopes, models.QuerySet) and len(scopes) == 1:\n # support a single space-delimited string inside a list because :users:\n scopes = scopes[0]\n ...
class TokenQueryset(models.QuerySet): def get_expired(self): """ Get all tokens which have expired. :return: All expired tokens. :rtype: :class:`esi.managers.TokenQueryset` """ max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION) return self.filter(created__lte=max_age) def bulk_refresh(self): """ Refreshes all refreshable tokens in the queryset. Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh. Excludes tokens for which the refresh was incomplete for other reasons. """ session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID) auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET) incomplete = [] for model in self.filter(refresh_token__isnull=False): try: model.refresh(session=session, auth=auth) logging.debug("Successfully refreshed {0}".format(repr(model))) except TokenError: logger.info("Refresh failed for {0}. Deleting.".format(repr(model))) model.delete() except IncompleteResponseError: incomplete.append(model.pk) self.filter(refresh_token__isnull=True).get_expired().delete() return self.exclude(pk__in=incomplete) def require_valid(self): """ Ensures all tokens are still valid. If expired, attempts to refresh. Deletes those which fail to refresh or cannot be refreshed. :return: All tokens which are still valid. :rtype: :class:`esi.managers.TokenQueryset` """ expired = self.get_expired() valid = self.exclude(pk__in=expired) valid_expired = expired.bulk_refresh() return valid_expired | valid def require_scopes(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with all requested scopes. 
:rtype: :class:`esi.managers.TokenQueryset` """ scopes = _process_scopes(scope_string) if not scopes: # asking for tokens with no scopes return self.filter(scopes__isnull=True) from .models import Scope scope_pks = Scope.objects.filter(name__in=scopes).values_list('pk', flat=True) if not len(scopes) == len(scope_pks): # there's a scope we don't recognize, so we can't have any tokens for it return self.none() tokens = self.all() for pk in scope_pks: tokens = tokens.filter(scopes__pk=pk) return tokens def equivalent_to(self, token): """ Gets all tokens which match the character and scopes of a reference token :param token: :class:`esi.models.Token` :return: :class:`esi.managers.TokenQueryset` """ return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter( models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk)
Adarnof/adarnauth-esi
esi/managers.py
TokenQueryset.equivalent_to
python
def equivalent_to(self, token): return self.filter(character_id=token.character_id).require_scopes_exact(token.scopes.all()).filter( models.Q(user=token.user) | models.Q(user__isnull=True)).exclude(pk=token.pk)
Gets all tokens which match the character and scopes of a reference token :param token: :class:`esi.models.Token` :return: :class:`esi.managers.TokenQueryset`
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L105-L112
null
class TokenQueryset(models.QuerySet): def get_expired(self): """ Get all tokens which have expired. :return: All expired tokens. :rtype: :class:`esi.managers.TokenQueryset` """ max_age = timezone.now() - timedelta(seconds=app_settings.ESI_TOKEN_VALID_DURATION) return self.filter(created__lte=max_age) def bulk_refresh(self): """ Refreshes all refreshable tokens in the queryset. Deletes any tokens which fail to refresh. Deletes any tokens which are expired and cannot refresh. Excludes tokens for which the refresh was incomplete for other reasons. """ session = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID) auth = requests.auth.HTTPBasicAuth(app_settings.ESI_SSO_CLIENT_ID, app_settings.ESI_SSO_CLIENT_SECRET) incomplete = [] for model in self.filter(refresh_token__isnull=False): try: model.refresh(session=session, auth=auth) logging.debug("Successfully refreshed {0}".format(repr(model))) except TokenError: logger.info("Refresh failed for {0}. Deleting.".format(repr(model))) model.delete() except IncompleteResponseError: incomplete.append(model.pk) self.filter(refresh_token__isnull=True).get_expired().delete() return self.exclude(pk__in=incomplete) def require_valid(self): """ Ensures all tokens are still valid. If expired, attempts to refresh. Deletes those which fail to refresh or cannot be refreshed. :return: All tokens which are still valid. :rtype: :class:`esi.managers.TokenQueryset` """ expired = self.get_expired() valid = self.exclude(pk__in=expired) valid_expired = expired.bulk_refresh() return valid_expired | valid def require_scopes(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with all requested scopes. 
:rtype: :class:`esi.managers.TokenQueryset` """ scopes = _process_scopes(scope_string) if not scopes: # asking for tokens with no scopes return self.filter(scopes__isnull=True) from .models import Scope scope_pks = Scope.objects.filter(name__in=scopes).values_list('pk', flat=True) if not len(scopes) == len(scope_pks): # there's a scope we don't recognize, so we can't have any tokens for it return self.none() tokens = self.all() for pk in scope_pks: tokens = tokens.filter(scopes__pk=pk) return tokens def require_scopes_exact(self, scope_string): """ :param scope_string: The required scopes. :type scope_string: Union[str, list] :return: The tokens with only the requested scopes. :rtype: :class:`esi.managers.TokenQueryset` """ num_scopes = len(_process_scopes(scope_string)) pks = [v['pk'] for v in self.annotate(models.Count('scopes')).require_scopes(scope_string).filter( scopes__count=num_scopes).values('pk', 'scopes__id')] return self.filter(pk__in=pks)
Adarnof/adarnauth-esi
esi/managers.py
TokenManager.create_from_code
python
def create_from_code(self, code, user=None): # perform code exchange logger.debug("Creating new token from code {0}".format(code[:-5])) oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL) token = oauth.fetch_token(app_settings.ESI_TOKEN_URL, client_secret=app_settings.ESI_SSO_CLIENT_SECRET, code=code) r = oauth.request('get', app_settings.ESI_TOKEN_VERIFY_URL) r.raise_for_status() token_data = r.json() logger.debug(token_data) # translate returned data to a model model = self.create( character_id=token_data['CharacterID'], character_name=token_data['CharacterName'], character_owner_hash=token_data['CharacterOwnerHash'], access_token=token['access_token'], refresh_token=token['refresh_token'], token_type=token_data['TokenType'], user=user, ) # parse scopes if 'Scopes' in token_data: from esi.models import Scope for s in token_data['Scopes'].split(): try: scope = Scope.objects.get(name=s) model.scopes.add(scope) except Scope.DoesNotExist: # This scope isn't included in a data migration. Create a placeholder until it updates. try: help_text = s.split('.')[1].replace('_', ' ').capitalize() except IndexError: # Unusual scope name, missing periods. help_text = s.replace('_', ' ').capitalize() scope = Scope.objects.create(name=s, help_text=help_text) model.scopes.add(scope) logger.debug("Added {0} scopes to new token.".format(model.scopes.all().count())) if not app_settings.ESI_ALWAYS_CREATE_TOKEN: # see if we already have a token for this character and scope combination # if so, we don't need a new one queryset = self.get_queryset().equivalent_to(model) if queryset.exists(): logger.debug( "Identified {0} tokens equivalent to new token. Updating access and refresh tokens.".format( queryset.count())) queryset.update( access_token=model.access_token, refresh_token=model.refresh_token, created=model.created, ) if queryset.filter(user=model.user).exists(): logger.debug("Equivalent token with same user exists. 
Deleting new token.") model.delete() model = queryset.filter(user=model.user)[0] # pick one at random logger.debug("Successfully created {0} for user {1}".format(repr(model), user)) return model
Perform OAuth code exchange to retrieve a token. :param code: OAuth grant code. :param user: User who will own token. :return: :class:`esi.models.Token`
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L123-L189
[ "def get_queryset(self):\n \"\"\"\n Replace base queryset model with custom TokenQueryset\n :rtype: :class:`esi.managers.TokenQueryset`\n \"\"\"\n return TokenQueryset(self.model, using=self._db)\n" ]
class TokenManager(models.Manager): def get_queryset(self): """ Replace base queryset model with custom TokenQueryset :rtype: :class:`esi.managers.TokenQueryset` """ return TokenQueryset(self.model, using=self._db) def create_from_request(self, request): """ Generate a token from the OAuth callback request. Must contain 'code' in GET. :param request: OAuth callback request. :return: :class:`esi.models.Token` """ logger.debug("Creating new token for {0} session {1}".format(request.user, request.session.session_key[:5])) code = request.GET.get('code') # attach a user during creation for some functionality in a post_save created receiver I'm working on elsewhere model = self.create_from_code(code, user=request.user if request.user.is_authenticated else None) return model
Adarnof/adarnauth-esi
esi/managers.py
TokenManager.create_from_request
python
def create_from_request(self, request): logger.debug("Creating new token for {0} session {1}".format(request.user, request.session.session_key[:5])) code = request.GET.get('code') # attach a user during creation for some functionality in a post_save created receiver I'm working on elsewhere model = self.create_from_code(code, user=request.user if request.user.is_authenticated else None) return model
Generate a token from the OAuth callback request. Must contain 'code' in GET. :param request: OAuth callback request. :return: :class:`esi.models.Token`
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/managers.py#L191-L201
[ "def create_from_code(self, code, user=None):\n \"\"\"\n Perform OAuth code exchange to retrieve a token.\n :param code: OAuth grant code.\n :param user: User who will own token.\n :return: :class:`esi.models.Token`\n \"\"\"\n\n # perform code exchange\n logger.debug(\"Creating new token fro...
class TokenManager(models.Manager): def get_queryset(self): """ Replace base queryset model with custom TokenQueryset :rtype: :class:`esi.managers.TokenQueryset` """ return TokenQueryset(self.model, using=self._db) def create_from_code(self, code, user=None): """ Perform OAuth code exchange to retrieve a token. :param code: OAuth grant code. :param user: User who will own token. :return: :class:`esi.models.Token` """ # perform code exchange logger.debug("Creating new token from code {0}".format(code[:-5])) oauth = OAuth2Session(app_settings.ESI_SSO_CLIENT_ID, redirect_uri=app_settings.ESI_SSO_CALLBACK_URL) token = oauth.fetch_token(app_settings.ESI_TOKEN_URL, client_secret=app_settings.ESI_SSO_CLIENT_SECRET, code=code) r = oauth.request('get', app_settings.ESI_TOKEN_VERIFY_URL) r.raise_for_status() token_data = r.json() logger.debug(token_data) # translate returned data to a model model = self.create( character_id=token_data['CharacterID'], character_name=token_data['CharacterName'], character_owner_hash=token_data['CharacterOwnerHash'], access_token=token['access_token'], refresh_token=token['refresh_token'], token_type=token_data['TokenType'], user=user, ) # parse scopes if 'Scopes' in token_data: from esi.models import Scope for s in token_data['Scopes'].split(): try: scope = Scope.objects.get(name=s) model.scopes.add(scope) except Scope.DoesNotExist: # This scope isn't included in a data migration. Create a placeholder until it updates. try: help_text = s.split('.')[1].replace('_', ' ').capitalize() except IndexError: # Unusual scope name, missing periods. 
help_text = s.replace('_', ' ').capitalize() scope = Scope.objects.create(name=s, help_text=help_text) model.scopes.add(scope) logger.debug("Added {0} scopes to new token.".format(model.scopes.all().count())) if not app_settings.ESI_ALWAYS_CREATE_TOKEN: # see if we already have a token for this character and scope combination # if so, we don't need a new one queryset = self.get_queryset().equivalent_to(model) if queryset.exists(): logger.debug( "Identified {0} tokens equivalent to new token. Updating access and refresh tokens.".format( queryset.count())) queryset.update( access_token=model.access_token, refresh_token=model.refresh_token, created=model.created, ) if queryset.filter(user=model.user).exists(): logger.debug("Equivalent token with same user exists. Deleting new token.") model.delete() model = queryset.filter(user=model.user)[0] # pick one at random logger.debug("Successfully created {0} for user {1}".format(repr(model), user)) return model
Adarnof/adarnauth-esi
esi/decorators.py
tokens_required
python
def tokens_required(scopes='', new=False): def decorator(view_func): @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view(request, *args, **kwargs): # if we're coming back from SSO for a new token, return it token = _check_callback(request) if token and new: tokens = Token.objects.filter(pk=token.pk) logger.debug("Returning new token.") return view_func(request, tokens, *args, **kwargs) if not new: # ensure user logged in to check existing tokens if not request.user.is_authenticated: logger.debug( "Session {0} is not logged in. Redirecting to login.".format(request.session.session_key[:5])) from django.contrib.auth.views import redirect_to_login return redirect_to_login(request.get_full_path()) # collect tokens in db, check if still valid, return if any tokens = Token.objects.filter(user__pk=request.user.pk).require_scopes(scopes).require_valid() if tokens.exists(): logger.debug("Retrieved {0} tokens for {1} session {2}".format(tokens.count(), request.user, request.session.session_key[:5])) return view_func(request, tokens, *args, **kwargs) # trigger creation of new token via sso logger.debug("No tokens identified for {0} session {1}. Redirecting to SSO.".format(request.user, request.session.session_key[:5])) from esi.views import sso_redirect return sso_redirect(request, scopes=scopes) return _wrapped_view return decorator
Decorator for views to request an ESI Token. Accepts required scopes as a space-delimited string or list of strings of scope names. Can require a new token to be retrieved by SSO. Returns a QueryDict of Tokens.
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/decorators.py#L29-L71
null
from __future__ import unicode_literals from functools import wraps from django.utils.decorators import available_attrs from esi.models import Token, CallbackRedirect import logging logger = logging.getLogger(__name__) def _check_callback(request): # ensure session installed in database if not request.session.exists(request.session.session_key): logger.debug("Creating new session for {0}".format(request.user)) request.session.create() # clean up callback redirect, pass token if new requested try: model = CallbackRedirect.objects.get(session_key=request.session.session_key) token = Token.objects.get(pk=model.token.pk) model.delete() logger.debug( "Retrieved new token from callback for {0} session {1}".format(request.user, request.session.session_key[:5])) return token except (CallbackRedirect.DoesNotExist, Token.DoesNotExist, AttributeError): logger.debug("No callback for {0} session {1}".format(request.user, request.session.session_key[:5])) return None def token_required(scopes='', new=False): """ Decorator for views which supplies a single, user-selected token for the view to process. Same parameters as tokens_required. """ def decorator(view_func): @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view(request, *args, **kwargs): # if we're coming back from SSO for a new token, return it token = _check_callback(request) if token and new: logger.debug("Got new token from {0} session {1}. Returning to view.".format(request.user, request.session.session_key[:5])) return view_func(request, token, *args, **kwargs) # if we're selecting a token, return it if request.method == 'POST': if request.POST.get("_add", False): logger.debug("{0} has selected to add new token. 
Redirecting to SSO.".format(request.user)) # user has selected to add a new token from esi.views import sso_redirect return sso_redirect(request, scopes=scopes) token_pk = request.POST.get('_token', None) if token_pk: logger.debug("{0} has selected token {1}".format(request.user, token_pk)) try: token = Token.objects.get(pk=token_pk) # ensure token belongs to this user and has required scopes if ((token.user and token.user == request.user) or not token.user) and Token.objects.filter( pk=token_pk).require_scopes(scopes).require_valid().exists(): logger.debug("Selected token fulfills requirements of view. Returning.") return view_func(request, token, *args, **kwargs) except Token.DoesNotExist: logger.debug("Token {0} not found.".format(token_pk)) pass if not new: # present the user with token choices tokens = Token.objects.filter(user__pk=request.user.pk).require_scopes(scopes).require_valid() if tokens.exists(): logger.debug("Returning list of available tokens for {0}.".format(request.user)) from esi.views import select_token return select_token(request, scopes=scopes, new=new) else: logger.debug("No tokens found for {0} session {1} with scopes {2}".format(request.user, request.session.session_key[:5], scopes)) # prompt the user to add a new token logger.debug("Redirecting {0} session {1} to SSO.".format(request.user, request.session.session_key[:5])) from esi.views import sso_redirect return sso_redirect(request, scopes=scopes) return _wrapped_view return decorator
Adarnof/adarnauth-esi
esi/decorators.py
token_required
python
def token_required(scopes='', new=False): def decorator(view_func): @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view(request, *args, **kwargs): # if we're coming back from SSO for a new token, return it token = _check_callback(request) if token and new: logger.debug("Got new token from {0} session {1}. Returning to view.".format(request.user, request.session.session_key[:5])) return view_func(request, token, *args, **kwargs) # if we're selecting a token, return it if request.method == 'POST': if request.POST.get("_add", False): logger.debug("{0} has selected to add new token. Redirecting to SSO.".format(request.user)) # user has selected to add a new token from esi.views import sso_redirect return sso_redirect(request, scopes=scopes) token_pk = request.POST.get('_token', None) if token_pk: logger.debug("{0} has selected token {1}".format(request.user, token_pk)) try: token = Token.objects.get(pk=token_pk) # ensure token belongs to this user and has required scopes if ((token.user and token.user == request.user) or not token.user) and Token.objects.filter( pk=token_pk).require_scopes(scopes).require_valid().exists(): logger.debug("Selected token fulfills requirements of view. 
Returning.") return view_func(request, token, *args, **kwargs) except Token.DoesNotExist: logger.debug("Token {0} not found.".format(token_pk)) pass if not new: # present the user with token choices tokens = Token.objects.filter(user__pk=request.user.pk).require_scopes(scopes).require_valid() if tokens.exists(): logger.debug("Returning list of available tokens for {0}.".format(request.user)) from esi.views import select_token return select_token(request, scopes=scopes, new=new) else: logger.debug("No tokens found for {0} session {1} with scopes {2}".format(request.user, request.session.session_key[:5], scopes)) # prompt the user to add a new token logger.debug("Redirecting {0} session {1} to SSO.".format(request.user, request.session.session_key[:5])) from esi.views import sso_redirect return sso_redirect(request, scopes=scopes) return _wrapped_view return decorator
Decorator for views which supplies a single, user-selected token for the view to process. Same parameters as tokens_required.
train
https://github.com/Adarnof/adarnauth-esi/blob/f6618a31efbfeedeb96316ab9b82ecadda776ac1/esi/decorators.py#L74-L129
null
from __future__ import unicode_literals from functools import wraps from django.utils.decorators import available_attrs from esi.models import Token, CallbackRedirect import logging logger = logging.getLogger(__name__) def _check_callback(request): # ensure session installed in database if not request.session.exists(request.session.session_key): logger.debug("Creating new session for {0}".format(request.user)) request.session.create() # clean up callback redirect, pass token if new requested try: model = CallbackRedirect.objects.get(session_key=request.session.session_key) token = Token.objects.get(pk=model.token.pk) model.delete() logger.debug( "Retrieved new token from callback for {0} session {1}".format(request.user, request.session.session_key[:5])) return token except (CallbackRedirect.DoesNotExist, Token.DoesNotExist, AttributeError): logger.debug("No callback for {0} session {1}".format(request.user, request.session.session_key[:5])) return None def tokens_required(scopes='', new=False): """ Decorator for views to request an ESI Token. Accepts required scopes as a space-delimited string or list of strings of scope names. Can require a new token to be retrieved by SSO. Returns a QueryDict of Tokens. """ def decorator(view_func): @wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view(request, *args, **kwargs): # if we're coming back from SSO for a new token, return it token = _check_callback(request) if token and new: tokens = Token.objects.filter(pk=token.pk) logger.debug("Returning new token.") return view_func(request, tokens, *args, **kwargs) if not new: # ensure user logged in to check existing tokens if not request.user.is_authenticated: logger.debug( "Session {0} is not logged in. 
Redirecting to login.".format(request.session.session_key[:5])) from django.contrib.auth.views import redirect_to_login return redirect_to_login(request.get_full_path()) # collect tokens in db, check if still valid, return if any tokens = Token.objects.filter(user__pk=request.user.pk).require_scopes(scopes).require_valid() if tokens.exists(): logger.debug("Retrieved {0} tokens for {1} session {2}".format(tokens.count(), request.user, request.session.session_key[:5])) return view_func(request, tokens, *args, **kwargs) # trigger creation of new token via sso logger.debug("No tokens identified for {0} session {1}. Redirecting to SSO.".format(request.user, request.session.session_key[:5])) from esi.views import sso_redirect return sso_redirect(request, scopes=scopes) return _wrapped_view return decorator
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.getFileAndName
python
def getFileAndName(self, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName) for the content at the target URL. The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs) return pgctnt, hName
Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName) for the content at the target URL. The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L279-L290
[ "def getFileNameMime(self, requestedUrl, *args, **kwargs):\n\t'''\n\tGive a requested page (note: the arguments for this call are forwarded to getpage()),\n\treturn the content at the target URL, the filename for the target content, and\n\tthe mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName,...
class WebGetRobust( ChromiumMixin.WebGetCrMixin, SeleniumChromiumMixin.WebGetSeleniumChromiumMixin, ): COOKIEFILE = 'cookies.lwp' # the path and filename to save your cookies in cj = None cookielib = None opener = None errorOutCount = 1 # retryDelay = 0.1 retryDelay = 0.01 data = None # creds is a list of 3-tuples that gets inserted into the password manager. # it is structured [(top_level_url1, username1, password1), (top_level_url2, username2, password2)] def __init__(self, creds = None, logPath = "Main.WebRequest", cookie_lock = None, cloudflare = True, auto_waf = True, use_socks = False, alt_cookiejar = None, custom_ua = None, ): super().__init__() self.rules = {} self.rules['auto_waf'] = cloudflare or auto_waf if cookie_lock: self.cookie_lock = cookie_lock elif alt_cookiejar: self.log.info("External cookie-jar specified. Not forcing cookiejar serialization.") self.cookie_lock = None else: self.cookie_lock = COOKIEWRITELOCK self.use_socks = use_socks # Override the global default socket timeout, so hung connections will actually time out properly. socket.setdefaulttimeout(5) self.log = logging.getLogger(logPath) # print("Webget init! 
Logpath = ", logPath) if custom_ua: self.log.info("User agent overridden!") self.browserHeaders = custom_ua else: # Due to general internet people douchebaggyness, I've basically said to hell with it and decided to spoof a whole assortment of browsers # It should keep people from blocking this scraper *too* easily self.browserHeaders = UA_Constants.getUserAgent() self.data = urllib.parse.urlencode(self.browserHeaders) if creds: print("Have credentials, installing password manager into urllib handler.") passManager = urllib.request.HTTPPasswordMgrWithDefaultRealm() for url, username, password in creds: passManager.add_password(None, url, username, password) self.credHandler = Handlers.PreemptiveBasicAuthHandler(passManager) else: self.credHandler = None self.alt_cookiejar = alt_cookiejar self.__loadCookies() def getpage(self, requestedUrl, *args, **kwargs): try: return self.__getpage(requestedUrl, *args, **kwargs) except Exceptions.CloudFlareWrapper: if self.rules['auto_waf']: self.log.warning("Cloudflare failure! Doing automatic step-through.") if not self.stepThroughCloudFlareWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through cloudflare!", requestedUrl) # Cloudflare cookie set, retrieve again return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Cloudflare without step-through setting!") raise except Exceptions.SucuriWrapper: # print("Sucuri!") if self.rules['auto_waf']: self.log.warning("Sucuri failure! 
Doing automatic step-through.") if not self.stepThroughSucuriWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through Sucuri WAF bullshit!", requestedUrl) return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Sucuri without step-through setting!") raise def chunkReport(self, bytesSoFar, totalSize): if totalSize: percent = float(bytesSoFar) / totalSize percent = round(percent * 100, 2) self.log.info("Downloaded %d of %d bytes (%0.2f%%)" % (bytesSoFar, totalSize, percent)) else: self.log.info("Downloaded %d bytes" % (bytesSoFar)) def __chunkRead(self, response, chunkSize=2 ** 18, reportHook=None): contentLengthHeader = response.info().getheader('Content-Length') if contentLengthHeader: totalSize = contentLengthHeader.strip() totalSize = int(totalSize) else: totalSize = None bytesSoFar = 0 pgContent = "" while 1: chunk = response.read(chunkSize) pgContent += chunk bytesSoFar += len(chunk) if not chunk: break if reportHook: reportHook(bytesSoFar, chunkSize, totalSize) return pgContent def getSoup(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!", requestedUrl) page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): raise Exceptions.ContentTypeError("Received content not decoded! 
Cannot parse!", requestedUrl) soup = utility.as_soup(page) return soup def getJson(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) attempts = 0 while 1: try: page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): page = page.decode(utility.determine_json_encoding(page)) # raise ValueError("Received content not decoded! Cannot parse!") page = page.strip() ret = json.loads(page) return ret except ValueError: if attempts < 1: attempts += 1 self.log.error("JSON Parsing issue retrieving content from page!") for line in traceback.format_exc().split("\n"): self.log.error("%s", line.rstrip()) self.log.error("Retrying!") # Scramble our current UA self.browserHeaders = UA_Constants.getUserAgent() if self.alt_cookiejar: self.cj.init_agent(new_headers=self.browserHeaders) time.sleep(self.retryDelay) else: self.log.error("JSON Parsing issue, and retries exhausted!") # self.log.error("Page content:") # self.log.error(page) # with open("Error-ctnt-{}.json".format(time.time()), "w") as tmp_err_fp: # tmp_err_fp.write(page) raise def getSoupNoRedirects(self, *args, **kwargs): if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple'") if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!") kwargs['returnMultiple'] = True tgt_url = kwargs.get('requestedUrl', None) if not tgt_url: tgt_url = args[0] page, handle = self.getpage(*args, **kwargs) redirurl = handle.geturl() if redirurl != tgt_url: self.log.error("Requested %s, redirected to %s. 
Raising error", tgt_url, redirurl) raise Exceptions.RedirectedError("Requested %s, redirected to %s" % ( tgt_url, redirurl)) soup = as_soup(page) return soup def getFileNameMime(self, requestedUrl, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL, the filename for the target content, and the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getFileAndName cannot be called with 'returnMultiple'", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getFileAndName contradicts the 'soup' directive!", requestedUrl) kwargs["returnMultiple"] = True pgctnt, pghandle = self.getpage(requestedUrl, *args, **kwargs) info = pghandle.info() if not 'Content-Disposition' in info: hName = '' elif not 'filename=' in info['Content-Disposition']: hName = '' else: hName = info['Content-Disposition'].split('filename=')[1] # Unquote filename if it's quoted. if ((hName.startswith("'") and hName.endswith("'")) or hName.startswith('"') and hName.endswith('"')) and len(hName) >= 2: hName = hName[1:-1] mime = info.get_content_type() if not hName.strip(): requestedUrl = pghandle.geturl() hName = urllib.parse.urlsplit(requestedUrl).path.split("/")[-1].strip() if "/" in hName: hName = hName.split("/")[-1] return pgctnt, hName, mime def getItem(self, itemUrl): content, handle = self.getpage(itemUrl, returnMultiple=True) if not content or not handle: raise urllib.error.URLError("Failed to retreive file from page '%s'!" 
% itemUrl) handle_info = handle.info() if handle_info['Content-Disposition'] and 'filename=' in handle_info['Content-Disposition'].lower(): fileN = handle_info['Content-Disposition'].split("=", 1)[-1] else: fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1]) fileN = bs4.UnicodeDammit(fileN).unicode_markup mType = handle_info['Content-Type'] # If there is an encoding in the content-type (or any other info), strip it out. # We don't care about the encoding, since WebFunctions will already have handled that, # and returned a decoded unicode object. if mType and ";" in mType: mType = mType.split(";")[0].strip() # *sigh*. So minus.com is fucking up their http headers, and apparently urlencoding the # mime type, because apparently they're shit at things. # Anyways, fix that. if mType and '%2F' in mType: mType = mType.replace('%2F', '/') self.log.info("Retreived file of type '%s', name of '%s' with a size of %0.3f K", mType, fileN, len(content)/1000.0) return content, fileN, mType def getHead(self, url, addlHeaders=None): self.log.warning("TODO: Fixme this neds to be migrated to use the normal fetch interface, so it is WAF-aware.") for x in range(9999): try: self.log.info("Doing HTTP HEAD request for '%s'", url) pgreq = self.__buildRequest(url, None, addlHeaders, None, req_class=Handlers.HeadRequest) pghandle = self.opener.open(pgreq, timeout=30) returl = pghandle.geturl() if returl != url: self.log.info("HEAD request returned a different URL '%s'", returl) return returl except socket.timeout as e: self.log.info("Timeout, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("Timout when fetching content", url) except urllib.error.URLError as e: # Continue even in the face of cloudflare crapping it's pants if e.code == 500 and e.geturl(): return e.geturl() self.log.info("URLError, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise 
Exceptions.FetchFailureError("URLError when fetching content", e.geturl(), err_code=e.code) ###################################################################################################################################################### ###################################################################################################################################################### def __check_suc_cookie(self, components): ''' This is only called if we're on a known sucuri-"protected" site. As such, if we do *not* have a sucuri cloudproxy cookie, we can assume we need to do the normal WAF step-through. ''' netloc = components.netloc.lower() for cookie in self.cj: if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) or (cookie.domain.lower().endswith("127.0.0.1") and ( components.path == "/sucuri_shit_3" or components.path == "/sucuri_shit_2" ))): # Allow testing if "sucuri_cloudproxy_uuid_" in cookie.name: return self.log.info("Missing cloudproxy cookie for known sucuri wrapped site. Doing a pre-emptive chromium fetch.") raise Exceptions.SucuriWrapper("WAF Shit", str(components)) def __check_cf_cookie(self, components): netloc = components.netloc.lower() # TODO: Implement me? # for cookie in self.cj: # if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) # or (cookie.domain.lower().endswith("127.0.0.1") and components.path == "/sucuri_shit_2")): # Allow testing # if "sucuri_cloudproxy_uuid_" in cookie.name: # return # print("Target cookie!") # print("K -> V: %s -> %s" % (cookie.name, cookie.value)) # print(cookie) # print(type(cookie)) # print(cookie.domain) # raise RuntimeError pass def __pre_check(self, requestedUrl): ''' Allow the pre-emptive fetching of sites with a full browser if they're known to be dick hosters. 
''' components = urllib.parse.urlsplit(requestedUrl) netloc_l = components.netloc.lower() if netloc_l in Domain_Constants.SUCURI_GARBAGE_SITE_NETLOCS: self.__check_suc_cookie(components) elif netloc_l in Domain_Constants.CF_GARBAGE_SITE_NETLOCS: self.__check_cf_cookie(components) elif components.path == '/sucuri_shit_2': self.__check_suc_cookie(components) elif components.path == '/sucuri_shit_3': self.__check_suc_cookie(components) elif components.path == '/cloudflare_under_attack_shit_2': self.__check_cf_cookie(components) elif components.path == '/cloudflare_under_attack_shit_3': self.__check_cf_cookie(components) def __getpage(self, requestedUrl, **kwargs): self.__pre_check(requestedUrl) self.log.info("Fetching content at URL: %s", requestedUrl) # strip trailing and leading spaces. requestedUrl = requestedUrl.strip() # If we have 'soup' as a param, just pop it, and call `getSoup()`. if 'soup' in kwargs and kwargs['soup']: self.log.warning("'soup' kwarg is depreciated. Please use the `getSoup()` call instead.") kwargs.pop('soup') return self.getSoup(requestedUrl, **kwargs) # Decode the kwargs values addlHeaders = kwargs.setdefault("addlHeaders", None) returnMultiple = kwargs.setdefault("returnMultiple", False) callBack = kwargs.setdefault("callBack", None) postData = kwargs.setdefault("postData", None) retryQuantity = kwargs.setdefault("retryQuantity", None) nativeError = kwargs.setdefault("nativeError", False) binaryForm = kwargs.setdefault("binaryForm", False) # Conditionally encode the referrer if needed, because otherwise # urllib will barf on unicode referrer values. 
if addlHeaders and 'Referer' in addlHeaders: addlHeaders['Referer'] = iri2uri.iri2uri(addlHeaders['Referer']) retryCount = 0 err_content = None err_reason = None err_code = None while 1: pgctnt = None pghandle = None pgreq = self.__buildRequest(requestedUrl, postData, addlHeaders, binaryForm) errored = False lastErr = "" retryCount = retryCount + 1 if (retryQuantity and retryCount > retryQuantity) or (not retryQuantity and retryCount > self.errorOutCount): self.log.error("Failed to retrieve Website : %s at %s All Attempts Exhausted", pgreq.get_full_url(), time.ctime(time.time())) pgctnt = None try: self.log.critical("Critical Failure to retrieve page! %s at %s, attempt %s", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.critical("Error: %s", lastErr) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") break #print "execution", retryCount try: # print("Getpage!", requestedUrl, kwargs) pghandle = self.opener.open(pgreq, timeout=30) # Get Webpage # print("Gotpage") except Exceptions.GarbageSiteWrapper as err: # print("garbage site:") raise err except urllib.error.HTTPError as err: # Lotta logging self.log.warning("Error opening page: %s at %s On Attempt %s.", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.warning("Error Code: %s", err) if err.fp: err_content = err.fp.read() encoded = err.hdrs.get('Content-Encoding', None) if encoded: _, err_content = self.__decompressContent(encoded, err_content) err_reason = err.reason err_code = err.code lastErr = err try: self.log.warning("Original URL: %s", requestedUrl) errored = True except: self.log.warning("And the URL could not be printed due to an encoding error") if err.code == 404: #print "Unrecoverable - Page not found. Breaking" self.log.critical("Unrecoverable - Page not found. 
Breaking") break time.sleep(self.retryDelay) if err.code == 503: if err_content: self._check_waf(err_content, requestedUrl) # So I've been seeing this causing CF to bounce too. # As such, poke through those via chromium too. if err.code == 502: if err_content: self._check_waf(err_content, requestedUrl) except UnicodeEncodeError: self.log.critical("Unrecoverable Unicode issue retrieving page - %s", requestedUrl) for line in traceback.format_exc().split("\n"): self.log.critical("%s", line.rstrip()) self.log.critical("Parameters:") self.log.critical(" requestedUrl: '%s'", requestedUrl) self.log.critical(" postData: '%s'", postData) self.log.critical(" addlHeaders: '%s'", addlHeaders) self.log.critical(" binaryForm: '%s'", binaryForm) err_reason = "Unicode Decode Error" err_code = -1 err_content = traceback.format_exc() break except Exception as e: errored = True #traceback.print_exc() lastErr = sys.exc_info() self.log.warning("Retreival failed. Traceback:") self.log.warning(str(lastErr)) self.log.warning(traceback.format_exc()) self.log.warning("Error Retrieving Page! - Trying again - Waiting %s seconds", self.retryDelay) try: self.log.critical("Error on page - %s", requestedUrl) except: self.log.critical("And the URL could not be printed due to an encoding error") time.sleep(self.retryDelay) err_reason = "Unhandled general exception" err_code = -1 err_content = traceback.format_exc() continue if pghandle != None: self.log.info("Request for URL: %s succeeded at %s On Attempt %s. 
Recieving...", pgreq.get_full_url(), time.ctime(time.time()), retryCount) pgctnt = self.__retreiveContent(pgreq, pghandle, callBack) # if __retreiveContent did not return false, it managed to fetch valid results, so break if pgctnt != False: break if errored and pghandle != None: print(("Later attempt succeeded %s" % pgreq.get_full_url())) elif (errored or not pgctnt) and pghandle is None: if lastErr and nativeError: raise lastErr raise Exceptions.FetchFailureError("Failed to retreive page", requestedUrl, err_content=err_content, err_code=err_code, err_reason=err_reason) if returnMultiple: return pgctnt, pghandle else: return pgctnt ###################################################################################################################################################### ###################################################################################################################################################### def __decode_text_content(self, pageContent, cType): # this *should* probably be done using a parser. # However, it seems to be grossly overkill to shove the whole page (which can be quite large) through a parser just to pull out a tag that # should be right near the page beginning anyways. # As such, it's a regular expression for the moment # Regex is of bytes type, since we can't convert a string to unicode until we know the encoding the # bytes string is using, and we need the regex to get that encoding coding = re.search(b"charset=[\'\"]?([a-zA-Z0-9\-]*)[\'\"]?", pageContent, flags=re.IGNORECASE) cType = b"" charset = None try: if coding: cType = coding.group(1) codecs.lookup(cType.decode("ascii")) charset = cType.decode("ascii") except LookupError: # I'm actually not sure what I was thinking when I wrote this if statement. I don't think it'll ever trigger. if (b";" in cType) and (b"=" in cType): # the server is reporting an encoding. 
Now we use it to decode the dummy_docType, charset = cType.split(b";") charset = charset.split(b"=")[-1] if cchardet: inferred = cchardet.detect(pageContent) if inferred and inferred['confidence'] is None: # If we couldn't infer a charset, just short circuit and return the content. # It's probably binary. return pageContent elif inferred and inferred['confidence'] is not None and inferred['confidence'] > 0.8: charset = inferred['encoding'] self.log.info("Cchardet inferred encoding: %s", charset) else: self.log.warning("Missing cchardet!") if not charset: self.log.warning("Could not find encoding information on page - Using default charset. Shit may break!") charset = "utf-8" try: pageContent = str(pageContent, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pageContent = pageContent.decode('utf-8', errors='ignore') return pageContent def __buildRequest(self, pgreq, postData, addlHeaders, binaryForm, req_class = None): if req_class is None: req_class = urllib.request.Request pgreq = iri2uri.iri2uri(pgreq) try: params = {} headers = {} if postData != None: self.log.info("Making a post-request! 
Params: '%s'", postData) if isinstance(postData, str): params['data'] = postData.encode("utf-8") elif isinstance(postData, dict): for key, parameter in postData.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) params['data'] = urllib.parse.urlencode(postData).encode("utf-8") if addlHeaders != None: self.log.info("Have additional GET parameters!") for key, parameter in addlHeaders.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) headers = addlHeaders if binaryForm: self.log.info("Binary form submission!") if 'data' in params: raise Exceptions.ArgumentError("You cannot make a binary form post and a plain post request at the same time!", pgreq) params['data'] = binaryForm.make_result() headers['Content-type'] = binaryForm.get_content_type() headers['Content-length'] = len(params['data']) return req_class(pgreq, headers=headers, **params) except: self.log.critical("Invalid header or url") raise def __decompressContent(self, coding, pgctnt): """ This is really obnoxious """ #preLen = len(pgctnt) if coding == 'deflate': compType = "deflate" bits_opts = [ -zlib.MAX_WBITS, # deflate zlib.MAX_WBITS, # zlib zlib.MAX_WBITS | 16, # gzip zlib.MAX_WBITS | 32, # "automatic header detection" 0, # Try to guess from header # Try all the raw window options. -8, -9, -10, -11, -12, -13, -14, -15, # Stream with zlib headers 8, 9, 10, 11, 12, 13, 14, 15, # With gzip header+trailer 8+16, 9+16, 10+16, 11+16, 12+16, 13+16, 14+16, 15+16, # Automatic detection 8+32, 9+32, 10+32, 11+32, 12+32, 13+32, 14+32, 15+32, ] err = None for wbits_val in bits_opts: try: pgctnt = zlib.decompress(pgctnt, wbits_val) return compType, pgctnt except zlib.error as e: err = e # We can't get here without err having thrown. raise err elif coding == 'gzip': compType = "gzip" buf = io.BytesIO(pgctnt) f = gzip.GzipFile(fileobj=buf) pgctnt = f.read() elif coding == "sdch": raise Exceptions.ContentTypeError("Wait, someone other then google actually supports SDCH compression (%s)?" 
% pgreq) else: compType = "none" return compType, pgctnt def __decodeTextContent(self, pgctnt, cType): if cType: if (";" in cType) and ("=" in cType): # the server is reporting an encoding. Now we use it to decode the content # Some wierdos put two charsets in their headers: # `text/html;Charset=UTF-8;charset=UTF-8` # Split, and take the first two entries. docType, charset = cType.split(";")[:2] charset = charset.split("=")[-1] # Only decode content marked as text (yeah, google is serving zip files # with the content-disposition charset header specifying "UTF-8") or # specifically allowed other content types I know are really text. decode = ['application/atom+xml', 'application/xml', "application/json", 'text'] if any([item in docType for item in decode]): try: pgctnt = str(pgctnt, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pgctnt = pgctnt.decode('utf-8', errors='ignore') else: # The server is not reporting an encoding in the headers. # Use content-aware mechanisms for determing the content encoding. if "text/html" in cType or \ 'text/javascript' in cType or \ 'text/css' in cType or \ 'application/json' in cType or \ 'application/xml' in cType or \ 'application/atom+xml' in cType or \ cType.startswith("text/"): # If this is a html/text page, we want to decode it using the local encoding pgctnt = self.__decode_text_content(pgctnt, cType) elif "text" in cType: self.log.critical("Unknown content type!") self.log.critical(cType) else: self.log.critical("No content disposition header!") self.log.critical("Cannot guess content type!") return pgctnt def __retreiveContent(self, pgreq, pghandle, callBack): try: # If we have a progress callback, call it for chunked read. # Otherwise, just read in the entire content. 
if callBack: pgctnt = self.__chunkRead(pghandle, 2 ** 17, reportHook=callBack) else: pgctnt = pghandle.read() if pgctnt is None: return False self.log.info("URL fully retrieved.") preDecompSize = len(pgctnt)/1000.0 encoded = pghandle.headers.get('Content-Encoding') compType, pgctnt = self.__decompressContent(encoded, pgctnt) decompSize = len(pgctnt)/1000.0 # self.log.info("Page content type = %s", type(pgctnt)) cType = pghandle.headers.get("Content-Type") if compType == 'none': self.log.info("Compression type = %s. Content Size = %0.3fK. File type: %s.", compType, decompSize, cType) else: self.log.info("Compression type = %s. Content Size compressed = %0.3fK. Decompressed = %0.3fK. File type: %s.", compType, preDecompSize, decompSize, cType) self._check_waf(pgctnt, pgreq.get_full_url()) pgctnt = self.__decodeTextContent(pgctnt, cType) return pgctnt except Exceptions.GarbageSiteWrapper as err: raise err except Exception: self.log.error("Exception!") self.log.error(str(sys.exc_info())) traceback.print_exc() self.log.error("Error Retrieving Page! - Transfer failed. Waiting %s seconds before retrying", self.retryDelay) try: self.log.critical("Critical Failure to retrieve page! %s at %s", pgreq.get_full_url(), time.ctime(time.time())) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") self.log.error(pghandle) time.sleep(self.retryDelay) return False # HUGE GOD-FUNCTION. # OH GOD FIXME. # postData expects a dict # addlHeaders also expects a dict def _check_waf(self, pageContent, pageUrl): assert isinstance(pageContent, bytes), "Item pageContent must be of type bytes, received %s" % (type(pageContent), ) assert isinstance(pageUrl, str), "Item pageUrl must be of type str, received %s" % (type(pageUrl), ) if b"sucuri_cloudproxy_js=" in pageContent: raise Exceptions.SucuriWrapper("WAF Shit", pageUrl) if b'This process is automatic. Your browser will redirect to your requested content shortly.' 
in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) if b'is currently offline. However, because the site uses Cloudflare\'s Always Online' in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) ###################################################################################################################################################### ###################################################################################################################################################### def __loadCookies(self): if self.alt_cookiejar is not None: self.alt_cookiejar.init_agent(new_headers=self.browserHeaders) self.cj = self.alt_cookiejar else: self.cj = http.cookiejar.LWPCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods if self.cj is not None: if os.path.isfile(self.COOKIEFILE): try: self.__updateCookiesFromFile() # self.log.info("Loading CookieJar") except: self.log.critical("Cookie file is corrupt/damaged?") try: os.remove(self.COOKIEFILE) except FileNotFoundError: pass if http.cookiejar is not None: # self.log.info("Installing CookieJar") self.log.debug(self.cj) cookieHandler = urllib.request.HTTPCookieProcessor(self.cj) args = (cookieHandler, Handlers.HTTPRedirectHandler) if self.credHandler: print("Have cred handler. 
Building opener using it") args += (self.credHandler, ) if self.use_socks: print("Using Socks handler") if not HAVE_SOCKS: raise RuntimeError("SOCKS Use specified, and no socks installed!") args = (SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050), ) + args self.opener = urllib.request.build_opener(*args) #self.opener.addheaders = [('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')] self.opener.addheaders = self.browserHeaders #urllib2.install_opener(self.opener) for cookie in self.cj: self.log.debug(cookie) #print cookie def _syncCookiesFromFile(self): # self.log.info("Synchronizing cookies with cookieFile.") if os.path.isfile(self.COOKIEFILE): self.cj.save("cookietemp.lwp") self.cj.load(self.COOKIEFILE) self.cj.load("cookietemp.lwp") # First, load any changed cookies so we don't overwrite them # However, we want to persist any cookies that we have that are more recent then the saved cookies, so we temporarily save # the cookies in memory to a temp-file, then load the cookiefile, and finally overwrite the loaded cookies with the ones from the # temp file def __updateCookiesFromFile(self): if os.path.exists(self.COOKIEFILE): # self.log.info("Synchronizing cookies with cookieFile.") self.cj.load(self.COOKIEFILE) # Update cookies from cookiefile def addCookie(self, inCookie): self.log.info("Updating cookie!") self.cj.set_cookie(inCookie) def addSeleniumCookie(self, cookieDict): ''' Install a cookie exported from a selenium webdriver into the active opener ''' # print cookieDict cookie = http.cookiejar.Cookie( version = 0, name = cookieDict['name'], value = cookieDict['value'], port = None, port_specified = False, domain = cookieDict['domain'], domain_specified = True, domain_initial_dot = False, path = cookieDict['path'], path_specified = False, secure = cookieDict['secure'], expires = cookieDict['expiry'] if 'expiry' in cookieDict else None, discard = False, comment = None, comment_url = None, rest = {"httponly":"%s" % cookieDict['httponly'] if 
'httponly' in cookieDict else False}, rfc2109 = False ) self.addCookie(cookie) def saveCookies(self, halting=False): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return # print("Have %d cookies before saving cookiejar" % len(self.cj)) try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() # self.log.info("Have cookies to save") for cookie in self.cj: # print(cookie) # print(cookie.expires) if isinstance(cookie.expires, int) and cookie.expires > 30000000000: # Clamp cookies that expire stupidly far in the future because people are assholes cookie.expires = 30000000000 # self.log.info("Calling save function") self.cj.save(self.COOKIEFILE) # save the cookies again # self.log.info("Cookies Saved") else: self.log.info("No cookies to save?") except Exception as e: pass # The destructor call order is too incoherent, and shit fails # during the teardown with null-references. The error printout is # not informative, so just silence it. # print("Possible error on exit (or just the destructor): '%s'." 
% e) finally: if self.cookie_lock: self.cookie_lock.release() # print("Have %d cookies after saving cookiejar" % len(self.cj)) if not halting: self._syncCookiesFromFile() # print "Have %d cookies after reloading cookiejar" % len(self.cj) def clearCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return try: self.cj.clear() self.cj.save(self.COOKIEFILE) # save the cookies again self.cj.save("cookietemp.lwp") self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() def getCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: raise RuntimeError("Could not acquire lock on cookiejar") try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() return self.cj ###################################################################################################################################################### ###################################################################################################################################################### def __del__(self): # print "WGH Destructor called!" # print("WebRequest __del__") self.saveCookies(halting=True) sup = super() if hasattr(sup, '__del__'): sup.__del__() def stepThroughCloudFlareWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains='Just a moment...') def stepThroughSucuriWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains="You are being redirected...") def stepThroughJsWaf(self, *args, **kwargs): # Shim to the underlying web browser of choice return self.stepThroughJsWaf_bare_chromium(*args, **kwargs) # Compat for old code. def stepThroughCloudFlare(self, *args, **kwargs): return self.stepThroughJsWaf(*args, **kwargs)
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.getFileNameMime
python
def getFileNameMime(self, requestedUrl, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL, the filename for the target content, and the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getFileAndName cannot be called with 'returnMultiple'", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getFileAndName contradicts the 'soup' directive!", requestedUrl) kwargs["returnMultiple"] = True pgctnt, pghandle = self.getpage(requestedUrl, *args, **kwargs) info = pghandle.info() if not 'Content-Disposition' in info: hName = '' elif not 'filename=' in info['Content-Disposition']: hName = '' else: hName = info['Content-Disposition'].split('filename=')[1] # Unquote filename if it's quoted. if ((hName.startswith("'") and hName.endswith("'")) or hName.startswith('"') and hName.endswith('"')) and len(hName) >= 2: hName = hName[1:-1] mime = info.get_content_type() if not hName.strip(): requestedUrl = pghandle.geturl() hName = urllib.parse.urlsplit(requestedUrl).path.split("/")[-1].strip() if "/" in hName: hName = hName.split("/")[-1] return pgctnt, hName, mime
Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL, the filename for the target content, and the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L292-L334
[ "def getpage(self, requestedUrl, *args, **kwargs):\n\ttry:\n\t\treturn self.__getpage(requestedUrl, *args, **kwargs)\n\n\texcept Exceptions.CloudFlareWrapper:\n\t\tif self.rules['auto_waf']:\n\t\t\tself.log.warning(\"Cloudflare failure! Doing automatic step-through.\")\n\t\t\tif not self.stepThroughCloudFlareWaf(re...
class WebGetRobust( ChromiumMixin.WebGetCrMixin, SeleniumChromiumMixin.WebGetSeleniumChromiumMixin, ): COOKIEFILE = 'cookies.lwp' # the path and filename to save your cookies in cj = None cookielib = None opener = None errorOutCount = 1 # retryDelay = 0.1 retryDelay = 0.01 data = None # creds is a list of 3-tuples that gets inserted into the password manager. # it is structured [(top_level_url1, username1, password1), (top_level_url2, username2, password2)] def __init__(self, creds = None, logPath = "Main.WebRequest", cookie_lock = None, cloudflare = True, auto_waf = True, use_socks = False, alt_cookiejar = None, custom_ua = None, ): super().__init__() self.rules = {} self.rules['auto_waf'] = cloudflare or auto_waf if cookie_lock: self.cookie_lock = cookie_lock elif alt_cookiejar: self.log.info("External cookie-jar specified. Not forcing cookiejar serialization.") self.cookie_lock = None else: self.cookie_lock = COOKIEWRITELOCK self.use_socks = use_socks # Override the global default socket timeout, so hung connections will actually time out properly. socket.setdefaulttimeout(5) self.log = logging.getLogger(logPath) # print("Webget init! 
Logpath = ", logPath) if custom_ua: self.log.info("User agent overridden!") self.browserHeaders = custom_ua else: # Due to general internet people douchebaggyness, I've basically said to hell with it and decided to spoof a whole assortment of browsers # It should keep people from blocking this scraper *too* easily self.browserHeaders = UA_Constants.getUserAgent() self.data = urllib.parse.urlencode(self.browserHeaders) if creds: print("Have credentials, installing password manager into urllib handler.") passManager = urllib.request.HTTPPasswordMgrWithDefaultRealm() for url, username, password in creds: passManager.add_password(None, url, username, password) self.credHandler = Handlers.PreemptiveBasicAuthHandler(passManager) else: self.credHandler = None self.alt_cookiejar = alt_cookiejar self.__loadCookies() def getpage(self, requestedUrl, *args, **kwargs): try: return self.__getpage(requestedUrl, *args, **kwargs) except Exceptions.CloudFlareWrapper: if self.rules['auto_waf']: self.log.warning("Cloudflare failure! Doing automatic step-through.") if not self.stepThroughCloudFlareWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through cloudflare!", requestedUrl) # Cloudflare cookie set, retrieve again return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Cloudflare without step-through setting!") raise except Exceptions.SucuriWrapper: # print("Sucuri!") if self.rules['auto_waf']: self.log.warning("Sucuri failure! 
Doing automatic step-through.") if not self.stepThroughSucuriWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through Sucuri WAF bullshit!", requestedUrl) return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Sucuri without step-through setting!") raise def chunkReport(self, bytesSoFar, totalSize): if totalSize: percent = float(bytesSoFar) / totalSize percent = round(percent * 100, 2) self.log.info("Downloaded %d of %d bytes (%0.2f%%)" % (bytesSoFar, totalSize, percent)) else: self.log.info("Downloaded %d bytes" % (bytesSoFar)) def __chunkRead(self, response, chunkSize=2 ** 18, reportHook=None): contentLengthHeader = response.info().getheader('Content-Length') if contentLengthHeader: totalSize = contentLengthHeader.strip() totalSize = int(totalSize) else: totalSize = None bytesSoFar = 0 pgContent = "" while 1: chunk = response.read(chunkSize) pgContent += chunk bytesSoFar += len(chunk) if not chunk: break if reportHook: reportHook(bytesSoFar, chunkSize, totalSize) return pgContent def getSoup(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!", requestedUrl) page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): raise Exceptions.ContentTypeError("Received content not decoded! 
Cannot parse!", requestedUrl) soup = utility.as_soup(page) return soup def getJson(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) attempts = 0 while 1: try: page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): page = page.decode(utility.determine_json_encoding(page)) # raise ValueError("Received content not decoded! Cannot parse!") page = page.strip() ret = json.loads(page) return ret except ValueError: if attempts < 1: attempts += 1 self.log.error("JSON Parsing issue retrieving content from page!") for line in traceback.format_exc().split("\n"): self.log.error("%s", line.rstrip()) self.log.error("Retrying!") # Scramble our current UA self.browserHeaders = UA_Constants.getUserAgent() if self.alt_cookiejar: self.cj.init_agent(new_headers=self.browserHeaders) time.sleep(self.retryDelay) else: self.log.error("JSON Parsing issue, and retries exhausted!") # self.log.error("Page content:") # self.log.error(page) # with open("Error-ctnt-{}.json".format(time.time()), "w") as tmp_err_fp: # tmp_err_fp.write(page) raise def getSoupNoRedirects(self, *args, **kwargs): if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple'") if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!") kwargs['returnMultiple'] = True tgt_url = kwargs.get('requestedUrl', None) if not tgt_url: tgt_url = args[0] page, handle = self.getpage(*args, **kwargs) redirurl = handle.geturl() if redirurl != tgt_url: self.log.error("Requested %s, redirected to %s. 
Raising error", tgt_url, redirurl) raise Exceptions.RedirectedError("Requested %s, redirected to %s" % ( tgt_url, redirurl)) soup = as_soup(page) return soup def getFileAndName(self, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName) for the content at the target URL. The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs) return pgctnt, hName def getItem(self, itemUrl): content, handle = self.getpage(itemUrl, returnMultiple=True) if not content or not handle: raise urllib.error.URLError("Failed to retreive file from page '%s'!" % itemUrl) handle_info = handle.info() if handle_info['Content-Disposition'] and 'filename=' in handle_info['Content-Disposition'].lower(): fileN = handle_info['Content-Disposition'].split("=", 1)[-1] else: fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1]) fileN = bs4.UnicodeDammit(fileN).unicode_markup mType = handle_info['Content-Type'] # If there is an encoding in the content-type (or any other info), strip it out. # We don't care about the encoding, since WebFunctions will already have handled that, # and returned a decoded unicode object. if mType and ";" in mType: mType = mType.split(";")[0].strip() # *sigh*. So minus.com is fucking up their http headers, and apparently urlencoding the # mime type, because apparently they're shit at things. # Anyways, fix that. 
if mType and '%2F' in mType: mType = mType.replace('%2F', '/') self.log.info("Retreived file of type '%s', name of '%s' with a size of %0.3f K", mType, fileN, len(content)/1000.0) return content, fileN, mType def getHead(self, url, addlHeaders=None): self.log.warning("TODO: Fixme this neds to be migrated to use the normal fetch interface, so it is WAF-aware.") for x in range(9999): try: self.log.info("Doing HTTP HEAD request for '%s'", url) pgreq = self.__buildRequest(url, None, addlHeaders, None, req_class=Handlers.HeadRequest) pghandle = self.opener.open(pgreq, timeout=30) returl = pghandle.geturl() if returl != url: self.log.info("HEAD request returned a different URL '%s'", returl) return returl except socket.timeout as e: self.log.info("Timeout, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("Timout when fetching content", url) except urllib.error.URLError as e: # Continue even in the face of cloudflare crapping it's pants if e.code == 500 and e.geturl(): return e.geturl() self.log.info("URLError, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("URLError when fetching content", e.geturl(), err_code=e.code) ###################################################################################################################################################### ###################################################################################################################################################### def __check_suc_cookie(self, components): ''' This is only called if we're on a known sucuri-"protected" site. As such, if we do *not* have a sucuri cloudproxy cookie, we can assume we need to do the normal WAF step-through. 
''' netloc = components.netloc.lower() for cookie in self.cj: if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) or (cookie.domain.lower().endswith("127.0.0.1") and ( components.path == "/sucuri_shit_3" or components.path == "/sucuri_shit_2" ))): # Allow testing if "sucuri_cloudproxy_uuid_" in cookie.name: return self.log.info("Missing cloudproxy cookie for known sucuri wrapped site. Doing a pre-emptive chromium fetch.") raise Exceptions.SucuriWrapper("WAF Shit", str(components)) def __check_cf_cookie(self, components): netloc = components.netloc.lower() # TODO: Implement me? # for cookie in self.cj: # if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) # or (cookie.domain.lower().endswith("127.0.0.1") and components.path == "/sucuri_shit_2")): # Allow testing # if "sucuri_cloudproxy_uuid_" in cookie.name: # return # print("Target cookie!") # print("K -> V: %s -> %s" % (cookie.name, cookie.value)) # print(cookie) # print(type(cookie)) # print(cookie.domain) # raise RuntimeError pass def __pre_check(self, requestedUrl): ''' Allow the pre-emptive fetching of sites with a full browser if they're known to be dick hosters. ''' components = urllib.parse.urlsplit(requestedUrl) netloc_l = components.netloc.lower() if netloc_l in Domain_Constants.SUCURI_GARBAGE_SITE_NETLOCS: self.__check_suc_cookie(components) elif netloc_l in Domain_Constants.CF_GARBAGE_SITE_NETLOCS: self.__check_cf_cookie(components) elif components.path == '/sucuri_shit_2': self.__check_suc_cookie(components) elif components.path == '/sucuri_shit_3': self.__check_suc_cookie(components) elif components.path == '/cloudflare_under_attack_shit_2': self.__check_cf_cookie(components) elif components.path == '/cloudflare_under_attack_shit_3': self.__check_cf_cookie(components) def __getpage(self, requestedUrl, **kwargs): self.__pre_check(requestedUrl) self.log.info("Fetching content at URL: %s", requestedUrl) # strip trailing and leading spaces. 
requestedUrl = requestedUrl.strip() # If we have 'soup' as a param, just pop it, and call `getSoup()`. if 'soup' in kwargs and kwargs['soup']: self.log.warning("'soup' kwarg is depreciated. Please use the `getSoup()` call instead.") kwargs.pop('soup') return self.getSoup(requestedUrl, **kwargs) # Decode the kwargs values addlHeaders = kwargs.setdefault("addlHeaders", None) returnMultiple = kwargs.setdefault("returnMultiple", False) callBack = kwargs.setdefault("callBack", None) postData = kwargs.setdefault("postData", None) retryQuantity = kwargs.setdefault("retryQuantity", None) nativeError = kwargs.setdefault("nativeError", False) binaryForm = kwargs.setdefault("binaryForm", False) # Conditionally encode the referrer if needed, because otherwise # urllib will barf on unicode referrer values. if addlHeaders and 'Referer' in addlHeaders: addlHeaders['Referer'] = iri2uri.iri2uri(addlHeaders['Referer']) retryCount = 0 err_content = None err_reason = None err_code = None while 1: pgctnt = None pghandle = None pgreq = self.__buildRequest(requestedUrl, postData, addlHeaders, binaryForm) errored = False lastErr = "" retryCount = retryCount + 1 if (retryQuantity and retryCount > retryQuantity) or (not retryQuantity and retryCount > self.errorOutCount): self.log.error("Failed to retrieve Website : %s at %s All Attempts Exhausted", pgreq.get_full_url(), time.ctime(time.time())) pgctnt = None try: self.log.critical("Critical Failure to retrieve page! 
%s at %s, attempt %s", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.critical("Error: %s", lastErr) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") break #print "execution", retryCount try: # print("Getpage!", requestedUrl, kwargs) pghandle = self.opener.open(pgreq, timeout=30) # Get Webpage # print("Gotpage") except Exceptions.GarbageSiteWrapper as err: # print("garbage site:") raise err except urllib.error.HTTPError as err: # Lotta logging self.log.warning("Error opening page: %s at %s On Attempt %s.", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.warning("Error Code: %s", err) if err.fp: err_content = err.fp.read() encoded = err.hdrs.get('Content-Encoding', None) if encoded: _, err_content = self.__decompressContent(encoded, err_content) err_reason = err.reason err_code = err.code lastErr = err try: self.log.warning("Original URL: %s", requestedUrl) errored = True except: self.log.warning("And the URL could not be printed due to an encoding error") if err.code == 404: #print "Unrecoverable - Page not found. Breaking" self.log.critical("Unrecoverable - Page not found. Breaking") break time.sleep(self.retryDelay) if err.code == 503: if err_content: self._check_waf(err_content, requestedUrl) # So I've been seeing this causing CF to bounce too. # As such, poke through those via chromium too. 
if err.code == 502: if err_content: self._check_waf(err_content, requestedUrl) except UnicodeEncodeError: self.log.critical("Unrecoverable Unicode issue retrieving page - %s", requestedUrl) for line in traceback.format_exc().split("\n"): self.log.critical("%s", line.rstrip()) self.log.critical("Parameters:") self.log.critical(" requestedUrl: '%s'", requestedUrl) self.log.critical(" postData: '%s'", postData) self.log.critical(" addlHeaders: '%s'", addlHeaders) self.log.critical(" binaryForm: '%s'", binaryForm) err_reason = "Unicode Decode Error" err_code = -1 err_content = traceback.format_exc() break except Exception as e: errored = True #traceback.print_exc() lastErr = sys.exc_info() self.log.warning("Retreival failed. Traceback:") self.log.warning(str(lastErr)) self.log.warning(traceback.format_exc()) self.log.warning("Error Retrieving Page! - Trying again - Waiting %s seconds", self.retryDelay) try: self.log.critical("Error on page - %s", requestedUrl) except: self.log.critical("And the URL could not be printed due to an encoding error") time.sleep(self.retryDelay) err_reason = "Unhandled general exception" err_code = -1 err_content = traceback.format_exc() continue if pghandle != None: self.log.info("Request for URL: %s succeeded at %s On Attempt %s. 
Recieving...", pgreq.get_full_url(), time.ctime(time.time()), retryCount) pgctnt = self.__retreiveContent(pgreq, pghandle, callBack) # if __retreiveContent did not return false, it managed to fetch valid results, so break if pgctnt != False: break if errored and pghandle != None: print(("Later attempt succeeded %s" % pgreq.get_full_url())) elif (errored or not pgctnt) and pghandle is None: if lastErr and nativeError: raise lastErr raise Exceptions.FetchFailureError("Failed to retreive page", requestedUrl, err_content=err_content, err_code=err_code, err_reason=err_reason) if returnMultiple: return pgctnt, pghandle else: return pgctnt ###################################################################################################################################################### ###################################################################################################################################################### def __decode_text_content(self, pageContent, cType): # this *should* probably be done using a parser. # However, it seems to be grossly overkill to shove the whole page (which can be quite large) through a parser just to pull out a tag that # should be right near the page beginning anyways. # As such, it's a regular expression for the moment # Regex is of bytes type, since we can't convert a string to unicode until we know the encoding the # bytes string is using, and we need the regex to get that encoding coding = re.search(b"charset=[\'\"]?([a-zA-Z0-9\-]*)[\'\"]?", pageContent, flags=re.IGNORECASE) cType = b"" charset = None try: if coding: cType = coding.group(1) codecs.lookup(cType.decode("ascii")) charset = cType.decode("ascii") except LookupError: # I'm actually not sure what I was thinking when I wrote this if statement. I don't think it'll ever trigger. if (b";" in cType) and (b"=" in cType): # the server is reporting an encoding. 
Now we use it to decode the dummy_docType, charset = cType.split(b";") charset = charset.split(b"=")[-1] if cchardet: inferred = cchardet.detect(pageContent) if inferred and inferred['confidence'] is None: # If we couldn't infer a charset, just short circuit and return the content. # It's probably binary. return pageContent elif inferred and inferred['confidence'] is not None and inferred['confidence'] > 0.8: charset = inferred['encoding'] self.log.info("Cchardet inferred encoding: %s", charset) else: self.log.warning("Missing cchardet!") if not charset: self.log.warning("Could not find encoding information on page - Using default charset. Shit may break!") charset = "utf-8" try: pageContent = str(pageContent, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pageContent = pageContent.decode('utf-8', errors='ignore') return pageContent def __buildRequest(self, pgreq, postData, addlHeaders, binaryForm, req_class = None): if req_class is None: req_class = urllib.request.Request pgreq = iri2uri.iri2uri(pgreq) try: params = {} headers = {} if postData != None: self.log.info("Making a post-request! 
Params: '%s'", postData) if isinstance(postData, str): params['data'] = postData.encode("utf-8") elif isinstance(postData, dict): for key, parameter in postData.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) params['data'] = urllib.parse.urlencode(postData).encode("utf-8") if addlHeaders != None: self.log.info("Have additional GET parameters!") for key, parameter in addlHeaders.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) headers = addlHeaders if binaryForm: self.log.info("Binary form submission!") if 'data' in params: raise Exceptions.ArgumentError("You cannot make a binary form post and a plain post request at the same time!", pgreq) params['data'] = binaryForm.make_result() headers['Content-type'] = binaryForm.get_content_type() headers['Content-length'] = len(params['data']) return req_class(pgreq, headers=headers, **params) except: self.log.critical("Invalid header or url") raise def __decompressContent(self, coding, pgctnt): """ This is really obnoxious """ #preLen = len(pgctnt) if coding == 'deflate': compType = "deflate" bits_opts = [ -zlib.MAX_WBITS, # deflate zlib.MAX_WBITS, # zlib zlib.MAX_WBITS | 16, # gzip zlib.MAX_WBITS | 32, # "automatic header detection" 0, # Try to guess from header # Try all the raw window options. -8, -9, -10, -11, -12, -13, -14, -15, # Stream with zlib headers 8, 9, 10, 11, 12, 13, 14, 15, # With gzip header+trailer 8+16, 9+16, 10+16, 11+16, 12+16, 13+16, 14+16, 15+16, # Automatic detection 8+32, 9+32, 10+32, 11+32, 12+32, 13+32, 14+32, 15+32, ] err = None for wbits_val in bits_opts: try: pgctnt = zlib.decompress(pgctnt, wbits_val) return compType, pgctnt except zlib.error as e: err = e # We can't get here without err having thrown. raise err elif coding == 'gzip': compType = "gzip" buf = io.BytesIO(pgctnt) f = gzip.GzipFile(fileobj=buf) pgctnt = f.read() elif coding == "sdch": raise Exceptions.ContentTypeError("Wait, someone other then google actually supports SDCH compression (%s)?" 
% pgreq) else: compType = "none" return compType, pgctnt def __decodeTextContent(self, pgctnt, cType): if cType: if (";" in cType) and ("=" in cType): # the server is reporting an encoding. Now we use it to decode the content # Some wierdos put two charsets in their headers: # `text/html;Charset=UTF-8;charset=UTF-8` # Split, and take the first two entries. docType, charset = cType.split(";")[:2] charset = charset.split("=")[-1] # Only decode content marked as text (yeah, google is serving zip files # with the content-disposition charset header specifying "UTF-8") or # specifically allowed other content types I know are really text. decode = ['application/atom+xml', 'application/xml', "application/json", 'text'] if any([item in docType for item in decode]): try: pgctnt = str(pgctnt, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pgctnt = pgctnt.decode('utf-8', errors='ignore') else: # The server is not reporting an encoding in the headers. # Use content-aware mechanisms for determing the content encoding. if "text/html" in cType or \ 'text/javascript' in cType or \ 'text/css' in cType or \ 'application/json' in cType or \ 'application/xml' in cType or \ 'application/atom+xml' in cType or \ cType.startswith("text/"): # If this is a html/text page, we want to decode it using the local encoding pgctnt = self.__decode_text_content(pgctnt, cType) elif "text" in cType: self.log.critical("Unknown content type!") self.log.critical(cType) else: self.log.critical("No content disposition header!") self.log.critical("Cannot guess content type!") return pgctnt def __retreiveContent(self, pgreq, pghandle, callBack): try: # If we have a progress callback, call it for chunked read. # Otherwise, just read in the entire content. 
if callBack: pgctnt = self.__chunkRead(pghandle, 2 ** 17, reportHook=callBack) else: pgctnt = pghandle.read() if pgctnt is None: return False self.log.info("URL fully retrieved.") preDecompSize = len(pgctnt)/1000.0 encoded = pghandle.headers.get('Content-Encoding') compType, pgctnt = self.__decompressContent(encoded, pgctnt) decompSize = len(pgctnt)/1000.0 # self.log.info("Page content type = %s", type(pgctnt)) cType = pghandle.headers.get("Content-Type") if compType == 'none': self.log.info("Compression type = %s. Content Size = %0.3fK. File type: %s.", compType, decompSize, cType) else: self.log.info("Compression type = %s. Content Size compressed = %0.3fK. Decompressed = %0.3fK. File type: %s.", compType, preDecompSize, decompSize, cType) self._check_waf(pgctnt, pgreq.get_full_url()) pgctnt = self.__decodeTextContent(pgctnt, cType) return pgctnt except Exceptions.GarbageSiteWrapper as err: raise err except Exception: self.log.error("Exception!") self.log.error(str(sys.exc_info())) traceback.print_exc() self.log.error("Error Retrieving Page! - Transfer failed. Waiting %s seconds before retrying", self.retryDelay) try: self.log.critical("Critical Failure to retrieve page! %s at %s", pgreq.get_full_url(), time.ctime(time.time())) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") self.log.error(pghandle) time.sleep(self.retryDelay) return False # HUGE GOD-FUNCTION. # OH GOD FIXME. # postData expects a dict # addlHeaders also expects a dict def _check_waf(self, pageContent, pageUrl): assert isinstance(pageContent, bytes), "Item pageContent must be of type bytes, received %s" % (type(pageContent), ) assert isinstance(pageUrl, str), "Item pageUrl must be of type str, received %s" % (type(pageUrl), ) if b"sucuri_cloudproxy_js=" in pageContent: raise Exceptions.SucuriWrapper("WAF Shit", pageUrl) if b'This process is automatic. Your browser will redirect to your requested content shortly.' 
in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) if b'is currently offline. However, because the site uses Cloudflare\'s Always Online' in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) ###################################################################################################################################################### ###################################################################################################################################################### def __loadCookies(self): if self.alt_cookiejar is not None: self.alt_cookiejar.init_agent(new_headers=self.browserHeaders) self.cj = self.alt_cookiejar else: self.cj = http.cookiejar.LWPCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods if self.cj is not None: if os.path.isfile(self.COOKIEFILE): try: self.__updateCookiesFromFile() # self.log.info("Loading CookieJar") except: self.log.critical("Cookie file is corrupt/damaged?") try: os.remove(self.COOKIEFILE) except FileNotFoundError: pass if http.cookiejar is not None: # self.log.info("Installing CookieJar") self.log.debug(self.cj) cookieHandler = urllib.request.HTTPCookieProcessor(self.cj) args = (cookieHandler, Handlers.HTTPRedirectHandler) if self.credHandler: print("Have cred handler. 
Building opener using it") args += (self.credHandler, ) if self.use_socks: print("Using Socks handler") if not HAVE_SOCKS: raise RuntimeError("SOCKS Use specified, and no socks installed!") args = (SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050), ) + args self.opener = urllib.request.build_opener(*args) #self.opener.addheaders = [('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')] self.opener.addheaders = self.browserHeaders #urllib2.install_opener(self.opener) for cookie in self.cj: self.log.debug(cookie) #print cookie def _syncCookiesFromFile(self): # self.log.info("Synchronizing cookies with cookieFile.") if os.path.isfile(self.COOKIEFILE): self.cj.save("cookietemp.lwp") self.cj.load(self.COOKIEFILE) self.cj.load("cookietemp.lwp") # First, load any changed cookies so we don't overwrite them # However, we want to persist any cookies that we have that are more recent then the saved cookies, so we temporarily save # the cookies in memory to a temp-file, then load the cookiefile, and finally overwrite the loaded cookies with the ones from the # temp file def __updateCookiesFromFile(self): if os.path.exists(self.COOKIEFILE): # self.log.info("Synchronizing cookies with cookieFile.") self.cj.load(self.COOKIEFILE) # Update cookies from cookiefile def addCookie(self, inCookie): self.log.info("Updating cookie!") self.cj.set_cookie(inCookie) def addSeleniumCookie(self, cookieDict): ''' Install a cookie exported from a selenium webdriver into the active opener ''' # print cookieDict cookie = http.cookiejar.Cookie( version = 0, name = cookieDict['name'], value = cookieDict['value'], port = None, port_specified = False, domain = cookieDict['domain'], domain_specified = True, domain_initial_dot = False, path = cookieDict['path'], path_specified = False, secure = cookieDict['secure'], expires = cookieDict['expiry'] if 'expiry' in cookieDict else None, discard = False, comment = None, comment_url = None, rest = {"httponly":"%s" % cookieDict['httponly'] if 
'httponly' in cookieDict else False}, rfc2109 = False ) self.addCookie(cookie) def saveCookies(self, halting=False): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return # print("Have %d cookies before saving cookiejar" % len(self.cj)) try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() # self.log.info("Have cookies to save") for cookie in self.cj: # print(cookie) # print(cookie.expires) if isinstance(cookie.expires, int) and cookie.expires > 30000000000: # Clamp cookies that expire stupidly far in the future because people are assholes cookie.expires = 30000000000 # self.log.info("Calling save function") self.cj.save(self.COOKIEFILE) # save the cookies again # self.log.info("Cookies Saved") else: self.log.info("No cookies to save?") except Exception as e: pass # The destructor call order is too incoherent, and shit fails # during the teardown with null-references. The error printout is # not informative, so just silence it. # print("Possible error on exit (or just the destructor): '%s'." 
% e) finally: if self.cookie_lock: self.cookie_lock.release() # print("Have %d cookies after saving cookiejar" % len(self.cj)) if not halting: self._syncCookiesFromFile() # print "Have %d cookies after reloading cookiejar" % len(self.cj) def clearCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return try: self.cj.clear() self.cj.save(self.COOKIEFILE) # save the cookies again self.cj.save("cookietemp.lwp") self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() def getCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: raise RuntimeError("Could not acquire lock on cookiejar") try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() return self.cj ###################################################################################################################################################### ###################################################################################################################################################### def __del__(self): # print "WGH Destructor called!" # print("WebRequest __del__") self.saveCookies(halting=True) sup = super() if hasattr(sup, '__del__'): sup.__del__() def stepThroughCloudFlareWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains='Just a moment...') def stepThroughSucuriWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains="You are being redirected...") def stepThroughJsWaf(self, *args, **kwargs): # Shim to the underlying web browser of choice return self.stepThroughJsWaf_bare_chromium(*args, **kwargs) # Compat for old code. def stepThroughCloudFlare(self, *args, **kwargs): return self.stepThroughJsWaf(*args, **kwargs)
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.__check_suc_cookie
python
def __check_suc_cookie(self, components):
	'''
	Invoked only for sites already known to sit behind sucuri's WAF wrapper.
	If no sucuri cloudproxy cookie is present for the target host, we know
	up-front that the normal WAF step-through will be needed, so signal that
	immediately by raising SucuriWrapper (caught by the fetch machinery).
	'''
	target_host = components.netloc.lower()
	test_paths = ("/sucuri_shit_3", "/sucuri_shit_2")
	for ck in self.cj:
		if not ck.domain_specified:
			continue
		ck_domain = ck.domain.lower()
		# Cookies scoped to 127.0.0.1 on the known magic paths are accepted
		# so the local test-suite can exercise this code path.
		domain_ok = ck_domain.endswith(target_host) or (
			ck_domain.endswith("127.0.0.1") and components.path in test_paths
		)
		if domain_ok and "sucuri_cloudproxy_uuid_" in ck.name:
			# Found a cloudproxy cookie for this site — nothing to do.
			return
	self.log.info("Missing cloudproxy cookie for known sucuri wrapped site. Doing a pre-emptive chromium fetch.")
	raise Exceptions.SucuriWrapper("WAF Shit", str(components))
This is only called if we're on a known sucuri-"protected" site. As such, if we do *not* have a sucuri cloudproxy cookie, we can assume we need to do the normal WAF step-through.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L398-L413
null
class WebGetRobust( ChromiumMixin.WebGetCrMixin, SeleniumChromiumMixin.WebGetSeleniumChromiumMixin, ): COOKIEFILE = 'cookies.lwp' # the path and filename to save your cookies in cj = None cookielib = None opener = None errorOutCount = 1 # retryDelay = 0.1 retryDelay = 0.01 data = None # creds is a list of 3-tuples that gets inserted into the password manager. # it is structured [(top_level_url1, username1, password1), (top_level_url2, username2, password2)] def __init__(self, creds = None, logPath = "Main.WebRequest", cookie_lock = None, cloudflare = True, auto_waf = True, use_socks = False, alt_cookiejar = None, custom_ua = None, ): super().__init__() self.rules = {} self.rules['auto_waf'] = cloudflare or auto_waf if cookie_lock: self.cookie_lock = cookie_lock elif alt_cookiejar: self.log.info("External cookie-jar specified. Not forcing cookiejar serialization.") self.cookie_lock = None else: self.cookie_lock = COOKIEWRITELOCK self.use_socks = use_socks # Override the global default socket timeout, so hung connections will actually time out properly. socket.setdefaulttimeout(5) self.log = logging.getLogger(logPath) # print("Webget init! 
Logpath = ", logPath) if custom_ua: self.log.info("User agent overridden!") self.browserHeaders = custom_ua else: # Due to general internet people douchebaggyness, I've basically said to hell with it and decided to spoof a whole assortment of browsers # It should keep people from blocking this scraper *too* easily self.browserHeaders = UA_Constants.getUserAgent() self.data = urllib.parse.urlencode(self.browserHeaders) if creds: print("Have credentials, installing password manager into urllib handler.") passManager = urllib.request.HTTPPasswordMgrWithDefaultRealm() for url, username, password in creds: passManager.add_password(None, url, username, password) self.credHandler = Handlers.PreemptiveBasicAuthHandler(passManager) else: self.credHandler = None self.alt_cookiejar = alt_cookiejar self.__loadCookies() def getpage(self, requestedUrl, *args, **kwargs): try: return self.__getpage(requestedUrl, *args, **kwargs) except Exceptions.CloudFlareWrapper: if self.rules['auto_waf']: self.log.warning("Cloudflare failure! Doing automatic step-through.") if not self.stepThroughCloudFlareWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through cloudflare!", requestedUrl) # Cloudflare cookie set, retrieve again return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Cloudflare without step-through setting!") raise except Exceptions.SucuriWrapper: # print("Sucuri!") if self.rules['auto_waf']: self.log.warning("Sucuri failure! 
Doing automatic step-through.") if not self.stepThroughSucuriWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through Sucuri WAF bullshit!", requestedUrl) return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Sucuri without step-through setting!") raise def chunkReport(self, bytesSoFar, totalSize): if totalSize: percent = float(bytesSoFar) / totalSize percent = round(percent * 100, 2) self.log.info("Downloaded %d of %d bytes (%0.2f%%)" % (bytesSoFar, totalSize, percent)) else: self.log.info("Downloaded %d bytes" % (bytesSoFar)) def __chunkRead(self, response, chunkSize=2 ** 18, reportHook=None): contentLengthHeader = response.info().getheader('Content-Length') if contentLengthHeader: totalSize = contentLengthHeader.strip() totalSize = int(totalSize) else: totalSize = None bytesSoFar = 0 pgContent = "" while 1: chunk = response.read(chunkSize) pgContent += chunk bytesSoFar += len(chunk) if not chunk: break if reportHook: reportHook(bytesSoFar, chunkSize, totalSize) return pgContent def getSoup(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!", requestedUrl) page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): raise Exceptions.ContentTypeError("Received content not decoded! 
Cannot parse!", requestedUrl) soup = utility.as_soup(page) return soup def getJson(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) attempts = 0 while 1: try: page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): page = page.decode(utility.determine_json_encoding(page)) # raise ValueError("Received content not decoded! Cannot parse!") page = page.strip() ret = json.loads(page) return ret except ValueError: if attempts < 1: attempts += 1 self.log.error("JSON Parsing issue retrieving content from page!") for line in traceback.format_exc().split("\n"): self.log.error("%s", line.rstrip()) self.log.error("Retrying!") # Scramble our current UA self.browserHeaders = UA_Constants.getUserAgent() if self.alt_cookiejar: self.cj.init_agent(new_headers=self.browserHeaders) time.sleep(self.retryDelay) else: self.log.error("JSON Parsing issue, and retries exhausted!") # self.log.error("Page content:") # self.log.error(page) # with open("Error-ctnt-{}.json".format(time.time()), "w") as tmp_err_fp: # tmp_err_fp.write(page) raise def getSoupNoRedirects(self, *args, **kwargs): if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple'") if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!") kwargs['returnMultiple'] = True tgt_url = kwargs.get('requestedUrl', None) if not tgt_url: tgt_url = args[0] page, handle = self.getpage(*args, **kwargs) redirurl = handle.geturl() if redirurl != tgt_url: self.log.error("Requested %s, redirected to %s. 
Raising error", tgt_url, redirurl) raise Exceptions.RedirectedError("Requested %s, redirected to %s" % ( tgt_url, redirurl)) soup = as_soup(page) return soup def getFileAndName(self, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName) for the content at the target URL. The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs) return pgctnt, hName def getFileNameMime(self, requestedUrl, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL, the filename for the target content, and the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getFileAndName cannot be called with 'returnMultiple'", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getFileAndName contradicts the 'soup' directive!", requestedUrl) kwargs["returnMultiple"] = True pgctnt, pghandle = self.getpage(requestedUrl, *args, **kwargs) info = pghandle.info() if not 'Content-Disposition' in info: hName = '' elif not 'filename=' in info['Content-Disposition']: hName = '' else: hName = info['Content-Disposition'].split('filename=')[1] # Unquote filename if it's quoted. 
if ((hName.startswith("'") and hName.endswith("'")) or hName.startswith('"') and hName.endswith('"')) and len(hName) >= 2: hName = hName[1:-1] mime = info.get_content_type() if not hName.strip(): requestedUrl = pghandle.geturl() hName = urllib.parse.urlsplit(requestedUrl).path.split("/")[-1].strip() if "/" in hName: hName = hName.split("/")[-1] return pgctnt, hName, mime def getItem(self, itemUrl): content, handle = self.getpage(itemUrl, returnMultiple=True) if not content or not handle: raise urllib.error.URLError("Failed to retreive file from page '%s'!" % itemUrl) handle_info = handle.info() if handle_info['Content-Disposition'] and 'filename=' in handle_info['Content-Disposition'].lower(): fileN = handle_info['Content-Disposition'].split("=", 1)[-1] else: fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1]) fileN = bs4.UnicodeDammit(fileN).unicode_markup mType = handle_info['Content-Type'] # If there is an encoding in the content-type (or any other info), strip it out. # We don't care about the encoding, since WebFunctions will already have handled that, # and returned a decoded unicode object. if mType and ";" in mType: mType = mType.split(";")[0].strip() # *sigh*. So minus.com is fucking up their http headers, and apparently urlencoding the # mime type, because apparently they're shit at things. # Anyways, fix that. 
if mType and '%2F' in mType: mType = mType.replace('%2F', '/') self.log.info("Retreived file of type '%s', name of '%s' with a size of %0.3f K", mType, fileN, len(content)/1000.0) return content, fileN, mType def getHead(self, url, addlHeaders=None): self.log.warning("TODO: Fixme this neds to be migrated to use the normal fetch interface, so it is WAF-aware.") for x in range(9999): try: self.log.info("Doing HTTP HEAD request for '%s'", url) pgreq = self.__buildRequest(url, None, addlHeaders, None, req_class=Handlers.HeadRequest) pghandle = self.opener.open(pgreq, timeout=30) returl = pghandle.geturl() if returl != url: self.log.info("HEAD request returned a different URL '%s'", returl) return returl except socket.timeout as e: self.log.info("Timeout, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("Timout when fetching content", url) except urllib.error.URLError as e: # Continue even in the face of cloudflare crapping it's pants if e.code == 500 and e.geturl(): return e.geturl() self.log.info("URLError, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("URLError when fetching content", e.geturl(), err_code=e.code) ###################################################################################################################################################### ###################################################################################################################################################### def __check_cf_cookie(self, components): netloc = components.netloc.lower() # TODO: Implement me? 
# for cookie in self.cj: # if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) # or (cookie.domain.lower().endswith("127.0.0.1") and components.path == "/sucuri_shit_2")): # Allow testing # if "sucuri_cloudproxy_uuid_" in cookie.name: # return # print("Target cookie!") # print("K -> V: %s -> %s" % (cookie.name, cookie.value)) # print(cookie) # print(type(cookie)) # print(cookie.domain) # raise RuntimeError pass def __pre_check(self, requestedUrl): ''' Allow the pre-emptive fetching of sites with a full browser if they're known to be dick hosters. ''' components = urllib.parse.urlsplit(requestedUrl) netloc_l = components.netloc.lower() if netloc_l in Domain_Constants.SUCURI_GARBAGE_SITE_NETLOCS: self.__check_suc_cookie(components) elif netloc_l in Domain_Constants.CF_GARBAGE_SITE_NETLOCS: self.__check_cf_cookie(components) elif components.path == '/sucuri_shit_2': self.__check_suc_cookie(components) elif components.path == '/sucuri_shit_3': self.__check_suc_cookie(components) elif components.path == '/cloudflare_under_attack_shit_2': self.__check_cf_cookie(components) elif components.path == '/cloudflare_under_attack_shit_3': self.__check_cf_cookie(components) def __getpage(self, requestedUrl, **kwargs): self.__pre_check(requestedUrl) self.log.info("Fetching content at URL: %s", requestedUrl) # strip trailing and leading spaces. requestedUrl = requestedUrl.strip() # If we have 'soup' as a param, just pop it, and call `getSoup()`. if 'soup' in kwargs and kwargs['soup']: self.log.warning("'soup' kwarg is depreciated. 
Please use the `getSoup()` call instead.") kwargs.pop('soup') return self.getSoup(requestedUrl, **kwargs) # Decode the kwargs values addlHeaders = kwargs.setdefault("addlHeaders", None) returnMultiple = kwargs.setdefault("returnMultiple", False) callBack = kwargs.setdefault("callBack", None) postData = kwargs.setdefault("postData", None) retryQuantity = kwargs.setdefault("retryQuantity", None) nativeError = kwargs.setdefault("nativeError", False) binaryForm = kwargs.setdefault("binaryForm", False) # Conditionally encode the referrer if needed, because otherwise # urllib will barf on unicode referrer values. if addlHeaders and 'Referer' in addlHeaders: addlHeaders['Referer'] = iri2uri.iri2uri(addlHeaders['Referer']) retryCount = 0 err_content = None err_reason = None err_code = None while 1: pgctnt = None pghandle = None pgreq = self.__buildRequest(requestedUrl, postData, addlHeaders, binaryForm) errored = False lastErr = "" retryCount = retryCount + 1 if (retryQuantity and retryCount > retryQuantity) or (not retryQuantity and retryCount > self.errorOutCount): self.log.error("Failed to retrieve Website : %s at %s All Attempts Exhausted", pgreq.get_full_url(), time.ctime(time.time())) pgctnt = None try: self.log.critical("Critical Failure to retrieve page! 
%s at %s, attempt %s", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.critical("Error: %s", lastErr) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") break #print "execution", retryCount try: # print("Getpage!", requestedUrl, kwargs) pghandle = self.opener.open(pgreq, timeout=30) # Get Webpage # print("Gotpage") except Exceptions.GarbageSiteWrapper as err: # print("garbage site:") raise err except urllib.error.HTTPError as err: # Lotta logging self.log.warning("Error opening page: %s at %s On Attempt %s.", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.warning("Error Code: %s", err) if err.fp: err_content = err.fp.read() encoded = err.hdrs.get('Content-Encoding', None) if encoded: _, err_content = self.__decompressContent(encoded, err_content) err_reason = err.reason err_code = err.code lastErr = err try: self.log.warning("Original URL: %s", requestedUrl) errored = True except: self.log.warning("And the URL could not be printed due to an encoding error") if err.code == 404: #print "Unrecoverable - Page not found. Breaking" self.log.critical("Unrecoverable - Page not found. Breaking") break time.sleep(self.retryDelay) if err.code == 503: if err_content: self._check_waf(err_content, requestedUrl) # So I've been seeing this causing CF to bounce too. # As such, poke through those via chromium too. 
if err.code == 502: if err_content: self._check_waf(err_content, requestedUrl) except UnicodeEncodeError: self.log.critical("Unrecoverable Unicode issue retrieving page - %s", requestedUrl) for line in traceback.format_exc().split("\n"): self.log.critical("%s", line.rstrip()) self.log.critical("Parameters:") self.log.critical(" requestedUrl: '%s'", requestedUrl) self.log.critical(" postData: '%s'", postData) self.log.critical(" addlHeaders: '%s'", addlHeaders) self.log.critical(" binaryForm: '%s'", binaryForm) err_reason = "Unicode Decode Error" err_code = -1 err_content = traceback.format_exc() break except Exception as e: errored = True #traceback.print_exc() lastErr = sys.exc_info() self.log.warning("Retreival failed. Traceback:") self.log.warning(str(lastErr)) self.log.warning(traceback.format_exc()) self.log.warning("Error Retrieving Page! - Trying again - Waiting %s seconds", self.retryDelay) try: self.log.critical("Error on page - %s", requestedUrl) except: self.log.critical("And the URL could not be printed due to an encoding error") time.sleep(self.retryDelay) err_reason = "Unhandled general exception" err_code = -1 err_content = traceback.format_exc() continue if pghandle != None: self.log.info("Request for URL: %s succeeded at %s On Attempt %s. 
Recieving...", pgreq.get_full_url(), time.ctime(time.time()), retryCount) pgctnt = self.__retreiveContent(pgreq, pghandle, callBack) # if __retreiveContent did not return false, it managed to fetch valid results, so break if pgctnt != False: break if errored and pghandle != None: print(("Later attempt succeeded %s" % pgreq.get_full_url())) elif (errored or not pgctnt) and pghandle is None: if lastErr and nativeError: raise lastErr raise Exceptions.FetchFailureError("Failed to retreive page", requestedUrl, err_content=err_content, err_code=err_code, err_reason=err_reason) if returnMultiple: return pgctnt, pghandle else: return pgctnt ###################################################################################################################################################### ###################################################################################################################################################### def __decode_text_content(self, pageContent, cType): # this *should* probably be done using a parser. # However, it seems to be grossly overkill to shove the whole page (which can be quite large) through a parser just to pull out a tag that # should be right near the page beginning anyways. # As such, it's a regular expression for the moment # Regex is of bytes type, since we can't convert a string to unicode until we know the encoding the # bytes string is using, and we need the regex to get that encoding coding = re.search(b"charset=[\'\"]?([a-zA-Z0-9\-]*)[\'\"]?", pageContent, flags=re.IGNORECASE) cType = b"" charset = None try: if coding: cType = coding.group(1) codecs.lookup(cType.decode("ascii")) charset = cType.decode("ascii") except LookupError: # I'm actually not sure what I was thinking when I wrote this if statement. I don't think it'll ever trigger. if (b";" in cType) and (b"=" in cType): # the server is reporting an encoding. 
Now we use it to decode the dummy_docType, charset = cType.split(b";") charset = charset.split(b"=")[-1] if cchardet: inferred = cchardet.detect(pageContent) if inferred and inferred['confidence'] is None: # If we couldn't infer a charset, just short circuit and return the content. # It's probably binary. return pageContent elif inferred and inferred['confidence'] is not None and inferred['confidence'] > 0.8: charset = inferred['encoding'] self.log.info("Cchardet inferred encoding: %s", charset) else: self.log.warning("Missing cchardet!") if not charset: self.log.warning("Could not find encoding information on page - Using default charset. Shit may break!") charset = "utf-8" try: pageContent = str(pageContent, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pageContent = pageContent.decode('utf-8', errors='ignore') return pageContent def __buildRequest(self, pgreq, postData, addlHeaders, binaryForm, req_class = None): if req_class is None: req_class = urllib.request.Request pgreq = iri2uri.iri2uri(pgreq) try: params = {} headers = {} if postData != None: self.log.info("Making a post-request! 
Params: '%s'", postData) if isinstance(postData, str): params['data'] = postData.encode("utf-8") elif isinstance(postData, dict): for key, parameter in postData.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) params['data'] = urllib.parse.urlencode(postData).encode("utf-8") if addlHeaders != None: self.log.info("Have additional GET parameters!") for key, parameter in addlHeaders.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) headers = addlHeaders if binaryForm: self.log.info("Binary form submission!") if 'data' in params: raise Exceptions.ArgumentError("You cannot make a binary form post and a plain post request at the same time!", pgreq) params['data'] = binaryForm.make_result() headers['Content-type'] = binaryForm.get_content_type() headers['Content-length'] = len(params['data']) return req_class(pgreq, headers=headers, **params) except: self.log.critical("Invalid header or url") raise def __decompressContent(self, coding, pgctnt): """ This is really obnoxious """ #preLen = len(pgctnt) if coding == 'deflate': compType = "deflate" bits_opts = [ -zlib.MAX_WBITS, # deflate zlib.MAX_WBITS, # zlib zlib.MAX_WBITS | 16, # gzip zlib.MAX_WBITS | 32, # "automatic header detection" 0, # Try to guess from header # Try all the raw window options. -8, -9, -10, -11, -12, -13, -14, -15, # Stream with zlib headers 8, 9, 10, 11, 12, 13, 14, 15, # With gzip header+trailer 8+16, 9+16, 10+16, 11+16, 12+16, 13+16, 14+16, 15+16, # Automatic detection 8+32, 9+32, 10+32, 11+32, 12+32, 13+32, 14+32, 15+32, ] err = None for wbits_val in bits_opts: try: pgctnt = zlib.decompress(pgctnt, wbits_val) return compType, pgctnt except zlib.error as e: err = e # We can't get here without err having thrown. raise err elif coding == 'gzip': compType = "gzip" buf = io.BytesIO(pgctnt) f = gzip.GzipFile(fileobj=buf) pgctnt = f.read() elif coding == "sdch": raise Exceptions.ContentTypeError("Wait, someone other then google actually supports SDCH compression (%s)?" 
% pgreq) else: compType = "none" return compType, pgctnt def __decodeTextContent(self, pgctnt, cType): if cType: if (";" in cType) and ("=" in cType): # the server is reporting an encoding. Now we use it to decode the content # Some wierdos put two charsets in their headers: # `text/html;Charset=UTF-8;charset=UTF-8` # Split, and take the first two entries. docType, charset = cType.split(";")[:2] charset = charset.split("=")[-1] # Only decode content marked as text (yeah, google is serving zip files # with the content-disposition charset header specifying "UTF-8") or # specifically allowed other content types I know are really text. decode = ['application/atom+xml', 'application/xml', "application/json", 'text'] if any([item in docType for item in decode]): try: pgctnt = str(pgctnt, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pgctnt = pgctnt.decode('utf-8', errors='ignore') else: # The server is not reporting an encoding in the headers. # Use content-aware mechanisms for determing the content encoding. if "text/html" in cType or \ 'text/javascript' in cType or \ 'text/css' in cType or \ 'application/json' in cType or \ 'application/xml' in cType or \ 'application/atom+xml' in cType or \ cType.startswith("text/"): # If this is a html/text page, we want to decode it using the local encoding pgctnt = self.__decode_text_content(pgctnt, cType) elif "text" in cType: self.log.critical("Unknown content type!") self.log.critical(cType) else: self.log.critical("No content disposition header!") self.log.critical("Cannot guess content type!") return pgctnt def __retreiveContent(self, pgreq, pghandle, callBack): try: # If we have a progress callback, call it for chunked read. # Otherwise, just read in the entire content. 
if callBack: pgctnt = self.__chunkRead(pghandle, 2 ** 17, reportHook=callBack) else: pgctnt = pghandle.read() if pgctnt is None: return False self.log.info("URL fully retrieved.") preDecompSize = len(pgctnt)/1000.0 encoded = pghandle.headers.get('Content-Encoding') compType, pgctnt = self.__decompressContent(encoded, pgctnt) decompSize = len(pgctnt)/1000.0 # self.log.info("Page content type = %s", type(pgctnt)) cType = pghandle.headers.get("Content-Type") if compType == 'none': self.log.info("Compression type = %s. Content Size = %0.3fK. File type: %s.", compType, decompSize, cType) else: self.log.info("Compression type = %s. Content Size compressed = %0.3fK. Decompressed = %0.3fK. File type: %s.", compType, preDecompSize, decompSize, cType) self._check_waf(pgctnt, pgreq.get_full_url()) pgctnt = self.__decodeTextContent(pgctnt, cType) return pgctnt except Exceptions.GarbageSiteWrapper as err: raise err except Exception: self.log.error("Exception!") self.log.error(str(sys.exc_info())) traceback.print_exc() self.log.error("Error Retrieving Page! - Transfer failed. Waiting %s seconds before retrying", self.retryDelay) try: self.log.critical("Critical Failure to retrieve page! %s at %s", pgreq.get_full_url(), time.ctime(time.time())) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") self.log.error(pghandle) time.sleep(self.retryDelay) return False # HUGE GOD-FUNCTION. # OH GOD FIXME. # postData expects a dict # addlHeaders also expects a dict def _check_waf(self, pageContent, pageUrl): assert isinstance(pageContent, bytes), "Item pageContent must be of type bytes, received %s" % (type(pageContent), ) assert isinstance(pageUrl, str), "Item pageUrl must be of type str, received %s" % (type(pageUrl), ) if b"sucuri_cloudproxy_js=" in pageContent: raise Exceptions.SucuriWrapper("WAF Shit", pageUrl) if b'This process is automatic. Your browser will redirect to your requested content shortly.' 
in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) if b'is currently offline. However, because the site uses Cloudflare\'s Always Online' in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) ###################################################################################################################################################### ###################################################################################################################################################### def __loadCookies(self): if self.alt_cookiejar is not None: self.alt_cookiejar.init_agent(new_headers=self.browserHeaders) self.cj = self.alt_cookiejar else: self.cj = http.cookiejar.LWPCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods if self.cj is not None: if os.path.isfile(self.COOKIEFILE): try: self.__updateCookiesFromFile() # self.log.info("Loading CookieJar") except: self.log.critical("Cookie file is corrupt/damaged?") try: os.remove(self.COOKIEFILE) except FileNotFoundError: pass if http.cookiejar is not None: # self.log.info("Installing CookieJar") self.log.debug(self.cj) cookieHandler = urllib.request.HTTPCookieProcessor(self.cj) args = (cookieHandler, Handlers.HTTPRedirectHandler) if self.credHandler: print("Have cred handler. 
Building opener using it") args += (self.credHandler, ) if self.use_socks: print("Using Socks handler") if not HAVE_SOCKS: raise RuntimeError("SOCKS Use specified, and no socks installed!") args = (SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050), ) + args self.opener = urllib.request.build_opener(*args) #self.opener.addheaders = [('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')] self.opener.addheaders = self.browserHeaders #urllib2.install_opener(self.opener) for cookie in self.cj: self.log.debug(cookie) #print cookie def _syncCookiesFromFile(self): # self.log.info("Synchronizing cookies with cookieFile.") if os.path.isfile(self.COOKIEFILE): self.cj.save("cookietemp.lwp") self.cj.load(self.COOKIEFILE) self.cj.load("cookietemp.lwp") # First, load any changed cookies so we don't overwrite them # However, we want to persist any cookies that we have that are more recent then the saved cookies, so we temporarily save # the cookies in memory to a temp-file, then load the cookiefile, and finally overwrite the loaded cookies with the ones from the # temp file def __updateCookiesFromFile(self): if os.path.exists(self.COOKIEFILE): # self.log.info("Synchronizing cookies with cookieFile.") self.cj.load(self.COOKIEFILE) # Update cookies from cookiefile def addCookie(self, inCookie): self.log.info("Updating cookie!") self.cj.set_cookie(inCookie) def addSeleniumCookie(self, cookieDict): ''' Install a cookie exported from a selenium webdriver into the active opener ''' # print cookieDict cookie = http.cookiejar.Cookie( version = 0, name = cookieDict['name'], value = cookieDict['value'], port = None, port_specified = False, domain = cookieDict['domain'], domain_specified = True, domain_initial_dot = False, path = cookieDict['path'], path_specified = False, secure = cookieDict['secure'], expires = cookieDict['expiry'] if 'expiry' in cookieDict else None, discard = False, comment = None, comment_url = None, rest = {"httponly":"%s" % cookieDict['httponly'] if 
'httponly' in cookieDict else False}, rfc2109 = False ) self.addCookie(cookie) def saveCookies(self, halting=False): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return # print("Have %d cookies before saving cookiejar" % len(self.cj)) try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() # self.log.info("Have cookies to save") for cookie in self.cj: # print(cookie) # print(cookie.expires) if isinstance(cookie.expires, int) and cookie.expires > 30000000000: # Clamp cookies that expire stupidly far in the future because people are assholes cookie.expires = 30000000000 # self.log.info("Calling save function") self.cj.save(self.COOKIEFILE) # save the cookies again # self.log.info("Cookies Saved") else: self.log.info("No cookies to save?") except Exception as e: pass # The destructor call order is too incoherent, and shit fails # during the teardown with null-references. The error printout is # not informative, so just silence it. # print("Possible error on exit (or just the destructor): '%s'." 
% e) finally: if self.cookie_lock: self.cookie_lock.release() # print("Have %d cookies after saving cookiejar" % len(self.cj)) if not halting: self._syncCookiesFromFile() # print "Have %d cookies after reloading cookiejar" % len(self.cj) def clearCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return try: self.cj.clear() self.cj.save(self.COOKIEFILE) # save the cookies again self.cj.save("cookietemp.lwp") self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() def getCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: raise RuntimeError("Could not acquire lock on cookiejar") try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() return self.cj ###################################################################################################################################################### ###################################################################################################################################################### def __del__(self): # print "WGH Destructor called!" # print("WebRequest __del__") self.saveCookies(halting=True) sup = super() if hasattr(sup, '__del__'): sup.__del__() def stepThroughCloudFlareWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains='Just a moment...') def stepThroughSucuriWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains="You are being redirected...") def stepThroughJsWaf(self, *args, **kwargs): # Shim to the underlying web browser of choice return self.stepThroughJsWaf_bare_chromium(*args, **kwargs) # Compat for old code. def stepThroughCloudFlare(self, *args, **kwargs): return self.stepThroughJsWaf(*args, **kwargs)
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.__pre_check
python
def __pre_check(self, requestedUrl): ''' Allow the pre-emptive fetching of sites with a full browser if they're known to be dick hosters. ''' components = urllib.parse.urlsplit(requestedUrl) netloc_l = components.netloc.lower() if netloc_l in Domain_Constants.SUCURI_GARBAGE_SITE_NETLOCS: self.__check_suc_cookie(components) elif netloc_l in Domain_Constants.CF_GARBAGE_SITE_NETLOCS: self.__check_cf_cookie(components) elif components.path == '/sucuri_shit_2': self.__check_suc_cookie(components) elif components.path == '/sucuri_shit_3': self.__check_suc_cookie(components) elif components.path == '/cloudflare_under_attack_shit_2': self.__check_cf_cookie(components) elif components.path == '/cloudflare_under_attack_shit_3': self.__check_cf_cookie(components)
Allow the pre-emptive fetching of sites with a full browser if they're known to be dick hosters.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L435-L454
null
class WebGetRobust( ChromiumMixin.WebGetCrMixin, SeleniumChromiumMixin.WebGetSeleniumChromiumMixin, ): COOKIEFILE = 'cookies.lwp' # the path and filename to save your cookies in cj = None cookielib = None opener = None errorOutCount = 1 # retryDelay = 0.1 retryDelay = 0.01 data = None # creds is a list of 3-tuples that gets inserted into the password manager. # it is structured [(top_level_url1, username1, password1), (top_level_url2, username2, password2)] def __init__(self, creds = None, logPath = "Main.WebRequest", cookie_lock = None, cloudflare = True, auto_waf = True, use_socks = False, alt_cookiejar = None, custom_ua = None, ): super().__init__() self.rules = {} self.rules['auto_waf'] = cloudflare or auto_waf if cookie_lock: self.cookie_lock = cookie_lock elif alt_cookiejar: self.log.info("External cookie-jar specified. Not forcing cookiejar serialization.") self.cookie_lock = None else: self.cookie_lock = COOKIEWRITELOCK self.use_socks = use_socks # Override the global default socket timeout, so hung connections will actually time out properly. socket.setdefaulttimeout(5) self.log = logging.getLogger(logPath) # print("Webget init! 
Logpath = ", logPath) if custom_ua: self.log.info("User agent overridden!") self.browserHeaders = custom_ua else: # Due to general internet people douchebaggyness, I've basically said to hell with it and decided to spoof a whole assortment of browsers # It should keep people from blocking this scraper *too* easily self.browserHeaders = UA_Constants.getUserAgent() self.data = urllib.parse.urlencode(self.browserHeaders) if creds: print("Have credentials, installing password manager into urllib handler.") passManager = urllib.request.HTTPPasswordMgrWithDefaultRealm() for url, username, password in creds: passManager.add_password(None, url, username, password) self.credHandler = Handlers.PreemptiveBasicAuthHandler(passManager) else: self.credHandler = None self.alt_cookiejar = alt_cookiejar self.__loadCookies() def getpage(self, requestedUrl, *args, **kwargs): try: return self.__getpage(requestedUrl, *args, **kwargs) except Exceptions.CloudFlareWrapper: if self.rules['auto_waf']: self.log.warning("Cloudflare failure! Doing automatic step-through.") if not self.stepThroughCloudFlareWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through cloudflare!", requestedUrl) # Cloudflare cookie set, retrieve again return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Cloudflare without step-through setting!") raise except Exceptions.SucuriWrapper: # print("Sucuri!") if self.rules['auto_waf']: self.log.warning("Sucuri failure! 
Doing automatic step-through.") if not self.stepThroughSucuriWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through Sucuri WAF bullshit!", requestedUrl) return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Sucuri without step-through setting!") raise def chunkReport(self, bytesSoFar, totalSize): if totalSize: percent = float(bytesSoFar) / totalSize percent = round(percent * 100, 2) self.log.info("Downloaded %d of %d bytes (%0.2f%%)" % (bytesSoFar, totalSize, percent)) else: self.log.info("Downloaded %d bytes" % (bytesSoFar)) def __chunkRead(self, response, chunkSize=2 ** 18, reportHook=None): contentLengthHeader = response.info().getheader('Content-Length') if contentLengthHeader: totalSize = contentLengthHeader.strip() totalSize = int(totalSize) else: totalSize = None bytesSoFar = 0 pgContent = "" while 1: chunk = response.read(chunkSize) pgContent += chunk bytesSoFar += len(chunk) if not chunk: break if reportHook: reportHook(bytesSoFar, chunkSize, totalSize) return pgContent def getSoup(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!", requestedUrl) page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): raise Exceptions.ContentTypeError("Received content not decoded! 
Cannot parse!", requestedUrl) soup = utility.as_soup(page) return soup def getJson(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) attempts = 0 while 1: try: page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): page = page.decode(utility.determine_json_encoding(page)) # raise ValueError("Received content not decoded! Cannot parse!") page = page.strip() ret = json.loads(page) return ret except ValueError: if attempts < 1: attempts += 1 self.log.error("JSON Parsing issue retrieving content from page!") for line in traceback.format_exc().split("\n"): self.log.error("%s", line.rstrip()) self.log.error("Retrying!") # Scramble our current UA self.browserHeaders = UA_Constants.getUserAgent() if self.alt_cookiejar: self.cj.init_agent(new_headers=self.browserHeaders) time.sleep(self.retryDelay) else: self.log.error("JSON Parsing issue, and retries exhausted!") # self.log.error("Page content:") # self.log.error(page) # with open("Error-ctnt-{}.json".format(time.time()), "w") as tmp_err_fp: # tmp_err_fp.write(page) raise def getSoupNoRedirects(self, *args, **kwargs): if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple'") if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!") kwargs['returnMultiple'] = True tgt_url = kwargs.get('requestedUrl', None) if not tgt_url: tgt_url = args[0] page, handle = self.getpage(*args, **kwargs) redirurl = handle.geturl() if redirurl != tgt_url: self.log.error("Requested %s, redirected to %s. 
Raising error", tgt_url, redirurl) raise Exceptions.RedirectedError("Requested %s, redirected to %s" % ( tgt_url, redirurl)) soup = as_soup(page) return soup def getFileAndName(self, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName) for the content at the target URL. The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs) return pgctnt, hName def getFileNameMime(self, requestedUrl, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL, the filename for the target content, and the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getFileAndName cannot be called with 'returnMultiple'", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getFileAndName contradicts the 'soup' directive!", requestedUrl) kwargs["returnMultiple"] = True pgctnt, pghandle = self.getpage(requestedUrl, *args, **kwargs) info = pghandle.info() if not 'Content-Disposition' in info: hName = '' elif not 'filename=' in info['Content-Disposition']: hName = '' else: hName = info['Content-Disposition'].split('filename=')[1] # Unquote filename if it's quoted. 
if ((hName.startswith("'") and hName.endswith("'")) or hName.startswith('"') and hName.endswith('"')) and len(hName) >= 2: hName = hName[1:-1] mime = info.get_content_type() if not hName.strip(): requestedUrl = pghandle.geturl() hName = urllib.parse.urlsplit(requestedUrl).path.split("/")[-1].strip() if "/" in hName: hName = hName.split("/")[-1] return pgctnt, hName, mime def getItem(self, itemUrl): content, handle = self.getpage(itemUrl, returnMultiple=True) if not content or not handle: raise urllib.error.URLError("Failed to retreive file from page '%s'!" % itemUrl) handle_info = handle.info() if handle_info['Content-Disposition'] and 'filename=' in handle_info['Content-Disposition'].lower(): fileN = handle_info['Content-Disposition'].split("=", 1)[-1] else: fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1]) fileN = bs4.UnicodeDammit(fileN).unicode_markup mType = handle_info['Content-Type'] # If there is an encoding in the content-type (or any other info), strip it out. # We don't care about the encoding, since WebFunctions will already have handled that, # and returned a decoded unicode object. if mType and ";" in mType: mType = mType.split(";")[0].strip() # *sigh*. So minus.com is fucking up their http headers, and apparently urlencoding the # mime type, because apparently they're shit at things. # Anyways, fix that. 
if mType and '%2F' in mType: mType = mType.replace('%2F', '/') self.log.info("Retreived file of type '%s', name of '%s' with a size of %0.3f K", mType, fileN, len(content)/1000.0) return content, fileN, mType def getHead(self, url, addlHeaders=None): self.log.warning("TODO: Fixme this neds to be migrated to use the normal fetch interface, so it is WAF-aware.") for x in range(9999): try: self.log.info("Doing HTTP HEAD request for '%s'", url) pgreq = self.__buildRequest(url, None, addlHeaders, None, req_class=Handlers.HeadRequest) pghandle = self.opener.open(pgreq, timeout=30) returl = pghandle.geturl() if returl != url: self.log.info("HEAD request returned a different URL '%s'", returl) return returl except socket.timeout as e: self.log.info("Timeout, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("Timout when fetching content", url) except urllib.error.URLError as e: # Continue even in the face of cloudflare crapping it's pants if e.code == 500 and e.geturl(): return e.geturl() self.log.info("URLError, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("URLError when fetching content", e.geturl(), err_code=e.code) ###################################################################################################################################################### ###################################################################################################################################################### def __check_suc_cookie(self, components): ''' This is only called if we're on a known sucuri-"protected" site. As such, if we do *not* have a sucuri cloudproxy cookie, we can assume we need to do the normal WAF step-through. 
''' netloc = components.netloc.lower() for cookie in self.cj: if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) or (cookie.domain.lower().endswith("127.0.0.1") and ( components.path == "/sucuri_shit_3" or components.path == "/sucuri_shit_2" ))): # Allow testing if "sucuri_cloudproxy_uuid_" in cookie.name: return self.log.info("Missing cloudproxy cookie for known sucuri wrapped site. Doing a pre-emptive chromium fetch.") raise Exceptions.SucuriWrapper("WAF Shit", str(components)) def __check_cf_cookie(self, components): netloc = components.netloc.lower() # TODO: Implement me? # for cookie in self.cj: # if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) # or (cookie.domain.lower().endswith("127.0.0.1") and components.path == "/sucuri_shit_2")): # Allow testing # if "sucuri_cloudproxy_uuid_" in cookie.name: # return # print("Target cookie!") # print("K -> V: %s -> %s" % (cookie.name, cookie.value)) # print(cookie) # print(type(cookie)) # print(cookie.domain) # raise RuntimeError pass def __getpage(self, requestedUrl, **kwargs): self.__pre_check(requestedUrl) self.log.info("Fetching content at URL: %s", requestedUrl) # strip trailing and leading spaces. requestedUrl = requestedUrl.strip() # If we have 'soup' as a param, just pop it, and call `getSoup()`. if 'soup' in kwargs and kwargs['soup']: self.log.warning("'soup' kwarg is depreciated. 
Please use the `getSoup()` call instead.") kwargs.pop('soup') return self.getSoup(requestedUrl, **kwargs) # Decode the kwargs values addlHeaders = kwargs.setdefault("addlHeaders", None) returnMultiple = kwargs.setdefault("returnMultiple", False) callBack = kwargs.setdefault("callBack", None) postData = kwargs.setdefault("postData", None) retryQuantity = kwargs.setdefault("retryQuantity", None) nativeError = kwargs.setdefault("nativeError", False) binaryForm = kwargs.setdefault("binaryForm", False) # Conditionally encode the referrer if needed, because otherwise # urllib will barf on unicode referrer values. if addlHeaders and 'Referer' in addlHeaders: addlHeaders['Referer'] = iri2uri.iri2uri(addlHeaders['Referer']) retryCount = 0 err_content = None err_reason = None err_code = None while 1: pgctnt = None pghandle = None pgreq = self.__buildRequest(requestedUrl, postData, addlHeaders, binaryForm) errored = False lastErr = "" retryCount = retryCount + 1 if (retryQuantity and retryCount > retryQuantity) or (not retryQuantity and retryCount > self.errorOutCount): self.log.error("Failed to retrieve Website : %s at %s All Attempts Exhausted", pgreq.get_full_url(), time.ctime(time.time())) pgctnt = None try: self.log.critical("Critical Failure to retrieve page! 
%s at %s, attempt %s", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.critical("Error: %s", lastErr) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") break #print "execution", retryCount try: # print("Getpage!", requestedUrl, kwargs) pghandle = self.opener.open(pgreq, timeout=30) # Get Webpage # print("Gotpage") except Exceptions.GarbageSiteWrapper as err: # print("garbage site:") raise err except urllib.error.HTTPError as err: # Lotta logging self.log.warning("Error opening page: %s at %s On Attempt %s.", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.warning("Error Code: %s", err) if err.fp: err_content = err.fp.read() encoded = err.hdrs.get('Content-Encoding', None) if encoded: _, err_content = self.__decompressContent(encoded, err_content) err_reason = err.reason err_code = err.code lastErr = err try: self.log.warning("Original URL: %s", requestedUrl) errored = True except: self.log.warning("And the URL could not be printed due to an encoding error") if err.code == 404: #print "Unrecoverable - Page not found. Breaking" self.log.critical("Unrecoverable - Page not found. Breaking") break time.sleep(self.retryDelay) if err.code == 503: if err_content: self._check_waf(err_content, requestedUrl) # So I've been seeing this causing CF to bounce too. # As such, poke through those via chromium too. 
if err.code == 502: if err_content: self._check_waf(err_content, requestedUrl) except UnicodeEncodeError: self.log.critical("Unrecoverable Unicode issue retrieving page - %s", requestedUrl) for line in traceback.format_exc().split("\n"): self.log.critical("%s", line.rstrip()) self.log.critical("Parameters:") self.log.critical(" requestedUrl: '%s'", requestedUrl) self.log.critical(" postData: '%s'", postData) self.log.critical(" addlHeaders: '%s'", addlHeaders) self.log.critical(" binaryForm: '%s'", binaryForm) err_reason = "Unicode Decode Error" err_code = -1 err_content = traceback.format_exc() break except Exception as e: errored = True #traceback.print_exc() lastErr = sys.exc_info() self.log.warning("Retreival failed. Traceback:") self.log.warning(str(lastErr)) self.log.warning(traceback.format_exc()) self.log.warning("Error Retrieving Page! - Trying again - Waiting %s seconds", self.retryDelay) try: self.log.critical("Error on page - %s", requestedUrl) except: self.log.critical("And the URL could not be printed due to an encoding error") time.sleep(self.retryDelay) err_reason = "Unhandled general exception" err_code = -1 err_content = traceback.format_exc() continue if pghandle != None: self.log.info("Request for URL: %s succeeded at %s On Attempt %s. 
Recieving...", pgreq.get_full_url(), time.ctime(time.time()), retryCount) pgctnt = self.__retreiveContent(pgreq, pghandle, callBack) # if __retreiveContent did not return false, it managed to fetch valid results, so break if pgctnt != False: break if errored and pghandle != None: print(("Later attempt succeeded %s" % pgreq.get_full_url())) elif (errored or not pgctnt) and pghandle is None: if lastErr and nativeError: raise lastErr raise Exceptions.FetchFailureError("Failed to retreive page", requestedUrl, err_content=err_content, err_code=err_code, err_reason=err_reason) if returnMultiple: return pgctnt, pghandle else: return pgctnt ###################################################################################################################################################### ###################################################################################################################################################### def __decode_text_content(self, pageContent, cType): # this *should* probably be done using a parser. # However, it seems to be grossly overkill to shove the whole page (which can be quite large) through a parser just to pull out a tag that # should be right near the page beginning anyways. # As such, it's a regular expression for the moment # Regex is of bytes type, since we can't convert a string to unicode until we know the encoding the # bytes string is using, and we need the regex to get that encoding coding = re.search(b"charset=[\'\"]?([a-zA-Z0-9\-]*)[\'\"]?", pageContent, flags=re.IGNORECASE) cType = b"" charset = None try: if coding: cType = coding.group(1) codecs.lookup(cType.decode("ascii")) charset = cType.decode("ascii") except LookupError: # I'm actually not sure what I was thinking when I wrote this if statement. I don't think it'll ever trigger. if (b";" in cType) and (b"=" in cType): # the server is reporting an encoding. 
Now we use it to decode the dummy_docType, charset = cType.split(b";") charset = charset.split(b"=")[-1] if cchardet: inferred = cchardet.detect(pageContent) if inferred and inferred['confidence'] is None: # If we couldn't infer a charset, just short circuit and return the content. # It's probably binary. return pageContent elif inferred and inferred['confidence'] is not None and inferred['confidence'] > 0.8: charset = inferred['encoding'] self.log.info("Cchardet inferred encoding: %s", charset) else: self.log.warning("Missing cchardet!") if not charset: self.log.warning("Could not find encoding information on page - Using default charset. Shit may break!") charset = "utf-8" try: pageContent = str(pageContent, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pageContent = pageContent.decode('utf-8', errors='ignore') return pageContent def __buildRequest(self, pgreq, postData, addlHeaders, binaryForm, req_class = None): if req_class is None: req_class = urllib.request.Request pgreq = iri2uri.iri2uri(pgreq) try: params = {} headers = {} if postData != None: self.log.info("Making a post-request! 
Params: '%s'", postData) if isinstance(postData, str): params['data'] = postData.encode("utf-8") elif isinstance(postData, dict): for key, parameter in postData.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) params['data'] = urllib.parse.urlencode(postData).encode("utf-8") if addlHeaders != None: self.log.info("Have additional GET parameters!") for key, parameter in addlHeaders.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) headers = addlHeaders if binaryForm: self.log.info("Binary form submission!") if 'data' in params: raise Exceptions.ArgumentError("You cannot make a binary form post and a plain post request at the same time!", pgreq) params['data'] = binaryForm.make_result() headers['Content-type'] = binaryForm.get_content_type() headers['Content-length'] = len(params['data']) return req_class(pgreq, headers=headers, **params) except: self.log.critical("Invalid header or url") raise def __decompressContent(self, coding, pgctnt): """ This is really obnoxious """ #preLen = len(pgctnt) if coding == 'deflate': compType = "deflate" bits_opts = [ -zlib.MAX_WBITS, # deflate zlib.MAX_WBITS, # zlib zlib.MAX_WBITS | 16, # gzip zlib.MAX_WBITS | 32, # "automatic header detection" 0, # Try to guess from header # Try all the raw window options. -8, -9, -10, -11, -12, -13, -14, -15, # Stream with zlib headers 8, 9, 10, 11, 12, 13, 14, 15, # With gzip header+trailer 8+16, 9+16, 10+16, 11+16, 12+16, 13+16, 14+16, 15+16, # Automatic detection 8+32, 9+32, 10+32, 11+32, 12+32, 13+32, 14+32, 15+32, ] err = None for wbits_val in bits_opts: try: pgctnt = zlib.decompress(pgctnt, wbits_val) return compType, pgctnt except zlib.error as e: err = e # We can't get here without err having thrown. raise err elif coding == 'gzip': compType = "gzip" buf = io.BytesIO(pgctnt) f = gzip.GzipFile(fileobj=buf) pgctnt = f.read() elif coding == "sdch": raise Exceptions.ContentTypeError("Wait, someone other then google actually supports SDCH compression (%s)?" 
% pgreq) else: compType = "none" return compType, pgctnt def __decodeTextContent(self, pgctnt, cType): if cType: if (";" in cType) and ("=" in cType): # the server is reporting an encoding. Now we use it to decode the content # Some wierdos put two charsets in their headers: # `text/html;Charset=UTF-8;charset=UTF-8` # Split, and take the first two entries. docType, charset = cType.split(";")[:2] charset = charset.split("=")[-1] # Only decode content marked as text (yeah, google is serving zip files # with the content-disposition charset header specifying "UTF-8") or # specifically allowed other content types I know are really text. decode = ['application/atom+xml', 'application/xml', "application/json", 'text'] if any([item in docType for item in decode]): try: pgctnt = str(pgctnt, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pgctnt = pgctnt.decode('utf-8', errors='ignore') else: # The server is not reporting an encoding in the headers. # Use content-aware mechanisms for determing the content encoding. if "text/html" in cType or \ 'text/javascript' in cType or \ 'text/css' in cType or \ 'application/json' in cType or \ 'application/xml' in cType or \ 'application/atom+xml' in cType or \ cType.startswith("text/"): # If this is a html/text page, we want to decode it using the local encoding pgctnt = self.__decode_text_content(pgctnt, cType) elif "text" in cType: self.log.critical("Unknown content type!") self.log.critical(cType) else: self.log.critical("No content disposition header!") self.log.critical("Cannot guess content type!") return pgctnt def __retreiveContent(self, pgreq, pghandle, callBack): try: # If we have a progress callback, call it for chunked read. # Otherwise, just read in the entire content. 
if callBack: pgctnt = self.__chunkRead(pghandle, 2 ** 17, reportHook=callBack) else: pgctnt = pghandle.read() if pgctnt is None: return False self.log.info("URL fully retrieved.") preDecompSize = len(pgctnt)/1000.0 encoded = pghandle.headers.get('Content-Encoding') compType, pgctnt = self.__decompressContent(encoded, pgctnt) decompSize = len(pgctnt)/1000.0 # self.log.info("Page content type = %s", type(pgctnt)) cType = pghandle.headers.get("Content-Type") if compType == 'none': self.log.info("Compression type = %s. Content Size = %0.3fK. File type: %s.", compType, decompSize, cType) else: self.log.info("Compression type = %s. Content Size compressed = %0.3fK. Decompressed = %0.3fK. File type: %s.", compType, preDecompSize, decompSize, cType) self._check_waf(pgctnt, pgreq.get_full_url()) pgctnt = self.__decodeTextContent(pgctnt, cType) return pgctnt except Exceptions.GarbageSiteWrapper as err: raise err except Exception: self.log.error("Exception!") self.log.error(str(sys.exc_info())) traceback.print_exc() self.log.error("Error Retrieving Page! - Transfer failed. Waiting %s seconds before retrying", self.retryDelay) try: self.log.critical("Critical Failure to retrieve page! %s at %s", pgreq.get_full_url(), time.ctime(time.time())) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") self.log.error(pghandle) time.sleep(self.retryDelay) return False # HUGE GOD-FUNCTION. # OH GOD FIXME. # postData expects a dict # addlHeaders also expects a dict def _check_waf(self, pageContent, pageUrl): assert isinstance(pageContent, bytes), "Item pageContent must be of type bytes, received %s" % (type(pageContent), ) assert isinstance(pageUrl, str), "Item pageUrl must be of type str, received %s" % (type(pageUrl), ) if b"sucuri_cloudproxy_js=" in pageContent: raise Exceptions.SucuriWrapper("WAF Shit", pageUrl) if b'This process is automatic. Your browser will redirect to your requested content shortly.' 
in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) if b'is currently offline. However, because the site uses Cloudflare\'s Always Online' in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) ###################################################################################################################################################### ###################################################################################################################################################### def __loadCookies(self): if self.alt_cookiejar is not None: self.alt_cookiejar.init_agent(new_headers=self.browserHeaders) self.cj = self.alt_cookiejar else: self.cj = http.cookiejar.LWPCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods if self.cj is not None: if os.path.isfile(self.COOKIEFILE): try: self.__updateCookiesFromFile() # self.log.info("Loading CookieJar") except: self.log.critical("Cookie file is corrupt/damaged?") try: os.remove(self.COOKIEFILE) except FileNotFoundError: pass if http.cookiejar is not None: # self.log.info("Installing CookieJar") self.log.debug(self.cj) cookieHandler = urllib.request.HTTPCookieProcessor(self.cj) args = (cookieHandler, Handlers.HTTPRedirectHandler) if self.credHandler: print("Have cred handler. 
Building opener using it") args += (self.credHandler, ) if self.use_socks: print("Using Socks handler") if not HAVE_SOCKS: raise RuntimeError("SOCKS Use specified, and no socks installed!") args = (SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050), ) + args self.opener = urllib.request.build_opener(*args) #self.opener.addheaders = [('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')] self.opener.addheaders = self.browserHeaders #urllib2.install_opener(self.opener) for cookie in self.cj: self.log.debug(cookie) #print cookie def _syncCookiesFromFile(self): # self.log.info("Synchronizing cookies with cookieFile.") if os.path.isfile(self.COOKIEFILE): self.cj.save("cookietemp.lwp") self.cj.load(self.COOKIEFILE) self.cj.load("cookietemp.lwp") # First, load any changed cookies so we don't overwrite them # However, we want to persist any cookies that we have that are more recent then the saved cookies, so we temporarily save # the cookies in memory to a temp-file, then load the cookiefile, and finally overwrite the loaded cookies with the ones from the # temp file def __updateCookiesFromFile(self): if os.path.exists(self.COOKIEFILE): # self.log.info("Synchronizing cookies with cookieFile.") self.cj.load(self.COOKIEFILE) # Update cookies from cookiefile def addCookie(self, inCookie): self.log.info("Updating cookie!") self.cj.set_cookie(inCookie) def addSeleniumCookie(self, cookieDict): ''' Install a cookie exported from a selenium webdriver into the active opener ''' # print cookieDict cookie = http.cookiejar.Cookie( version = 0, name = cookieDict['name'], value = cookieDict['value'], port = None, port_specified = False, domain = cookieDict['domain'], domain_specified = True, domain_initial_dot = False, path = cookieDict['path'], path_specified = False, secure = cookieDict['secure'], expires = cookieDict['expiry'] if 'expiry' in cookieDict else None, discard = False, comment = None, comment_url = None, rest = {"httponly":"%s" % cookieDict['httponly'] if 
'httponly' in cookieDict else False}, rfc2109 = False ) self.addCookie(cookie) def saveCookies(self, halting=False): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return # print("Have %d cookies before saving cookiejar" % len(self.cj)) try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() # self.log.info("Have cookies to save") for cookie in self.cj: # print(cookie) # print(cookie.expires) if isinstance(cookie.expires, int) and cookie.expires > 30000000000: # Clamp cookies that expire stupidly far in the future because people are assholes cookie.expires = 30000000000 # self.log.info("Calling save function") self.cj.save(self.COOKIEFILE) # save the cookies again # self.log.info("Cookies Saved") else: self.log.info("No cookies to save?") except Exception as e: pass # The destructor call order is too incoherent, and shit fails # during the teardown with null-references. The error printout is # not informative, so just silence it. # print("Possible error on exit (or just the destructor): '%s'." 
% e) finally: if self.cookie_lock: self.cookie_lock.release() # print("Have %d cookies after saving cookiejar" % len(self.cj)) if not halting: self._syncCookiesFromFile() # print "Have %d cookies after reloading cookiejar" % len(self.cj) def clearCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return try: self.cj.clear() self.cj.save(self.COOKIEFILE) # save the cookies again self.cj.save("cookietemp.lwp") self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() def getCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: raise RuntimeError("Could not acquire lock on cookiejar") try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() return self.cj ###################################################################################################################################################### ###################################################################################################################################################### def __del__(self): # print "WGH Destructor called!" # print("WebRequest __del__") self.saveCookies(halting=True) sup = super() if hasattr(sup, '__del__'): sup.__del__() def stepThroughCloudFlareWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains='Just a moment...') def stepThroughSucuriWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains="You are being redirected...") def stepThroughJsWaf(self, *args, **kwargs): # Shim to the underlying web browser of choice return self.stepThroughJsWaf_bare_chromium(*args, **kwargs) # Compat for old code. def stepThroughCloudFlare(self, *args, **kwargs): return self.stepThroughJsWaf(*args, **kwargs)
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.__decompressContent
python
def __decompressContent(self, coding, pgctnt): #preLen = len(pgctnt) if coding == 'deflate': compType = "deflate" bits_opts = [ -zlib.MAX_WBITS, # deflate zlib.MAX_WBITS, # zlib zlib.MAX_WBITS | 16, # gzip zlib.MAX_WBITS | 32, # "automatic header detection" 0, # Try to guess from header # Try all the raw window options. -8, -9, -10, -11, -12, -13, -14, -15, # Stream with zlib headers 8, 9, 10, 11, 12, 13, 14, 15, # With gzip header+trailer 8+16, 9+16, 10+16, 11+16, 12+16, 13+16, 14+16, 15+16, # Automatic detection 8+32, 9+32, 10+32, 11+32, 12+32, 13+32, 14+32, 15+32, ] err = None for wbits_val in bits_opts: try: pgctnt = zlib.decompress(pgctnt, wbits_val) return compType, pgctnt except zlib.error as e: err = e # We can't get here without err having thrown. raise err elif coding == 'gzip': compType = "gzip" buf = io.BytesIO(pgctnt) f = gzip.GzipFile(fileobj=buf) pgctnt = f.read() elif coding == "sdch": raise Exceptions.ContentTypeError("Wait, someone other then google actually supports SDCH compression (%s)?" % pgreq) else: compType = "none" return compType, pgctnt
This is really obnoxious
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L716-L769
null
class WebGetRobust( ChromiumMixin.WebGetCrMixin, SeleniumChromiumMixin.WebGetSeleniumChromiumMixin, ): COOKIEFILE = 'cookies.lwp' # the path and filename to save your cookies in cj = None cookielib = None opener = None errorOutCount = 1 # retryDelay = 0.1 retryDelay = 0.01 data = None # creds is a list of 3-tuples that gets inserted into the password manager. # it is structured [(top_level_url1, username1, password1), (top_level_url2, username2, password2)] def __init__(self, creds = None, logPath = "Main.WebRequest", cookie_lock = None, cloudflare = True, auto_waf = True, use_socks = False, alt_cookiejar = None, custom_ua = None, ): super().__init__() self.rules = {} self.rules['auto_waf'] = cloudflare or auto_waf if cookie_lock: self.cookie_lock = cookie_lock elif alt_cookiejar: self.log.info("External cookie-jar specified. Not forcing cookiejar serialization.") self.cookie_lock = None else: self.cookie_lock = COOKIEWRITELOCK self.use_socks = use_socks # Override the global default socket timeout, so hung connections will actually time out properly. socket.setdefaulttimeout(5) self.log = logging.getLogger(logPath) # print("Webget init! 
Logpath = ", logPath) if custom_ua: self.log.info("User agent overridden!") self.browserHeaders = custom_ua else: # Due to general internet people douchebaggyness, I've basically said to hell with it and decided to spoof a whole assortment of browsers # It should keep people from blocking this scraper *too* easily self.browserHeaders = UA_Constants.getUserAgent() self.data = urllib.parse.urlencode(self.browserHeaders) if creds: print("Have credentials, installing password manager into urllib handler.") passManager = urllib.request.HTTPPasswordMgrWithDefaultRealm() for url, username, password in creds: passManager.add_password(None, url, username, password) self.credHandler = Handlers.PreemptiveBasicAuthHandler(passManager) else: self.credHandler = None self.alt_cookiejar = alt_cookiejar self.__loadCookies() def getpage(self, requestedUrl, *args, **kwargs): try: return self.__getpage(requestedUrl, *args, **kwargs) except Exceptions.CloudFlareWrapper: if self.rules['auto_waf']: self.log.warning("Cloudflare failure! Doing automatic step-through.") if not self.stepThroughCloudFlareWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through cloudflare!", requestedUrl) # Cloudflare cookie set, retrieve again return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Cloudflare without step-through setting!") raise except Exceptions.SucuriWrapper: # print("Sucuri!") if self.rules['auto_waf']: self.log.warning("Sucuri failure! 
Doing automatic step-through.") if not self.stepThroughSucuriWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through Sucuri WAF bullshit!", requestedUrl) return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Sucuri without step-through setting!") raise def chunkReport(self, bytesSoFar, totalSize): if totalSize: percent = float(bytesSoFar) / totalSize percent = round(percent * 100, 2) self.log.info("Downloaded %d of %d bytes (%0.2f%%)" % (bytesSoFar, totalSize, percent)) else: self.log.info("Downloaded %d bytes" % (bytesSoFar)) def __chunkRead(self, response, chunkSize=2 ** 18, reportHook=None): contentLengthHeader = response.info().getheader('Content-Length') if contentLengthHeader: totalSize = contentLengthHeader.strip() totalSize = int(totalSize) else: totalSize = None bytesSoFar = 0 pgContent = "" while 1: chunk = response.read(chunkSize) pgContent += chunk bytesSoFar += len(chunk) if not chunk: break if reportHook: reportHook(bytesSoFar, chunkSize, totalSize) return pgContent def getSoup(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!", requestedUrl) page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): raise Exceptions.ContentTypeError("Received content not decoded! 
Cannot parse!", requestedUrl) soup = utility.as_soup(page) return soup def getJson(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) attempts = 0 while 1: try: page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): page = page.decode(utility.determine_json_encoding(page)) # raise ValueError("Received content not decoded! Cannot parse!") page = page.strip() ret = json.loads(page) return ret except ValueError: if attempts < 1: attempts += 1 self.log.error("JSON Parsing issue retrieving content from page!") for line in traceback.format_exc().split("\n"): self.log.error("%s", line.rstrip()) self.log.error("Retrying!") # Scramble our current UA self.browserHeaders = UA_Constants.getUserAgent() if self.alt_cookiejar: self.cj.init_agent(new_headers=self.browserHeaders) time.sleep(self.retryDelay) else: self.log.error("JSON Parsing issue, and retries exhausted!") # self.log.error("Page content:") # self.log.error(page) # with open("Error-ctnt-{}.json".format(time.time()), "w") as tmp_err_fp: # tmp_err_fp.write(page) raise def getSoupNoRedirects(self, *args, **kwargs): if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple'") if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!") kwargs['returnMultiple'] = True tgt_url = kwargs.get('requestedUrl', None) if not tgt_url: tgt_url = args[0] page, handle = self.getpage(*args, **kwargs) redirurl = handle.geturl() if redirurl != tgt_url: self.log.error("Requested %s, redirected to %s. 
Raising error", tgt_url, redirurl) raise Exceptions.RedirectedError("Requested %s, redirected to %s" % ( tgt_url, redirurl)) soup = as_soup(page) return soup def getFileAndName(self, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName) for the content at the target URL. The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs) return pgctnt, hName def getFileNameMime(self, requestedUrl, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL, the filename for the target content, and the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getFileAndName cannot be called with 'returnMultiple'", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getFileAndName contradicts the 'soup' directive!", requestedUrl) kwargs["returnMultiple"] = True pgctnt, pghandle = self.getpage(requestedUrl, *args, **kwargs) info = pghandle.info() if not 'Content-Disposition' in info: hName = '' elif not 'filename=' in info['Content-Disposition']: hName = '' else: hName = info['Content-Disposition'].split('filename=')[1] # Unquote filename if it's quoted. 
if ((hName.startswith("'") and hName.endswith("'")) or hName.startswith('"') and hName.endswith('"')) and len(hName) >= 2: hName = hName[1:-1] mime = info.get_content_type() if not hName.strip(): requestedUrl = pghandle.geturl() hName = urllib.parse.urlsplit(requestedUrl).path.split("/")[-1].strip() if "/" in hName: hName = hName.split("/")[-1] return pgctnt, hName, mime def getItem(self, itemUrl): content, handle = self.getpage(itemUrl, returnMultiple=True) if not content or not handle: raise urllib.error.URLError("Failed to retreive file from page '%s'!" % itemUrl) handle_info = handle.info() if handle_info['Content-Disposition'] and 'filename=' in handle_info['Content-Disposition'].lower(): fileN = handle_info['Content-Disposition'].split("=", 1)[-1] else: fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1]) fileN = bs4.UnicodeDammit(fileN).unicode_markup mType = handle_info['Content-Type'] # If there is an encoding in the content-type (or any other info), strip it out. # We don't care about the encoding, since WebFunctions will already have handled that, # and returned a decoded unicode object. if mType and ";" in mType: mType = mType.split(";")[0].strip() # *sigh*. So minus.com is fucking up their http headers, and apparently urlencoding the # mime type, because apparently they're shit at things. # Anyways, fix that. 
if mType and '%2F' in mType: mType = mType.replace('%2F', '/') self.log.info("Retreived file of type '%s', name of '%s' with a size of %0.3f K", mType, fileN, len(content)/1000.0) return content, fileN, mType def getHead(self, url, addlHeaders=None): self.log.warning("TODO: Fixme this neds to be migrated to use the normal fetch interface, so it is WAF-aware.") for x in range(9999): try: self.log.info("Doing HTTP HEAD request for '%s'", url) pgreq = self.__buildRequest(url, None, addlHeaders, None, req_class=Handlers.HeadRequest) pghandle = self.opener.open(pgreq, timeout=30) returl = pghandle.geturl() if returl != url: self.log.info("HEAD request returned a different URL '%s'", returl) return returl except socket.timeout as e: self.log.info("Timeout, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("Timout when fetching content", url) except urllib.error.URLError as e: # Continue even in the face of cloudflare crapping it's pants if e.code == 500 and e.geturl(): return e.geturl() self.log.info("URLError, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("URLError when fetching content", e.geturl(), err_code=e.code) ###################################################################################################################################################### ###################################################################################################################################################### def __check_suc_cookie(self, components): ''' This is only called if we're on a known sucuri-"protected" site. As such, if we do *not* have a sucuri cloudproxy cookie, we can assume we need to do the normal WAF step-through. 
''' netloc = components.netloc.lower() for cookie in self.cj: if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) or (cookie.domain.lower().endswith("127.0.0.1") and ( components.path == "/sucuri_shit_3" or components.path == "/sucuri_shit_2" ))): # Allow testing if "sucuri_cloudproxy_uuid_" in cookie.name: return self.log.info("Missing cloudproxy cookie for known sucuri wrapped site. Doing a pre-emptive chromium fetch.") raise Exceptions.SucuriWrapper("WAF Shit", str(components)) def __check_cf_cookie(self, components): netloc = components.netloc.lower() # TODO: Implement me? # for cookie in self.cj: # if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) # or (cookie.domain.lower().endswith("127.0.0.1") and components.path == "/sucuri_shit_2")): # Allow testing # if "sucuri_cloudproxy_uuid_" in cookie.name: # return # print("Target cookie!") # print("K -> V: %s -> %s" % (cookie.name, cookie.value)) # print(cookie) # print(type(cookie)) # print(cookie.domain) # raise RuntimeError pass def __pre_check(self, requestedUrl): ''' Allow the pre-emptive fetching of sites with a full browser if they're known to be dick hosters. ''' components = urllib.parse.urlsplit(requestedUrl) netloc_l = components.netloc.lower() if netloc_l in Domain_Constants.SUCURI_GARBAGE_SITE_NETLOCS: self.__check_suc_cookie(components) elif netloc_l in Domain_Constants.CF_GARBAGE_SITE_NETLOCS: self.__check_cf_cookie(components) elif components.path == '/sucuri_shit_2': self.__check_suc_cookie(components) elif components.path == '/sucuri_shit_3': self.__check_suc_cookie(components) elif components.path == '/cloudflare_under_attack_shit_2': self.__check_cf_cookie(components) elif components.path == '/cloudflare_under_attack_shit_3': self.__check_cf_cookie(components) def __getpage(self, requestedUrl, **kwargs): self.__pre_check(requestedUrl) self.log.info("Fetching content at URL: %s", requestedUrl) # strip trailing and leading spaces. 
requestedUrl = requestedUrl.strip() # If we have 'soup' as a param, just pop it, and call `getSoup()`. if 'soup' in kwargs and kwargs['soup']: self.log.warning("'soup' kwarg is depreciated. Please use the `getSoup()` call instead.") kwargs.pop('soup') return self.getSoup(requestedUrl, **kwargs) # Decode the kwargs values addlHeaders = kwargs.setdefault("addlHeaders", None) returnMultiple = kwargs.setdefault("returnMultiple", False) callBack = kwargs.setdefault("callBack", None) postData = kwargs.setdefault("postData", None) retryQuantity = kwargs.setdefault("retryQuantity", None) nativeError = kwargs.setdefault("nativeError", False) binaryForm = kwargs.setdefault("binaryForm", False) # Conditionally encode the referrer if needed, because otherwise # urllib will barf on unicode referrer values. if addlHeaders and 'Referer' in addlHeaders: addlHeaders['Referer'] = iri2uri.iri2uri(addlHeaders['Referer']) retryCount = 0 err_content = None err_reason = None err_code = None while 1: pgctnt = None pghandle = None pgreq = self.__buildRequest(requestedUrl, postData, addlHeaders, binaryForm) errored = False lastErr = "" retryCount = retryCount + 1 if (retryQuantity and retryCount > retryQuantity) or (not retryQuantity and retryCount > self.errorOutCount): self.log.error("Failed to retrieve Website : %s at %s All Attempts Exhausted", pgreq.get_full_url(), time.ctime(time.time())) pgctnt = None try: self.log.critical("Critical Failure to retrieve page! 
%s at %s, attempt %s", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.critical("Error: %s", lastErr) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") break #print "execution", retryCount try: # print("Getpage!", requestedUrl, kwargs) pghandle = self.opener.open(pgreq, timeout=30) # Get Webpage # print("Gotpage") except Exceptions.GarbageSiteWrapper as err: # print("garbage site:") raise err except urllib.error.HTTPError as err: # Lotta logging self.log.warning("Error opening page: %s at %s On Attempt %s.", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.warning("Error Code: %s", err) if err.fp: err_content = err.fp.read() encoded = err.hdrs.get('Content-Encoding', None) if encoded: _, err_content = self.__decompressContent(encoded, err_content) err_reason = err.reason err_code = err.code lastErr = err try: self.log.warning("Original URL: %s", requestedUrl) errored = True except: self.log.warning("And the URL could not be printed due to an encoding error") if err.code == 404: #print "Unrecoverable - Page not found. Breaking" self.log.critical("Unrecoverable - Page not found. Breaking") break time.sleep(self.retryDelay) if err.code == 503: if err_content: self._check_waf(err_content, requestedUrl) # So I've been seeing this causing CF to bounce too. # As such, poke through those via chromium too. 
if err.code == 502: if err_content: self._check_waf(err_content, requestedUrl) except UnicodeEncodeError: self.log.critical("Unrecoverable Unicode issue retrieving page - %s", requestedUrl) for line in traceback.format_exc().split("\n"): self.log.critical("%s", line.rstrip()) self.log.critical("Parameters:") self.log.critical(" requestedUrl: '%s'", requestedUrl) self.log.critical(" postData: '%s'", postData) self.log.critical(" addlHeaders: '%s'", addlHeaders) self.log.critical(" binaryForm: '%s'", binaryForm) err_reason = "Unicode Decode Error" err_code = -1 err_content = traceback.format_exc() break except Exception as e: errored = True #traceback.print_exc() lastErr = sys.exc_info() self.log.warning("Retreival failed. Traceback:") self.log.warning(str(lastErr)) self.log.warning(traceback.format_exc()) self.log.warning("Error Retrieving Page! - Trying again - Waiting %s seconds", self.retryDelay) try: self.log.critical("Error on page - %s", requestedUrl) except: self.log.critical("And the URL could not be printed due to an encoding error") time.sleep(self.retryDelay) err_reason = "Unhandled general exception" err_code = -1 err_content = traceback.format_exc() continue if pghandle != None: self.log.info("Request for URL: %s succeeded at %s On Attempt %s. 
Recieving...", pgreq.get_full_url(), time.ctime(time.time()), retryCount) pgctnt = self.__retreiveContent(pgreq, pghandle, callBack) # if __retreiveContent did not return false, it managed to fetch valid results, so break if pgctnt != False: break if errored and pghandle != None: print(("Later attempt succeeded %s" % pgreq.get_full_url())) elif (errored or not pgctnt) and pghandle is None: if lastErr and nativeError: raise lastErr raise Exceptions.FetchFailureError("Failed to retreive page", requestedUrl, err_content=err_content, err_code=err_code, err_reason=err_reason) if returnMultiple: return pgctnt, pghandle else: return pgctnt ###################################################################################################################################################### ###################################################################################################################################################### def __decode_text_content(self, pageContent, cType): # this *should* probably be done using a parser. # However, it seems to be grossly overkill to shove the whole page (which can be quite large) through a parser just to pull out a tag that # should be right near the page beginning anyways. # As such, it's a regular expression for the moment # Regex is of bytes type, since we can't convert a string to unicode until we know the encoding the # bytes string is using, and we need the regex to get that encoding coding = re.search(b"charset=[\'\"]?([a-zA-Z0-9\-]*)[\'\"]?", pageContent, flags=re.IGNORECASE) cType = b"" charset = None try: if coding: cType = coding.group(1) codecs.lookup(cType.decode("ascii")) charset = cType.decode("ascii") except LookupError: # I'm actually not sure what I was thinking when I wrote this if statement. I don't think it'll ever trigger. if (b";" in cType) and (b"=" in cType): # the server is reporting an encoding. 
Now we use it to decode the dummy_docType, charset = cType.split(b";") charset = charset.split(b"=")[-1] if cchardet: inferred = cchardet.detect(pageContent) if inferred and inferred['confidence'] is None: # If we couldn't infer a charset, just short circuit and return the content. # It's probably binary. return pageContent elif inferred and inferred['confidence'] is not None and inferred['confidence'] > 0.8: charset = inferred['encoding'] self.log.info("Cchardet inferred encoding: %s", charset) else: self.log.warning("Missing cchardet!") if not charset: self.log.warning("Could not find encoding information on page - Using default charset. Shit may break!") charset = "utf-8" try: pageContent = str(pageContent, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pageContent = pageContent.decode('utf-8', errors='ignore') return pageContent def __buildRequest(self, pgreq, postData, addlHeaders, binaryForm, req_class = None): if req_class is None: req_class = urllib.request.Request pgreq = iri2uri.iri2uri(pgreq) try: params = {} headers = {} if postData != None: self.log.info("Making a post-request! 
Params: '%s'", postData) if isinstance(postData, str): params['data'] = postData.encode("utf-8") elif isinstance(postData, dict): for key, parameter in postData.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) params['data'] = urllib.parse.urlencode(postData).encode("utf-8") if addlHeaders != None: self.log.info("Have additional GET parameters!") for key, parameter in addlHeaders.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) headers = addlHeaders if binaryForm: self.log.info("Binary form submission!") if 'data' in params: raise Exceptions.ArgumentError("You cannot make a binary form post and a plain post request at the same time!", pgreq) params['data'] = binaryForm.make_result() headers['Content-type'] = binaryForm.get_content_type() headers['Content-length'] = len(params['data']) return req_class(pgreq, headers=headers, **params) except: self.log.critical("Invalid header or url") raise def __decodeTextContent(self, pgctnt, cType): if cType: if (";" in cType) and ("=" in cType): # the server is reporting an encoding. Now we use it to decode the content # Some wierdos put two charsets in their headers: # `text/html;Charset=UTF-8;charset=UTF-8` # Split, and take the first two entries. docType, charset = cType.split(";")[:2] charset = charset.split("=")[-1] # Only decode content marked as text (yeah, google is serving zip files # with the content-disposition charset header specifying "UTF-8") or # specifically allowed other content types I know are really text. decode = ['application/atom+xml', 'application/xml', "application/json", 'text'] if any([item in docType for item in decode]): try: pgctnt = str(pgctnt, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pgctnt = pgctnt.decode('utf-8', errors='ignore') else: # The server is not reporting an encoding in the headers. # Use content-aware mechanisms for determing the content encoding. 
if "text/html" in cType or \ 'text/javascript' in cType or \ 'text/css' in cType or \ 'application/json' in cType or \ 'application/xml' in cType or \ 'application/atom+xml' in cType or \ cType.startswith("text/"): # If this is a html/text page, we want to decode it using the local encoding pgctnt = self.__decode_text_content(pgctnt, cType) elif "text" in cType: self.log.critical("Unknown content type!") self.log.critical(cType) else: self.log.critical("No content disposition header!") self.log.critical("Cannot guess content type!") return pgctnt def __retreiveContent(self, pgreq, pghandle, callBack): try: # If we have a progress callback, call it for chunked read. # Otherwise, just read in the entire content. if callBack: pgctnt = self.__chunkRead(pghandle, 2 ** 17, reportHook=callBack) else: pgctnt = pghandle.read() if pgctnt is None: return False self.log.info("URL fully retrieved.") preDecompSize = len(pgctnt)/1000.0 encoded = pghandle.headers.get('Content-Encoding') compType, pgctnt = self.__decompressContent(encoded, pgctnt) decompSize = len(pgctnt)/1000.0 # self.log.info("Page content type = %s", type(pgctnt)) cType = pghandle.headers.get("Content-Type") if compType == 'none': self.log.info("Compression type = %s. Content Size = %0.3fK. File type: %s.", compType, decompSize, cType) else: self.log.info("Compression type = %s. Content Size compressed = %0.3fK. Decompressed = %0.3fK. File type: %s.", compType, preDecompSize, decompSize, cType) self._check_waf(pgctnt, pgreq.get_full_url()) pgctnt = self.__decodeTextContent(pgctnt, cType) return pgctnt except Exceptions.GarbageSiteWrapper as err: raise err except Exception: self.log.error("Exception!") self.log.error(str(sys.exc_info())) traceback.print_exc() self.log.error("Error Retrieving Page! - Transfer failed. Waiting %s seconds before retrying", self.retryDelay) try: self.log.critical("Critical Failure to retrieve page! 
%s at %s", pgreq.get_full_url(), time.ctime(time.time())) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") self.log.error(pghandle) time.sleep(self.retryDelay) return False # HUGE GOD-FUNCTION. # OH GOD FIXME. # postData expects a dict # addlHeaders also expects a dict def _check_waf(self, pageContent, pageUrl): assert isinstance(pageContent, bytes), "Item pageContent must be of type bytes, received %s" % (type(pageContent), ) assert isinstance(pageUrl, str), "Item pageUrl must be of type str, received %s" % (type(pageUrl), ) if b"sucuri_cloudproxy_js=" in pageContent: raise Exceptions.SucuriWrapper("WAF Shit", pageUrl) if b'This process is automatic. Your browser will redirect to your requested content shortly.' in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) if b'is currently offline. However, because the site uses Cloudflare\'s Always Online' in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) ###################################################################################################################################################### ###################################################################################################################################################### def __loadCookies(self): if self.alt_cookiejar is not None: self.alt_cookiejar.init_agent(new_headers=self.browserHeaders) self.cj = self.alt_cookiejar else: self.cj = http.cookiejar.LWPCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods if self.cj is not None: if os.path.isfile(self.COOKIEFILE): try: self.__updateCookiesFromFile() # self.log.info("Loading CookieJar") except: self.log.critical("Cookie file is corrupt/damaged?") try: os.remove(self.COOKIEFILE) except FileNotFoundError: pass if http.cookiejar is not None: # self.log.info("Installing CookieJar") self.log.debug(self.cj) cookieHandler = 
urllib.request.HTTPCookieProcessor(self.cj) args = (cookieHandler, Handlers.HTTPRedirectHandler) if self.credHandler: print("Have cred handler. Building opener using it") args += (self.credHandler, ) if self.use_socks: print("Using Socks handler") if not HAVE_SOCKS: raise RuntimeError("SOCKS Use specified, and no socks installed!") args = (SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050), ) + args self.opener = urllib.request.build_opener(*args) #self.opener.addheaders = [('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')] self.opener.addheaders = self.browserHeaders #urllib2.install_opener(self.opener) for cookie in self.cj: self.log.debug(cookie) #print cookie def _syncCookiesFromFile(self): # self.log.info("Synchronizing cookies with cookieFile.") if os.path.isfile(self.COOKIEFILE): self.cj.save("cookietemp.lwp") self.cj.load(self.COOKIEFILE) self.cj.load("cookietemp.lwp") # First, load any changed cookies so we don't overwrite them # However, we want to persist any cookies that we have that are more recent then the saved cookies, so we temporarily save # the cookies in memory to a temp-file, then load the cookiefile, and finally overwrite the loaded cookies with the ones from the # temp file def __updateCookiesFromFile(self): if os.path.exists(self.COOKIEFILE): # self.log.info("Synchronizing cookies with cookieFile.") self.cj.load(self.COOKIEFILE) # Update cookies from cookiefile def addCookie(self, inCookie): self.log.info("Updating cookie!") self.cj.set_cookie(inCookie) def addSeleniumCookie(self, cookieDict): ''' Install a cookie exported from a selenium webdriver into the active opener ''' # print cookieDict cookie = http.cookiejar.Cookie( version = 0, name = cookieDict['name'], value = cookieDict['value'], port = None, port_specified = False, domain = cookieDict['domain'], domain_specified = True, domain_initial_dot = False, path = cookieDict['path'], path_specified = False, secure = cookieDict['secure'], expires = cookieDict['expiry'] if 
'expiry' in cookieDict else None, discard = False, comment = None, comment_url = None, rest = {"httponly":"%s" % cookieDict['httponly'] if 'httponly' in cookieDict else False}, rfc2109 = False ) self.addCookie(cookie) def saveCookies(self, halting=False): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return # print("Have %d cookies before saving cookiejar" % len(self.cj)) try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() # self.log.info("Have cookies to save") for cookie in self.cj: # print(cookie) # print(cookie.expires) if isinstance(cookie.expires, int) and cookie.expires > 30000000000: # Clamp cookies that expire stupidly far in the future because people are assholes cookie.expires = 30000000000 # self.log.info("Calling save function") self.cj.save(self.COOKIEFILE) # save the cookies again # self.log.info("Cookies Saved") else: self.log.info("No cookies to save?") except Exception as e: pass # The destructor call order is too incoherent, and shit fails # during the teardown with null-references. The error printout is # not informative, so just silence it. # print("Possible error on exit (or just the destructor): '%s'." 
% e) finally: if self.cookie_lock: self.cookie_lock.release() # print("Have %d cookies after saving cookiejar" % len(self.cj)) if not halting: self._syncCookiesFromFile() # print "Have %d cookies after reloading cookiejar" % len(self.cj) def clearCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return try: self.cj.clear() self.cj.save(self.COOKIEFILE) # save the cookies again self.cj.save("cookietemp.lwp") self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() def getCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: raise RuntimeError("Could not acquire lock on cookiejar") try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() return self.cj ###################################################################################################################################################### ###################################################################################################################################################### def __del__(self): # print "WGH Destructor called!" # print("WebRequest __del__") self.saveCookies(halting=True) sup = super() if hasattr(sup, '__del__'): sup.__del__() def stepThroughCloudFlareWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains='Just a moment...') def stepThroughSucuriWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains="You are being redirected...") def stepThroughJsWaf(self, *args, **kwargs): # Shim to the underlying web browser of choice return self.stepThroughJsWaf_bare_chromium(*args, **kwargs) # Compat for old code. def stepThroughCloudFlare(self, *args, **kwargs): return self.stepThroughJsWaf(*args, **kwargs)
fake-name/WebRequest
WebRequest/WebRequestClass.py
WebGetRobust.addSeleniumCookie
python
def addSeleniumCookie(self, cookieDict): ''' Install a cookie exported from a selenium webdriver into the active opener ''' # print cookieDict cookie = http.cookiejar.Cookie( version = 0, name = cookieDict['name'], value = cookieDict['value'], port = None, port_specified = False, domain = cookieDict['domain'], domain_specified = True, domain_initial_dot = False, path = cookieDict['path'], path_specified = False, secure = cookieDict['secure'], expires = cookieDict['expiry'] if 'expiry' in cookieDict else None, discard = False, comment = None, comment_url = None, rest = {"httponly":"%s" % cookieDict['httponly'] if 'httponly' in cookieDict else False}, rfc2109 = False ) self.addCookie(cookie)
Install a cookie exported from a selenium webdriver into the active opener
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/WebRequestClass.py#L959-L985
[ "def addCookie(self, inCookie):\n\tself.log.info(\"Updating cookie!\")\n\tself.cj.set_cookie(inCookie)\n" ]
class WebGetRobust( ChromiumMixin.WebGetCrMixin, SeleniumChromiumMixin.WebGetSeleniumChromiumMixin, ): COOKIEFILE = 'cookies.lwp' # the path and filename to save your cookies in cj = None cookielib = None opener = None errorOutCount = 1 # retryDelay = 0.1 retryDelay = 0.01 data = None # creds is a list of 3-tuples that gets inserted into the password manager. # it is structured [(top_level_url1, username1, password1), (top_level_url2, username2, password2)] def __init__(self, creds = None, logPath = "Main.WebRequest", cookie_lock = None, cloudflare = True, auto_waf = True, use_socks = False, alt_cookiejar = None, custom_ua = None, ): super().__init__() self.rules = {} self.rules['auto_waf'] = cloudflare or auto_waf if cookie_lock: self.cookie_lock = cookie_lock elif alt_cookiejar: self.log.info("External cookie-jar specified. Not forcing cookiejar serialization.") self.cookie_lock = None else: self.cookie_lock = COOKIEWRITELOCK self.use_socks = use_socks # Override the global default socket timeout, so hung connections will actually time out properly. socket.setdefaulttimeout(5) self.log = logging.getLogger(logPath) # print("Webget init! 
Logpath = ", logPath) if custom_ua: self.log.info("User agent overridden!") self.browserHeaders = custom_ua else: # Due to general internet people douchebaggyness, I've basically said to hell with it and decided to spoof a whole assortment of browsers # It should keep people from blocking this scraper *too* easily self.browserHeaders = UA_Constants.getUserAgent() self.data = urllib.parse.urlencode(self.browserHeaders) if creds: print("Have credentials, installing password manager into urllib handler.") passManager = urllib.request.HTTPPasswordMgrWithDefaultRealm() for url, username, password in creds: passManager.add_password(None, url, username, password) self.credHandler = Handlers.PreemptiveBasicAuthHandler(passManager) else: self.credHandler = None self.alt_cookiejar = alt_cookiejar self.__loadCookies() def getpage(self, requestedUrl, *args, **kwargs): try: return self.__getpage(requestedUrl, *args, **kwargs) except Exceptions.CloudFlareWrapper: if self.rules['auto_waf']: self.log.warning("Cloudflare failure! Doing automatic step-through.") if not self.stepThroughCloudFlareWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through cloudflare!", requestedUrl) # Cloudflare cookie set, retrieve again return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Cloudflare without step-through setting!") raise except Exceptions.SucuriWrapper: # print("Sucuri!") if self.rules['auto_waf']: self.log.warning("Sucuri failure! 
Doing automatic step-through.") if not self.stepThroughSucuriWaf(requestedUrl): raise Exceptions.FetchFailureError("Could not step through Sucuri WAF bullshit!", requestedUrl) return self.__getpage(requestedUrl, *args, **kwargs) else: self.log.info("Sucuri without step-through setting!") raise def chunkReport(self, bytesSoFar, totalSize): if totalSize: percent = float(bytesSoFar) / totalSize percent = round(percent * 100, 2) self.log.info("Downloaded %d of %d bytes (%0.2f%%)" % (bytesSoFar, totalSize, percent)) else: self.log.info("Downloaded %d bytes" % (bytesSoFar)) def __chunkRead(self, response, chunkSize=2 ** 18, reportHook=None): contentLengthHeader = response.info().getheader('Content-Length') if contentLengthHeader: totalSize = contentLengthHeader.strip() totalSize = int(totalSize) else: totalSize = None bytesSoFar = 0 pgContent = "" while 1: chunk = response.read(chunkSize) pgContent += chunk bytesSoFar += len(chunk) if not chunk: break if reportHook: reportHook(bytesSoFar, chunkSize, totalSize) return pgContent def getSoup(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!", requestedUrl) page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): raise Exceptions.ContentTypeError("Received content not decoded! 
Cannot parse!", requestedUrl) soup = utility.as_soup(page) return soup def getJson(self, requestedUrl, *args, **kwargs): if 'returnMultiple' in kwargs and kwargs['returnMultiple']: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple' being true", requestedUrl) attempts = 0 while 1: try: page = self.getpage(requestedUrl, *args, **kwargs) if isinstance(page, bytes): page = page.decode(utility.determine_json_encoding(page)) # raise ValueError("Received content not decoded! Cannot parse!") page = page.strip() ret = json.loads(page) return ret except ValueError: if attempts < 1: attempts += 1 self.log.error("JSON Parsing issue retrieving content from page!") for line in traceback.format_exc().split("\n"): self.log.error("%s", line.rstrip()) self.log.error("Retrying!") # Scramble our current UA self.browserHeaders = UA_Constants.getUserAgent() if self.alt_cookiejar: self.cj.init_agent(new_headers=self.browserHeaders) time.sleep(self.retryDelay) else: self.log.error("JSON Parsing issue, and retries exhausted!") # self.log.error("Page content:") # self.log.error(page) # with open("Error-ctnt-{}.json".format(time.time()), "w") as tmp_err_fp: # tmp_err_fp.write(page) raise def getSoupNoRedirects(self, *args, **kwargs): if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getSoup cannot be called with 'returnMultiple'") if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getSoup contradicts the 'soup' directive!") kwargs['returnMultiple'] = True tgt_url = kwargs.get('requestedUrl', None) if not tgt_url: tgt_url = args[0] page, handle = self.getpage(*args, **kwargs) redirurl = handle.geturl() if redirurl != tgt_url: self.log.error("Requested %s, redirected to %s. 
Raising error", tgt_url, redirurl) raise Exceptions.RedirectedError("Requested %s, redirected to %s" % ( tgt_url, redirurl)) soup = as_soup(page) return soup def getFileAndName(self, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL and the filename for the target content as a 2-tuple (pgctnt, hName) for the content at the target URL. The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' pgctnt, hName, mime = self.getFileNameMime(*args, **kwargs) return pgctnt, hName def getFileNameMime(self, requestedUrl, *args, **kwargs): ''' Give a requested page (note: the arguments for this call are forwarded to getpage()), return the content at the target URL, the filename for the target content, and the mimetype for the content at the target URL, as a 3-tuple (pgctnt, hName, mime). The filename specified in the content-disposition header is used, if present. Otherwise, the last section of the url path segment is treated as the filename. ''' if 'returnMultiple' in kwargs: raise Exceptions.ArgumentError("getFileAndName cannot be called with 'returnMultiple'", requestedUrl) if 'soup' in kwargs and kwargs['soup']: raise Exceptions.ArgumentError("getFileAndName contradicts the 'soup' directive!", requestedUrl) kwargs["returnMultiple"] = True pgctnt, pghandle = self.getpage(requestedUrl, *args, **kwargs) info = pghandle.info() if not 'Content-Disposition' in info: hName = '' elif not 'filename=' in info['Content-Disposition']: hName = '' else: hName = info['Content-Disposition'].split('filename=')[1] # Unquote filename if it's quoted. 
if ((hName.startswith("'") and hName.endswith("'")) or hName.startswith('"') and hName.endswith('"')) and len(hName) >= 2: hName = hName[1:-1] mime = info.get_content_type() if not hName.strip(): requestedUrl = pghandle.geturl() hName = urllib.parse.urlsplit(requestedUrl).path.split("/")[-1].strip() if "/" in hName: hName = hName.split("/")[-1] return pgctnt, hName, mime def getItem(self, itemUrl): content, handle = self.getpage(itemUrl, returnMultiple=True) if not content or not handle: raise urllib.error.URLError("Failed to retreive file from page '%s'!" % itemUrl) handle_info = handle.info() if handle_info['Content-Disposition'] and 'filename=' in handle_info['Content-Disposition'].lower(): fileN = handle_info['Content-Disposition'].split("=", 1)[-1] else: fileN = urllib.parse.unquote(urllib.parse.urlparse(handle.geturl())[2].split("/")[-1]) fileN = bs4.UnicodeDammit(fileN).unicode_markup mType = handle_info['Content-Type'] # If there is an encoding in the content-type (or any other info), strip it out. # We don't care about the encoding, since WebFunctions will already have handled that, # and returned a decoded unicode object. if mType and ";" in mType: mType = mType.split(";")[0].strip() # *sigh*. So minus.com is fucking up their http headers, and apparently urlencoding the # mime type, because apparently they're shit at things. # Anyways, fix that. 
if mType and '%2F' in mType: mType = mType.replace('%2F', '/') self.log.info("Retreived file of type '%s', name of '%s' with a size of %0.3f K", mType, fileN, len(content)/1000.0) return content, fileN, mType def getHead(self, url, addlHeaders=None): self.log.warning("TODO: Fixme this neds to be migrated to use the normal fetch interface, so it is WAF-aware.") for x in range(9999): try: self.log.info("Doing HTTP HEAD request for '%s'", url) pgreq = self.__buildRequest(url, None, addlHeaders, None, req_class=Handlers.HeadRequest) pghandle = self.opener.open(pgreq, timeout=30) returl = pghandle.geturl() if returl != url: self.log.info("HEAD request returned a different URL '%s'", returl) return returl except socket.timeout as e: self.log.info("Timeout, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("Timout when fetching content", url) except urllib.error.URLError as e: # Continue even in the face of cloudflare crapping it's pants if e.code == 500 and e.geturl(): return e.geturl() self.log.info("URLError, retrying....") if x >= 3: self.log.error("Failure fetching: %s", url) raise Exceptions.FetchFailureError("URLError when fetching content", e.geturl(), err_code=e.code) ###################################################################################################################################################### ###################################################################################################################################################### def __check_suc_cookie(self, components): ''' This is only called if we're on a known sucuri-"protected" site. As such, if we do *not* have a sucuri cloudproxy cookie, we can assume we need to do the normal WAF step-through. 
''' netloc = components.netloc.lower() for cookie in self.cj: if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) or (cookie.domain.lower().endswith("127.0.0.1") and ( components.path == "/sucuri_shit_3" or components.path == "/sucuri_shit_2" ))): # Allow testing if "sucuri_cloudproxy_uuid_" in cookie.name: return self.log.info("Missing cloudproxy cookie for known sucuri wrapped site. Doing a pre-emptive chromium fetch.") raise Exceptions.SucuriWrapper("WAF Shit", str(components)) def __check_cf_cookie(self, components): netloc = components.netloc.lower() # TODO: Implement me? # for cookie in self.cj: # if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) # or (cookie.domain.lower().endswith("127.0.0.1") and components.path == "/sucuri_shit_2")): # Allow testing # if "sucuri_cloudproxy_uuid_" in cookie.name: # return # print("Target cookie!") # print("K -> V: %s -> %s" % (cookie.name, cookie.value)) # print(cookie) # print(type(cookie)) # print(cookie.domain) # raise RuntimeError pass def __pre_check(self, requestedUrl): ''' Allow the pre-emptive fetching of sites with a full browser if they're known to be dick hosters. ''' components = urllib.parse.urlsplit(requestedUrl) netloc_l = components.netloc.lower() if netloc_l in Domain_Constants.SUCURI_GARBAGE_SITE_NETLOCS: self.__check_suc_cookie(components) elif netloc_l in Domain_Constants.CF_GARBAGE_SITE_NETLOCS: self.__check_cf_cookie(components) elif components.path == '/sucuri_shit_2': self.__check_suc_cookie(components) elif components.path == '/sucuri_shit_3': self.__check_suc_cookie(components) elif components.path == '/cloudflare_under_attack_shit_2': self.__check_cf_cookie(components) elif components.path == '/cloudflare_under_attack_shit_3': self.__check_cf_cookie(components) def __getpage(self, requestedUrl, **kwargs): self.__pre_check(requestedUrl) self.log.info("Fetching content at URL: %s", requestedUrl) # strip trailing and leading spaces. 
requestedUrl = requestedUrl.strip() # If we have 'soup' as a param, just pop it, and call `getSoup()`. if 'soup' in kwargs and kwargs['soup']: self.log.warning("'soup' kwarg is depreciated. Please use the `getSoup()` call instead.") kwargs.pop('soup') return self.getSoup(requestedUrl, **kwargs) # Decode the kwargs values addlHeaders = kwargs.setdefault("addlHeaders", None) returnMultiple = kwargs.setdefault("returnMultiple", False) callBack = kwargs.setdefault("callBack", None) postData = kwargs.setdefault("postData", None) retryQuantity = kwargs.setdefault("retryQuantity", None) nativeError = kwargs.setdefault("nativeError", False) binaryForm = kwargs.setdefault("binaryForm", False) # Conditionally encode the referrer if needed, because otherwise # urllib will barf on unicode referrer values. if addlHeaders and 'Referer' in addlHeaders: addlHeaders['Referer'] = iri2uri.iri2uri(addlHeaders['Referer']) retryCount = 0 err_content = None err_reason = None err_code = None while 1: pgctnt = None pghandle = None pgreq = self.__buildRequest(requestedUrl, postData, addlHeaders, binaryForm) errored = False lastErr = "" retryCount = retryCount + 1 if (retryQuantity and retryCount > retryQuantity) or (not retryQuantity and retryCount > self.errorOutCount): self.log.error("Failed to retrieve Website : %s at %s All Attempts Exhausted", pgreq.get_full_url(), time.ctime(time.time())) pgctnt = None try: self.log.critical("Critical Failure to retrieve page! 
%s at %s, attempt %s", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.critical("Error: %s", lastErr) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") break #print "execution", retryCount try: # print("Getpage!", requestedUrl, kwargs) pghandle = self.opener.open(pgreq, timeout=30) # Get Webpage # print("Gotpage") except Exceptions.GarbageSiteWrapper as err: # print("garbage site:") raise err except urllib.error.HTTPError as err: # Lotta logging self.log.warning("Error opening page: %s at %s On Attempt %s.", pgreq.get_full_url(), time.ctime(time.time()), retryCount) self.log.warning("Error Code: %s", err) if err.fp: err_content = err.fp.read() encoded = err.hdrs.get('Content-Encoding', None) if encoded: _, err_content = self.__decompressContent(encoded, err_content) err_reason = err.reason err_code = err.code lastErr = err try: self.log.warning("Original URL: %s", requestedUrl) errored = True except: self.log.warning("And the URL could not be printed due to an encoding error") if err.code == 404: #print "Unrecoverable - Page not found. Breaking" self.log.critical("Unrecoverable - Page not found. Breaking") break time.sleep(self.retryDelay) if err.code == 503: if err_content: self._check_waf(err_content, requestedUrl) # So I've been seeing this causing CF to bounce too. # As such, poke through those via chromium too. 
if err.code == 502: if err_content: self._check_waf(err_content, requestedUrl) except UnicodeEncodeError: self.log.critical("Unrecoverable Unicode issue retrieving page - %s", requestedUrl) for line in traceback.format_exc().split("\n"): self.log.critical("%s", line.rstrip()) self.log.critical("Parameters:") self.log.critical(" requestedUrl: '%s'", requestedUrl) self.log.critical(" postData: '%s'", postData) self.log.critical(" addlHeaders: '%s'", addlHeaders) self.log.critical(" binaryForm: '%s'", binaryForm) err_reason = "Unicode Decode Error" err_code = -1 err_content = traceback.format_exc() break except Exception as e: errored = True #traceback.print_exc() lastErr = sys.exc_info() self.log.warning("Retreival failed. Traceback:") self.log.warning(str(lastErr)) self.log.warning(traceback.format_exc()) self.log.warning("Error Retrieving Page! - Trying again - Waiting %s seconds", self.retryDelay) try: self.log.critical("Error on page - %s", requestedUrl) except: self.log.critical("And the URL could not be printed due to an encoding error") time.sleep(self.retryDelay) err_reason = "Unhandled general exception" err_code = -1 err_content = traceback.format_exc() continue if pghandle != None: self.log.info("Request for URL: %s succeeded at %s On Attempt %s. 
Recieving...", pgreq.get_full_url(), time.ctime(time.time()), retryCount) pgctnt = self.__retreiveContent(pgreq, pghandle, callBack) # if __retreiveContent did not return false, it managed to fetch valid results, so break if pgctnt != False: break if errored and pghandle != None: print(("Later attempt succeeded %s" % pgreq.get_full_url())) elif (errored or not pgctnt) and pghandle is None: if lastErr and nativeError: raise lastErr raise Exceptions.FetchFailureError("Failed to retreive page", requestedUrl, err_content=err_content, err_code=err_code, err_reason=err_reason) if returnMultiple: return pgctnt, pghandle else: return pgctnt ###################################################################################################################################################### ###################################################################################################################################################### def __decode_text_content(self, pageContent, cType): # this *should* probably be done using a parser. # However, it seems to be grossly overkill to shove the whole page (which can be quite large) through a parser just to pull out a tag that # should be right near the page beginning anyways. # As such, it's a regular expression for the moment # Regex is of bytes type, since we can't convert a string to unicode until we know the encoding the # bytes string is using, and we need the regex to get that encoding coding = re.search(b"charset=[\'\"]?([a-zA-Z0-9\-]*)[\'\"]?", pageContent, flags=re.IGNORECASE) cType = b"" charset = None try: if coding: cType = coding.group(1) codecs.lookup(cType.decode("ascii")) charset = cType.decode("ascii") except LookupError: # I'm actually not sure what I was thinking when I wrote this if statement. I don't think it'll ever trigger. if (b";" in cType) and (b"=" in cType): # the server is reporting an encoding. 
Now we use it to decode the dummy_docType, charset = cType.split(b";") charset = charset.split(b"=")[-1] if cchardet: inferred = cchardet.detect(pageContent) if inferred and inferred['confidence'] is None: # If we couldn't infer a charset, just short circuit and return the content. # It's probably binary. return pageContent elif inferred and inferred['confidence'] is not None and inferred['confidence'] > 0.8: charset = inferred['encoding'] self.log.info("Cchardet inferred encoding: %s", charset) else: self.log.warning("Missing cchardet!") if not charset: self.log.warning("Could not find encoding information on page - Using default charset. Shit may break!") charset = "utf-8" try: pageContent = str(pageContent, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pageContent = pageContent.decode('utf-8', errors='ignore') return pageContent def __buildRequest(self, pgreq, postData, addlHeaders, binaryForm, req_class = None): if req_class is None: req_class = urllib.request.Request pgreq = iri2uri.iri2uri(pgreq) try: params = {} headers = {} if postData != None: self.log.info("Making a post-request! 
Params: '%s'", postData) if isinstance(postData, str): params['data'] = postData.encode("utf-8") elif isinstance(postData, dict): for key, parameter in postData.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) params['data'] = urllib.parse.urlencode(postData).encode("utf-8") if addlHeaders != None: self.log.info("Have additional GET parameters!") for key, parameter in addlHeaders.items(): self.log.info(" Item: '%s' -> '%s'", key, parameter) headers = addlHeaders if binaryForm: self.log.info("Binary form submission!") if 'data' in params: raise Exceptions.ArgumentError("You cannot make a binary form post and a plain post request at the same time!", pgreq) params['data'] = binaryForm.make_result() headers['Content-type'] = binaryForm.get_content_type() headers['Content-length'] = len(params['data']) return req_class(pgreq, headers=headers, **params) except: self.log.critical("Invalid header or url") raise def __decompressContent(self, coding, pgctnt): """ This is really obnoxious """ #preLen = len(pgctnt) if coding == 'deflate': compType = "deflate" bits_opts = [ -zlib.MAX_WBITS, # deflate zlib.MAX_WBITS, # zlib zlib.MAX_WBITS | 16, # gzip zlib.MAX_WBITS | 32, # "automatic header detection" 0, # Try to guess from header # Try all the raw window options. -8, -9, -10, -11, -12, -13, -14, -15, # Stream with zlib headers 8, 9, 10, 11, 12, 13, 14, 15, # With gzip header+trailer 8+16, 9+16, 10+16, 11+16, 12+16, 13+16, 14+16, 15+16, # Automatic detection 8+32, 9+32, 10+32, 11+32, 12+32, 13+32, 14+32, 15+32, ] err = None for wbits_val in bits_opts: try: pgctnt = zlib.decompress(pgctnt, wbits_val) return compType, pgctnt except zlib.error as e: err = e # We can't get here without err having thrown. raise err elif coding == 'gzip': compType = "gzip" buf = io.BytesIO(pgctnt) f = gzip.GzipFile(fileobj=buf) pgctnt = f.read() elif coding == "sdch": raise Exceptions.ContentTypeError("Wait, someone other then google actually supports SDCH compression (%s)?" 
% pgreq) else: compType = "none" return compType, pgctnt def __decodeTextContent(self, pgctnt, cType): if cType: if (";" in cType) and ("=" in cType): # the server is reporting an encoding. Now we use it to decode the content # Some wierdos put two charsets in their headers: # `text/html;Charset=UTF-8;charset=UTF-8` # Split, and take the first two entries. docType, charset = cType.split(";")[:2] charset = charset.split("=")[-1] # Only decode content marked as text (yeah, google is serving zip files # with the content-disposition charset header specifying "UTF-8") or # specifically allowed other content types I know are really text. decode = ['application/atom+xml', 'application/xml', "application/json", 'text'] if any([item in docType for item in decode]): try: pgctnt = str(pgctnt, charset) except UnicodeDecodeError: self.log.error("Encoding Error! Stripping invalid chars.") pgctnt = pgctnt.decode('utf-8', errors='ignore') else: # The server is not reporting an encoding in the headers. # Use content-aware mechanisms for determing the content encoding. if "text/html" in cType or \ 'text/javascript' in cType or \ 'text/css' in cType or \ 'application/json' in cType or \ 'application/xml' in cType or \ 'application/atom+xml' in cType or \ cType.startswith("text/"): # If this is a html/text page, we want to decode it using the local encoding pgctnt = self.__decode_text_content(pgctnt, cType) elif "text" in cType: self.log.critical("Unknown content type!") self.log.critical(cType) else: self.log.critical("No content disposition header!") self.log.critical("Cannot guess content type!") return pgctnt def __retreiveContent(self, pgreq, pghandle, callBack): try: # If we have a progress callback, call it for chunked read. # Otherwise, just read in the entire content. 
if callBack: pgctnt = self.__chunkRead(pghandle, 2 ** 17, reportHook=callBack) else: pgctnt = pghandle.read() if pgctnt is None: return False self.log.info("URL fully retrieved.") preDecompSize = len(pgctnt)/1000.0 encoded = pghandle.headers.get('Content-Encoding') compType, pgctnt = self.__decompressContent(encoded, pgctnt) decompSize = len(pgctnt)/1000.0 # self.log.info("Page content type = %s", type(pgctnt)) cType = pghandle.headers.get("Content-Type") if compType == 'none': self.log.info("Compression type = %s. Content Size = %0.3fK. File type: %s.", compType, decompSize, cType) else: self.log.info("Compression type = %s. Content Size compressed = %0.3fK. Decompressed = %0.3fK. File type: %s.", compType, preDecompSize, decompSize, cType) self._check_waf(pgctnt, pgreq.get_full_url()) pgctnt = self.__decodeTextContent(pgctnt, cType) return pgctnt except Exceptions.GarbageSiteWrapper as err: raise err except Exception: self.log.error("Exception!") self.log.error(str(sys.exc_info())) traceback.print_exc() self.log.error("Error Retrieving Page! - Transfer failed. Waiting %s seconds before retrying", self.retryDelay) try: self.log.critical("Critical Failure to retrieve page! %s at %s", pgreq.get_full_url(), time.ctime(time.time())) self.log.critical("Exiting") except: self.log.critical("And the URL could not be printed due to an encoding error") self.log.error(pghandle) time.sleep(self.retryDelay) return False # HUGE GOD-FUNCTION. # OH GOD FIXME. # postData expects a dict # addlHeaders also expects a dict def _check_waf(self, pageContent, pageUrl): assert isinstance(pageContent, bytes), "Item pageContent must be of type bytes, received %s" % (type(pageContent), ) assert isinstance(pageUrl, str), "Item pageUrl must be of type str, received %s" % (type(pageUrl), ) if b"sucuri_cloudproxy_js=" in pageContent: raise Exceptions.SucuriWrapper("WAF Shit", pageUrl) if b'This process is automatic. Your browser will redirect to your requested content shortly.' 
in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) if b'is currently offline. However, because the site uses Cloudflare\'s Always Online' in pageContent: raise Exceptions.CloudFlareWrapper("WAF Shit", pageUrl) ###################################################################################################################################################### ###################################################################################################################################################### def __loadCookies(self): if self.alt_cookiejar is not None: self.alt_cookiejar.init_agent(new_headers=self.browserHeaders) self.cj = self.alt_cookiejar else: self.cj = http.cookiejar.LWPCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods if self.cj is not None: if os.path.isfile(self.COOKIEFILE): try: self.__updateCookiesFromFile() # self.log.info("Loading CookieJar") except: self.log.critical("Cookie file is corrupt/damaged?") try: os.remove(self.COOKIEFILE) except FileNotFoundError: pass if http.cookiejar is not None: # self.log.info("Installing CookieJar") self.log.debug(self.cj) cookieHandler = urllib.request.HTTPCookieProcessor(self.cj) args = (cookieHandler, Handlers.HTTPRedirectHandler) if self.credHandler: print("Have cred handler. 
Building opener using it") args += (self.credHandler, ) if self.use_socks: print("Using Socks handler") if not HAVE_SOCKS: raise RuntimeError("SOCKS Use specified, and no socks installed!") args = (SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050), ) + args self.opener = urllib.request.build_opener(*args) #self.opener.addheaders = [('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')] self.opener.addheaders = self.browserHeaders #urllib2.install_opener(self.opener) for cookie in self.cj: self.log.debug(cookie) #print cookie def _syncCookiesFromFile(self): # self.log.info("Synchronizing cookies with cookieFile.") if os.path.isfile(self.COOKIEFILE): self.cj.save("cookietemp.lwp") self.cj.load(self.COOKIEFILE) self.cj.load("cookietemp.lwp") # First, load any changed cookies so we don't overwrite them # However, we want to persist any cookies that we have that are more recent then the saved cookies, so we temporarily save # the cookies in memory to a temp-file, then load the cookiefile, and finally overwrite the loaded cookies with the ones from the # temp file def __updateCookiesFromFile(self): if os.path.exists(self.COOKIEFILE): # self.log.info("Synchronizing cookies with cookieFile.") self.cj.load(self.COOKIEFILE) # Update cookies from cookiefile def addCookie(self, inCookie): self.log.info("Updating cookie!") self.cj.set_cookie(inCookie) def saveCookies(self, halting=False): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return # print("Have %d cookies before saving cookiejar" % len(self.cj)) try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() # self.log.info("Have cookies to save") for cookie in self.cj: # print(cookie) # print(cookie.expires) if isinstance(cookie.expires, int) and cookie.expires > 30000000000: # Clamp cookies that expire stupidly far in the future because people are assholes 
cookie.expires = 30000000000 # self.log.info("Calling save function") self.cj.save(self.COOKIEFILE) # save the cookies again # self.log.info("Cookies Saved") else: self.log.info("No cookies to save?") except Exception as e: pass # The destructor call order is too incoherent, and shit fails # during the teardown with null-references. The error printout is # not informative, so just silence it. # print("Possible error on exit (or just the destructor): '%s'." % e) finally: if self.cookie_lock: self.cookie_lock.release() # print("Have %d cookies after saving cookiejar" % len(self.cj)) if not halting: self._syncCookiesFromFile() # print "Have %d cookies after reloading cookiejar" % len(self.cj) def clearCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: self.log.error("Failed to acquire cookie-lock!") return try: self.cj.clear() self.cj.save(self.COOKIEFILE) # save the cookies again self.cj.save("cookietemp.lwp") self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() def getCookies(self): if self.cookie_lock: locked = self.cookie_lock.acquire(timeout=5) if not locked: raise RuntimeError("Could not acquire lock on cookiejar") try: # self.log.info("Trying to save cookies!") if self.cj is not None: # If cookies were used self._syncCookiesFromFile() finally: if self.cookie_lock: self.cookie_lock.release() return self.cj ###################################################################################################################################################### ###################################################################################################################################################### def __del__(self): # print "WGH Destructor called!" 
# print("WebRequest __del__") self.saveCookies(halting=True) sup = super() if hasattr(sup, '__del__'): sup.__del__() def stepThroughCloudFlareWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains='Just a moment...') def stepThroughSucuriWaf(self, url): return self.stepThroughJsWaf(url, titleNotContains="You are being redirected...") def stepThroughJsWaf(self, *args, **kwargs): # Shim to the underlying web browser of choice return self.stepThroughJsWaf_bare_chromium(*args, **kwargs) # Compat for old code. def stepThroughCloudFlare(self, *args, **kwargs): return self.stepThroughJsWaf(*args, **kwargs)
fake-name/WebRequest
WebRequest/Captcha/TwoCaptchaSolver.py
TwoCaptchaSolver._getresult
python
def _getresult(self, captcha_id, timeout=None): timeout = timeout if not timeout: timeout = self.waittime poll_interval = 8 start = time.time() for x in range(int(timeout / poll_interval)+1): self.log.info("Sleeping %s seconds (poll %s of %s, elapsed %0.2fs of %0.2f).", poll_interval, x, int(timeout / poll_interval)+1, (time.time() - start), timeout, ) time.sleep(poll_interval) try: resp = self.doGet('result', { 'action' : 'get', 'key' : self.api_key, 'json' : True, 'id' : captcha_id, } ) self.log.info("Call returned success!") return resp except exc.CaptchaNotReady: self.log.info("Captcha not ready. Waiting longer.") raise exc.CaptchaSolverFailure("Solving captcha timed out after %s seconds!" % (time.time() - start, ))
Poll until a captcha `captcha_id` has been solved, or the poll times out. The timeout is the default 60 seconds, unless overridden by `timeout` (which is in seconds). Polling is done every 8 seconds.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/TwoCaptchaSolver.py#L140-L182
[ "def doGet(self, mode, query_dict):\n\tquery_dict['json'] = True\n\n\turl = self.getUrlFor(mode, query_dict)\n\n\tres = self.wg.getJson(url)\n\n\treturn self._process_response(res)\n" ]
class TwoCaptchaSolver(object): def __init__(self, api_key, wg): self.log = logging.getLogger("Main.WebRequest.Captcha.2Captcha") self.api_key = api_key self.wg = wg # Default timeout is 5 minutes. self.waittime = 60 * 5 def getUrlFor(self, mode, query_dict): # query params for input mode # Normal captcha solving # key String Yes your API key # language Integer Default: 0 No 0 - not specified 1 - Cyrillic (Russian) captcha 2 - Latin captcha # lang String No Language code. See the list of supported languages. # textcaptcha String # Max 140 characters # Endcoding: UTF-8 No Text will be shown to worker to help him to solve the captcha correctly. # For example: type red symbols only. # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. If enabled in.php will include Access-Control-Allow-Origin:* header in the response. # Used for cross-domain AJAX requests in web applications. # pingback String No URL for pingback (callback) response that will be sent when captcha is solved. # URL should be registered on the server. More info here. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # soft_id Integer No ID of software developer. Developers who integrated their software with 2captcha get reward: 10% of spendings of their software users. # # For solving recaptcha # key String Yes your API key # method String Yes userrecaptcha - defines that you're sending a ReCaptcha V2 with new method # googlekey String Yes Value of k or data-sitekey parameter you found on page # pageurl String Yes Full URL of the page where you see the ReCaptcha # invisible Integer Default: 0 No 1 - means that ReCaptcha is invisible. 0 - normal ReCaptcha. # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. # If enabled in.php will include Access-Control-Allow-Origin:* header in the response. # Used for cross-domain AJAX requests in web applications. Also supported by res.php. 
# pingback String No URL for pingback (callback) response that will be sent when captcha is solved. # URL should be registered on the server. More info here. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # soft_id Integer No ID of software developer. Developers who integrated their software with 2captcha get reward: 10% of spendings of their software users. # proxy String No Format: login:password@123.123.123.123:3128 # You can find more info about proxies here. # proxytype String No Type of your proxy: HTTP, HTTPS, SOCKS4, SOCKS5. # # Query params for result mode # Normal captcha results # key String Yes your API key # action String Yes get - get the asnwer for your captcha # id Integer Yes ID of captcha returned by in.php. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. If enabled res.php will include Access-Control-Allow-Origin:* header # in the response. Used for cross-domain AJAX requests in web applications. # Recaptcha results # key String Yes your API key # action String Yes get - get the asnwer for your captcha # id Integer Yes ID of captcha returned by in.php. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON if mode == 'input': path = '/in.php' elif mode == 'result': path = '/res.php' else: raise RuntimeError("Unknown mode (%s). Valid modes are 'input' and 'result'." 
% mode) query = urllib.parse.urlencode(query_dict) new_url = urllib.parse.urlunsplit( ( 'https', # scheme '2captcha.com', # netloc path, # path query, # query '' # fragment ) ) return new_url def _process_response(self, resp_json): if 'status' and 'request' in resp_json: if resp_json['status'] == 1: return resp_json['request'] elif resp_json['request'] == 'CAPCHA_NOT_READY': raise exc.CaptchaNotReady("Captcha not ready yet.") else: self.log.error("[TwoCaptchaSolver] Error response: %s", resp_json['request']) raise exc.CaptchaSolverFailure("API call returned failure response: %s" % resp_json['request']) raise exc.CaptchaSolverFailure("Failure doing get request") def doGet(self, mode, query_dict): query_dict['json'] = True url = self.getUrlFor(mode, query_dict) res = self.wg.getJson(url) return self._process_response(res) def getbalance(self): """ Get you account balance. Returns value: balance (float), or raises an exception. """ balance = self.doGet('result', { 'action' : 'getbalance', 'key' : self.api_key, 'json' : True, }) return balance def _submit(self, pathfile, filedata, filename): ''' Submit either a file from disk, or a in-memory file to the solver service, and return the request ID associated with the new captcha task. 
''' if pathfile and os.path.exists(pathfile): files = {'file': open(pathfile, 'rb')} elif filedata: assert filename files = {'file' : (filename, io.BytesIO(filedata))} else: raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!") payload = { 'key' : self.api_key, 'method' : 'post', 'json' : True, } self.log.info("Uploading to 2Captcha.com.") url = self.getUrlFor('input', {}) request = requests.post(url, files=files, data=payload) if not request.ok: raise exc.CaptchaSolverFailure("Posting captcha to solve failed!") resp_json = json.loads(request.text) return self._process_response(resp_json) def solve_simple_captcha(self, pathfile=None, filedata=None, filename=None): """ Upload a image (from disk or a bytearray), and then block until the captcha has been solved. Return value is the captcha result. either pathfile OR filedata AND filename should be specified. Failure will result in a subclass of WebRequest.CaptchaSolverFailure being thrown. """ captcha_id = self._submit(pathfile, filedata, filename) return self._getresult(captcha_id=captcha_id) def solve_recaptcha(self, google_key, page_url, timeout = 15 * 60): ''' Solve a recaptcha on page `page_url` with the input value `google_key`. Timeout is `timeout` seconds, defaulting to 60 seconds. Return value is either the `g-recaptcha-response` value, or an exceptionj is raised (generally `CaptchaSolverFailure`) ''' proxy = SocksProxy.ProxyLauncher([TWOCAPTCHA_IP]) try: captcha_id = self.doGet('input', { 'key' : self.api_key, 'method' : "userrecaptcha", 'googlekey' : google_key, 'pageurl' : page_url, 'proxy' : proxy.get_wan_address(), 'proxytype' : "SOCKS5", 'json' : True, } ) # Allow 15 minutes for the solution # I've been seeing times up to 160+ seconds in testing. return self._getresult(captcha_id=captcha_id, timeout=timeout) finally: proxy.stop()
fake-name/WebRequest
WebRequest/Captcha/TwoCaptchaSolver.py
TwoCaptchaSolver._submit
python
def _submit(self, pathfile, filedata, filename): ''' Submit either a file from disk, or a in-memory file to the solver service, and return the request ID associated with the new captcha task. ''' if pathfile and os.path.exists(pathfile): files = {'file': open(pathfile, 'rb')} elif filedata: assert filename files = {'file' : (filename, io.BytesIO(filedata))} else: raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!") payload = { 'key' : self.api_key, 'method' : 'post', 'json' : True, } self.log.info("Uploading to 2Captcha.com.") url = self.getUrlFor('input', {}) request = requests.post(url, files=files, data=payload) if not request.ok: raise exc.CaptchaSolverFailure("Posting captcha to solve failed!") resp_json = json.loads(request.text) return self._process_response(resp_json)
Submit either a file from disk, or a in-memory file to the solver service, and return the request ID associated with the new captcha task.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/TwoCaptchaSolver.py#L184-L213
[ "def getUrlFor(self, mode, query_dict):\n\n\t# query params for input mode\n\t# Normal captcha solving\n\t# key String Yes your API key\n\t# language Integer Default: 0 No 0 - not specified 1 - Cyrillic (Russian) captcha 2 - Latin captcha\n\t# lang String ...
class TwoCaptchaSolver(object): def __init__(self, api_key, wg): self.log = logging.getLogger("Main.WebRequest.Captcha.2Captcha") self.api_key = api_key self.wg = wg # Default timeout is 5 minutes. self.waittime = 60 * 5 def getUrlFor(self, mode, query_dict): # query params for input mode # Normal captcha solving # key String Yes your API key # language Integer Default: 0 No 0 - not specified 1 - Cyrillic (Russian) captcha 2 - Latin captcha # lang String No Language code. See the list of supported languages. # textcaptcha String # Max 140 characters # Endcoding: UTF-8 No Text will be shown to worker to help him to solve the captcha correctly. # For example: type red symbols only. # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. If enabled in.php will include Access-Control-Allow-Origin:* header in the response. # Used for cross-domain AJAX requests in web applications. # pingback String No URL for pingback (callback) response that will be sent when captcha is solved. # URL should be registered on the server. More info here. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # soft_id Integer No ID of software developer. Developers who integrated their software with 2captcha get reward: 10% of spendings of their software users. # # For solving recaptcha # key String Yes your API key # method String Yes userrecaptcha - defines that you're sending a ReCaptcha V2 with new method # googlekey String Yes Value of k or data-sitekey parameter you found on page # pageurl String Yes Full URL of the page where you see the ReCaptcha # invisible Integer Default: 0 No 1 - means that ReCaptcha is invisible. 0 - normal ReCaptcha. # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. # If enabled in.php will include Access-Control-Allow-Origin:* header in the response. # Used for cross-domain AJAX requests in web applications. Also supported by res.php. 
# pingback String No URL for pingback (callback) response that will be sent when captcha is solved. # URL should be registered on the server. More info here. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # soft_id Integer No ID of software developer. Developers who integrated their software with 2captcha get reward: 10% of spendings of their software users. # proxy String No Format: login:password@123.123.123.123:3128 # You can find more info about proxies here. # proxytype String No Type of your proxy: HTTP, HTTPS, SOCKS4, SOCKS5. # # Query params for result mode # Normal captcha results # key String Yes your API key # action String Yes get - get the asnwer for your captcha # id Integer Yes ID of captcha returned by in.php. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. If enabled res.php will include Access-Control-Allow-Origin:* header # in the response. Used for cross-domain AJAX requests in web applications. # Recaptcha results # key String Yes your API key # action String Yes get - get the asnwer for your captcha # id Integer Yes ID of captcha returned by in.php. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON if mode == 'input': path = '/in.php' elif mode == 'result': path = '/res.php' else: raise RuntimeError("Unknown mode (%s). Valid modes are 'input' and 'result'." 
% mode) query = urllib.parse.urlencode(query_dict) new_url = urllib.parse.urlunsplit( ( 'https', # scheme '2captcha.com', # netloc path, # path query, # query '' # fragment ) ) return new_url def _process_response(self, resp_json): if 'status' and 'request' in resp_json: if resp_json['status'] == 1: return resp_json['request'] elif resp_json['request'] == 'CAPCHA_NOT_READY': raise exc.CaptchaNotReady("Captcha not ready yet.") else: self.log.error("[TwoCaptchaSolver] Error response: %s", resp_json['request']) raise exc.CaptchaSolverFailure("API call returned failure response: %s" % resp_json['request']) raise exc.CaptchaSolverFailure("Failure doing get request") def doGet(self, mode, query_dict): query_dict['json'] = True url = self.getUrlFor(mode, query_dict) res = self.wg.getJson(url) return self._process_response(res) def getbalance(self): """ Get you account balance. Returns value: balance (float), or raises an exception. """ balance = self.doGet('result', { 'action' : 'getbalance', 'key' : self.api_key, 'json' : True, }) return balance def _getresult(self, captcha_id, timeout=None): """ Poll until a captcha `captcha_id` has been solved, or the poll times out. The timeout is the default 60 seconds, unless overridden by `timeout` (which is in seconds). Polling is done every 8 seconds. """ timeout = timeout if not timeout: timeout = self.waittime poll_interval = 8 start = time.time() for x in range(int(timeout / poll_interval)+1): self.log.info("Sleeping %s seconds (poll %s of %s, elapsed %0.2fs of %0.2f).", poll_interval, x, int(timeout / poll_interval)+1, (time.time() - start), timeout, ) time.sleep(poll_interval) try: resp = self.doGet('result', { 'action' : 'get', 'key' : self.api_key, 'json' : True, 'id' : captcha_id, } ) self.log.info("Call returned success!") return resp except exc.CaptchaNotReady: self.log.info("Captcha not ready. Waiting longer.") raise exc.CaptchaSolverFailure("Solving captcha timed out after %s seconds!" 
% (time.time() - start, )) def solve_simple_captcha(self, pathfile=None, filedata=None, filename=None): """ Upload a image (from disk or a bytearray), and then block until the captcha has been solved. Return value is the captcha result. either pathfile OR filedata AND filename should be specified. Failure will result in a subclass of WebRequest.CaptchaSolverFailure being thrown. """ captcha_id = self._submit(pathfile, filedata, filename) return self._getresult(captcha_id=captcha_id) def solve_recaptcha(self, google_key, page_url, timeout = 15 * 60): ''' Solve a recaptcha on page `page_url` with the input value `google_key`. Timeout is `timeout` seconds, defaulting to 60 seconds. Return value is either the `g-recaptcha-response` value, or an exceptionj is raised (generally `CaptchaSolverFailure`) ''' proxy = SocksProxy.ProxyLauncher([TWOCAPTCHA_IP]) try: captcha_id = self.doGet('input', { 'key' : self.api_key, 'method' : "userrecaptcha", 'googlekey' : google_key, 'pageurl' : page_url, 'proxy' : proxy.get_wan_address(), 'proxytype' : "SOCKS5", 'json' : True, } ) # Allow 15 minutes for the solution # I've been seeing times up to 160+ seconds in testing. return self._getresult(captcha_id=captcha_id, timeout=timeout) finally: proxy.stop()
fake-name/WebRequest
WebRequest/Captcha/TwoCaptchaSolver.py
TwoCaptchaSolver.solve_simple_captcha
python
def solve_simple_captcha(self, pathfile=None, filedata=None, filename=None): captcha_id = self._submit(pathfile, filedata, filename) return self._getresult(captcha_id=captcha_id)
Upload a image (from disk or a bytearray), and then block until the captcha has been solved. Return value is the captcha result. either pathfile OR filedata AND filename should be specified. Failure will result in a subclass of WebRequest.CaptchaSolverFailure being thrown.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/TwoCaptchaSolver.py#L216-L229
[ "def _getresult(self, captcha_id, timeout=None):\n\t\"\"\"\n\tPoll until a captcha `captcha_id` has been solved, or\n\tthe poll times out. The timeout is the default 60 seconds,\n\tunless overridden by `timeout` (which is in seconds).\n\n\tPolling is done every 8 seconds.\n\t\"\"\"\n\ttimeout = timeout\n\n\tif not ...
class TwoCaptchaSolver(object): def __init__(self, api_key, wg): self.log = logging.getLogger("Main.WebRequest.Captcha.2Captcha") self.api_key = api_key self.wg = wg # Default timeout is 5 minutes. self.waittime = 60 * 5 def getUrlFor(self, mode, query_dict): # query params for input mode # Normal captcha solving # key String Yes your API key # language Integer Default: 0 No 0 - not specified 1 - Cyrillic (Russian) captcha 2 - Latin captcha # lang String No Language code. See the list of supported languages. # textcaptcha String # Max 140 characters # Endcoding: UTF-8 No Text will be shown to worker to help him to solve the captcha correctly. # For example: type red symbols only. # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. If enabled in.php will include Access-Control-Allow-Origin:* header in the response. # Used for cross-domain AJAX requests in web applications. # pingback String No URL for pingback (callback) response that will be sent when captcha is solved. # URL should be registered on the server. More info here. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # soft_id Integer No ID of software developer. Developers who integrated their software with 2captcha get reward: 10% of spendings of their software users. # # For solving recaptcha # key String Yes your API key # method String Yes userrecaptcha - defines that you're sending a ReCaptcha V2 with new method # googlekey String Yes Value of k or data-sitekey parameter you found on page # pageurl String Yes Full URL of the page where you see the ReCaptcha # invisible Integer Default: 0 No 1 - means that ReCaptcha is invisible. 0 - normal ReCaptcha. # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. # If enabled in.php will include Access-Control-Allow-Origin:* header in the response. # Used for cross-domain AJAX requests in web applications. Also supported by res.php. 
# pingback String No URL for pingback (callback) response that will be sent when captcha is solved. # URL should be registered on the server. More info here. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # soft_id Integer No ID of software developer. Developers who integrated their software with 2captcha get reward: 10% of spendings of their software users. # proxy String No Format: login:password@123.123.123.123:3128 # You can find more info about proxies here. # proxytype String No Type of your proxy: HTTP, HTTPS, SOCKS4, SOCKS5. # # Query params for result mode # Normal captcha results # key String Yes your API key # action String Yes get - get the asnwer for your captcha # id Integer Yes ID of captcha returned by in.php. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. If enabled res.php will include Access-Control-Allow-Origin:* header # in the response. Used for cross-domain AJAX requests in web applications. # Recaptcha results # key String Yes your API key # action String Yes get - get the asnwer for your captcha # id Integer Yes ID of captcha returned by in.php. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON if mode == 'input': path = '/in.php' elif mode == 'result': path = '/res.php' else: raise RuntimeError("Unknown mode (%s). Valid modes are 'input' and 'result'." 
% mode) query = urllib.parse.urlencode(query_dict) new_url = urllib.parse.urlunsplit( ( 'https', # scheme '2captcha.com', # netloc path, # path query, # query '' # fragment ) ) return new_url def _process_response(self, resp_json): if 'status' and 'request' in resp_json: if resp_json['status'] == 1: return resp_json['request'] elif resp_json['request'] == 'CAPCHA_NOT_READY': raise exc.CaptchaNotReady("Captcha not ready yet.") else: self.log.error("[TwoCaptchaSolver] Error response: %s", resp_json['request']) raise exc.CaptchaSolverFailure("API call returned failure response: %s" % resp_json['request']) raise exc.CaptchaSolverFailure("Failure doing get request") def doGet(self, mode, query_dict): query_dict['json'] = True url = self.getUrlFor(mode, query_dict) res = self.wg.getJson(url) return self._process_response(res) def getbalance(self): """ Get you account balance. Returns value: balance (float), or raises an exception. """ balance = self.doGet('result', { 'action' : 'getbalance', 'key' : self.api_key, 'json' : True, }) return balance def _getresult(self, captcha_id, timeout=None): """ Poll until a captcha `captcha_id` has been solved, or the poll times out. The timeout is the default 60 seconds, unless overridden by `timeout` (which is in seconds). Polling is done every 8 seconds. """ timeout = timeout if not timeout: timeout = self.waittime poll_interval = 8 start = time.time() for x in range(int(timeout / poll_interval)+1): self.log.info("Sleeping %s seconds (poll %s of %s, elapsed %0.2fs of %0.2f).", poll_interval, x, int(timeout / poll_interval)+1, (time.time() - start), timeout, ) time.sleep(poll_interval) try: resp = self.doGet('result', { 'action' : 'get', 'key' : self.api_key, 'json' : True, 'id' : captcha_id, } ) self.log.info("Call returned success!") return resp except exc.CaptchaNotReady: self.log.info("Captcha not ready. Waiting longer.") raise exc.CaptchaSolverFailure("Solving captcha timed out after %s seconds!" 
% (time.time() - start, )) def _submit(self, pathfile, filedata, filename): ''' Submit either a file from disk, or a in-memory file to the solver service, and return the request ID associated with the new captcha task. ''' if pathfile and os.path.exists(pathfile): files = {'file': open(pathfile, 'rb')} elif filedata: assert filename files = {'file' : (filename, io.BytesIO(filedata))} else: raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!") payload = { 'key' : self.api_key, 'method' : 'post', 'json' : True, } self.log.info("Uploading to 2Captcha.com.") url = self.getUrlFor('input', {}) request = requests.post(url, files=files, data=payload) if not request.ok: raise exc.CaptchaSolverFailure("Posting captcha to solve failed!") resp_json = json.loads(request.text) return self._process_response(resp_json) def solve_recaptcha(self, google_key, page_url, timeout = 15 * 60): ''' Solve a recaptcha on page `page_url` with the input value `google_key`. Timeout is `timeout` seconds, defaulting to 60 seconds. Return value is either the `g-recaptcha-response` value, or an exceptionj is raised (generally `CaptchaSolverFailure`) ''' proxy = SocksProxy.ProxyLauncher([TWOCAPTCHA_IP]) try: captcha_id = self.doGet('input', { 'key' : self.api_key, 'method' : "userrecaptcha", 'googlekey' : google_key, 'pageurl' : page_url, 'proxy' : proxy.get_wan_address(), 'proxytype' : "SOCKS5", 'json' : True, } ) # Allow 15 minutes for the solution # I've been seeing times up to 160+ seconds in testing. return self._getresult(captcha_id=captcha_id, timeout=timeout) finally: proxy.stop()
fake-name/WebRequest
WebRequest/Captcha/TwoCaptchaSolver.py
TwoCaptchaSolver.solve_recaptcha
python
def solve_recaptcha(self, google_key, page_url, timeout = 15 * 60): ''' Solve a recaptcha on page `page_url` with the input value `google_key`. Timeout is `timeout` seconds, defaulting to 60 seconds. Return value is either the `g-recaptcha-response` value, or an exceptionj is raised (generally `CaptchaSolverFailure`) ''' proxy = SocksProxy.ProxyLauncher([TWOCAPTCHA_IP]) try: captcha_id = self.doGet('input', { 'key' : self.api_key, 'method' : "userrecaptcha", 'googlekey' : google_key, 'pageurl' : page_url, 'proxy' : proxy.get_wan_address(), 'proxytype' : "SOCKS5", 'json' : True, } ) # Allow 15 minutes for the solution # I've been seeing times up to 160+ seconds in testing. return self._getresult(captcha_id=captcha_id, timeout=timeout) finally: proxy.stop()
Solve a recaptcha on page `page_url` with the input value `google_key`. Timeout is `timeout` seconds, defaulting to 60 seconds. Return value is either the `g-recaptcha-response` value, or an exceptionj is raised (generally `CaptchaSolverFailure`)
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/TwoCaptchaSolver.py#L231-L260
[ "def stop(self):\n\tself.log.info(\"Telling async event-loop to exit\")\n\tself.loop.call_soon_threadsafe(self.loop.stop)\n\tself.log.info(\"Joining asyncio loop thread.\")\n\tself.proxy_process.join()\n\n\t# Close the port\n\tself._close_local_port(self.listen_port, self.remote_ips)\n", "def get_wan_address(self...
class TwoCaptchaSolver(object): def __init__(self, api_key, wg): self.log = logging.getLogger("Main.WebRequest.Captcha.2Captcha") self.api_key = api_key self.wg = wg # Default timeout is 5 minutes. self.waittime = 60 * 5 def getUrlFor(self, mode, query_dict): # query params for input mode # Normal captcha solving # key String Yes your API key # language Integer Default: 0 No 0 - not specified 1 - Cyrillic (Russian) captcha 2 - Latin captcha # lang String No Language code. See the list of supported languages. # textcaptcha String # Max 140 characters # Endcoding: UTF-8 No Text will be shown to worker to help him to solve the captcha correctly. # For example: type red symbols only. # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. If enabled in.php will include Access-Control-Allow-Origin:* header in the response. # Used for cross-domain AJAX requests in web applications. # pingback String No URL for pingback (callback) response that will be sent when captcha is solved. # URL should be registered on the server. More info here. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # soft_id Integer No ID of software developer. Developers who integrated their software with 2captcha get reward: 10% of spendings of their software users. # # For solving recaptcha # key String Yes your API key # method String Yes userrecaptcha - defines that you're sending a ReCaptcha V2 with new method # googlekey String Yes Value of k or data-sitekey parameter you found on page # pageurl String Yes Full URL of the page where you see the ReCaptcha # invisible Integer Default: 0 No 1 - means that ReCaptcha is invisible. 0 - normal ReCaptcha. # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. # If enabled in.php will include Access-Control-Allow-Origin:* header in the response. # Used for cross-domain AJAX requests in web applications. Also supported by res.php. 
# pingback String No URL for pingback (callback) response that will be sent when captcha is solved. # URL should be registered on the server. More info here. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # soft_id Integer No ID of software developer. Developers who integrated their software with 2captcha get reward: 10% of spendings of their software users. # proxy String No Format: login:password@123.123.123.123:3128 # You can find more info about proxies here. # proxytype String No Type of your proxy: HTTP, HTTPS, SOCKS4, SOCKS5. # # Query params for result mode # Normal captcha results # key String Yes your API key # action String Yes get - get the asnwer for your captcha # id Integer Yes ID of captcha returned by in.php. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON # header_acao Integer Default: 0 No 0 - disabled 1 - enabled. If enabled res.php will include Access-Control-Allow-Origin:* header # in the response. Used for cross-domain AJAX requests in web applications. # Recaptcha results # key String Yes your API key # action String Yes get - get the asnwer for your captcha # id Integer Yes ID of captcha returned by in.php. # json Integer Default: 0 No 0 - server will send the response as plain text 1 - tells the server to send the response as JSON if mode == 'input': path = '/in.php' elif mode == 'result': path = '/res.php' else: raise RuntimeError("Unknown mode (%s). Valid modes are 'input' and 'result'." 
% mode) query = urllib.parse.urlencode(query_dict) new_url = urllib.parse.urlunsplit( ( 'https', # scheme '2captcha.com', # netloc path, # path query, # query '' # fragment ) ) return new_url def _process_response(self, resp_json): if 'status' and 'request' in resp_json: if resp_json['status'] == 1: return resp_json['request'] elif resp_json['request'] == 'CAPCHA_NOT_READY': raise exc.CaptchaNotReady("Captcha not ready yet.") else: self.log.error("[TwoCaptchaSolver] Error response: %s", resp_json['request']) raise exc.CaptchaSolverFailure("API call returned failure response: %s" % resp_json['request']) raise exc.CaptchaSolverFailure("Failure doing get request") def doGet(self, mode, query_dict): query_dict['json'] = True url = self.getUrlFor(mode, query_dict) res = self.wg.getJson(url) return self._process_response(res) def getbalance(self): """ Get you account balance. Returns value: balance (float), or raises an exception. """ balance = self.doGet('result', { 'action' : 'getbalance', 'key' : self.api_key, 'json' : True, }) return balance def _getresult(self, captcha_id, timeout=None): """ Poll until a captcha `captcha_id` has been solved, or the poll times out. The timeout is the default 60 seconds, unless overridden by `timeout` (which is in seconds). Polling is done every 8 seconds. """ timeout = timeout if not timeout: timeout = self.waittime poll_interval = 8 start = time.time() for x in range(int(timeout / poll_interval)+1): self.log.info("Sleeping %s seconds (poll %s of %s, elapsed %0.2fs of %0.2f).", poll_interval, x, int(timeout / poll_interval)+1, (time.time() - start), timeout, ) time.sleep(poll_interval) try: resp = self.doGet('result', { 'action' : 'get', 'key' : self.api_key, 'json' : True, 'id' : captcha_id, } ) self.log.info("Call returned success!") return resp except exc.CaptchaNotReady: self.log.info("Captcha not ready. Waiting longer.") raise exc.CaptchaSolverFailure("Solving captcha timed out after %s seconds!" 
% (time.time() - start, )) def _submit(self, pathfile, filedata, filename): ''' Submit either a file from disk, or a in-memory file to the solver service, and return the request ID associated with the new captcha task. ''' if pathfile and os.path.exists(pathfile): files = {'file': open(pathfile, 'rb')} elif filedata: assert filename files = {'file' : (filename, io.BytesIO(filedata))} else: raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!") payload = { 'key' : self.api_key, 'method' : 'post', 'json' : True, } self.log.info("Uploading to 2Captcha.com.") url = self.getUrlFor('input', {}) request = requests.post(url, files=files, data=payload) if not request.ok: raise exc.CaptchaSolverFailure("Posting captcha to solve failed!") resp_json = json.loads(request.text) return self._process_response(resp_json) def solve_simple_captcha(self, pathfile=None, filedata=None, filename=None): """ Upload a image (from disk or a bytearray), and then block until the captcha has been solved. Return value is the captcha result. either pathfile OR filedata AND filename should be specified. Failure will result in a subclass of WebRequest.CaptchaSolverFailure being thrown. """ captcha_id = self._submit(pathfile, filedata, filename) return self._getresult(captcha_id=captcha_id)
fake-name/WebRequest
WebRequest/HeaderParseMonkeyPatch.py
isUTF8Strict
python
def isUTF8Strict(data): # pragma: no cover - Only used when cchardet is missing. ''' Check if all characters in a bytearray are decodable using UTF-8. ''' try: decoded = data.decode('UTF-8') except UnicodeDecodeError: return False else: for ch in decoded: if 0xD800 <= ord(ch) <= 0xDFFF: return False return True
Check if all characters in a bytearray are decodable using UTF-8.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/HeaderParseMonkeyPatch.py#L15-L28
null
#!/usr/bin/python3 import sys import codecs import http.client import email.parser cchardet = False try: import cchardet except ImportError: # pragma: no cover pass def decode_headers(header_list): ''' Decode a list of headers. Takes a list of bytestrings, returns a list of unicode strings. The character set for each bytestring is individually decoded. ''' decoded_headers = [] for header in header_list: if cchardet: inferred = cchardet.detect(header) if inferred and inferred['confidence'] > 0.8: # print("Parsing headers!", header) decoded_headers.append(header.decode(inferred['encoding'])) else: decoded_headers.append(header.decode('iso-8859-1')) else: # pragma: no cover # All bytes are < 127 (e.g. ASCII) if all([char & 0x80 == 0 for char in header]): decoded_headers.append(header.decode("us-ascii")) elif isUTF8Strict(header): decoded_headers.append(header.decode("utf-8")) else: decoded_headers.append(header.decode('iso-8859-1')) return decoded_headers def parse_headers(fp, _class=http.client.HTTPMessage): """Parses only RFC2822 headers from a file pointer. email Parser wants to see strings rather than bytes. But a TextIOWrapper around self.rfile would buffer too many bytes from the stream, bytes which we later need to read as bytes. So we read the correct bytes here, as bytes, for email Parser to parse. Note: Monkey-patched version to try to more intelligently determine header encoding """ headers = [] while True: line = fp.readline(http.client._MAXLINE + 1) if len(line) > http.client._MAXLINE: raise http.client.LineTooLong("header line") headers.append(line) if len(headers) > http.client._MAXHEADERS: raise HTTPException("got more than %d headers" % http.client._MAXHEADERS) if line in (b'\r\n', b'\n', b''): break decoded_headers = decode_headers(headers) hstring = ''.join(decoded_headers) return email.parser.Parser(_class=_class).parsestr(hstring) http.client.parse_headers = parse_headers
fake-name/WebRequest
WebRequest/HeaderParseMonkeyPatch.py
decode_headers
python
def decode_headers(header_list): ''' Decode a list of headers. Takes a list of bytestrings, returns a list of unicode strings. The character set for each bytestring is individually decoded. ''' decoded_headers = [] for header in header_list: if cchardet: inferred = cchardet.detect(header) if inferred and inferred['confidence'] > 0.8: # print("Parsing headers!", header) decoded_headers.append(header.decode(inferred['encoding'])) else: decoded_headers.append(header.decode('iso-8859-1')) else: # pragma: no cover # All bytes are < 127 (e.g. ASCII) if all([char & 0x80 == 0 for char in header]): decoded_headers.append(header.decode("us-ascii")) elif isUTF8Strict(header): decoded_headers.append(header.decode("utf-8")) else: decoded_headers.append(header.decode('iso-8859-1')) return decoded_headers
Decode a list of headers. Takes a list of bytestrings, returns a list of unicode strings. The character set for each bytestring is individually decoded.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/HeaderParseMonkeyPatch.py#L30-L56
[ "def isUTF8Strict(data): # pragma: no cover - Only used when cchardet is missing.\n\t'''\n\tCheck if all characters in a bytearray are decodable\n\tusing UTF-8.\n\t'''\n\ttry:\n\t\tdecoded = data.decode('UTF-8')\n\texcept UnicodeDecodeError:\n\t\treturn False\n\telse:\n\t\tfor ch in decoded:\n\t\t\tif 0xD800 <=...
#!/usr/bin/python3 import sys import codecs import http.client import email.parser cchardet = False try: import cchardet except ImportError: # pragma: no cover pass def isUTF8Strict(data): # pragma: no cover - Only used when cchardet is missing. ''' Check if all characters in a bytearray are decodable using UTF-8. ''' try: decoded = data.decode('UTF-8') except UnicodeDecodeError: return False else: for ch in decoded: if 0xD800 <= ord(ch) <= 0xDFFF: return False return True def parse_headers(fp, _class=http.client.HTTPMessage): """Parses only RFC2822 headers from a file pointer. email Parser wants to see strings rather than bytes. But a TextIOWrapper around self.rfile would buffer too many bytes from the stream, bytes which we later need to read as bytes. So we read the correct bytes here, as bytes, for email Parser to parse. Note: Monkey-patched version to try to more intelligently determine header encoding """ headers = [] while True: line = fp.readline(http.client._MAXLINE + 1) if len(line) > http.client._MAXLINE: raise http.client.LineTooLong("header line") headers.append(line) if len(headers) > http.client._MAXHEADERS: raise HTTPException("got more than %d headers" % http.client._MAXHEADERS) if line in (b'\r\n', b'\n', b''): break decoded_headers = decode_headers(headers) hstring = ''.join(decoded_headers) return email.parser.Parser(_class=_class).parsestr(hstring) http.client.parse_headers = parse_headers
fake-name/WebRequest
WebRequest/HeaderParseMonkeyPatch.py
parse_headers
python
def parse_headers(fp, _class=http.client.HTTPMessage): headers = [] while True: line = fp.readline(http.client._MAXLINE + 1) if len(line) > http.client._MAXLINE: raise http.client.LineTooLong("header line") headers.append(line) if len(headers) > http.client._MAXHEADERS: raise HTTPException("got more than %d headers" % http.client._MAXHEADERS) if line in (b'\r\n', b'\n', b''): break decoded_headers = decode_headers(headers) hstring = ''.join(decoded_headers) return email.parser.Parser(_class=_class).parsestr(hstring)
Parses only RFC2822 headers from a file pointer. email Parser wants to see strings rather than bytes. But a TextIOWrapper around self.rfile would buffer too many bytes from the stream, bytes which we later need to read as bytes. So we read the correct bytes here, as bytes, for email Parser to parse. Note: Monkey-patched version to try to more intelligently determine header encoding
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/HeaderParseMonkeyPatch.py#L59-L87
[ "def decode_headers(header_list):\n\t'''\n\tDecode a list of headers.\n\n\tTakes a list of bytestrings, returns a list of unicode strings.\n\tThe character set for each bytestring is individually decoded.\n\t'''\n\n\tdecoded_headers = []\n\tfor header in header_list:\n\t\tif cchardet:\n\t\t\tinferred = cchardet.det...
#!/usr/bin/python3 import sys import codecs import http.client import email.parser cchardet = False try: import cchardet except ImportError: # pragma: no cover pass def isUTF8Strict(data): # pragma: no cover - Only used when cchardet is missing. ''' Check if all characters in a bytearray are decodable using UTF-8. ''' try: decoded = data.decode('UTF-8') except UnicodeDecodeError: return False else: for ch in decoded: if 0xD800 <= ord(ch) <= 0xDFFF: return False return True def decode_headers(header_list): ''' Decode a list of headers. Takes a list of bytestrings, returns a list of unicode strings. The character set for each bytestring is individually decoded. ''' decoded_headers = [] for header in header_list: if cchardet: inferred = cchardet.detect(header) if inferred and inferred['confidence'] > 0.8: # print("Parsing headers!", header) decoded_headers.append(header.decode(inferred['encoding'])) else: decoded_headers.append(header.decode('iso-8859-1')) else: # pragma: no cover # All bytes are < 127 (e.g. ASCII) if all([char & 0x80 == 0 for char in header]): decoded_headers.append(header.decode("us-ascii")) elif isUTF8Strict(header): decoded_headers.append(header.decode("utf-8")) else: decoded_headers.append(header.decode('iso-8859-1')) return decoded_headers http.client.parse_headers = parse_headers
fake-name/WebRequest
WebRequest/utility.py
determine_json_encoding
python
def determine_json_encoding(json_bytes): ''' Given the fact that the first 2 characters in json are guaranteed to be ASCII, we can use these to determine the encoding. See: http://tools.ietf.org/html/rfc4627#section-3 Copied here: Since the first two characters of a JSON text will always be ASCII characters [RFC0020], it is possible to determine whether an octet stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking at the pattern of nulls in the first four octets. 00 00 00 xx UTF-32BE 00 xx 00 xx UTF-16BE xx 00 00 00 UTF-32LE xx 00 xx 00 UTF-16LE xx xx xx xx UTF-8 ''' assert isinstance(json_bytes, bytes), "`determine_json_encoding()` can only operate on bytestring inputs" if len(json_bytes) > 4: b1, b2, b3, b4 = json_bytes[0], json_bytes[1], json_bytes[2], json_bytes[3] if b1 == 0 and b2 == 0 and b3 == 0 and b4 != 0: return "UTF-32BE" elif b1 == 0 and b2 != 0 and b3 == 0 and b4 != 0: return "UTF-16BE" elif b1 != 0 and b2 == 0 and b3 == 0 and b4 == 0: return "UTF-32LE" elif b1 != 0 and b2 == 0 and b3 != 0 and b4 == 0: return "UTF-16LE" elif b1 != 0 and b2 != 0 and b3 != 0 and b4 != 0: return "UTF-8" else: raise Exceptions.ContentTypeError("Unknown encoding!") elif len(json_bytes) > 2: b1, b2 = json_bytes[0], json_bytes[1] if b1 == 0 and b2 == 0: return "UTF-32BE" elif b1 == 0 and b2 != 0: return "UTF-16BE" elif b1 != 0 and b2 == 0: raise Exceptions.ContentTypeError("Json string too short to definitively infer encoding.") elif b1 != 0 and b2 != 0: return "UTF-8" else: raise Exceptions.ContentTypeError("Unknown encoding!") raise Exceptions.ContentTypeError("Input string too short to guess encoding!")
Given the fact that the first 2 characters in json are guaranteed to be ASCII, we can use these to determine the encoding. See: http://tools.ietf.org/html/rfc4627#section-3 Copied here: Since the first two characters of a JSON text will always be ASCII characters [RFC0020], it is possible to determine whether an octet stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking at the pattern of nulls in the first four octets. 00 00 00 xx UTF-32BE 00 xx 00 xx UTF-16BE xx 00 00 00 UTF-32LE xx 00 xx 00 UTF-16LE xx xx xx xx UTF-8
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/utility.py#L28-L77
null
import bs4 from . import Exceptions def as_soup(in_str): # I already pre-decode the content, and lxml barfs horribly when fed # content with a charset specified as iso-8859-1. # See https://bugs.launchpad.net/beautifulsoup/+bug/972466 and # https://bugs.launchpad.net/lxml/+bug/963936 if isinstance(in_str, bytes): if b'charset=iso-8859-1' in in_str: in_str = in_str.replace(b"charset=iso-8859-1", b"charset=UTF-8") if b'charset=ISO-8859-1' in in_str: in_str = in_str.replace(b"charset=ISO-8859-1", b"charset=UTF-8") elif isinstance(in_str, str): if 'charset=iso-8859-1' in in_str: in_str = in_str.replace("charset=iso-8859-1", "charset=UTF-8") if 'charset=ISO-8859-1' in in_str: in_str = in_str.replace("charset=ISO-8859-1", "charset=UTF-8") else: raise Exceptions.ContentTypeError("as_soup call can only accept either bytes or string. Passed type %s" % type(in_str), None) return bs4.BeautifulSoup(in_str, "lxml")
fake-name/WebRequest
WebRequest/Captcha/PunchPort.py
_is_private_ip
python
def _is_private_ip(ip): networks = [ "0.0.0.0/8", "10.0.0.0/8", "100.64.0.0/10", "127.0.0.0/8", "169.254.0.0/16", "172.16.0.0/12", "192.0.0.0/24", "192.0.2.0/24", "192.88.99.0/24", "192.168.0.0/16", "198.18.0.0/15", "198.51.100.0/24", "203.0.113.0/24", "240.0.0.0/4", "255.255.255.255/32", "224.0.0.0/4", ] for network in networks: try: ipaddr = struct.unpack(">I", socket.inet_aton(ip))[0] netaddr, bits = network.split("/") network_low = struct.unpack(">I", socket.inet_aton(netaddr))[0] network_high = network_low | 1 << (32 - int(bits)) - 1 if ipaddr <= network_high and ipaddr >= network_low: return True except Exception: continue return False
Taken from https://stackoverflow.com/a/39656628/268006 Check if the IP belongs to private network blocks. @param ip: IP address to verify. @return: boolean representing whether the IP belongs or not to a private network block.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/PunchPort.py#L9-L51
null
import logging import struct import socket import upnpclient import WebRequest.Exceptions as exc class UpnpHolePunch(object): def __init__(self): self.log = logging.getLogger("Main.WebRequest.Captcha.UPnP-Manager") self.gateway_device = None self.local_ip = self._get_local_ip() if not self.local_ip: raise exc.CouldNotDetermineLocalIp("Could not determine the local IP. Are you connected to the internet?") self.is_public = not _is_private_ip(self.local_ip) if self.is_public: self.log.info("You seem to have a public IP address. No need to forward a port via UPnP") else: self.log.info("Your local IP is %s, which appears to be private. Looking for a UPnP Gateway.", self.local_ip) self._init_upnp() def _init_upnp(self): devices = upnpclient.discover() self.log.info("Found %s UPnP devices on LAN", len(devices)) for device in devices: try: _ = device.WANIPConn1 self.gateway_device = device self.log.info("Found gateway device: %s", device) except Exception: pass if not self.gateway_device: raise exc.CouldNotFindUpnpGateway("No UPnP Gateway found. Found %s UPnP devices on LAN" % len(devices)) self.log.info("Resolved WAN address: %s", self.get_wan_ip()) def _get_local_ip(self): local_ip = None dummy_hostname, dummy_aliaslist, ipaddrlist = socket.gethostbyname_ex(socket.gethostname()) for ip in ipaddrlist: if not ip.startswith("127."): local_ip = ip if not local_ip: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 53)) local_ip, dummy_port = s.getsockname() s.close() return local_ip def get_wan_ip(self): ret = self.gateway_device.WANIPConn1.GetExternalIPAddress() if not "NewExternalIPAddress" in ret: raise exc.CouldNotDetermineWanIp("No wan IP address found on gateway. 
What?") return ret["NewExternalIPAddress"] def open_port(self, remote_addresses, remote_port, local_port, duration=None): # Idiot check if not self.gateway_device: raise exc.CouldNotFindUpnpGateway("No UPnP Gateway found.") if duration is None: duration = 60 * 15 # So upnp doesn't actually filter by remote address (except for which interface # to bind to, I think). Therefore, if we pass it multiple addresses on the same # interface, you get rule conflicts. # A horrible hack to fix this is to just add the first mapping, which will # work in any context where there is only one WAN interface. remote_addresses = [remote_addresses[0]] for remote_address in remote_addresses: self.log.info("Forwarding from remote %s:%s to local %s:%s. Lease will expire in %s seconds.", remote_address, remote_port, self.local_ip, local_port, duration) self.gateway_device.WANIPConn1.AddPortMapping( NewRemoteHost = remote_address, NewExternalPort = remote_port, NewProtocol = 'TCP', NewInternalPort = local_port, NewInternalClient = self.local_ip, NewEnabled = '1', NewPortMappingDescription = 'WebRequest CaptchaSolver Hole Punching {}.'.format(local_port), NewLeaseDuration = duration ) def close_port(self, remote_addresses, remote_port): # Idiot check if not self.gateway_device: raise exc.CouldNotFindUpnpGateway("No UPnP Gateway found.") # So upnp doesn't actually filter by remote address (except for which interface # to bind to, I think). Therefore, if we pass it multiple addresses on the same # interface, you get rule conflicts. # A horrible hack to fix this is to just add the first mapping, which will # work in any context where there is only one WAN interface. 
remote_addresses = [remote_addresses[0]] for remote_address in remote_addresses: self.log.info("Closing forwarded port from remote %s:%s.", remote_address, remote_port) self.gateway_device.WANIPConn1.DeletePortMapping( NewRemoteHost = remote_address, NewExternalPort = remote_port, NewProtocol = 'TCP', ) def test(): logging.basicConfig(level=logging.INFO) puncher = UpnpHolePunch() print("Puncher:", puncher) print("Wan IP:", puncher.get_wan_ip()) if __name__ == '__main__': test()
fake-name/WebRequest
WebRequest/iri2uri.py
iri2uri
python
def iri2uri(uri): assert uri != None, 'iri2uri must be passed a non-none string!' original = uri if isinstance(uri ,str): (scheme, authority, path, query, fragment) = urllib.parse.urlsplit(uri) authority = authority.encode('idna').decode('utf-8') # For each character in 'ucschar' or 'iprivate' # 1. encode as utf-8 # 2. then %-encode each octet of that utf-8 # path = urllib.parse.quote(path) uri = urllib.parse.urlunsplit((scheme, authority, path, query, fragment)) uri = "".join([encode(c) for c in uri]) # urllib.parse.urlunsplit(urllib.parse.urlsplit({something}) # strips any trailing "?" chars. While this may be legal according to the # spec, it breaks some services. Therefore, we patch # the "?" back in if it has been removed. if original.endswith("?") and not uri.endswith("?"): uri = uri+"?" return uri
Convert an IRI to a URI. Note that IRIs must be passed in a unicode strings. That is, do not utf-8 encode the IRI before passing it into the function.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/iri2uri.py#L51-L75
null
import urllib.parse # Convert an IRI to a URI following the rules in RFC 3987 # # The characters we need to enocde and escape are defined in the spec: # # iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD # ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF # / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD # / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD # / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD # / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD # / %xD0000-DFFFD / %xE1000-EFFFD escape_range = [ (0xA0, 0xD7FF), (0xE000, 0xF8FF), (0xF900, 0xFDCF), (0xFDF0, 0xFFEF), (0x10000, 0x1FFFD), (0x20000, 0x2FFFD), (0x30000, 0x3FFFD), (0x40000, 0x4FFFD), (0x50000, 0x5FFFD), (0x60000, 0x6FFFD), (0x70000, 0x7FFFD), (0x80000, 0x8FFFD), (0x90000, 0x9FFFD), (0xA0000, 0xAFFFD), (0xB0000, 0xBFFFD), (0xC0000, 0xCFFFD), (0xD0000, 0xDFFFD), (0xE1000, 0xEFFFD), (0xF0000, 0xFFFFD), (0x100000, 0x10FFFD), ] def encode(c): retval = c i = ord(c) for low, high in escape_range: if i < low: break if i >= low and i <= high: retval = "".join(["%%%2X" % o for o in c.encode('utf-8')]) break return retval
fake-name/WebRequest
WebRequest/Captcha/AntiCaptchaSolver.py
AntiCaptchaSolver.solve_simple_captcha
python
def solve_simple_captcha(self, pathfile=None, filedata=None, filename=None): if pathfile and os.path.exists(pathfile): fp = open(pathfile, 'rb') elif filedata: fp = io.BytesIO(filedata) else: raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!") try: task = python_anticaptcha.ImageToTextTask(fp) job = self.client.createTask(task) job.join(maximum_time = self.waittime) return job.get_captcha_text() except python_anticaptcha.AnticaptchaException as e: raise exc.CaptchaSolverFailure("Failure solving captcha: %s, %s, %s" % ( e.error_id, e.error_code, e.error_description, ))
Upload a image (from disk or a bytearray), and then block until the captcha has been solved. Return value is the captcha result. either pathfile OR filedata should be specified. Filename is ignored (and is only kept for compatibility with the 2captcha solver interface) Failure will result in a subclass of WebRequest.CaptchaSolverFailure being thrown.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/AntiCaptchaSolver.py#L45-L78
null
class AntiCaptchaSolver(object): def __init__(self, api_key, wg): self.log = logging.getLogger("Main.WebRequest.Captcha.AntiCaptcha") self.wg = wg self.client = python_anticaptcha.AnticaptchaClient(api_key) # Default timeout is 5 minutes. self.waittime = 60 * 5 def getbalance(self): """ Get you account balance. Returns value: balance (float), or raises an exception. """ return self.client.getBalance() def solve_recaptcha(self, google_key, page_url, timeout = 15 * 60): ''' Solve a recaptcha on page `page_url` with the input value `google_key`. Timeout is `timeout` seconds, defaulting to 60 seconds. Return value is either the `g-recaptcha-response` value, or an exceptionj is raised (generally `CaptchaSolverFailure`) ''' proxy = SocksProxy.ProxyLauncher(ANTICAPTCHA_IPS) try: antiprox = python_anticaptcha.Proxy( proxy_type = "socks5", proxy_address = proxy.get_wan_ip(), proxy_port = proxy.get_wan_port(), proxy_login = None, proxy_password = None, ) task = python_anticaptcha.NoCaptchaTask( website_url = page_url, website_key = google_key, proxy = antiprox, user_agent = dict(self.wg.browserHeaders).get('User-Agent') ) job = self.client.createTask(task) job.join(maximum_time = timeout) return job.get_solution_response() except python_anticaptcha.AnticaptchaException as e: raise exc.CaptchaSolverFailure("Failure solving captcha: %s, %s, %s" % ( e.error_id, e.error_code, e.error_description, )) finally: proxy.stop()
fake-name/WebRequest
WebRequest/Captcha/AntiCaptchaSolver.py
AntiCaptchaSolver.solve_recaptcha
python
def solve_recaptcha(self, google_key, page_url, timeout = 15 * 60): ''' Solve a recaptcha on page `page_url` with the input value `google_key`. Timeout is `timeout` seconds, defaulting to 60 seconds. Return value is either the `g-recaptcha-response` value, or an exceptionj is raised (generally `CaptchaSolverFailure`) ''' proxy = SocksProxy.ProxyLauncher(ANTICAPTCHA_IPS) try: antiprox = python_anticaptcha.Proxy( proxy_type = "socks5", proxy_address = proxy.get_wan_ip(), proxy_port = proxy.get_wan_port(), proxy_login = None, proxy_password = None, ) task = python_anticaptcha.NoCaptchaTask( website_url = page_url, website_key = google_key, proxy = antiprox, user_agent = dict(self.wg.browserHeaders).get('User-Agent') ) job = self.client.createTask(task) job.join(maximum_time = timeout) return job.get_solution_response() except python_anticaptcha.AnticaptchaException as e: raise exc.CaptchaSolverFailure("Failure solving captcha: %s, %s, %s" % ( e.error_id, e.error_code, e.error_description, )) finally: proxy.stop()
Solve a recaptcha on page `page_url` with the input value `google_key`. Timeout is `timeout` seconds, defaulting to 60 seconds. Return value is either the `g-recaptcha-response` value, or an exceptionj is raised (generally `CaptchaSolverFailure`)
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/Captcha/AntiCaptchaSolver.py#L81-L119
[ "def stop(self):\n\tself.log.info(\"Telling async event-loop to exit\")\n\tself.loop.call_soon_threadsafe(self.loop.stop)\n\tself.log.info(\"Joining asyncio loop thread.\")\n\tself.proxy_process.join()\n\n\t# Close the port\n\tself._close_local_port(self.listen_port, self.remote_ips)\n", "def get_wan_ip(self):\n\...
class AntiCaptchaSolver(object): def __init__(self, api_key, wg): self.log = logging.getLogger("Main.WebRequest.Captcha.AntiCaptcha") self.wg = wg self.client = python_anticaptcha.AnticaptchaClient(api_key) # Default timeout is 5 minutes. self.waittime = 60 * 5 def getbalance(self): """ Get you account balance. Returns value: balance (float), or raises an exception. """ return self.client.getBalance() def solve_simple_captcha(self, pathfile=None, filedata=None, filename=None): """ Upload a image (from disk or a bytearray), and then block until the captcha has been solved. Return value is the captcha result. either pathfile OR filedata should be specified. Filename is ignored (and is only kept for compatibility with the 2captcha solver interface) Failure will result in a subclass of WebRequest.CaptchaSolverFailure being thrown. """ if pathfile and os.path.exists(pathfile): fp = open(pathfile, 'rb') elif filedata: fp = io.BytesIO(filedata) else: raise ValueError("You must pass either a valid file path, or a bytes array containing the captcha image!") try: task = python_anticaptcha.ImageToTextTask(fp) job = self.client.createTask(task) job.join(maximum_time = self.waittime) return job.get_captcha_text() except python_anticaptcha.AnticaptchaException as e: raise exc.CaptchaSolverFailure("Failure solving captcha: %s, %s, %s" % ( e.error_id, e.error_code, e.error_description, ))
fake-name/WebRequest
WebRequest/UA_Constants.py
getUserAgent
python
def getUserAgent(): ''' Generate a randomized user agent by permuting a large set of possible values. The returned user agent should look like a valid, in-use brower, with a specified preferred language of english. Return value is a list of tuples, where each tuple is one of the user-agent headers. Currently can provide approximately 147 * 17 * 5 * 5 * 2 * 3 * 2 values, or ~749K possible unique user-agents. ''' coding = random.choice(ENCODINGS) random.shuffle(coding) coding = random.choice((", ", ",")).join(coding) accept_list = [tmp for tmp in random.choice(ACCEPT)] accept_list.append(random.choice(ACCEPT_POSTFIX)) accept_str = random.choice((", ", ",")).join(accept_list) assert accept_str.count("*.*") <= 1 user_agent = [ ('User-Agent' , random.choice(USER_AGENTS)), ('Accept-Language' , random.choice(ACCEPT_LANGUAGE)), ('Accept' , accept_str), ('Accept-Encoding' , coding) ] return user_agent
Generate a randomized user agent by permuting a large set of possible values. The returned user agent should look like a valid, in-use brower, with a specified preferred language of english. Return value is a list of tuples, where each tuple is one of the user-agent headers. Currently can provide approximately 147 * 17 * 5 * 5 * 2 * 3 * 2 values, or ~749K possible unique user-agents.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/UA_Constants.py#L137-L164
null
import random random.seed() # Due to general internet people douchebaggyness, I've basically said to hell with it and decided to spoof a whole assortment of browsers # It should keep people from blocking this scraper *too* easily # This file generates a random browser user-agent, It should have an extremely large set of possible UA structures. USER_AGENTS = [ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 Safari/605.1.15", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299", 
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.170 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:60.0) Gecko/20100101 Firefox/60.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 Safari/605.1.15", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0", "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/604.5.6 (KHTML, like Gecko) Version/11.0.3 Safari/604.5.6", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 
Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36", "Mozilla/5.0 (X11; Linux x86_64; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134", "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36", "Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.170 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36 OPR/52.0.2871.99", "Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 Safari/605.1.15", "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko", "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; Trident/5.0)", "Mozilla/5.0 (iPad; CPU OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.170 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (Macintosh; 
Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:52.0) Gecko/20100101 Firefox/52.0", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/65.0.3325.181 Chrome/65.0.3325.181 Safari/537.36", "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; Trident/5.0)", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.170 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36 OPR/52.0.2871.99", "Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:60.0) Gecko/20100101 Firefox/60.0", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36 OPR/52.0.2871.64", "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063", "Mozilla/5.0 (Windows NT 6.1; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:60.0) Gecko/20100101 Firefox/60.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 YaBrowser/18.3.1.1232 Yowser/2.5 Safari/537.36", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:59.0) Gecko/20100101 Firefox/59.0", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/66.0.3359.139 Chrome/66.0.3359.139 Safari/537.36", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:61.0) Gecko/20100101 Firefox/61.0", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/604.4.7 (KHTML, like Gecko) Version/11.0.2 Safari/604.4.7", "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:56.0) Gecko/20100101 Firefox/56.0", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36", ] ACCEPT_LANGUAGE =[ "en-gb,en-us;q=0.7,de-ch;q=0.3", "en-GB,en-US;q=0.8,en;q=0.6", "en-GB,en-US;q=0.8,en;q=0.6", "en-US", "en-us, en;q=1.0,fr-ca, fr;q=0.5,pt-br, pt;q=0.5,es;q=0.5", "en-US,de-DE;q=0.5", "en-us,en;q=0.5", "en-US,en;q=0.8", "en-US,en;q=0.8,en-GB;q=0.6,fr-CA;q=0.4,fr;q=0.2", "en-US,en;q=0.8,es-419;q=0.6", "en-us,en;q=0.8,es;q=0.5,es-mx;q=0.3", "en-US,en;q=0.8,es;q=0.6", 
"en-US,en;q=0.8,pl;q=0.6", "en-US,en;q=0.8,pl;q=0.6", "en-US,en;q=0.9", "en-US,en;q=0.9,fr;q=0.8,de;q=0.7,id;q=0.6", "en-US,en;q=0.9,ja;q=0.8,fr;q=0.7,de;q=0.6,es;q=0.5,it;q=0.4,nl;q=0.3,sv;q=0.2,nb;q=0.1", ] ACCEPT = [ ["text/html","application/xhtml+xml","application/xml;q=0.9"], ["application/xml","application/xhtml+xml","text/html;q=0.9"," text/plain;q=0.8","image/png"], ["text/html","application/xhtml+xml","application/xml;q=0.9"], # ["image/jpeg","application/x-ms-application","image/gif","application/xaml+xml","image/pjpeg","application/x-ms-xbap","application/x-shockwave-flash","application/msword"], ["text/html","application/xml;q=0.9","application/xhtml+xml","image/png","image/webp","image/jpeg","image/gif","image/x-xbitmap"] ] ACCEPT_POSTFIX = ["*/*;q=0.8", "*/*;q=0.5", "*/*;q=0.8", "*/*", "*/*;q=0.1"] ENCODINGS = [['gzip'], ['gzip', 'deflate'], ['gzip', 'deflate', 'sdch']] if __name__ == '__main__': import pprint ua = getUserAgent() pprint.pprint(ua) # This file based heavily on the UA List, Copyright (c) 2014, Harald Hope # This list was released under the BSD 2 clause. # Home page: techpatterns.com/forums/about304.html # Special thanks to the following: # User-Agent Switcher: www.chrispederick.com/work/user-agent-switcher # Firefox history: www.zytrax.com/tech/web/firefox-history.html # Mobile data: wikipedia.org/wiki/List_of_user_agents_for_mobile_phones # Mobile data: www.zytrax.com/tech/web/mobile_ids.html # Current User-Agents: http://myip.ms/browse/comp_browsers # User-agent data: www.zytrax.com/tech/web/browser_ids.htm # User-agent strings: www.useragentstring.com # User-agent strings: www.webapps-online.com/online-tools/user-agent-strings/dv/ # License: BSD 2 Clause # All rights reserved. Redistribution and use in source and binary forms, # with or without modification, are permitted provided that the following # conditions are met: # 1. 
Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this # list of conditions and the following disclaimer in the documentation and/or other # materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER # IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.
fake-name/WebRequest
WebRequest/ChromiumMixin.py
WebGetCrMixin.stepThroughJsWaf_bare_chromium
python
def stepThroughJsWaf_bare_chromium(self, url, titleContains='', titleNotContains='', extra_tid=None): ''' Use Chromium to access a resource behind WAF protection. Params: ``url`` - The URL to access that is protected by WAF ``titleContains`` - A string that is in the title of the protected page, and NOT the WAF intermediate page. The presence of this string in the page title is used to determine whether the WAF protection has been successfully penetrated. ``titleContains`` - A string that is in the title of the WAF intermediate page and NOT in the target page. The presence of this string in the page title is used to determine whether the WAF protection has been successfully penetrated. The current WebGetRobust headers are installed into the selenium browser, which is then used to access the protected resource. Once the protected page has properly loaded, the WAF access cookie is then extracted from the selenium browser, and installed back into the WebGetRobust instance, so it can continue to use the WAF auth in normal requests. ''' if (not titleContains) and (not titleNotContains): raise ValueError("You must pass either a string the title should contain, or a string the title shouldn't contain!") if titleContains and titleNotContains: raise ValueError("You can only pass a single conditional statement!") self.log.info("Attempting to access page through WAF browser verification.") current_title = None if extra_tid is True: extra_tid = threading.get_ident() with self._chrome_context(url, extra_tid=extra_tid) as cr: self._syncIntoChromium(cr) cr.blocking_navigate(url) for _ in range(self.wrapper_step_through_timeout): time.sleep(1) current_title, _ = cr.get_page_url_title() if titleContains and titleContains in current_title: self._syncOutOfChromium(cr) return True if titleNotContains and current_title and titleNotContains not in current_title: self._syncOutOfChromium(cr) return True self._syncOutOfChromium(cr) self.log.error("Failed to step through. 
Current title: '%s'", current_title) return False
Use Chromium to access a resource behind WAF protection. Params: ``url`` - The URL to access that is protected by WAF ``titleContains`` - A string that is in the title of the protected page, and NOT the WAF intermediate page. The presence of this string in the page title is used to determine whether the WAF protection has been successfully penetrated. ``titleContains`` - A string that is in the title of the WAF intermediate page and NOT in the target page. The presence of this string in the page title is used to determine whether the WAF protection has been successfully penetrated. The current WebGetRobust headers are installed into the selenium browser, which is then used to access the protected resource. Once the protected page has properly loaded, the WAF access cookie is then extracted from the selenium browser, and installed back into the WebGetRobust instance, so it can continue to use the WAF auth in normal requests.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/ChromiumMixin.py#L237-L292
[ "def _chrome_context(self, itemUrl, extra_tid):\n\tif self.borg_chrome_pool and self.borg_chrome_pool is True:\n\t\tself.log.info(\"Initializing chromium pool on first use!\")\n\t\tself.borg_chrome_pool = ChromiumBorg(chrome_binary=self._cr_binary)\n\n\tif self.borg_chrome_pool:\n\t\tassert itemUrl is not None, \"Y...
class WebGetCrMixin(object): # creds is a list of 3-tuples that gets inserted into the password manager. # it is structured [(top_level_url1, username1, password1), (top_level_url2, username2, password2)] def __init__(self, use_global_tab_pool=True, *args, **kwargs): if "chromium-binary" in kwargs: self._cr_binary = kwargs.pop("chrome-binary") else: self._cr_binary = "google-chrome" super().__init__(*args, **kwargs) self.navigate_timeout_secs = 10 self.wrapper_step_through_timeout = 20 if use_global_tab_pool: self.borg_chrome_pool = True else: self.borg_chrome_pool = None def _chrome_context(self, itemUrl, extra_tid): if self.borg_chrome_pool and self.borg_chrome_pool is True: self.log.info("Initializing chromium pool on first use!") self.borg_chrome_pool = ChromiumBorg(chrome_binary=self._cr_binary) if self.borg_chrome_pool: assert itemUrl is not None, "You need to pass a URL to the contextmanager, so it can dispatch to the correct tab!" return self.borg_chrome_pool.get().tab(url=itemUrl, extra_id=extra_tid) else: return ChromeController.ChromeContext(binary=self._cr_binary) def _syncIntoChromium(self, cr): cr.clear_cookies() # Headers are a list of 2-tuples. We need a dict hdict = dict(self.browserHeaders) cr.update_headers(hdict) for cookie in self.cj: # Something, somewhere is setting cookies without a value, # and that confuses chromium a LOT. Anways, just don't forward # those particular cookies. 
if cookie and cookie.value: cr.set_cookie(cookie) def _syncOutOfChromium(self, cr): for cookie in cr.get_cookies(): self.cj.set_cookie(cookie) def getItemChromium(self, itemUrl, extra_tid=False): self.log.info("Fetching page for URL: '%s' with Chromium" % itemUrl) if extra_tid is True: extra_tid = threading.get_ident() with self._chrome_context(itemUrl, extra_tid=extra_tid) as cr: self._syncIntoChromium(cr) response = cr.blocking_navigate_and_get_source(itemUrl, timeout=self.navigate_timeout_secs) raw_url = cr.get_current_url() fileN = urllib.parse.unquote(urllib.parse.urlparse(raw_url)[2].split("/")[-1]) fileN = bs4.UnicodeDammit(fileN).unicode_markup self._syncOutOfChromium(cr) # Probably a bad assumption if response['binary']: mType = "application/x-binary" else: mType = "text/html" # Use the new interface that returns the actual type if 'mimetype' in response: mType = response['mimetype'] # So, self._cr.page_source appears to be the *compressed* page source as-rendered. Because reasons. content = response['content'] if isinstance(content, bytes): self._check_waf(content, itemUrl) elif isinstance(content, str): self._check_waf(content.encode("UTF-8"), itemUrl) else: self.log.error("Unknown type of content return: %s" % (type(content), )) return content, fileN, mType def getHeadTitleChromium(self, url, referrer=None, extra_tid=False, title_timeout=None): ''' ''' self.log.info("Getting HEAD with Chromium") if extra_tid is True: extra_tid = threading.get_ident() with self._chrome_context(url, extra_tid=extra_tid) as cr: self._syncIntoChromium(cr) if referrer: cr.blocking_navigate(referrer) time.sleep(random.uniform(2, 6)) cr.blocking_navigate(url) title, cur_url = cr.get_page_url_title() if title_timeout: for _ in range(title_timeout): # Wait until the page sets a title. 
This generally indicates that # the page is fully rendered, which for some reason seems to not # always be true after blocking_navigate, despite the fact that # that call shouldn't return until DOMContentLoaded has fired if cur_url not in title: break time.sleep(1) title, cur_url = cr.get_page_url_title() self._syncOutOfChromium(cr) self.log.info("Resolved URL for %s -> %s", url, cur_url) ret = { 'url': cur_url, 'title': title, } return ret def getHeadChromium(self, url, referrer=None, extra_tid=None): self.log.info("Getting HEAD with Chromium") if not referrer: referrer = url if extra_tid is True: extra_tid = threading.get_ident() with self._chrome_context(url, extra_tid=extra_tid) as cr: self._syncIntoChromium(cr) cr.blocking_navigate(referrer) time.sleep(random.uniform(2, 6)) cr.blocking_navigate(url) dummy_title, cur_url = cr.get_page_url_title() self._syncOutOfChromium(cr) return cur_url def chromiumGetRenderedItem(self, url, extra_tid=None, title_timeout=None): if extra_tid is True: extra_tid = threading.get_ident() with self._chrome_context(url, extra_tid=extra_tid) as cr: self._syncIntoChromium(cr) # get_rendered_page_source cr.blocking_navigate(url) title, cur_url = cr.get_page_url_title() if title_timeout: for _ in range(title_timeout): # Wait until the page sets a title. 
This generally indicates that # the page is fully rendered, which for some reason seems to not # always be true after blocking_navigate, despite the fact that # that call shouldn't return until DOMContentLoaded has fired if cur_url not in title: break time.sleep(1) title, cur_url = cr.get_page_url_title() content = cr.get_rendered_page_source() mType = 'text/html' fileN = '' self._syncOutOfChromium(cr) self._check_waf(content.encode("UTF-8"), url) return content, fileN, mType def __del__(self): # print("ChromiumMixin destructor") sup = super() if hasattr(sup, '__del__'): sup.__del__() def chromiumContext(self, url, extra_tid=None): ''' Return a active chromium context, useable for manual operations directly against chromium. The WebRequest user agent and other context is synchronized into the chromium instance at startup, and changes are flushed back to the webrequest instance from chromium at completion. ''' assert url is not None, "You need to pass a URL to the contextmanager, so it can dispatch to the correct tab!" if extra_tid is True: extra_tid = threading.get_ident() return self._chrome_context(url, extra_tid=extra_tid)
fake-name/WebRequest
WebRequest/ChromiumMixin.py
WebGetCrMixin.chromiumContext
python
def chromiumContext(self, url, extra_tid=None): ''' Return a active chromium context, useable for manual operations directly against chromium. The WebRequest user agent and other context is synchronized into the chromium instance at startup, and changes are flushed back to the webrequest instance from chromium at completion. ''' assert url is not None, "You need to pass a URL to the contextmanager, so it can dispatch to the correct tab!" if extra_tid is True: extra_tid = threading.get_ident() return self._chrome_context(url, extra_tid=extra_tid)
Return a active chromium context, useable for manual operations directly against chromium. The WebRequest user agent and other context is synchronized into the chromium instance at startup, and changes are flushed back to the webrequest instance from chromium at completion.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/ChromiumMixin.py#L295-L310
[ "def _chrome_context(self, itemUrl, extra_tid):\n\tif self.borg_chrome_pool and self.borg_chrome_pool is True:\n\t\tself.log.info(\"Initializing chromium pool on first use!\")\n\t\tself.borg_chrome_pool = ChromiumBorg(chrome_binary=self._cr_binary)\n\n\tif self.borg_chrome_pool:\n\t\tassert itemUrl is not None, \"Y...
class WebGetCrMixin(object): # creds is a list of 3-tuples that gets inserted into the password manager. # it is structured [(top_level_url1, username1, password1), (top_level_url2, username2, password2)] def __init__(self, use_global_tab_pool=True, *args, **kwargs): if "chromium-binary" in kwargs: self._cr_binary = kwargs.pop("chrome-binary") else: self._cr_binary = "google-chrome" super().__init__(*args, **kwargs) self.navigate_timeout_secs = 10 self.wrapper_step_through_timeout = 20 if use_global_tab_pool: self.borg_chrome_pool = True else: self.borg_chrome_pool = None def _chrome_context(self, itemUrl, extra_tid): if self.borg_chrome_pool and self.borg_chrome_pool is True: self.log.info("Initializing chromium pool on first use!") self.borg_chrome_pool = ChromiumBorg(chrome_binary=self._cr_binary) if self.borg_chrome_pool: assert itemUrl is not None, "You need to pass a URL to the contextmanager, so it can dispatch to the correct tab!" return self.borg_chrome_pool.get().tab(url=itemUrl, extra_id=extra_tid) else: return ChromeController.ChromeContext(binary=self._cr_binary) def _syncIntoChromium(self, cr): cr.clear_cookies() # Headers are a list of 2-tuples. We need a dict hdict = dict(self.browserHeaders) cr.update_headers(hdict) for cookie in self.cj: # Something, somewhere is setting cookies without a value, # and that confuses chromium a LOT. Anways, just don't forward # those particular cookies. 
if cookie and cookie.value: cr.set_cookie(cookie) def _syncOutOfChromium(self, cr): for cookie in cr.get_cookies(): self.cj.set_cookie(cookie) def getItemChromium(self, itemUrl, extra_tid=False): self.log.info("Fetching page for URL: '%s' with Chromium" % itemUrl) if extra_tid is True: extra_tid = threading.get_ident() with self._chrome_context(itemUrl, extra_tid=extra_tid) as cr: self._syncIntoChromium(cr) response = cr.blocking_navigate_and_get_source(itemUrl, timeout=self.navigate_timeout_secs) raw_url = cr.get_current_url() fileN = urllib.parse.unquote(urllib.parse.urlparse(raw_url)[2].split("/")[-1]) fileN = bs4.UnicodeDammit(fileN).unicode_markup self._syncOutOfChromium(cr) # Probably a bad assumption if response['binary']: mType = "application/x-binary" else: mType = "text/html" # Use the new interface that returns the actual type if 'mimetype' in response: mType = response['mimetype'] # So, self._cr.page_source appears to be the *compressed* page source as-rendered. Because reasons. content = response['content'] if isinstance(content, bytes): self._check_waf(content, itemUrl) elif isinstance(content, str): self._check_waf(content.encode("UTF-8"), itemUrl) else: self.log.error("Unknown type of content return: %s" % (type(content), )) return content, fileN, mType def getHeadTitleChromium(self, url, referrer=None, extra_tid=False, title_timeout=None): ''' ''' self.log.info("Getting HEAD with Chromium") if extra_tid is True: extra_tid = threading.get_ident() with self._chrome_context(url, extra_tid=extra_tid) as cr: self._syncIntoChromium(cr) if referrer: cr.blocking_navigate(referrer) time.sleep(random.uniform(2, 6)) cr.blocking_navigate(url) title, cur_url = cr.get_page_url_title() if title_timeout: for _ in range(title_timeout): # Wait until the page sets a title. 
This generally indicates that # the page is fully rendered, which for some reason seems to not # always be true after blocking_navigate, despite the fact that # that call shouldn't return until DOMContentLoaded has fired if cur_url not in title: break time.sleep(1) title, cur_url = cr.get_page_url_title() self._syncOutOfChromium(cr) self.log.info("Resolved URL for %s -> %s", url, cur_url) ret = { 'url': cur_url, 'title': title, } return ret def getHeadChromium(self, url, referrer=None, extra_tid=None): self.log.info("Getting HEAD with Chromium") if not referrer: referrer = url if extra_tid is True: extra_tid = threading.get_ident() with self._chrome_context(url, extra_tid=extra_tid) as cr: self._syncIntoChromium(cr) cr.blocking_navigate(referrer) time.sleep(random.uniform(2, 6)) cr.blocking_navigate(url) dummy_title, cur_url = cr.get_page_url_title() self._syncOutOfChromium(cr) return cur_url def chromiumGetRenderedItem(self, url, extra_tid=None, title_timeout=None): if extra_tid is True: extra_tid = threading.get_ident() with self._chrome_context(url, extra_tid=extra_tid) as cr: self._syncIntoChromium(cr) # get_rendered_page_source cr.blocking_navigate(url) title, cur_url = cr.get_page_url_title() if title_timeout: for _ in range(title_timeout): # Wait until the page sets a title. 
This generally indicates that # the page is fully rendered, which for some reason seems to not # always be true after blocking_navigate, despite the fact that # that call shouldn't return until DOMContentLoaded has fired if cur_url not in title: break time.sleep(1) title, cur_url = cr.get_page_url_title() content = cr.get_rendered_page_source() mType = 'text/html' fileN = '' self._syncOutOfChromium(cr) self._check_waf(content.encode("UTF-8"), url) return content, fileN, mType def __del__(self): # print("ChromiumMixin destructor") sup = super() if hasattr(sup, '__del__'): sup.__del__() def stepThroughJsWaf_bare_chromium(self, url, titleContains='', titleNotContains='', extra_tid=None): ''' Use Chromium to access a resource behind WAF protection. Params: ``url`` - The URL to access that is protected by WAF ``titleContains`` - A string that is in the title of the protected page, and NOT the WAF intermediate page. The presence of this string in the page title is used to determine whether the WAF protection has been successfully penetrated. ``titleContains`` - A string that is in the title of the WAF intermediate page and NOT in the target page. The presence of this string in the page title is used to determine whether the WAF protection has been successfully penetrated. The current WebGetRobust headers are installed into the selenium browser, which is then used to access the protected resource. Once the protected page has properly loaded, the WAF access cookie is then extracted from the selenium browser, and installed back into the WebGetRobust instance, so it can continue to use the WAF auth in normal requests. 
''' if (not titleContains) and (not titleNotContains): raise ValueError("You must pass either a string the title should contain, or a string the title shouldn't contain!") if titleContains and titleNotContains: raise ValueError("You can only pass a single conditional statement!") self.log.info("Attempting to access page through WAF browser verification.") current_title = None if extra_tid is True: extra_tid = threading.get_ident() with self._chrome_context(url, extra_tid=extra_tid) as cr: self._syncIntoChromium(cr) cr.blocking_navigate(url) for _ in range(self.wrapper_step_through_timeout): time.sleep(1) current_title, _ = cr.get_page_url_title() if titleContains and titleContains in current_title: self._syncOutOfChromium(cr) return True if titleNotContains and current_title and titleNotContains not in current_title: self._syncOutOfChromium(cr) return True self._syncOutOfChromium(cr) self.log.error("Failed to step through. Current title: '%s'", current_title) return False
fake-name/WebRequest
WebRequest/SeleniumModules/SeleniumChromiumMixin.py
WebGetSeleniumChromiumMixin.stepThroughJsWaf_selenium_chromium
python
def stepThroughJsWaf_selenium_chromium(self, url, titleContains='', titleNotContains=''): ''' Use Selenium+SeleniumChromium to access a resource behind cloudflare protection. Params: ``url`` - The URL to access that is protected by cloudflare ``titleContains`` - A string that is in the title of the protected page, and NOT the cloudflare intermediate page. The presence of this string in the page title is used to determine whether the cloudflare protection has been successfully penetrated. The current WebGetRobust headers are installed into the selenium browser, which is then used to access the protected resource. Once the protected page has properly loaded, the cloudflare access cookie is then extracted from the selenium browser, and installed back into the WebGetRobust instance, so it can continue to use the cloudflare auth in normal requests. ''' if (not titleContains) and (not titleNotContains): raise ValueError("You must pass either a string the title should contain, or a string the title shouldn't contain!") if titleContains and titleNotContains: raise ValueError("You can only pass a single conditional statement!") self.log.info("Attempting to access page through cloudflare browser verification.") if not self.selenium_chromium_driver: self._initSeleniumChromiumWebDriver() self._syncIntoSeleniumChromiumWebDriver() self.selenium_chromium_driver.get(url) if titleContains: condition = EC.title_contains(titleContains) elif titleNotContains: condition = SeleniumCommon.title_not_contains(titleNotContains) else: raise ValueError("Wat?") try: WebDriverWait(self.selenium_chromium_driver, 45).until(condition) success = True self.log.info("Successfully accessed main page!") except TimeoutException: self.log.error("Could not pass through cloudflare blocking!") success = False # Add cookies to cookiejar self._syncOutOfSeleniumChromiumWebDriver() self._syncCookiesFromFile() return success
Use Selenium+SeleniumChromium to access a resource behind cloudflare protection. Params: ``url`` - The URL to access that is protected by cloudflare ``titleContains`` - A string that is in the title of the protected page, and NOT the cloudflare intermediate page. The presence of this string in the page title is used to determine whether the cloudflare protection has been successfully penetrated. The current WebGetRobust headers are installed into the selenium browser, which is then used to access the protected resource. Once the protected page has properly loaded, the cloudflare access cookie is then extracted from the selenium browser, and installed back into the WebGetRobust instance, so it can continue to use the cloudflare auth in normal requests.
train
https://github.com/fake-name/WebRequest/blob/b6c94631ff88b5f81f26a9f99a2d5c706810b11f/WebRequest/SeleniumModules/SeleniumChromiumMixin.py#L156-L212
[ "def _initSeleniumChromiumWebDriver(self):\n\tif self.selenium_chromium_driver:\n\t\tself.selenium_chromium_driver.quit()\n\n\twgSettings = dict(self.browserHeaders)\n\n\tchrome_options = Options()\n\tchrome_options.add_argument(\"--headless\")\n\tchrome_options.add_argument(\"--user-agent={}\".format(wgSettings['U...
class WebGetSeleniumChromiumMixin(object): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.selenium_chromium_driver = None def _initSeleniumChromiumWebDriver(self): if self.selenium_chromium_driver: self.selenium_chromium_driver.quit() wgSettings = dict(self.browserHeaders) chrome_options = Options() chrome_options.add_argument("--headless") chrome_options.add_argument("--user-agent={}".format(wgSettings['User-Agent'])) dcap = dict(DesiredCapabilities.CHROME) # Install the headers from the WebGet class into phantomjs dcap["chrome.page.settings.userAgent"] = wgSettings.pop('User-Agent') for headerName in wgSettings: if headerName != 'Accept-Encoding': dcap['chrome.page.customHeaders.{header}'.format(header=headerName)] = wgSettings[headerName] self.selenium_chromium_driver = selenium.webdriver.Chrome( desired_capabilities = dcap, chrome_options = chrome_options, ) self.selenium_chromium_driver.set_window_size(1280, 1024) def _syncIntoSeleniumChromiumWebDriver(self): ''' So selenium is completely retarded, and you can't just set cookes, you have to be navigated to the domain for which you want to set cookies. This is extra double-plus idiotic, as it means you can't set cookies up before navigating. Sigh. 
''' pass # for cookie in self.getCookies(): # print("Cookie: ", cookie) # cookurl = [ # "http" if cookieDict['httponly'] else "https", # scheme 0 URL scheme specifier # cookie.domain, # netloc 1 Network location part # "/", # path 2 Hierarchical path # "", # params 3 Parameters for last path element # "", # query 4 Query component # "", # fragment 5 Fragment identifier # ] # cdat = { # 'name' : cookie.name, # 'value' : cookie.value, # 'domain' : cookie.domain, # 'path' : # 'expiry' : # } # print("CDat: ", cdat) # self.selenium_chromium_driver.add_cookie(cdat) def _syncOutOfSeleniumChromiumWebDriver(self): for cookie in self.selenium_chromium_driver.get_cookies(): self.addSeleniumCookie(cookie) def getItemSeleniumChromium(self, itemUrl): self.log.info("Fetching page for URL: '%s' with SeleniumChromium" % itemUrl) if not self.selenium_chromium_driver: self._initSeleniumChromiumWebDriver() self._syncIntoSeleniumChromiumWebDriver() with SeleniumCommon.load_delay_context_manager(self.selenium_chromium_driver): self.selenium_chromium_driver.get(itemUrl) time.sleep(3) fileN = urllib.parse.unquote(urllib.parse.urlparse(self.selenium_chromium_driver.current_url)[2].split("/")[-1]) fileN = bs4.UnicodeDammit(fileN).unicode_markup self._syncOutOfSeleniumChromiumWebDriver() # Probably a bad assumption mType = "text/html" # So, self.selenium_chromium_driver.page_source appears to be the *compressed* page source as-rendered. Because reasons. 
source = self.selenium_chromium_driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML") source = "<html>"+source+"</html>" return source, fileN, mType def getHeadTitleSeleniumChromium(self, url, referrer=None): self.getHeadSeleniumChromium(url, referrer) ret = { 'url' : self.selenium_chromium_driver.current_url, 'title' : self.selenium_chromium_driver.title, } return ret def getHeadSeleniumChromium(self, url, referrer=None): self.log.info("Getting HEAD with SeleniumChromium") if not self.selenium_chromium_driver: self._initSeleniumChromiumWebDriver() self._syncIntoSeleniumChromiumWebDriver() def try_get(loc_url): tries = 3 for x in range(9999): try: self.selenium_chromium_driver.get(loc_url) time.sleep(random.uniform(2, 6)) return except socket.timeout as e: if x > tries: raise e if referrer: try_get(referrer) try_get(url) self._syncOutOfSeleniumChromiumWebDriver() return self.selenium_chromium_driver.current_url def __del__(self): # print("SeleniumChromium __del__") if self.selenium_chromium_driver != None: self.selenium_chromium_driver.quit() sup = super() if hasattr(sup, '__del__'): sup.__del__()
astroswego/plotypus
src/plotypus/periodogram.py
Lomb_Scargle
python
def Lomb_Scargle(data, precision, min_period, max_period, period_jobs=1): time, mags, *err = data.T scaled_mags = (mags-mags.mean())/mags.std() minf, maxf = 2*np.pi/max_period, 2*np.pi/min_period freqs = np.arange(minf, maxf, precision) pgram = lombscargle(time, scaled_mags, freqs) return 2*np.pi/freqs[np.argmax(pgram)]
Returns the period of *data* according to the `Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/periodogram.py#L20-L51
null
""" Period finding and rephasing functions. """ import numpy as np from scipy.signal import lombscargle from multiprocessing import Pool from functools import partial __all__ = [ 'find_period', 'conditional_entropy', 'CE', 'Lomb_Scargle', 'rephase', 'get_phase' ] def conditional_entropy(data, precision, min_period, max_period, xbins=10, ybins=5, period_jobs=1): """ Returns the period of *data* by minimizing conditional entropy. See `link <http://arxiv.org/pdf/1306.6664v2.pdf>`_ [GDDMD] for details. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. xbins : int, optional Number of phase bins for each trial period (default 10). ybins : int, optional Number of magnitude bins for each trial period (default 5). period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. **Citations** .. [GDDMD] Graham, Matthew J. ; Drake, Andrew J. ; Djorgovski, S. G. ; Mahabal, Ashish A. ; Donalek, Ciro, 2013, Monthly Notices of the Royal Astronomical Society, Volume 434, Issue 3, p.2629-2635 """ periods = np.arange(min_period, max_period, precision) copy = np.ma.copy(data) copy[:,1] = (copy[:,1] - np.min(copy[:,1])) \ / (np.max(copy[:,1]) - np.min(copy[:,1])) partial_job = partial(CE, data=copy, xbins=xbins, ybins=ybins) m = map if period_jobs <= 1 else Pool(period_jobs).map entropies = list(m(partial_job, periods)) return periods[np.argmin(entropies)] def CE(period, data, xbins=10, ybins=5): """ Returns the conditional entropy of *data* rephased with *period*. 
**Parameters** period : number The period to rephase *data* by. data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. xbins : int, optional Number of phase bins (default 10). ybins : int, optional Number of magnitude bins (default 5). """ if period <= 0: return np.PINF r = rephase(data, period) bins, *_ = np.histogram2d(r[:,0], r[:,1], [xbins, ybins], [[0,1], [0,1]]) size = r.shape[0] # The following code was once more readable, but much slower. # Here is what it used to be: # ----------------------------------------------------------------------- # return np.sum((lambda p: p * np.log(np.sum(bins[i,:]) / size / p) \ # if p > 0 else 0)(bins[i][j] / size) # for i in np.arange(0, xbins) # for j in np.arange(0, ybins)) if size > 0 else np.PINF # ----------------------------------------------------------------------- # TODO: replace this comment with something that's not old code if size > 0: # bins[i,j] / size divided_bins = bins / size # indices where that is positive # to avoid division by zero arg_positive = divided_bins > 0 # array containing the sums of each column in the bins array column_sums = np.sum(divided_bins, axis=1) #changed 0 by 1 # array is repeated row-wise, so that it can be sliced by arg_positive column_sums = np.repeat(np.reshape(column_sums, (xbins,1)), ybins, axis=1) #column_sums = np.repeat(np.reshape(column_sums, (1,-1)), xbins, axis=0) # select only the elements in both arrays which correspond to a # positive bin select_divided_bins = divided_bins[arg_positive] select_column_sums = column_sums[arg_positive] # initialize the result array A = np.empty((xbins, ybins), dtype=float) # store at every index [i,j] in A which corresponds to a positive bin: # bins[i,j]/size * log(bins[i,:] / size / (bins[i,j]/size)) A[ arg_positive] = select_divided_bins \ * np.log(select_column_sums / select_divided_bins) # store 0 at every index in A which corresponds to a non-positive bin 
A[~arg_positive] = 0 # return the summation return np.sum(A) else: return np.PINF def find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1): """find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1) Returns the period of *data* according to the given *periodogram*, searching first with a coarse precision, and then a fine precision. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. coarse_precision : number Distance between contiguous frequencies in search-space during first sweep. fine_precision : number Distance between contiguous frequencies in search-space during second sweep. periodogram : function A function with arguments *data*, *precision*, *min_period*, *max_period*, and *period_jobs*, and return value *period*. period_jobs : int, optional Number of simultaneous processes to use while searching (default 1). **Returns** period : number The period of *data*. """ if min_period >= max_period: return min_period coarse_period = periodogram(data, coarse_precision, min_period, max_period, period_jobs=period_jobs) return coarse_period if coarse_precision <= fine_precision else \ periodogram(data, fine_precision, coarse_period - coarse_precision, coarse_period + coarse_precision, period_jobs=period_jobs) def rephase(data, period=1.0, shift=0.0, col=0, copy=True): """ Returns *data* (or a copy) phased with *period*, and shifted by a phase-shift *shift*. **Parameters** data : array-like, shape = [n_samples, n_cols] Array containing the time or phase values to be rephased in column *col*. period : number, optional Period to phase *data* by (default 1.0). 
shift : number, optional Phase shift to apply to phases (default 0.0). col : int, optional Column in *data* containing the time or phase values to be rephased (default 0). copy : bool, optional If True, a new array is returned, otherwise *data* is rephased in-place (default True). **Returns** rephased : array-like, shape = [n_samples, n_cols] Array containing the rephased *data*. """ rephased = np.ma.array(data, copy=copy) rephased[:, col] = get_phase(rephased[:, col], period, shift) return rephased def get_phase(time, period=1.0, shift=0.0): """ Returns *time* transformed to phase-space with *period*, after applying a phase-shift *shift*. **Parameters** time : array-like, shape = [n_samples] The times to transform. period : number, optional The period to phase by (default 1.0). shift : number, optional The phase-shift to apply to the phases (default 0.0). **Returns** phase : array-like, shape = [n_samples] *time* transformed into phase-space with *period*, after applying a phase-shift *shift*. """ return (time / period - shift) % 1
astroswego/plotypus
src/plotypus/periodogram.py
conditional_entropy
python
def conditional_entropy(data, precision, min_period, max_period, xbins=10, ybins=5, period_jobs=1): periods = np.arange(min_period, max_period, precision) copy = np.ma.copy(data) copy[:,1] = (copy[:,1] - np.min(copy[:,1])) \ / (np.max(copy[:,1]) - np.min(copy[:,1])) partial_job = partial(CE, data=copy, xbins=xbins, ybins=ybins) m = map if period_jobs <= 1 else Pool(period_jobs).map entropies = list(m(partial_job, periods)) return periods[np.argmin(entropies)]
Returns the period of *data* by minimizing conditional entropy. See `link <http://arxiv.org/pdf/1306.6664v2.pdf>`_ [GDDMD] for details. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. xbins : int, optional Number of phase bins for each trial period (default 10). ybins : int, optional Number of magnitude bins for each trial period (default 5). period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. **Citations** .. [GDDMD] Graham, Matthew J. ; Drake, Andrew J. ; Djorgovski, S. G. ; Mahabal, Ashish A. ; Donalek, Ciro, 2013, Monthly Notices of the Royal Astronomical Society, Volume 434, Issue 3, p.2629-2635
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/periodogram.py#L54-L99
null
""" Period finding and rephasing functions. """ import numpy as np from scipy.signal import lombscargle from multiprocessing import Pool from functools import partial __all__ = [ 'find_period', 'conditional_entropy', 'CE', 'Lomb_Scargle', 'rephase', 'get_phase' ] def Lomb_Scargle(data, precision, min_period, max_period, period_jobs=1): """ Returns the period of *data* according to the `Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. """ time, mags, *err = data.T scaled_mags = (mags-mags.mean())/mags.std() minf, maxf = 2*np.pi/max_period, 2*np.pi/min_period freqs = np.arange(minf, maxf, precision) pgram = lombscargle(time, scaled_mags, freqs) return 2*np.pi/freqs[np.argmax(pgram)] def CE(period, data, xbins=10, ybins=5): """ Returns the conditional entropy of *data* rephased with *period*. **Parameters** period : number The period to rephase *data* by. data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. xbins : int, optional Number of phase bins (default 10). ybins : int, optional Number of magnitude bins (default 5). """ if period <= 0: return np.PINF r = rephase(data, period) bins, *_ = np.histogram2d(r[:,0], r[:,1], [xbins, ybins], [[0,1], [0,1]]) size = r.shape[0] # The following code was once more readable, but much slower. 
# Here is what it used to be: # ----------------------------------------------------------------------- # return np.sum((lambda p: p * np.log(np.sum(bins[i,:]) / size / p) \ # if p > 0 else 0)(bins[i][j] / size) # for i in np.arange(0, xbins) # for j in np.arange(0, ybins)) if size > 0 else np.PINF # ----------------------------------------------------------------------- # TODO: replace this comment with something that's not old code if size > 0: # bins[i,j] / size divided_bins = bins / size # indices where that is positive # to avoid division by zero arg_positive = divided_bins > 0 # array containing the sums of each column in the bins array column_sums = np.sum(divided_bins, axis=1) #changed 0 by 1 # array is repeated row-wise, so that it can be sliced by arg_positive column_sums = np.repeat(np.reshape(column_sums, (xbins,1)), ybins, axis=1) #column_sums = np.repeat(np.reshape(column_sums, (1,-1)), xbins, axis=0) # select only the elements in both arrays which correspond to a # positive bin select_divided_bins = divided_bins[arg_positive] select_column_sums = column_sums[arg_positive] # initialize the result array A = np.empty((xbins, ybins), dtype=float) # store at every index [i,j] in A which corresponds to a positive bin: # bins[i,j]/size * log(bins[i,:] / size / (bins[i,j]/size)) A[ arg_positive] = select_divided_bins \ * np.log(select_column_sums / select_divided_bins) # store 0 at every index in A which corresponds to a non-positive bin A[~arg_positive] = 0 # return the summation return np.sum(A) else: return np.PINF def find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1): """find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1) Returns the period of *data* according to the given *periodogram*, searching first with a coarse precision, and then a fine precision. 
**Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. coarse_precision : number Distance between contiguous frequencies in search-space during first sweep. fine_precision : number Distance between contiguous frequencies in search-space during second sweep. periodogram : function A function with arguments *data*, *precision*, *min_period*, *max_period*, and *period_jobs*, and return value *period*. period_jobs : int, optional Number of simultaneous processes to use while searching (default 1). **Returns** period : number The period of *data*. """ if min_period >= max_period: return min_period coarse_period = periodogram(data, coarse_precision, min_period, max_period, period_jobs=period_jobs) return coarse_period if coarse_precision <= fine_precision else \ periodogram(data, fine_precision, coarse_period - coarse_precision, coarse_period + coarse_precision, period_jobs=period_jobs) def rephase(data, period=1.0, shift=0.0, col=0, copy=True): """ Returns *data* (or a copy) phased with *period*, and shifted by a phase-shift *shift*. **Parameters** data : array-like, shape = [n_samples, n_cols] Array containing the time or phase values to be rephased in column *col*. period : number, optional Period to phase *data* by (default 1.0). shift : number, optional Phase shift to apply to phases (default 0.0). col : int, optional Column in *data* containing the time or phase values to be rephased (default 0). copy : bool, optional If True, a new array is returned, otherwise *data* is rephased in-place (default True). **Returns** rephased : array-like, shape = [n_samples, n_cols] Array containing the rephased *data*. 
""" rephased = np.ma.array(data, copy=copy) rephased[:, col] = get_phase(rephased[:, col], period, shift) return rephased def get_phase(time, period=1.0, shift=0.0): """ Returns *time* transformed to phase-space with *period*, after applying a phase-shift *shift*. **Parameters** time : array-like, shape = [n_samples] The times to transform. period : number, optional The period to phase by (default 1.0). shift : number, optional The phase-shift to apply to the phases (default 0.0). **Returns** phase : array-like, shape = [n_samples] *time* transformed into phase-space with *period*, after applying a phase-shift *shift*. """ return (time / period - shift) % 1
astroswego/plotypus
src/plotypus/periodogram.py
CE
python
def CE(period, data, xbins=10, ybins=5): if period <= 0: return np.PINF r = rephase(data, period) bins, *_ = np.histogram2d(r[:,0], r[:,1], [xbins, ybins], [[0,1], [0,1]]) size = r.shape[0] # The following code was once more readable, but much slower. # Here is what it used to be: # ----------------------------------------------------------------------- # return np.sum((lambda p: p * np.log(np.sum(bins[i,:]) / size / p) \ # if p > 0 else 0)(bins[i][j] / size) # for i in np.arange(0, xbins) # for j in np.arange(0, ybins)) if size > 0 else np.PINF # ----------------------------------------------------------------------- # TODO: replace this comment with something that's not old code if size > 0: # bins[i,j] / size divided_bins = bins / size # indices where that is positive # to avoid division by zero arg_positive = divided_bins > 0 # array containing the sums of each column in the bins array column_sums = np.sum(divided_bins, axis=1) #changed 0 by 1 # array is repeated row-wise, so that it can be sliced by arg_positive column_sums = np.repeat(np.reshape(column_sums, (xbins,1)), ybins, axis=1) #column_sums = np.repeat(np.reshape(column_sums, (1,-1)), xbins, axis=0) # select only the elements in both arrays which correspond to a # positive bin select_divided_bins = divided_bins[arg_positive] select_column_sums = column_sums[arg_positive] # initialize the result array A = np.empty((xbins, ybins), dtype=float) # store at every index [i,j] in A which corresponds to a positive bin: # bins[i,j]/size * log(bins[i,:] / size / (bins[i,j]/size)) A[ arg_positive] = select_divided_bins \ * np.log(select_column_sums / select_divided_bins) # store 0 at every index in A which corresponds to a non-positive bin A[~arg_positive] = 0 # return the summation return np.sum(A) else: return np.PINF
Returns the conditional entropy of *data* rephased with *period*. **Parameters** period : number The period to rephase *data* by. data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. xbins : int, optional Number of phase bins (default 10). ybins : int, optional Number of magnitude bins (default 5).
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/periodogram.py#L102-L164
[ "def rephase(data, period=1.0, shift=0.0, col=0, copy=True):\n \"\"\"\n Returns *data* (or a copy) phased with *period*, and shifted by a\n phase-shift *shift*.\n\n **Parameters**\n\n data : array-like, shape = [n_samples, n_cols]\n Array containing the time or phase values to be rephased in c...
""" Period finding and rephasing functions. """ import numpy as np from scipy.signal import lombscargle from multiprocessing import Pool from functools import partial __all__ = [ 'find_period', 'conditional_entropy', 'CE', 'Lomb_Scargle', 'rephase', 'get_phase' ] def Lomb_Scargle(data, precision, min_period, max_period, period_jobs=1): """ Returns the period of *data* according to the `Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. """ time, mags, *err = data.T scaled_mags = (mags-mags.mean())/mags.std() minf, maxf = 2*np.pi/max_period, 2*np.pi/min_period freqs = np.arange(minf, maxf, precision) pgram = lombscargle(time, scaled_mags, freqs) return 2*np.pi/freqs[np.argmax(pgram)] def conditional_entropy(data, precision, min_period, max_period, xbins=10, ybins=5, period_jobs=1): """ Returns the period of *data* by minimizing conditional entropy. See `link <http://arxiv.org/pdf/1306.6664v2.pdf>`_ [GDDMD] for details. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. xbins : int, optional Number of phase bins for each trial period (default 10). 
ybins : int, optional Number of magnitude bins for each trial period (default 5). period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. **Citations** .. [GDDMD] Graham, Matthew J. ; Drake, Andrew J. ; Djorgovski, S. G. ; Mahabal, Ashish A. ; Donalek, Ciro, 2013, Monthly Notices of the Royal Astronomical Society, Volume 434, Issue 3, p.2629-2635 """ periods = np.arange(min_period, max_period, precision) copy = np.ma.copy(data) copy[:,1] = (copy[:,1] - np.min(copy[:,1])) \ / (np.max(copy[:,1]) - np.min(copy[:,1])) partial_job = partial(CE, data=copy, xbins=xbins, ybins=ybins) m = map if period_jobs <= 1 else Pool(period_jobs).map entropies = list(m(partial_job, periods)) return periods[np.argmin(entropies)] def find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1): """find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1) Returns the period of *data* according to the given *periodogram*, searching first with a coarse precision, and then a fine precision. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. coarse_precision : number Distance between contiguous frequencies in search-space during first sweep. fine_precision : number Distance between contiguous frequencies in search-space during second sweep. periodogram : function A function with arguments *data*, *precision*, *min_period*, *max_period*, and *period_jobs*, and return value *period*. 
period_jobs : int, optional Number of simultaneous processes to use while searching (default 1). **Returns** period : number The period of *data*. """ if min_period >= max_period: return min_period coarse_period = periodogram(data, coarse_precision, min_period, max_period, period_jobs=period_jobs) return coarse_period if coarse_precision <= fine_precision else \ periodogram(data, fine_precision, coarse_period - coarse_precision, coarse_period + coarse_precision, period_jobs=period_jobs) def rephase(data, period=1.0, shift=0.0, col=0, copy=True): """ Returns *data* (or a copy) phased with *period*, and shifted by a phase-shift *shift*. **Parameters** data : array-like, shape = [n_samples, n_cols] Array containing the time or phase values to be rephased in column *col*. period : number, optional Period to phase *data* by (default 1.0). shift : number, optional Phase shift to apply to phases (default 0.0). col : int, optional Column in *data* containing the time or phase values to be rephased (default 0). copy : bool, optional If True, a new array is returned, otherwise *data* is rephased in-place (default True). **Returns** rephased : array-like, shape = [n_samples, n_cols] Array containing the rephased *data*. """ rephased = np.ma.array(data, copy=copy) rephased[:, col] = get_phase(rephased[:, col], period, shift) return rephased def get_phase(time, period=1.0, shift=0.0): """ Returns *time* transformed to phase-space with *period*, after applying a phase-shift *shift*. **Parameters** time : array-like, shape = [n_samples] The times to transform. period : number, optional The period to phase by (default 1.0). shift : number, optional The phase-shift to apply to the phases (default 0.0). **Returns** phase : array-like, shape = [n_samples] *time* transformed into phase-space with *period*, after applying a phase-shift *shift*. """ return (time / period - shift) % 1
astroswego/plotypus
src/plotypus/periodogram.py
find_period
python
def find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1): if min_period >= max_period: return min_period coarse_period = periodogram(data, coarse_precision, min_period, max_period, period_jobs=period_jobs) return coarse_period if coarse_precision <= fine_precision else \ periodogram(data, fine_precision, coarse_period - coarse_precision, coarse_period + coarse_precision, period_jobs=period_jobs)
find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1) Returns the period of *data* according to the given *periodogram*, searching first with a coarse precision, and then a fine precision. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. coarse_precision : number Distance between contiguous frequencies in search-space during first sweep. fine_precision : number Distance between contiguous frequencies in search-space during second sweep. periodogram : function A function with arguments *data*, *precision*, *min_period*, *max_period*, and *period_jobs*, and return value *period*. period_jobs : int, optional Number of simultaneous processes to use while searching (default 1). **Returns** period : number The period of *data*.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/periodogram.py#L167-L212
[ "def Lomb_Scargle(data, precision, min_period, max_period, period_jobs=1):\n \"\"\"\n Returns the period of *data* according to the\n `Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_.\n\n **Parameters**\n\n data : array-l...
""" Period finding and rephasing functions. """ import numpy as np from scipy.signal import lombscargle from multiprocessing import Pool from functools import partial __all__ = [ 'find_period', 'conditional_entropy', 'CE', 'Lomb_Scargle', 'rephase', 'get_phase' ] def Lomb_Scargle(data, precision, min_period, max_period, period_jobs=1): """ Returns the period of *data* according to the `Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. """ time, mags, *err = data.T scaled_mags = (mags-mags.mean())/mags.std() minf, maxf = 2*np.pi/max_period, 2*np.pi/min_period freqs = np.arange(minf, maxf, precision) pgram = lombscargle(time, scaled_mags, freqs) return 2*np.pi/freqs[np.argmax(pgram)] def conditional_entropy(data, precision, min_period, max_period, xbins=10, ybins=5, period_jobs=1): """ Returns the period of *data* by minimizing conditional entropy. See `link <http://arxiv.org/pdf/1306.6664v2.pdf>`_ [GDDMD] for details. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. xbins : int, optional Number of phase bins for each trial period (default 10). 
ybins : int, optional Number of magnitude bins for each trial period (default 5). period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. **Citations** .. [GDDMD] Graham, Matthew J. ; Drake, Andrew J. ; Djorgovski, S. G. ; Mahabal, Ashish A. ; Donalek, Ciro, 2013, Monthly Notices of the Royal Astronomical Society, Volume 434, Issue 3, p.2629-2635 """ periods = np.arange(min_period, max_period, precision) copy = np.ma.copy(data) copy[:,1] = (copy[:,1] - np.min(copy[:,1])) \ / (np.max(copy[:,1]) - np.min(copy[:,1])) partial_job = partial(CE, data=copy, xbins=xbins, ybins=ybins) m = map if period_jobs <= 1 else Pool(period_jobs).map entropies = list(m(partial_job, periods)) return periods[np.argmin(entropies)] def CE(period, data, xbins=10, ybins=5): """ Returns the conditional entropy of *data* rephased with *period*. **Parameters** period : number The period to rephase *data* by. data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. xbins : int, optional Number of phase bins (default 10). ybins : int, optional Number of magnitude bins (default 5). """ if period <= 0: return np.PINF r = rephase(data, period) bins, *_ = np.histogram2d(r[:,0], r[:,1], [xbins, ybins], [[0,1], [0,1]]) size = r.shape[0] # The following code was once more readable, but much slower. 
# Here is what it used to be: # ----------------------------------------------------------------------- # return np.sum((lambda p: p * np.log(np.sum(bins[i,:]) / size / p) \ # if p > 0 else 0)(bins[i][j] / size) # for i in np.arange(0, xbins) # for j in np.arange(0, ybins)) if size > 0 else np.PINF # ----------------------------------------------------------------------- # TODO: replace this comment with something that's not old code if size > 0: # bins[i,j] / size divided_bins = bins / size # indices where that is positive # to avoid division by zero arg_positive = divided_bins > 0 # array containing the sums of each column in the bins array column_sums = np.sum(divided_bins, axis=1) #changed 0 by 1 # array is repeated row-wise, so that it can be sliced by arg_positive column_sums = np.repeat(np.reshape(column_sums, (xbins,1)), ybins, axis=1) #column_sums = np.repeat(np.reshape(column_sums, (1,-1)), xbins, axis=0) # select only the elements in both arrays which correspond to a # positive bin select_divided_bins = divided_bins[arg_positive] select_column_sums = column_sums[arg_positive] # initialize the result array A = np.empty((xbins, ybins), dtype=float) # store at every index [i,j] in A which corresponds to a positive bin: # bins[i,j]/size * log(bins[i,:] / size / (bins[i,j]/size)) A[ arg_positive] = select_divided_bins \ * np.log(select_column_sums / select_divided_bins) # store 0 at every index in A which corresponds to a non-positive bin A[~arg_positive] = 0 # return the summation return np.sum(A) else: return np.PINF def rephase(data, period=1.0, shift=0.0, col=0, copy=True): """ Returns *data* (or a copy) phased with *period*, and shifted by a phase-shift *shift*. **Parameters** data : array-like, shape = [n_samples, n_cols] Array containing the time or phase values to be rephased in column *col*. period : number, optional Period to phase *data* by (default 1.0). shift : number, optional Phase shift to apply to phases (default 0.0). 
col : int, optional Column in *data* containing the time or phase values to be rephased (default 0). copy : bool, optional If True, a new array is returned, otherwise *data* is rephased in-place (default True). **Returns** rephased : array-like, shape = [n_samples, n_cols] Array containing the rephased *data*. """ rephased = np.ma.array(data, copy=copy) rephased[:, col] = get_phase(rephased[:, col], period, shift) return rephased def get_phase(time, period=1.0, shift=0.0): """ Returns *time* transformed to phase-space with *period*, after applying a phase-shift *shift*. **Parameters** time : array-like, shape = [n_samples] The times to transform. period : number, optional The period to phase by (default 1.0). shift : number, optional The phase-shift to apply to the phases (default 0.0). **Returns** phase : array-like, shape = [n_samples] *time* transformed into phase-space with *period*, after applying a phase-shift *shift*. """ return (time / period - shift) % 1
astroswego/plotypus
src/plotypus/periodogram.py
rephase
python
def rephase(data, period=1.0, shift=0.0, col=0, copy=True): rephased = np.ma.array(data, copy=copy) rephased[:, col] = get_phase(rephased[:, col], period, shift) return rephased
Returns *data* (or a copy) phased with *period*, and shifted by a phase-shift *shift*. **Parameters** data : array-like, shape = [n_samples, n_cols] Array containing the time or phase values to be rephased in column *col*. period : number, optional Period to phase *data* by (default 1.0). shift : number, optional Phase shift to apply to phases (default 0.0). col : int, optional Column in *data* containing the time or phase values to be rephased (default 0). copy : bool, optional If True, a new array is returned, otherwise *data* is rephased in-place (default True). **Returns** rephased : array-like, shape = [n_samples, n_cols] Array containing the rephased *data*.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/periodogram.py#L215-L244
[ "def get_phase(time, period=1.0, shift=0.0):\n \"\"\"\n Returns *time* transformed to phase-space with *period*, after applying a\n phase-shift *shift*.\n\n **Parameters**\n\n time : array-like, shape = [n_samples]\n The times to transform.\n period : number, optional\n The period to...
""" Period finding and rephasing functions. """ import numpy as np from scipy.signal import lombscargle from multiprocessing import Pool from functools import partial __all__ = [ 'find_period', 'conditional_entropy', 'CE', 'Lomb_Scargle', 'rephase', 'get_phase' ] def Lomb_Scargle(data, precision, min_period, max_period, period_jobs=1): """ Returns the period of *data* according to the `Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. """ time, mags, *err = data.T scaled_mags = (mags-mags.mean())/mags.std() minf, maxf = 2*np.pi/max_period, 2*np.pi/min_period freqs = np.arange(minf, maxf, precision) pgram = lombscargle(time, scaled_mags, freqs) return 2*np.pi/freqs[np.argmax(pgram)] def conditional_entropy(data, precision, min_period, max_period, xbins=10, ybins=5, period_jobs=1): """ Returns the period of *data* by minimizing conditional entropy. See `link <http://arxiv.org/pdf/1306.6664v2.pdf>`_ [GDDMD] for details. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. precision : number Distance between contiguous frequencies in search-space. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. xbins : int, optional Number of phase bins for each trial period (default 10). 
ybins : int, optional Number of magnitude bins for each trial period (default 5). period_jobs : int, optional Number of simultaneous processes to use while searching. Only one process will ever be used, but argument is included to conform to *periodogram* standards of :func:`find_period` (default 1). **Returns** period : number The period of *data*. **Citations** .. [GDDMD] Graham, Matthew J. ; Drake, Andrew J. ; Djorgovski, S. G. ; Mahabal, Ashish A. ; Donalek, Ciro, 2013, Monthly Notices of the Royal Astronomical Society, Volume 434, Issue 3, p.2629-2635 """ periods = np.arange(min_period, max_period, precision) copy = np.ma.copy(data) copy[:,1] = (copy[:,1] - np.min(copy[:,1])) \ / (np.max(copy[:,1]) - np.min(copy[:,1])) partial_job = partial(CE, data=copy, xbins=xbins, ybins=ybins) m = map if period_jobs <= 1 else Pool(period_jobs).map entropies = list(m(partial_job, periods)) return periods[np.argmin(entropies)] def CE(period, data, xbins=10, ybins=5): """ Returns the conditional entropy of *data* rephased with *period*. **Parameters** period : number The period to rephase *data* by. data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. xbins : int, optional Number of phase bins (default 10). ybins : int, optional Number of magnitude bins (default 5). """ if period <= 0: return np.PINF r = rephase(data, period) bins, *_ = np.histogram2d(r[:,0], r[:,1], [xbins, ybins], [[0,1], [0,1]]) size = r.shape[0] # The following code was once more readable, but much slower. 
# Here is what it used to be: # ----------------------------------------------------------------------- # return np.sum((lambda p: p * np.log(np.sum(bins[i,:]) / size / p) \ # if p > 0 else 0)(bins[i][j] / size) # for i in np.arange(0, xbins) # for j in np.arange(0, ybins)) if size > 0 else np.PINF # ----------------------------------------------------------------------- # TODO: replace this comment with something that's not old code if size > 0: # bins[i,j] / size divided_bins = bins / size # indices where that is positive # to avoid division by zero arg_positive = divided_bins > 0 # array containing the sums of each column in the bins array column_sums = np.sum(divided_bins, axis=1) #changed 0 by 1 # array is repeated row-wise, so that it can be sliced by arg_positive column_sums = np.repeat(np.reshape(column_sums, (xbins,1)), ybins, axis=1) #column_sums = np.repeat(np.reshape(column_sums, (1,-1)), xbins, axis=0) # select only the elements in both arrays which correspond to a # positive bin select_divided_bins = divided_bins[arg_positive] select_column_sums = column_sums[arg_positive] # initialize the result array A = np.empty((xbins, ybins), dtype=float) # store at every index [i,j] in A which corresponds to a positive bin: # bins[i,j]/size * log(bins[i,:] / size / (bins[i,j]/size)) A[ arg_positive] = select_divided_bins \ * np.log(select_column_sums / select_divided_bins) # store 0 at every index in A which corresponds to a non-positive bin A[~arg_positive] = 0 # return the summation return np.sum(A) else: return np.PINF def find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1): """find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1) Returns the period of *data* according to the given *periodogram*, searching first with a coarse precision, and then a fine precision. 
**Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Array containing columns *time*, *mag*, and (optional) *error*. min_period : number Minimum period in search-space. max_period : number Maximum period in search-space. coarse_precision : number Distance between contiguous frequencies in search-space during first sweep. fine_precision : number Distance between contiguous frequencies in search-space during second sweep. periodogram : function A function with arguments *data*, *precision*, *min_period*, *max_period*, and *period_jobs*, and return value *period*. period_jobs : int, optional Number of simultaneous processes to use while searching (default 1). **Returns** period : number The period of *data*. """ if min_period >= max_period: return min_period coarse_period = periodogram(data, coarse_precision, min_period, max_period, period_jobs=period_jobs) return coarse_period if coarse_precision <= fine_precision else \ periodogram(data, fine_precision, coarse_period - coarse_precision, coarse_period + coarse_precision, period_jobs=period_jobs) def get_phase(time, period=1.0, shift=0.0): """ Returns *time* transformed to phase-space with *period*, after applying a phase-shift *shift*. **Parameters** time : array-like, shape = [n_samples] The times to transform. period : number, optional The period to phase by (default 1.0). shift : number, optional The phase-shift to apply to the phases (default 0.0). **Returns** phase : array-like, shape = [n_samples] *time* transformed into phase-space with *period*, after applying a phase-shift *shift*. """ return (time / period - shift) % 1
astroswego/plotypus
src/plotypus/preprocessing.py
Fourier.fit
python
def fit(self, X, y=None): if self.degree_range is not None: self.degree = self.baart_criteria(X, y) return self
Sets *self.degree* according to :func:`baart_criteria` if *degree_range* is not None, otherwise does nothing. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : array-like or None, shape = [n_samples], optional Row vector of magnitudes (default None). **Returns** self : returns an instance of self
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/preprocessing.py#L76-L94
[ "def baart_criteria(self, X, y):\n \"\"\"\n Returns the optimal Fourier series degree as determined by\n `Baart's Criteria <http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1986A%26A...170...59P&amp;data_type=PDF_HIGH&amp;whole_paper=YES&amp;type=PRINTER&amp;filetype=.pdf>`_ [JOP]_.\n\n **C...
class Fourier(): r""" Transforms observed data from phase-space to Fourier-space. In order to represent a light curve as a Fourier series of the form .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)), phased time observations are transformed into a design matrix :math:`\mathbf{X}` by :func:`Fourier.design_matrix`, such that linear regression can be used to solve for coefficients .. math:: \hat{b} = \begin{bmatrix} A_0 \\ a_1 \\ b_1 \\ \vdots \\ a_n \\ b_n \end{bmatrix} in the matrix equation .. math:: \mathbf{X} \hat{b} = \hat{y} where :math:`\vec{y}` is the vector of observed magnitudes .. math:: \hat{y} = \begin{bmatrix} m_0 \\ m_1 \\ \vdots \\ m_n \end{bmatrix} If *degree_range* is not None, *degree* is selected via :func:`baart_criteria`. Otherwise the provided *degree* is used. **Parameters** degree : positive int, optional Degree of Fourier series to use, assuming *degree_range* is None (default 3). degree_range : 2-tuple or None, optional Range of allowed *degree*\s to search via :func:`baart_criteria`, or None if single provided *degree* is to be used (default None). regressor : object with "fit" and "transform" methods, optional Regression object used for fitting light curve when selecting *degree* via :func:`baart_criteria`. Not used otherwise (default ``sklearn.linear_model.LinearRegression(fit_intercept=False)``). """ def __init__(self, degree=3, degree_range=None, regressor=LinearRegression(fit_intercept=False)): self.degree = degree self.degree_range = degree_range self.regressor = regressor def transform(self, X, y=None, **params): """ Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). 
**Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`. """ data = numpy.dstack((numpy.array(X).T[0], range(len(X))))[0] phase, order = data[data[:,0].argsort()].T design_matrix = self.design_matrix(phase, self.degree) return design_matrix[order.argsort()] def get_params(self, deep=False): """ Get parameters for this preprocessor. **Parameters** deep : boolean, optional Only here for scikit-learn compliance. Ignore it (default False). **Returns** params : dict Mapping of parameter name to value. """ return {'degree': self.degree} def set_params(self, **params): """ Set parameters for this preprocessor. **Returns** self : returns an instance of self """ if 'degree' in params: self.degree = params['degree'] return self def baart_criteria(self, X, y): """ Returns the optimal Fourier series degree as determined by `Baart's Criteria <http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1986A%26A...170...59P&amp;data_type=PDF_HIGH&amp;whole_paper=YES&amp;type=PRINTER&amp;filetype=.pdf>`_ [JOP]_. **Citations** .. [JOP] J. O. Petersen, 1986, "Studies of Cepheid type variability. IV. The uncertainties of Fourier decomposition parameters.", A&A, Vol. 170, p. 
59-69 """ try: min_degree, max_degree = self.degree_range except ValueError: raise ValueError("Degree range must be a length two sequence") cutoff = self.baart_tolerance(X) pipeline = Pipeline([('Fourier', Fourier()), ('Regressor', self.regressor)]) sorted_X = numpy.sort(X, axis=0) X_sorting = numpy.argsort(rowvec(X)) for degree in range(min_degree, max_degree): pipeline.set_params(Fourier__degree=degree) pipeline.fit(X, y) lc = pipeline.predict(sorted_X) residuals = y[X_sorting] - lc p_c = autocorrelation(residuals) if abs(p_c) <= cutoff: return degree # reached max_degree without reaching cutoff return max_degree @staticmethod def baart_tolerance(X): r""" Returns the autocorrelation cutoff of *X* for :func:`baart_criteria`, as given by .. math:: \frac{1}{\sqrt{2 (\operatorname{card}(\mathbf{X}) - 1)}} **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases **Returns** """ return (2 * (len(X) - 1))**(-1/2) @staticmethod def design_matrix(phases, degree): r""" Constructs an :math:`N \times 2n+1` matrix of the form: .. math:: \begin{bmatrix} 1 & \sin(1 \cdot 2\pi \cdot \phi_0) & \cos(1 \cdot 2\pi \cdot \phi_0) & \ldots & \sin(n \cdot 2\pi \cdot \phi_0) & \cos(n \cdot 2\pi \cdot \phi_0) \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\ 1 & \sin(1 \cdot 2\pi \cdot \phi_N) & \cos(1 \cdot 2\pi \cdot \phi_N) & \ldots & \sin(n \cdot 2\pi \cdot \phi_N) & \cos(n \cdot 2\pi \cdot \phi_N) \end{bmatrix} where :math:`n =` *degree*, :math:`N =` *n_samples*, and :math:`\phi_i =` *phases[i]*. 
Parameters ---------- phases : array-like, shape = [n_samples] """ n_samples = phases.size # initialize coefficient matrix M = numpy.empty((n_samples, 2*degree+1)) # indices i = numpy.arange(1, degree+1) # initialize the Nxn matrix that is repeated within the # sine and cosine terms x = numpy.empty((n_samples, degree)) # the Nxn matrix now has N copies of the same row, and each row is # integer multiples of pi counting from 1 to the degree x[:,:] = i*2*numpy.pi # multiply each row of x by the phases x.T[:,:] *= phases # place 1's in the first column of the coefficient matrix M[:,0] = 1 # the odd indices of the coefficient matrix have sine terms M[:,1::2] = numpy.sin(x) # the even indices of the coefficient matrix have cosine terms M[:,2::2] = numpy.cos(x) return M @staticmethod def phase_shifted_coefficients(amplitude_coefficients, form='cos', shift=0.0): r""" Converts Fourier coefficients from the amplitude form to the phase-shifted form, as either a sine or cosine series. Amplitude form: .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)) Sine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \sin(k \omega t + \Phi_k) Cosine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \cos(k \omega t + \Phi_k) **Parameters** amplitude_coefficients : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, a_1, b_1, \ldots a_n, b_n ]`. form : str, optional Form of output coefficients, must be one of 'sin' or 'cos' (default 'cos'). shift : number, optional Shift to apply to light curve (default 0.0). **Returns** out : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. 
""" if form != 'sin' and form != 'cos': raise NotImplementedError( 'Fourier series must have form sin or cos') # separate array of coefficients into respective parts A_0 = amplitude_coefficients[0] a_k = amplitude_coefficients[1::2] b_k = amplitude_coefficients[2::2] degree = a_k.size k = numpy.arange(1, degree+1) # A_k and Phi_k are the angle and hypotenuse in the right triangles # pictured below. A_k is obtained with the Pythagorean theorem, and # Phi_k is obtained with the 2-argument inverse tangent. # The positions of a_k and b_k depend on whether it is a sin or cos # series. # # Cos series Sin series # # b_k /| # --------- / | # \ Φ_k |_| / | # \ | A_k / | # \ | / | b_k # \ | a_k / | # A_k \ | / _| # \ | / Φ_k | | # \ | --------- # \| a_k # A_k = numpy.sqrt(a_k**2 + b_k**2) # phase coefficients are shifted to the left by optional ``shift`` if form == 'cos': Phi_k = numpy.arctan2(-a_k, b_k) + 2*pi*k*shift elif form == 'sin': Phi_k = numpy.arctan2(b_k, a_k) + 2*pi*k*shift # constrain Phi between 0 and 2*pi Phi_k %= 2*pi phase_shifted_coefficients_ = numpy.empty(amplitude_coefficients.shape, dtype=float) phase_shifted_coefficients_[0] = A_0 phase_shifted_coefficients_[1::2] = A_k phase_shifted_coefficients_[2::2] = Phi_k return phase_shifted_coefficients_ @staticmethod def fourier_ratios(phase_shifted_coeffs): r""" Returns the :math:`R_{j1}` and :math:`\phi_{j1}` values for the given phase-shifted coefficients. .. math:: R_{j1} = A_j / A_1 .. math:: \phi_{j1} = \phi_j - j \phi_1 **Parameters** phase_shifted_coeffs : array-like, shape = [:math:`2n+1`] Fourier sine or cosine series coefficients. :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. **Returns** out : array-like, shape = [:math:`2n+1`] Fourier ratios :math:`[ R_{21}, \phi_{21}, \ldots, R_{n1}, \phi_{n1} ]`. 
""" n_coeff = phase_shifted_coeffs.size # n_coeff = 2*degree + 1 => degree = (n_coeff-1)/2 degree = (n_coeff - 1) / 2 amplitudes = phase_shifted_coeffs[1::2] phases = phase_shifted_coeffs[2::2] # there are degree-1 amplitude ratios, and degree-1 phase deltas, # so altogether there are 2*(degree-1) values ratios = numpy.empty(2*(degree-1), dtype=float) amplitude_ratios = ratios[::2] phase_deltas = ratios[1::2] # amplitudes may be zero, so suppress division by zero warnings with numpy.errstate(divide="ignore"): amplitude_ratios[:] = amplitudes[1:] amplitude_ratios /= amplitudes[0] # indices for phase deltas i = numpy.arange(2, degree+1) phase_deltas[:] = phases[1:] phase_deltas -= i*phases[0] # constrain phase_deltas between 0 and 2*pi phase_deltas %= 2*pi return ratios
astroswego/plotypus
src/plotypus/preprocessing.py
Fourier.transform
python
def transform(self, X, y=None, **params): data = numpy.dstack((numpy.array(X).T[0], range(len(X))))[0] phase, order = data[data[:,0].argsort()].T design_matrix = self.design_matrix(phase, self.degree) return design_matrix[order.argsort()]
Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). **Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/preprocessing.py#L96-L117
[ "def design_matrix(phases, degree):\n r\"\"\"\n Constructs an :math:`N \\times 2n+1` matrix of the form:\n\n .. math::\n\n \\begin{bmatrix}\n 1\n & \\sin(1 \\cdot 2\\pi \\cdot \\phi_0)\n & \\cos(1 \\cdot 2\\pi \\cdot \\phi_0)\n & \\ldots\n & \\sin(n \\cdot 2\\pi ...
class Fourier(): r""" Transforms observed data from phase-space to Fourier-space. In order to represent a light curve as a Fourier series of the form .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)), phased time observations are transformed into a design matrix :math:`\mathbf{X}` by :func:`Fourier.design_matrix`, such that linear regression can be used to solve for coefficients .. math:: \hat{b} = \begin{bmatrix} A_0 \\ a_1 \\ b_1 \\ \vdots \\ a_n \\ b_n \end{bmatrix} in the matrix equation .. math:: \mathbf{X} \hat{b} = \hat{y} where :math:`\vec{y}` is the vector of observed magnitudes .. math:: \hat{y} = \begin{bmatrix} m_0 \\ m_1 \\ \vdots \\ m_n \end{bmatrix} If *degree_range* is not None, *degree* is selected via :func:`baart_criteria`. Otherwise the provided *degree* is used. **Parameters** degree : positive int, optional Degree of Fourier series to use, assuming *degree_range* is None (default 3). degree_range : 2-tuple or None, optional Range of allowed *degree*\s to search via :func:`baart_criteria`, or None if single provided *degree* is to be used (default None). regressor : object with "fit" and "transform" methods, optional Regression object used for fitting light curve when selecting *degree* via :func:`baart_criteria`. Not used otherwise (default ``sklearn.linear_model.LinearRegression(fit_intercept=False)``). """ def __init__(self, degree=3, degree_range=None, regressor=LinearRegression(fit_intercept=False)): self.degree = degree self.degree_range = degree_range self.regressor = regressor def fit(self, X, y=None): """ Sets *self.degree* according to :func:`baart_criteria` if *degree_range* is not None, otherwise does nothing. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : array-like or None, shape = [n_samples], optional Row vector of magnitudes (default None). 
**Returns** self : returns an instance of self """ if self.degree_range is not None: self.degree = self.baart_criteria(X, y) return self def get_params(self, deep=False): """ Get parameters for this preprocessor. **Parameters** deep : boolean, optional Only here for scikit-learn compliance. Ignore it (default False). **Returns** params : dict Mapping of parameter name to value. """ return {'degree': self.degree} def set_params(self, **params): """ Set parameters for this preprocessor. **Returns** self : returns an instance of self """ if 'degree' in params: self.degree = params['degree'] return self def baart_criteria(self, X, y): """ Returns the optimal Fourier series degree as determined by `Baart's Criteria <http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1986A%26A...170...59P&amp;data_type=PDF_HIGH&amp;whole_paper=YES&amp;type=PRINTER&amp;filetype=.pdf>`_ [JOP]_. **Citations** .. [JOP] J. O. Petersen, 1986, "Studies of Cepheid type variability. IV. The uncertainties of Fourier decomposition parameters.", A&A, Vol. 170, p. 59-69 """ try: min_degree, max_degree = self.degree_range except ValueError: raise ValueError("Degree range must be a length two sequence") cutoff = self.baart_tolerance(X) pipeline = Pipeline([('Fourier', Fourier()), ('Regressor', self.regressor)]) sorted_X = numpy.sort(X, axis=0) X_sorting = numpy.argsort(rowvec(X)) for degree in range(min_degree, max_degree): pipeline.set_params(Fourier__degree=degree) pipeline.fit(X, y) lc = pipeline.predict(sorted_X) residuals = y[X_sorting] - lc p_c = autocorrelation(residuals) if abs(p_c) <= cutoff: return degree # reached max_degree without reaching cutoff return max_degree @staticmethod def baart_tolerance(X): r""" Returns the autocorrelation cutoff of *X* for :func:`baart_criteria`, as given by .. 
math:: \frac{1}{\sqrt{2 (\operatorname{card}(\mathbf{X}) - 1)}} **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases **Returns** """ return (2 * (len(X) - 1))**(-1/2) @staticmethod def design_matrix(phases, degree): r""" Constructs an :math:`N \times 2n+1` matrix of the form: .. math:: \begin{bmatrix} 1 & \sin(1 \cdot 2\pi \cdot \phi_0) & \cos(1 \cdot 2\pi \cdot \phi_0) & \ldots & \sin(n \cdot 2\pi \cdot \phi_0) & \cos(n \cdot 2\pi \cdot \phi_0) \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\ 1 & \sin(1 \cdot 2\pi \cdot \phi_N) & \cos(1 \cdot 2\pi \cdot \phi_N) & \ldots & \sin(n \cdot 2\pi \cdot \phi_N) & \cos(n \cdot 2\pi \cdot \phi_N) \end{bmatrix} where :math:`n =` *degree*, :math:`N =` *n_samples*, and :math:`\phi_i =` *phases[i]*. Parameters ---------- phases : array-like, shape = [n_samples] """ n_samples = phases.size # initialize coefficient matrix M = numpy.empty((n_samples, 2*degree+1)) # indices i = numpy.arange(1, degree+1) # initialize the Nxn matrix that is repeated within the # sine and cosine terms x = numpy.empty((n_samples, degree)) # the Nxn matrix now has N copies of the same row, and each row is # integer multiples of pi counting from 1 to the degree x[:,:] = i*2*numpy.pi # multiply each row of x by the phases x.T[:,:] *= phases # place 1's in the first column of the coefficient matrix M[:,0] = 1 # the odd indices of the coefficient matrix have sine terms M[:,1::2] = numpy.sin(x) # the even indices of the coefficient matrix have cosine terms M[:,2::2] = numpy.cos(x) return M @staticmethod def phase_shifted_coefficients(amplitude_coefficients, form='cos', shift=0.0): r""" Converts Fourier coefficients from the amplitude form to the phase-shifted form, as either a sine or cosine series. Amplitude form: .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)) Sine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \sin(k \omega t + \Phi_k) Cosine form: .. 
math:: m(t) = A_0 + \sum_{k=1}^n A_k \cos(k \omega t + \Phi_k) **Parameters** amplitude_coefficients : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, a_1, b_1, \ldots a_n, b_n ]`. form : str, optional Form of output coefficients, must be one of 'sin' or 'cos' (default 'cos'). shift : number, optional Shift to apply to light curve (default 0.0). **Returns** out : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. """ if form != 'sin' and form != 'cos': raise NotImplementedError( 'Fourier series must have form sin or cos') # separate array of coefficients into respective parts A_0 = amplitude_coefficients[0] a_k = amplitude_coefficients[1::2] b_k = amplitude_coefficients[2::2] degree = a_k.size k = numpy.arange(1, degree+1) # A_k and Phi_k are the angle and hypotenuse in the right triangles # pictured below. A_k is obtained with the Pythagorean theorem, and # Phi_k is obtained with the 2-argument inverse tangent. # The positions of a_k and b_k depend on whether it is a sin or cos # series. # # Cos series Sin series # # b_k /| # --------- / | # \ Φ_k |_| / | # \ | A_k / | # \ | / | b_k # \ | a_k / | # A_k \ | / _| # \ | / Φ_k | | # \ | --------- # \| a_k # A_k = numpy.sqrt(a_k**2 + b_k**2) # phase coefficients are shifted to the left by optional ``shift`` if form == 'cos': Phi_k = numpy.arctan2(-a_k, b_k) + 2*pi*k*shift elif form == 'sin': Phi_k = numpy.arctan2(b_k, a_k) + 2*pi*k*shift # constrain Phi between 0 and 2*pi Phi_k %= 2*pi phase_shifted_coefficients_ = numpy.empty(amplitude_coefficients.shape, dtype=float) phase_shifted_coefficients_[0] = A_0 phase_shifted_coefficients_[1::2] = A_k phase_shifted_coefficients_[2::2] = Phi_k return phase_shifted_coefficients_ @staticmethod def fourier_ratios(phase_shifted_coeffs): r""" Returns the :math:`R_{j1}` and :math:`\phi_{j1}` values for the given phase-shifted coefficients. .. math:: R_{j1} = A_j / A_1 .. 
math:: \phi_{j1} = \phi_j - j \phi_1 **Parameters** phase_shifted_coeffs : array-like, shape = [:math:`2n+1`] Fourier sine or cosine series coefficients. :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. **Returns** out : array-like, shape = [:math:`2n+1`] Fourier ratios :math:`[ R_{21}, \phi_{21}, \ldots, R_{n1}, \phi_{n1} ]`. """ n_coeff = phase_shifted_coeffs.size # n_coeff = 2*degree + 1 => degree = (n_coeff-1)/2 degree = (n_coeff - 1) / 2 amplitudes = phase_shifted_coeffs[1::2] phases = phase_shifted_coeffs[2::2] # there are degree-1 amplitude ratios, and degree-1 phase deltas, # so altogether there are 2*(degree-1) values ratios = numpy.empty(2*(degree-1), dtype=float) amplitude_ratios = ratios[::2] phase_deltas = ratios[1::2] # amplitudes may be zero, so suppress division by zero warnings with numpy.errstate(divide="ignore"): amplitude_ratios[:] = amplitudes[1:] amplitude_ratios /= amplitudes[0] # indices for phase deltas i = numpy.arange(2, degree+1) phase_deltas[:] = phases[1:] phase_deltas -= i*phases[0] # constrain phase_deltas between 0 and 2*pi phase_deltas %= 2*pi return ratios
astroswego/plotypus
src/plotypus/preprocessing.py
Fourier.baart_criteria
python
def baart_criteria(self, X, y): try: min_degree, max_degree = self.degree_range except ValueError: raise ValueError("Degree range must be a length two sequence") cutoff = self.baart_tolerance(X) pipeline = Pipeline([('Fourier', Fourier()), ('Regressor', self.regressor)]) sorted_X = numpy.sort(X, axis=0) X_sorting = numpy.argsort(rowvec(X)) for degree in range(min_degree, max_degree): pipeline.set_params(Fourier__degree=degree) pipeline.fit(X, y) lc = pipeline.predict(sorted_X) residuals = y[X_sorting] - lc p_c = autocorrelation(residuals) if abs(p_c) <= cutoff: return degree # reached max_degree without reaching cutoff return max_degree
Returns the optimal Fourier series degree as determined by `Baart's Criteria <http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1986A%26A...170...59P&amp;data_type=PDF_HIGH&amp;whole_paper=YES&amp;type=PRINTER&amp;filetype=.pdf>`_ [JOP]_. **Citations** .. [JOP] J. O. Petersen, 1986, "Studies of Cepheid type variability. IV. The uncertainties of Fourier decomposition parameters.", A&A, Vol. 170, p. 59-69
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/preprocessing.py#L148-L179
[ "def autocorrelation(X, lag=1):\n \"\"\"\n Computes the autocorrelation of *X* with the given *lag*.\n Autocorrelation is simply\n autocovariance(X) / covariance(X-mean, X-mean),\n where autocovariance is simply\n covariance((X-mean)[:-lag], (X-mean)[lag:]).\n\n See `link <https://en.wikipedia....
class Fourier(): r""" Transforms observed data from phase-space to Fourier-space. In order to represent a light curve as a Fourier series of the form .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)), phased time observations are transformed into a design matrix :math:`\mathbf{X}` by :func:`Fourier.design_matrix`, such that linear regression can be used to solve for coefficients .. math:: \hat{b} = \begin{bmatrix} A_0 \\ a_1 \\ b_1 \\ \vdots \\ a_n \\ b_n \end{bmatrix} in the matrix equation .. math:: \mathbf{X} \hat{b} = \hat{y} where :math:`\vec{y}` is the vector of observed magnitudes .. math:: \hat{y} = \begin{bmatrix} m_0 \\ m_1 \\ \vdots \\ m_n \end{bmatrix} If *degree_range* is not None, *degree* is selected via :func:`baart_criteria`. Otherwise the provided *degree* is used. **Parameters** degree : positive int, optional Degree of Fourier series to use, assuming *degree_range* is None (default 3). degree_range : 2-tuple or None, optional Range of allowed *degree*\s to search via :func:`baart_criteria`, or None if single provided *degree* is to be used (default None). regressor : object with "fit" and "transform" methods, optional Regression object used for fitting light curve when selecting *degree* via :func:`baart_criteria`. Not used otherwise (default ``sklearn.linear_model.LinearRegression(fit_intercept=False)``). """ def __init__(self, degree=3, degree_range=None, regressor=LinearRegression(fit_intercept=False)): self.degree = degree self.degree_range = degree_range self.regressor = regressor def fit(self, X, y=None): """ Sets *self.degree* according to :func:`baart_criteria` if *degree_range* is not None, otherwise does nothing. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : array-like or None, shape = [n_samples], optional Row vector of magnitudes (default None). 
**Returns** self : returns an instance of self """ if self.degree_range is not None: self.degree = self.baart_criteria(X, y) return self def transform(self, X, y=None, **params): """ Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). **Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`. """ data = numpy.dstack((numpy.array(X).T[0], range(len(X))))[0] phase, order = data[data[:,0].argsort()].T design_matrix = self.design_matrix(phase, self.degree) return design_matrix[order.argsort()] def get_params(self, deep=False): """ Get parameters for this preprocessor. **Parameters** deep : boolean, optional Only here for scikit-learn compliance. Ignore it (default False). **Returns** params : dict Mapping of parameter name to value. """ return {'degree': self.degree} def set_params(self, **params): """ Set parameters for this preprocessor. **Returns** self : returns an instance of self """ if 'degree' in params: self.degree = params['degree'] return self @staticmethod def baart_tolerance(X): r""" Returns the autocorrelation cutoff of *X* for :func:`baart_criteria`, as given by .. math:: \frac{1}{\sqrt{2 (\operatorname{card}(\mathbf{X}) - 1)}} **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases **Returns** """ return (2 * (len(X) - 1))**(-1/2) @staticmethod def design_matrix(phases, degree): r""" Constructs an :math:`N \times 2n+1` matrix of the form: .. 
math:: \begin{bmatrix} 1 & \sin(1 \cdot 2\pi \cdot \phi_0) & \cos(1 \cdot 2\pi \cdot \phi_0) & \ldots & \sin(n \cdot 2\pi \cdot \phi_0) & \cos(n \cdot 2\pi \cdot \phi_0) \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\ 1 & \sin(1 \cdot 2\pi \cdot \phi_N) & \cos(1 \cdot 2\pi \cdot \phi_N) & \ldots & \sin(n \cdot 2\pi \cdot \phi_N) & \cos(n \cdot 2\pi \cdot \phi_N) \end{bmatrix} where :math:`n =` *degree*, :math:`N =` *n_samples*, and :math:`\phi_i =` *phases[i]*. Parameters ---------- phases : array-like, shape = [n_samples] """ n_samples = phases.size # initialize coefficient matrix M = numpy.empty((n_samples, 2*degree+1)) # indices i = numpy.arange(1, degree+1) # initialize the Nxn matrix that is repeated within the # sine and cosine terms x = numpy.empty((n_samples, degree)) # the Nxn matrix now has N copies of the same row, and each row is # integer multiples of pi counting from 1 to the degree x[:,:] = i*2*numpy.pi # multiply each row of x by the phases x.T[:,:] *= phases # place 1's in the first column of the coefficient matrix M[:,0] = 1 # the odd indices of the coefficient matrix have sine terms M[:,1::2] = numpy.sin(x) # the even indices of the coefficient matrix have cosine terms M[:,2::2] = numpy.cos(x) return M @staticmethod def phase_shifted_coefficients(amplitude_coefficients, form='cos', shift=0.0): r""" Converts Fourier coefficients from the amplitude form to the phase-shifted form, as either a sine or cosine series. Amplitude form: .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)) Sine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \sin(k \omega t + \Phi_k) Cosine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \cos(k \omega t + \Phi_k) **Parameters** amplitude_coefficients : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, a_1, b_1, \ldots a_n, b_n ]`. form : str, optional Form of output coefficients, must be one of 'sin' or 'cos' (default 'cos'). 
shift : number, optional Shift to apply to light curve (default 0.0). **Returns** out : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. """ if form != 'sin' and form != 'cos': raise NotImplementedError( 'Fourier series must have form sin or cos') # separate array of coefficients into respective parts A_0 = amplitude_coefficients[0] a_k = amplitude_coefficients[1::2] b_k = amplitude_coefficients[2::2] degree = a_k.size k = numpy.arange(1, degree+1) # A_k and Phi_k are the angle and hypotenuse in the right triangles # pictured below. A_k is obtained with the Pythagorean theorem, and # Phi_k is obtained with the 2-argument inverse tangent. # The positions of a_k and b_k depend on whether it is a sin or cos # series. # # Cos series Sin series # # b_k /| # --------- / | # \ Φ_k |_| / | # \ | A_k / | # \ | / | b_k # \ | a_k / | # A_k \ | / _| # \ | / Φ_k | | # \ | --------- # \| a_k # A_k = numpy.sqrt(a_k**2 + b_k**2) # phase coefficients are shifted to the left by optional ``shift`` if form == 'cos': Phi_k = numpy.arctan2(-a_k, b_k) + 2*pi*k*shift elif form == 'sin': Phi_k = numpy.arctan2(b_k, a_k) + 2*pi*k*shift # constrain Phi between 0 and 2*pi Phi_k %= 2*pi phase_shifted_coefficients_ = numpy.empty(amplitude_coefficients.shape, dtype=float) phase_shifted_coefficients_[0] = A_0 phase_shifted_coefficients_[1::2] = A_k phase_shifted_coefficients_[2::2] = Phi_k return phase_shifted_coefficients_ @staticmethod def fourier_ratios(phase_shifted_coeffs): r""" Returns the :math:`R_{j1}` and :math:`\phi_{j1}` values for the given phase-shifted coefficients. .. math:: R_{j1} = A_j / A_1 .. math:: \phi_{j1} = \phi_j - j \phi_1 **Parameters** phase_shifted_coeffs : array-like, shape = [:math:`2n+1`] Fourier sine or cosine series coefficients. :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. **Returns** out : array-like, shape = [:math:`2n+1`] Fourier ratios :math:`[ R_{21}, \phi_{21}, \ldots, R_{n1}, \phi_{n1} ]`. 
""" n_coeff = phase_shifted_coeffs.size # n_coeff = 2*degree + 1 => degree = (n_coeff-1)/2 degree = (n_coeff - 1) / 2 amplitudes = phase_shifted_coeffs[1::2] phases = phase_shifted_coeffs[2::2] # there are degree-1 amplitude ratios, and degree-1 phase deltas, # so altogether there are 2*(degree-1) values ratios = numpy.empty(2*(degree-1), dtype=float) amplitude_ratios = ratios[::2] phase_deltas = ratios[1::2] # amplitudes may be zero, so suppress division by zero warnings with numpy.errstate(divide="ignore"): amplitude_ratios[:] = amplitudes[1:] amplitude_ratios /= amplitudes[0] # indices for phase deltas i = numpy.arange(2, degree+1) phase_deltas[:] = phases[1:] phase_deltas -= i*phases[0] # constrain phase_deltas between 0 and 2*pi phase_deltas %= 2*pi return ratios
astroswego/plotypus
src/plotypus/preprocessing.py
Fourier.design_matrix
python
def design_matrix(phases, degree): r""" Constructs an :math:`N \times 2n+1` matrix of the form: .. math:: \begin{bmatrix} 1 & \sin(1 \cdot 2\pi \cdot \phi_0) & \cos(1 \cdot 2\pi \cdot \phi_0) & \ldots & \sin(n \cdot 2\pi \cdot \phi_0) & \cos(n \cdot 2\pi \cdot \phi_0) \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\ 1 & \sin(1 \cdot 2\pi \cdot \phi_N) & \cos(1 \cdot 2\pi \cdot \phi_N) & \ldots & \sin(n \cdot 2\pi \cdot \phi_N) & \cos(n \cdot 2\pi \cdot \phi_N) \end{bmatrix} where :math:`n =` *degree*, :math:`N =` *n_samples*, and :math:`\phi_i =` *phases[i]*. Parameters ---------- phases : array-like, shape = [n_samples] """ n_samples = phases.size # initialize coefficient matrix M = numpy.empty((n_samples, 2*degree+1)) # indices i = numpy.arange(1, degree+1) # initialize the Nxn matrix that is repeated within the # sine and cosine terms x = numpy.empty((n_samples, degree)) # the Nxn matrix now has N copies of the same row, and each row is # integer multiples of pi counting from 1 to the degree x[:,:] = i*2*numpy.pi # multiply each row of x by the phases x.T[:,:] *= phases # place 1's in the first column of the coefficient matrix M[:,0] = 1 # the odd indices of the coefficient matrix have sine terms M[:,1::2] = numpy.sin(x) # the even indices of the coefficient matrix have cosine terms M[:,2::2] = numpy.cos(x) return M
r""" Constructs an :math:`N \times 2n+1` matrix of the form: .. math:: \begin{bmatrix} 1 & \sin(1 \cdot 2\pi \cdot \phi_0) & \cos(1 \cdot 2\pi \cdot \phi_0) & \ldots & \sin(n \cdot 2\pi \cdot \phi_0) & \cos(n \cdot 2\pi \cdot \phi_0) \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\ 1 & \sin(1 \cdot 2\pi \cdot \phi_N) & \cos(1 \cdot 2\pi \cdot \phi_N) & \ldots & \sin(n \cdot 2\pi \cdot \phi_N) & \cos(n \cdot 2\pi \cdot \phi_N) \end{bmatrix} where :math:`n =` *degree*, :math:`N =` *n_samples*, and :math:`\phi_i =` *phases[i]*. Parameters ---------- phases : array-like, shape = [n_samples]
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/preprocessing.py#L204-L260
null
class Fourier(): r""" Transforms observed data from phase-space to Fourier-space. In order to represent a light curve as a Fourier series of the form .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)), phased time observations are transformed into a design matrix :math:`\mathbf{X}` by :func:`Fourier.design_matrix`, such that linear regression can be used to solve for coefficients .. math:: \hat{b} = \begin{bmatrix} A_0 \\ a_1 \\ b_1 \\ \vdots \\ a_n \\ b_n \end{bmatrix} in the matrix equation .. math:: \mathbf{X} \hat{b} = \hat{y} where :math:`\vec{y}` is the vector of observed magnitudes .. math:: \hat{y} = \begin{bmatrix} m_0 \\ m_1 \\ \vdots \\ m_n \end{bmatrix} If *degree_range* is not None, *degree* is selected via :func:`baart_criteria`. Otherwise the provided *degree* is used. **Parameters** degree : positive int, optional Degree of Fourier series to use, assuming *degree_range* is None (default 3). degree_range : 2-tuple or None, optional Range of allowed *degree*\s to search via :func:`baart_criteria`, or None if single provided *degree* is to be used (default None). regressor : object with "fit" and "transform" methods, optional Regression object used for fitting light curve when selecting *degree* via :func:`baart_criteria`. Not used otherwise (default ``sklearn.linear_model.LinearRegression(fit_intercept=False)``). """ def __init__(self, degree=3, degree_range=None, regressor=LinearRegression(fit_intercept=False)): self.degree = degree self.degree_range = degree_range self.regressor = regressor def fit(self, X, y=None): """ Sets *self.degree* according to :func:`baart_criteria` if *degree_range* is not None, otherwise does nothing. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : array-like or None, shape = [n_samples], optional Row vector of magnitudes (default None). 
**Returns** self : returns an instance of self """ if self.degree_range is not None: self.degree = self.baart_criteria(X, y) return self def transform(self, X, y=None, **params): """ Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). **Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`. """ data = numpy.dstack((numpy.array(X).T[0], range(len(X))))[0] phase, order = data[data[:,0].argsort()].T design_matrix = self.design_matrix(phase, self.degree) return design_matrix[order.argsort()] def get_params(self, deep=False): """ Get parameters for this preprocessor. **Parameters** deep : boolean, optional Only here for scikit-learn compliance. Ignore it (default False). **Returns** params : dict Mapping of parameter name to value. """ return {'degree': self.degree} def set_params(self, **params): """ Set parameters for this preprocessor. **Returns** self : returns an instance of self """ if 'degree' in params: self.degree = params['degree'] return self def baart_criteria(self, X, y): """ Returns the optimal Fourier series degree as determined by `Baart's Criteria <http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1986A%26A...170...59P&amp;data_type=PDF_HIGH&amp;whole_paper=YES&amp;type=PRINTER&amp;filetype=.pdf>`_ [JOP]_. **Citations** .. [JOP] J. O. Petersen, 1986, "Studies of Cepheid type variability. IV. The uncertainties of Fourier decomposition parameters.", A&A, Vol. 170, p. 
59-69 """ try: min_degree, max_degree = self.degree_range except ValueError: raise ValueError("Degree range must be a length two sequence") cutoff = self.baart_tolerance(X) pipeline = Pipeline([('Fourier', Fourier()), ('Regressor', self.regressor)]) sorted_X = numpy.sort(X, axis=0) X_sorting = numpy.argsort(rowvec(X)) for degree in range(min_degree, max_degree): pipeline.set_params(Fourier__degree=degree) pipeline.fit(X, y) lc = pipeline.predict(sorted_X) residuals = y[X_sorting] - lc p_c = autocorrelation(residuals) if abs(p_c) <= cutoff: return degree # reached max_degree without reaching cutoff return max_degree @staticmethod def baart_tolerance(X): r""" Returns the autocorrelation cutoff of *X* for :func:`baart_criteria`, as given by .. math:: \frac{1}{\sqrt{2 (\operatorname{card}(\mathbf{X}) - 1)}} **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases **Returns** """ return (2 * (len(X) - 1))**(-1/2) @staticmethod def design_matrix(phases, degree): r""" Constructs an :math:`N \times 2n+1` matrix of the form: .. math:: \begin{bmatrix} 1 & \sin(1 \cdot 2\pi \cdot \phi_0) & \cos(1 \cdot 2\pi \cdot \phi_0) & \ldots & \sin(n \cdot 2\pi \cdot \phi_0) & \cos(n \cdot 2\pi \cdot \phi_0) \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\ 1 & \sin(1 \cdot 2\pi \cdot \phi_N) & \cos(1 \cdot 2\pi \cdot \phi_N) & \ldots & \sin(n \cdot 2\pi \cdot \phi_N) & \cos(n \cdot 2\pi \cdot \phi_N) \end{bmatrix} where :math:`n =` *degree*, :math:`N =` *n_samples*, and :math:`\phi_i =` *phases[i]*. 
Parameters ---------- phases : array-like, shape = [n_samples] """ n_samples = phases.size # initialize coefficient matrix M = numpy.empty((n_samples, 2*degree+1)) # indices i = numpy.arange(1, degree+1) # initialize the Nxn matrix that is repeated within the # sine and cosine terms x = numpy.empty((n_samples, degree)) # the Nxn matrix now has N copies of the same row, and each row is # integer multiples of pi counting from 1 to the degree x[:,:] = i*2*numpy.pi # multiply each row of x by the phases x.T[:,:] *= phases # place 1's in the first column of the coefficient matrix M[:,0] = 1 # the odd indices of the coefficient matrix have sine terms M[:,1::2] = numpy.sin(x) # the even indices of the coefficient matrix have cosine terms M[:,2::2] = numpy.cos(x) return M @staticmethod def phase_shifted_coefficients(amplitude_coefficients, form='cos', shift=0.0): r""" Converts Fourier coefficients from the amplitude form to the phase-shifted form, as either a sine or cosine series. Amplitude form: .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)) Sine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \sin(k \omega t + \Phi_k) Cosine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \cos(k \omega t + \Phi_k) **Parameters** amplitude_coefficients : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, a_1, b_1, \ldots a_n, b_n ]`. form : str, optional Form of output coefficients, must be one of 'sin' or 'cos' (default 'cos'). shift : number, optional Shift to apply to light curve (default 0.0). **Returns** out : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. 
""" if form != 'sin' and form != 'cos': raise NotImplementedError( 'Fourier series must have form sin or cos') # separate array of coefficients into respective parts A_0 = amplitude_coefficients[0] a_k = amplitude_coefficients[1::2] b_k = amplitude_coefficients[2::2] degree = a_k.size k = numpy.arange(1, degree+1) # A_k and Phi_k are the angle and hypotenuse in the right triangles # pictured below. A_k is obtained with the Pythagorean theorem, and # Phi_k is obtained with the 2-argument inverse tangent. # The positions of a_k and b_k depend on whether it is a sin or cos # series. # # Cos series Sin series # # b_k /| # --------- / | # \ Φ_k |_| / | # \ | A_k / | # \ | / | b_k # \ | a_k / | # A_k \ | / _| # \ | / Φ_k | | # \ | --------- # \| a_k # A_k = numpy.sqrt(a_k**2 + b_k**2) # phase coefficients are shifted to the left by optional ``shift`` if form == 'cos': Phi_k = numpy.arctan2(-a_k, b_k) + 2*pi*k*shift elif form == 'sin': Phi_k = numpy.arctan2(b_k, a_k) + 2*pi*k*shift # constrain Phi between 0 and 2*pi Phi_k %= 2*pi phase_shifted_coefficients_ = numpy.empty(amplitude_coefficients.shape, dtype=float) phase_shifted_coefficients_[0] = A_0 phase_shifted_coefficients_[1::2] = A_k phase_shifted_coefficients_[2::2] = Phi_k return phase_shifted_coefficients_ @staticmethod def fourier_ratios(phase_shifted_coeffs): r""" Returns the :math:`R_{j1}` and :math:`\phi_{j1}` values for the given phase-shifted coefficients. .. math:: R_{j1} = A_j / A_1 .. math:: \phi_{j1} = \phi_j - j \phi_1 **Parameters** phase_shifted_coeffs : array-like, shape = [:math:`2n+1`] Fourier sine or cosine series coefficients. :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. **Returns** out : array-like, shape = [:math:`2n+1`] Fourier ratios :math:`[ R_{21}, \phi_{21}, \ldots, R_{n1}, \phi_{n1} ]`. 
""" n_coeff = phase_shifted_coeffs.size # n_coeff = 2*degree + 1 => degree = (n_coeff-1)/2 degree = (n_coeff - 1) / 2 amplitudes = phase_shifted_coeffs[1::2] phases = phase_shifted_coeffs[2::2] # there are degree-1 amplitude ratios, and degree-1 phase deltas, # so altogether there are 2*(degree-1) values ratios = numpy.empty(2*(degree-1), dtype=float) amplitude_ratios = ratios[::2] phase_deltas = ratios[1::2] # amplitudes may be zero, so suppress division by zero warnings with numpy.errstate(divide="ignore"): amplitude_ratios[:] = amplitudes[1:] amplitude_ratios /= amplitudes[0] # indices for phase deltas i = numpy.arange(2, degree+1) phase_deltas[:] = phases[1:] phase_deltas -= i*phases[0] # constrain phase_deltas between 0 and 2*pi phase_deltas %= 2*pi return ratios
astroswego/plotypus
src/plotypus/preprocessing.py
Fourier.phase_shifted_coefficients
python
def phase_shifted_coefficients(amplitude_coefficients, form='cos', shift=0.0):
    r"""
    Converts Fourier coefficients from the amplitude form to the
    phase-shifted form, as either a sine or cosine series.

    Amplitude form:

    .. math::
        m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t))

    Sine form:

    .. math::
        m(t) = A_0 + \sum_{k=1}^n A_k \sin(k \omega t + \Phi_k)

    Cosine form:

    .. math::
        m(t) = A_0 + \sum_{k=1}^n A_k \cos(k \omega t + \Phi_k)

    **Parameters**

    amplitude_coefficients : array-like, shape = [:math:`2n+1`]
        Array of coefficients :math:`[ A_0, a_1, b_1, \ldots a_n, b_n ]`.
    form : str, optional
        Form of output coefficients, must be one of 'sin' or 'cos'
        (default 'cos').
    shift : number, optional
        Shift to apply to light curve (default 0.0).

    **Returns**

    out : array-like, shape = [:math:`2n+1`]
        Array of coefficients :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`.

    **Raises**

    NotImplementedError
        If *form* is neither 'sin' nor 'cos'.
    """
    if form not in ('sin', 'cos'):
        raise NotImplementedError(
            'Fourier series must have form sin or cos')

    # Split the flat coefficient vector [A_0, a_1, b_1, ..., a_n, b_n]
    # into the constant term and the sine/cosine coefficient arrays.
    offset = amplitude_coefficients[0]
    sin_coeffs = amplitude_coefficients[1::2]
    cos_coeffs = amplitude_coefficients[2::2]

    # Harmonic indices k = 1..degree.
    harmonics = numpy.arange(1, sin_coeffs.size + 1)

    # Each amplitude A_k is the hypotenuse of the right triangle with
    # legs a_k and b_k (Pythagorean theorem).
    amplitudes = numpy.sqrt(sin_coeffs**2 + cos_coeffs**2)

    # Each phase Phi_k is the 2-argument inverse tangent of the legs;
    # the argument order depends on the requested series form. The
    # optional ``shift`` moves every phase by 2*pi*k*shift.
    if form == 'cos':
        phases = numpy.arctan2(-sin_coeffs, cos_coeffs)
    else:
        phases = numpy.arctan2(cos_coeffs, sin_coeffs)
    phases += 2*pi*harmonics*shift
    # Constrain every phase to the interval [0, 2*pi).
    phases %= 2*pi

    # Interleave the results back into [A_0, A_1, Phi_1, ..., A_n, Phi_n].
    out = numpy.empty(amplitude_coefficients.shape, dtype=float)
    out[0] = offset
    out[1::2] = amplitudes
    out[2::2] = phases

    return out
# # Cos series Sin series # # b_k /| # --------- / | # \ Φ_k |_| / | # \ | A_k / | # \ | / | b_k # \ | a_k / | # A_k \ | / _| # \ | / Φ_k | | # \ | --------- # \| a_k # A_k = numpy.sqrt(a_k**2 + b_k**2) # phase coefficients are shifted to the left by optional ``shift`` if form == 'cos': Phi_k = numpy.arctan2(-a_k, b_k) + 2*pi*k*shift elif form == 'sin': Phi_k = numpy.arctan2(b_k, a_k) + 2*pi*k*shift # constrain Phi between 0 and 2*pi Phi_k %= 2*pi phase_shifted_coefficients_ = numpy.empty(amplitude_coefficients.shape, dtype=float) phase_shifted_coefficients_[0] = A_0 phase_shifted_coefficients_[1::2] = A_k phase_shifted_coefficients_[2::2] = Phi_k return phase_shifted_coefficients_
r""" Converts Fourier coefficients from the amplitude form to the phase-shifted form, as either a sine or cosine series. Amplitude form: .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)) Sine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \sin(k \omega t + \Phi_k) Cosine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \cos(k \omega t + \Phi_k) **Parameters** amplitude_coefficients : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, a_1, b_1, \ldots a_n, b_n ]`. form : str, optional Form of output coefficients, must be one of 'sin' or 'cos' (default 'cos'). shift : number, optional Shift to apply to light curve (default 0.0). **Returns** out : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/preprocessing.py#L263-L347
null
class Fourier(): r""" Transforms observed data from phase-space to Fourier-space. In order to represent a light curve as a Fourier series of the form .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)), phased time observations are transformed into a design matrix :math:`\mathbf{X}` by :func:`Fourier.design_matrix`, such that linear regression can be used to solve for coefficients .. math:: \hat{b} = \begin{bmatrix} A_0 \\ a_1 \\ b_1 \\ \vdots \\ a_n \\ b_n \end{bmatrix} in the matrix equation .. math:: \mathbf{X} \hat{b} = \hat{y} where :math:`\vec{y}` is the vector of observed magnitudes .. math:: \hat{y} = \begin{bmatrix} m_0 \\ m_1 \\ \vdots \\ m_n \end{bmatrix} If *degree_range* is not None, *degree* is selected via :func:`baart_criteria`. Otherwise the provided *degree* is used. **Parameters** degree : positive int, optional Degree of Fourier series to use, assuming *degree_range* is None (default 3). degree_range : 2-tuple or None, optional Range of allowed *degree*\s to search via :func:`baart_criteria`, or None if single provided *degree* is to be used (default None). regressor : object with "fit" and "transform" methods, optional Regression object used for fitting light curve when selecting *degree* via :func:`baart_criteria`. Not used otherwise (default ``sklearn.linear_model.LinearRegression(fit_intercept=False)``). """ def __init__(self, degree=3, degree_range=None, regressor=LinearRegression(fit_intercept=False)): self.degree = degree self.degree_range = degree_range self.regressor = regressor def fit(self, X, y=None): """ Sets *self.degree* according to :func:`baart_criteria` if *degree_range* is not None, otherwise does nothing. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : array-like or None, shape = [n_samples], optional Row vector of magnitudes (default None). 
**Returns** self : returns an instance of self """ if self.degree_range is not None: self.degree = self.baart_criteria(X, y) return self def transform(self, X, y=None, **params): """ Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). **Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`. """ data = numpy.dstack((numpy.array(X).T[0], range(len(X))))[0] phase, order = data[data[:,0].argsort()].T design_matrix = self.design_matrix(phase, self.degree) return design_matrix[order.argsort()] def get_params(self, deep=False): """ Get parameters for this preprocessor. **Parameters** deep : boolean, optional Only here for scikit-learn compliance. Ignore it (default False). **Returns** params : dict Mapping of parameter name to value. """ return {'degree': self.degree} def set_params(self, **params): """ Set parameters for this preprocessor. **Returns** self : returns an instance of self """ if 'degree' in params: self.degree = params['degree'] return self def baart_criteria(self, X, y): """ Returns the optimal Fourier series degree as determined by `Baart's Criteria <http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1986A%26A...170...59P&amp;data_type=PDF_HIGH&amp;whole_paper=YES&amp;type=PRINTER&amp;filetype=.pdf>`_ [JOP]_. **Citations** .. [JOP] J. O. Petersen, 1986, "Studies of Cepheid type variability. IV. The uncertainties of Fourier decomposition parameters.", A&A, Vol. 170, p. 
59-69 """ try: min_degree, max_degree = self.degree_range except ValueError: raise ValueError("Degree range must be a length two sequence") cutoff = self.baart_tolerance(X) pipeline = Pipeline([('Fourier', Fourier()), ('Regressor', self.regressor)]) sorted_X = numpy.sort(X, axis=0) X_sorting = numpy.argsort(rowvec(X)) for degree in range(min_degree, max_degree): pipeline.set_params(Fourier__degree=degree) pipeline.fit(X, y) lc = pipeline.predict(sorted_X) residuals = y[X_sorting] - lc p_c = autocorrelation(residuals) if abs(p_c) <= cutoff: return degree # reached max_degree without reaching cutoff return max_degree @staticmethod def baart_tolerance(X): r""" Returns the autocorrelation cutoff of *X* for :func:`baart_criteria`, as given by .. math:: \frac{1}{\sqrt{2 (\operatorname{card}(\mathbf{X}) - 1)}} **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases **Returns** """ return (2 * (len(X) - 1))**(-1/2) @staticmethod def design_matrix(phases, degree): r""" Constructs an :math:`N \times 2n+1` matrix of the form: .. math:: \begin{bmatrix} 1 & \sin(1 \cdot 2\pi \cdot \phi_0) & \cos(1 \cdot 2\pi \cdot \phi_0) & \ldots & \sin(n \cdot 2\pi \cdot \phi_0) & \cos(n \cdot 2\pi \cdot \phi_0) \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\ 1 & \sin(1 \cdot 2\pi \cdot \phi_N) & \cos(1 \cdot 2\pi \cdot \phi_N) & \ldots & \sin(n \cdot 2\pi \cdot \phi_N) & \cos(n \cdot 2\pi \cdot \phi_N) \end{bmatrix} where :math:`n =` *degree*, :math:`N =` *n_samples*, and :math:`\phi_i =` *phases[i]*. 
Parameters ---------- phases : array-like, shape = [n_samples] """ n_samples = phases.size # initialize coefficient matrix M = numpy.empty((n_samples, 2*degree+1)) # indices i = numpy.arange(1, degree+1) # initialize the Nxn matrix that is repeated within the # sine and cosine terms x = numpy.empty((n_samples, degree)) # the Nxn matrix now has N copies of the same row, and each row is # integer multiples of pi counting from 1 to the degree x[:,:] = i*2*numpy.pi # multiply each row of x by the phases x.T[:,:] *= phases # place 1's in the first column of the coefficient matrix M[:,0] = 1 # the odd indices of the coefficient matrix have sine terms M[:,1::2] = numpy.sin(x) # the even indices of the coefficient matrix have cosine terms M[:,2::2] = numpy.cos(x) return M @staticmethod @staticmethod def fourier_ratios(phase_shifted_coeffs): r""" Returns the :math:`R_{j1}` and :math:`\phi_{j1}` values for the given phase-shifted coefficients. .. math:: R_{j1} = A_j / A_1 .. math:: \phi_{j1} = \phi_j - j \phi_1 **Parameters** phase_shifted_coeffs : array-like, shape = [:math:`2n+1`] Fourier sine or cosine series coefficients. :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. **Returns** out : array-like, shape = [:math:`2n+1`] Fourier ratios :math:`[ R_{21}, \phi_{21}, \ldots, R_{n1}, \phi_{n1} ]`. 
""" n_coeff = phase_shifted_coeffs.size # n_coeff = 2*degree + 1 => degree = (n_coeff-1)/2 degree = (n_coeff - 1) / 2 amplitudes = phase_shifted_coeffs[1::2] phases = phase_shifted_coeffs[2::2] # there are degree-1 amplitude ratios, and degree-1 phase deltas, # so altogether there are 2*(degree-1) values ratios = numpy.empty(2*(degree-1), dtype=float) amplitude_ratios = ratios[::2] phase_deltas = ratios[1::2] # amplitudes may be zero, so suppress division by zero warnings with numpy.errstate(divide="ignore"): amplitude_ratios[:] = amplitudes[1:] amplitude_ratios /= amplitudes[0] # indices for phase deltas i = numpy.arange(2, degree+1) phase_deltas[:] = phases[1:] phase_deltas -= i*phases[0] # constrain phase_deltas between 0 and 2*pi phase_deltas %= 2*pi return ratios
astroswego/plotypus
src/plotypus/preprocessing.py
Fourier.fourier_ratios
python
def fourier_ratios(phase_shifted_coeffs):
    r"""
    Returns the :math:`R_{j1}` and :math:`\phi_{j1}` values for the given
    phase-shifted coefficients.

    .. math::
        R_{j1} = A_j / A_1

    .. math::
        \phi_{j1} = \phi_j - j \phi_1

    **Parameters**

    phase_shifted_coeffs : array-like, shape = [:math:`2n+1`]
        Fourier sine or cosine series coefficients
        :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`.

    **Returns**

    out : array-like, shape = [:math:`2(n-1)`]
        Fourier ratios
        :math:`[ R_{21}, \phi_{21}, \ldots, R_{n1}, \phi_{n1} ]`.
    """
    n_coeff = phase_shifted_coeffs.size
    # n_coeff = 2*degree + 1  =>  degree = (n_coeff-1)/2.
    # Integer (floor) division is required: true division yields a float
    # on Python 3, which makes the ``numpy.empty`` size below a TypeError
    # and gives ``numpy.arange`` a float bound.
    degree = (n_coeff - 1) // 2

    amplitudes = phase_shifted_coeffs[1::2]
    phases = phase_shifted_coeffs[2::2]

    # there are degree-1 amplitude ratios, and degree-1 phase deltas,
    # so altogether there are 2*(degree-1) values, interleaved as
    # [R_21, phi_21, ..., R_n1, phi_n1]
    ratios = numpy.empty(2*(degree-1), dtype=float)
    amplitude_ratios = ratios[::2]
    phase_deltas = ratios[1::2]

    # amplitudes may be zero, so suppress division by zero warnings
    with numpy.errstate(divide="ignore"):
        amplitude_ratios[:] = amplitudes[1:]
        amplitude_ratios /= amplitudes[0]

    # harmonic indices j = 2..degree for the phase deltas
    j = numpy.arange(2, degree+1)
    phase_deltas[:] = phases[1:]
    phase_deltas -= j*phases[0]
    # constrain phase_deltas between 0 and 2*pi
    phase_deltas %= 2*pi

    return ratios
r""" Returns the :math:`R_{j1}` and :math:`\phi_{j1}` values for the given phase-shifted coefficients. .. math:: R_{j1} = A_j / A_1 .. math:: \phi_{j1} = \phi_j - j \phi_1 **Parameters** phase_shifted_coeffs : array-like, shape = [:math:`2n+1`] Fourier sine or cosine series coefficients. :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. **Returns** out : array-like, shape = [:math:`2n+1`] Fourier ratios :math:`[ R_{21}, \phi_{21}, \ldots, R_{n1}, \phi_{n1} ]`.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/preprocessing.py#L350-L402
null
class Fourier(): r""" Transforms observed data from phase-space to Fourier-space. In order to represent a light curve as a Fourier series of the form .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)), phased time observations are transformed into a design matrix :math:`\mathbf{X}` by :func:`Fourier.design_matrix`, such that linear regression can be used to solve for coefficients .. math:: \hat{b} = \begin{bmatrix} A_0 \\ a_1 \\ b_1 \\ \vdots \\ a_n \\ b_n \end{bmatrix} in the matrix equation .. math:: \mathbf{X} \hat{b} = \hat{y} where :math:`\vec{y}` is the vector of observed magnitudes .. math:: \hat{y} = \begin{bmatrix} m_0 \\ m_1 \\ \vdots \\ m_n \end{bmatrix} If *degree_range* is not None, *degree* is selected via :func:`baart_criteria`. Otherwise the provided *degree* is used. **Parameters** degree : positive int, optional Degree of Fourier series to use, assuming *degree_range* is None (default 3). degree_range : 2-tuple or None, optional Range of allowed *degree*\s to search via :func:`baart_criteria`, or None if single provided *degree* is to be used (default None). regressor : object with "fit" and "transform" methods, optional Regression object used for fitting light curve when selecting *degree* via :func:`baart_criteria`. Not used otherwise (default ``sklearn.linear_model.LinearRegression(fit_intercept=False)``). """ def __init__(self, degree=3, degree_range=None, regressor=LinearRegression(fit_intercept=False)): self.degree = degree self.degree_range = degree_range self.regressor = regressor def fit(self, X, y=None): """ Sets *self.degree* according to :func:`baart_criteria` if *degree_range* is not None, otherwise does nothing. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : array-like or None, shape = [n_samples], optional Row vector of magnitudes (default None). 
**Returns** self : returns an instance of self """ if self.degree_range is not None: self.degree = self.baart_criteria(X, y) return self def transform(self, X, y=None, **params): """ Transforms *X* from phase-space to Fourier-space, returning the design matrix produced by :func:`Fourier.design_matrix` for input to a regressor. **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases. y : None, optional Unused argument for conformity (default None). **Returns** design_matrix : array-like, shape = [n_samples, 2*degree+1] Fourier design matrix produced by :func:`Fourier.design_matrix`. """ data = numpy.dstack((numpy.array(X).T[0], range(len(X))))[0] phase, order = data[data[:,0].argsort()].T design_matrix = self.design_matrix(phase, self.degree) return design_matrix[order.argsort()] def get_params(self, deep=False): """ Get parameters for this preprocessor. **Parameters** deep : boolean, optional Only here for scikit-learn compliance. Ignore it (default False). **Returns** params : dict Mapping of parameter name to value. """ return {'degree': self.degree} def set_params(self, **params): """ Set parameters for this preprocessor. **Returns** self : returns an instance of self """ if 'degree' in params: self.degree = params['degree'] return self def baart_criteria(self, X, y): """ Returns the optimal Fourier series degree as determined by `Baart's Criteria <http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1986A%26A...170...59P&amp;data_type=PDF_HIGH&amp;whole_paper=YES&amp;type=PRINTER&amp;filetype=.pdf>`_ [JOP]_. **Citations** .. [JOP] J. O. Petersen, 1986, "Studies of Cepheid type variability. IV. The uncertainties of Fourier decomposition parameters.", A&A, Vol. 170, p. 
59-69 """ try: min_degree, max_degree = self.degree_range except ValueError: raise ValueError("Degree range must be a length two sequence") cutoff = self.baart_tolerance(X) pipeline = Pipeline([('Fourier', Fourier()), ('Regressor', self.regressor)]) sorted_X = numpy.sort(X, axis=0) X_sorting = numpy.argsort(rowvec(X)) for degree in range(min_degree, max_degree): pipeline.set_params(Fourier__degree=degree) pipeline.fit(X, y) lc = pipeline.predict(sorted_X) residuals = y[X_sorting] - lc p_c = autocorrelation(residuals) if abs(p_c) <= cutoff: return degree # reached max_degree without reaching cutoff return max_degree @staticmethod def baart_tolerance(X): r""" Returns the autocorrelation cutoff of *X* for :func:`baart_criteria`, as given by .. math:: \frac{1}{\sqrt{2 (\operatorname{card}(\mathbf{X}) - 1)}} **Parameters** X : array-like, shape = [n_samples, 1] Column vector of phases **Returns** """ return (2 * (len(X) - 1))**(-1/2) @staticmethod def design_matrix(phases, degree): r""" Constructs an :math:`N \times 2n+1` matrix of the form: .. math:: \begin{bmatrix} 1 & \sin(1 \cdot 2\pi \cdot \phi_0) & \cos(1 \cdot 2\pi \cdot \phi_0) & \ldots & \sin(n \cdot 2\pi \cdot \phi_0) & \cos(n \cdot 2\pi \cdot \phi_0) \\ \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\ 1 & \sin(1 \cdot 2\pi \cdot \phi_N) & \cos(1 \cdot 2\pi \cdot \phi_N) & \ldots & \sin(n \cdot 2\pi \cdot \phi_N) & \cos(n \cdot 2\pi \cdot \phi_N) \end{bmatrix} where :math:`n =` *degree*, :math:`N =` *n_samples*, and :math:`\phi_i =` *phases[i]*. 
Parameters ---------- phases : array-like, shape = [n_samples] """ n_samples = phases.size # initialize coefficient matrix M = numpy.empty((n_samples, 2*degree+1)) # indices i = numpy.arange(1, degree+1) # initialize the Nxn matrix that is repeated within the # sine and cosine terms x = numpy.empty((n_samples, degree)) # the Nxn matrix now has N copies of the same row, and each row is # integer multiples of pi counting from 1 to the degree x[:,:] = i*2*numpy.pi # multiply each row of x by the phases x.T[:,:] *= phases # place 1's in the first column of the coefficient matrix M[:,0] = 1 # the odd indices of the coefficient matrix have sine terms M[:,1::2] = numpy.sin(x) # the even indices of the coefficient matrix have cosine terms M[:,2::2] = numpy.cos(x) return M @staticmethod def phase_shifted_coefficients(amplitude_coefficients, form='cos', shift=0.0): r""" Converts Fourier coefficients from the amplitude form to the phase-shifted form, as either a sine or cosine series. Amplitude form: .. math:: m(t) = A_0 + \sum_{k=1}^n (a_k \sin(k \omega t) + b_k \cos(k \omega t)) Sine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \sin(k \omega t + \Phi_k) Cosine form: .. math:: m(t) = A_0 + \sum_{k=1}^n A_k \cos(k \omega t + \Phi_k) **Parameters** amplitude_coefficients : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, a_1, b_1, \ldots a_n, b_n ]`. form : str, optional Form of output coefficients, must be one of 'sin' or 'cos' (default 'cos'). shift : number, optional Shift to apply to light curve (default 0.0). **Returns** out : array-like, shape = [:math:`2n+1`] Array of coefficients :math:`[ A_0, A_1, \Phi_1, \ldots, A_n, \Phi_n ]`. 
""" if form != 'sin' and form != 'cos': raise NotImplementedError( 'Fourier series must have form sin or cos') # separate array of coefficients into respective parts A_0 = amplitude_coefficients[0] a_k = amplitude_coefficients[1::2] b_k = amplitude_coefficients[2::2] degree = a_k.size k = numpy.arange(1, degree+1) # A_k and Phi_k are the angle and hypotenuse in the right triangles # pictured below. A_k is obtained with the Pythagorean theorem, and # Phi_k is obtained with the 2-argument inverse tangent. # The positions of a_k and b_k depend on whether it is a sin or cos # series. # # Cos series Sin series # # b_k /| # --------- / | # \ Φ_k |_| / | # \ | A_k / | # \ | / | b_k # \ | a_k / | # A_k \ | / _| # \ | / Φ_k | | # \ | --------- # \| a_k # A_k = numpy.sqrt(a_k**2 + b_k**2) # phase coefficients are shifted to the left by optional ``shift`` if form == 'cos': Phi_k = numpy.arctan2(-a_k, b_k) + 2*pi*k*shift elif form == 'sin': Phi_k = numpy.arctan2(b_k, a_k) + 2*pi*k*shift # constrain Phi between 0 and 2*pi Phi_k %= 2*pi phase_shifted_coefficients_ = numpy.empty(amplitude_coefficients.shape, dtype=float) phase_shifted_coefficients_[0] = A_0 phase_shifted_coefficients_[1::2] = A_k phase_shifted_coefficients_[2::2] = Phi_k return phase_shifted_coefficients_ @staticmethod
astroswego/plotypus
src/plotypus/utils.py
verbose_print
python
def verbose_print(message, *, operation, verbosity):
    """
    Prints *message* to stderr only if the given *operation* is in the list
    *verbosity*. If "all" is in *verbosity*, all operations are printed.

    **Parameters**

    message : str
        The message to print.
    operation : str
        The type of operation being performed.
    verbosity : [str] or None
        The list of operations to print *message* for. If "all" is contained
        in the list, then all operations are printed. If None, no operation
        is printed.

    **Returns**

    None
    """
    # Guard clause: a verbosity of None silences everything.
    if verbosity is None:
        return
    if operation in verbosity or "all" in verbosity:
        print(message, file=stderr)
Prints *message* to stderr only if the given *operation* is in the list *verbosity*. If "all" is in *verbosity*, all operations are printed. **Parameters** message : str The message to print. operation : str The type of operation being performed. verbosity : [str] or None The list of operations to print *message* for. If "all" is contained in the list, then all operations are printed. If None, no operation is printed. **Returns** None
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/utils.py#L19-L41
null
from os import makedirs from os.path import join, isdir from sys import stderr from multiprocessing import Pool from numpy import absolute, concatenate, median, resize __all__ = [ 'verbose_print', 'pmap', 'make_sure_path_exists', 'get_signal', 'get_noise', 'colvec', 'mad', 'autocorrelation' ] def pmap(func, args, processes=None, callback=lambda *_, **__: None, **kwargs): """pmap(func, args, processes=None, callback=do_nothing, **kwargs) Parallel equivalent of ``map(func, args)``, with the additional ability of providing keyword arguments to func, and a callback function which is applied to each element in the returned list. Unlike map, the output is a non-lazy list. If *processes* is 1, no thread pool is used. **Parameters** func : function The function to map. args : iterable The arguments to map *func* over. processes : int or None, optional The number of processes in the thread pool. If only 1, no thread pool is used to avoid useless overhead. If None, the number is chosen based on your system by :class:`multiprocessing.Pool` (default None). callback : function, optional Function to call on the return value of ``func(arg)`` for each *arg* in *args* (default do_nothing). kwargs : dict Extra keyword arguments are unpacked in each call of *func*. **Returns** results : list A list equivalent to ``[func(x, **kwargs) for x in args]``. """ if processes is 1: results = [] for arg in args: result = func(arg, **kwargs) results.append(result) callback(result) return results else: with Pool() if processes is None else Pool(processes) as p: results = [p.apply_async(func, (arg,), kwargs, callback) for arg in args] return [result.get() for result in results] def make_sure_path_exists(path): """ Creates the supplied *path* if it does not exist. Raises *OSError* if the *path* cannot be created. **Parameters** path : str Path to create. 
**Returns** None """ try: makedirs(path) except OSError: if not isdir(path): raise def get_signal(data): """ Returns all of the values in *data* that are not outliers. **Parameters** data : masked array **Returns** signal : array Non-masked values in *data*. """ return data[~data.mask].data.reshape(-1, data.shape[1]) def get_noise(data): """ Returns all identified outliers in *data*. **Parameters** data : masked array **Returns** noise : array Masked values in *data*. """ return data[data.mask].data.reshape(-1, data.shape[1]) def colvec(X): """ Converts a row-vector *X* into a column-vector. **Parameters** X : array-like, shape = [n_samples] **Returns** out : array-like, shape = [n_samples, 1] """ return resize(X, (X.shape[0], 1)) def rowvec(X): """ Converts a column-vector *X* into a row-vector. **Parameters** X : array-like, shape = [n_samples, 1] **Returns* out : array-like, shape = [n_samples] """ return resize(X, (1, X.shape[0]))[0] def mad(data, axis=None): """ Computes the median absolute deviation of *data* along a given *axis*. See `link <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ for details. **Parameters** data : array-like **Returns** mad : number or array-like """ return median(absolute(data - median(data, axis)), axis) def autocorrelation(X, lag=1): """ Computes the autocorrelation of *X* with the given *lag*. Autocorrelation is simply autocovariance(X) / covariance(X-mean, X-mean), where autocovariance is simply covariance((X-mean)[:-lag], (X-mean)[lag:]). See `link <https://en.wikipedia.org/wiki/Autocorrelation>`_ for details. **Parameters** X : array-like, shape = [n_samples] lag : int, optional Index difference between points being compared (default 1). 
""" differences = X - X.mean() products = differences * concatenate((differences[lag:], differences[:lag])) return products.sum() / (differences**2).sum() _latex_replacements = [ ('\\', '\\\\'), ('{', '\\{'), ('{', '\\}'), ('$', '\\$'), ('&', '\\&'), ('#', '\\#'), ('^', '\\textasciicircum{}'), ('_', '\\textunderscore{}'), ('~', '\\~'), ('%', '\\%'), ('<', '\\textless{}'), ('>', '\\textgreater{}'), ('|', '\\textbar{}') ] def sanitize_latex(string): """ Sanitize a string for input to LaTeX. Replacements taken from `Stack Overflow <http://stackoverflow.com/questions/2627135/how-do-i-sanitize-latex-input>`_ **Parameters** string: str **Returns** sanitized_string: str """ sanitized_string = string for old, new in _latex_replacements: sanitized_string = sanitized_string.replace(old, new) return sanitized_string
astroswego/plotypus
src/plotypus/utils.py
pmap
python
def pmap(func, args, processes=None, callback=lambda *_, **__: None, **kwargs):
    """pmap(func, args, processes=None, callback=do_nothing, **kwargs)

    Parallel equivalent of ``map(func, args)``, with the additional ability of
    providing keyword arguments to func, and a callback function which is
    applied to each element in the returned list. Unlike map, the output is a
    non-lazy list. If *processes* is 1, no thread pool is used.

    **Parameters**

    func : function
        The function to map.
    args : iterable
        The arguments to map *func* over.
    processes : int or None, optional
        The number of processes in the thread pool. If only 1, no thread pool
        is used to avoid useless overhead. If None, the number is chosen based
        on your system by :class:`multiprocessing.Pool` (default None).
    callback : function, optional
        Function to call on the return value of ``func(arg)`` for each *arg*
        in *args* (default do_nothing).
    kwargs : dict
        Extra keyword arguments are unpacked in each call of *func*.

    **Returns**

    results : list
        A list equivalent to ``[func(x, **kwargs) for x in args]``.
    """
    # ``==`` rather than ``is``: identity comparison against an int literal
    # is implementation-dependent and raises a SyntaxWarning on CPython 3.8+.
    if processes == 1:
        results = []
        for arg in args:
            result = func(arg, **kwargs)
            results.append(result)
            callback(result)
        return results

    # Pool(None) already sizes the pool from the system's CPU count, so no
    # conditional construction is needed. Results are retrieved inside the
    # ``with`` block: Pool.__exit__ calls terminate(), which would kill
    # workers before outstanding tasks finish.
    with Pool(processes) as p:
        async_results = [p.apply_async(func, (arg,), kwargs, callback)
                         for arg in args]
        return [async_result.get() for async_result in async_results]
pmap(func, args, processes=None, callback=do_nothing, **kwargs) Parallel equivalent of ``map(func, args)``, with the additional ability of providing keyword arguments to func, and a callback function which is applied to each element in the returned list. Unlike map, the output is a non-lazy list. If *processes* is 1, no thread pool is used. **Parameters** func : function The function to map. args : iterable The arguments to map *func* over. processes : int or None, optional The number of processes in the thread pool. If only 1, no thread pool is used to avoid useless overhead. If None, the number is chosen based on your system by :class:`multiprocessing.Pool` (default None). callback : function, optional Function to call on the return value of ``func(arg)`` for each *arg* in *args* (default do_nothing). kwargs : dict Extra keyword arguments are unpacked in each call of *func*. **Returns** results : list A list equivalent to ``[func(x, **kwargs) for x in args]``.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/utils.py#L44-L86
[ "def pmap(func, args, processes=None, callback=lambda *_, **__: None, **kwargs):\n" ]
from os import makedirs from os.path import join, isdir from sys import stderr from multiprocessing import Pool from numpy import absolute, concatenate, median, resize __all__ = [ 'verbose_print', 'pmap', 'make_sure_path_exists', 'get_signal', 'get_noise', 'colvec', 'mad', 'autocorrelation' ] def verbose_print(message, *, operation, verbosity): """ Prints *message* to stderr only if the given *operation* is in the list *verbosity*. If "all" is in *verbosity*, all operations are printed. **Parameters** message : str The message to print. operation : str The type of operation being performed. verbosity : [str] or None The list of operations to print *message* for. If "all" is contained in the list, then all operations are printed. If None, no operation is printed. **Returns** None """ if (verbosity is not None) and ((operation in verbosity) or ("all" in verbosity)): print(message, file=stderr) def make_sure_path_exists(path): """ Creates the supplied *path* if it does not exist. Raises *OSError* if the *path* cannot be created. **Parameters** path : str Path to create. **Returns** None """ try: makedirs(path) except OSError: if not isdir(path): raise def get_signal(data): """ Returns all of the values in *data* that are not outliers. **Parameters** data : masked array **Returns** signal : array Non-masked values in *data*. """ return data[~data.mask].data.reshape(-1, data.shape[1]) def get_noise(data): """ Returns all identified outliers in *data*. **Parameters** data : masked array **Returns** noise : array Masked values in *data*. """ return data[data.mask].data.reshape(-1, data.shape[1]) def colvec(X): """ Converts a row-vector *X* into a column-vector. **Parameters** X : array-like, shape = [n_samples] **Returns** out : array-like, shape = [n_samples, 1] """ return resize(X, (X.shape[0], 1)) def rowvec(X): """ Converts a column-vector *X* into a row-vector. 
**Parameters** X : array-like, shape = [n_samples, 1] **Returns* out : array-like, shape = [n_samples] """ return resize(X, (1, X.shape[0]))[0] def mad(data, axis=None): """ Computes the median absolute deviation of *data* along a given *axis*. See `link <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ for details. **Parameters** data : array-like **Returns** mad : number or array-like """ return median(absolute(data - median(data, axis)), axis) def autocorrelation(X, lag=1): """ Computes the autocorrelation of *X* with the given *lag*. Autocorrelation is simply autocovariance(X) / covariance(X-mean, X-mean), where autocovariance is simply covariance((X-mean)[:-lag], (X-mean)[lag:]). See `link <https://en.wikipedia.org/wiki/Autocorrelation>`_ for details. **Parameters** X : array-like, shape = [n_samples] lag : int, optional Index difference between points being compared (default 1). """ differences = X - X.mean() products = differences * concatenate((differences[lag:], differences[:lag])) return products.sum() / (differences**2).sum() _latex_replacements = [ ('\\', '\\\\'), ('{', '\\{'), ('{', '\\}'), ('$', '\\$'), ('&', '\\&'), ('#', '\\#'), ('^', '\\textasciicircum{}'), ('_', '\\textunderscore{}'), ('~', '\\~'), ('%', '\\%'), ('<', '\\textless{}'), ('>', '\\textgreater{}'), ('|', '\\textbar{}') ] def sanitize_latex(string): """ Sanitize a string for input to LaTeX. Replacements taken from `Stack Overflow <http://stackoverflow.com/questions/2627135/how-do-i-sanitize-latex-input>`_ **Parameters** string: str **Returns** sanitized_string: str """ sanitized_string = string for old, new in _latex_replacements: sanitized_string = sanitized_string.replace(old, new) return sanitized_string
astroswego/plotypus
src/plotypus/utils.py
mad
python
def mad(data, axis=None): return median(absolute(data - median(data, axis)), axis)
Computes the median absolute deviation of *data* along a given *axis*. See `link <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ for details. **Parameters** data : array-like **Returns** mad : number or array-like
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/utils.py#L172-L186
null
from os import makedirs from os.path import join, isdir from sys import stderr from multiprocessing import Pool from numpy import absolute, concatenate, median, resize __all__ = [ 'verbose_print', 'pmap', 'make_sure_path_exists', 'get_signal', 'get_noise', 'colvec', 'mad', 'autocorrelation' ] def verbose_print(message, *, operation, verbosity): """ Prints *message* to stderr only if the given *operation* is in the list *verbosity*. If "all" is in *verbosity*, all operations are printed. **Parameters** message : str The message to print. operation : str The type of operation being performed. verbosity : [str] or None The list of operations to print *message* for. If "all" is contained in the list, then all operations are printed. If None, no operation is printed. **Returns** None """ if (verbosity is not None) and ((operation in verbosity) or ("all" in verbosity)): print(message, file=stderr) def pmap(func, args, processes=None, callback=lambda *_, **__: None, **kwargs): """pmap(func, args, processes=None, callback=do_nothing, **kwargs) Parallel equivalent of ``map(func, args)``, with the additional ability of providing keyword arguments to func, and a callback function which is applied to each element in the returned list. Unlike map, the output is a non-lazy list. If *processes* is 1, no thread pool is used. **Parameters** func : function The function to map. args : iterable The arguments to map *func* over. processes : int or None, optional The number of processes in the thread pool. If only 1, no thread pool is used to avoid useless overhead. If None, the number is chosen based on your system by :class:`multiprocessing.Pool` (default None). callback : function, optional Function to call on the return value of ``func(arg)`` for each *arg* in *args* (default do_nothing). kwargs : dict Extra keyword arguments are unpacked in each call of *func*. **Returns** results : list A list equivalent to ``[func(x, **kwargs) for x in args]``. 
""" if processes is 1: results = [] for arg in args: result = func(arg, **kwargs) results.append(result) callback(result) return results else: with Pool() if processes is None else Pool(processes) as p: results = [p.apply_async(func, (arg,), kwargs, callback) for arg in args] return [result.get() for result in results] def make_sure_path_exists(path): """ Creates the supplied *path* if it does not exist. Raises *OSError* if the *path* cannot be created. **Parameters** path : str Path to create. **Returns** None """ try: makedirs(path) except OSError: if not isdir(path): raise def get_signal(data): """ Returns all of the values in *data* that are not outliers. **Parameters** data : masked array **Returns** signal : array Non-masked values in *data*. """ return data[~data.mask].data.reshape(-1, data.shape[1]) def get_noise(data): """ Returns all identified outliers in *data*. **Parameters** data : masked array **Returns** noise : array Masked values in *data*. """ return data[data.mask].data.reshape(-1, data.shape[1]) def colvec(X): """ Converts a row-vector *X* into a column-vector. **Parameters** X : array-like, shape = [n_samples] **Returns** out : array-like, shape = [n_samples, 1] """ return resize(X, (X.shape[0], 1)) def rowvec(X): """ Converts a column-vector *X* into a row-vector. **Parameters** X : array-like, shape = [n_samples, 1] **Returns* out : array-like, shape = [n_samples] """ return resize(X, (1, X.shape[0]))[0] def autocorrelation(X, lag=1): """ Computes the autocorrelation of *X* with the given *lag*. Autocorrelation is simply autocovariance(X) / covariance(X-mean, X-mean), where autocovariance is simply covariance((X-mean)[:-lag], (X-mean)[lag:]). See `link <https://en.wikipedia.org/wiki/Autocorrelation>`_ for details. **Parameters** X : array-like, shape = [n_samples] lag : int, optional Index difference between points being compared (default 1). 
""" differences = X - X.mean() products = differences * concatenate((differences[lag:], differences[:lag])) return products.sum() / (differences**2).sum() _latex_replacements = [ ('\\', '\\\\'), ('{', '\\{'), ('{', '\\}'), ('$', '\\$'), ('&', '\\&'), ('#', '\\#'), ('^', '\\textasciicircum{}'), ('_', '\\textunderscore{}'), ('~', '\\~'), ('%', '\\%'), ('<', '\\textless{}'), ('>', '\\textgreater{}'), ('|', '\\textbar{}') ] def sanitize_latex(string): """ Sanitize a string for input to LaTeX. Replacements taken from `Stack Overflow <http://stackoverflow.com/questions/2627135/how-do-i-sanitize-latex-input>`_ **Parameters** string: str **Returns** sanitized_string: str """ sanitized_string = string for old, new in _latex_replacements: sanitized_string = sanitized_string.replace(old, new) return sanitized_string
astroswego/plotypus
src/plotypus/utils.py
autocorrelation
python
def autocorrelation(X, lag=1): differences = X - X.mean() products = differences * concatenate((differences[lag:], differences[:lag])) return products.sum() / (differences**2).sum()
Computes the autocorrelation of *X* with the given *lag*. Autocorrelation is simply autocovariance(X) / covariance(X-mean, X-mean), where autocovariance is simply covariance((X-mean)[:-lag], (X-mean)[lag:]). See `link <https://en.wikipedia.org/wiki/Autocorrelation>`_ for details. **Parameters** X : array-like, shape = [n_samples] lag : int, optional Index difference between points being compared (default 1).
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/utils.py#L189-L210
null
from os import makedirs from os.path import join, isdir from sys import stderr from multiprocessing import Pool from numpy import absolute, concatenate, median, resize __all__ = [ 'verbose_print', 'pmap', 'make_sure_path_exists', 'get_signal', 'get_noise', 'colvec', 'mad', 'autocorrelation' ] def verbose_print(message, *, operation, verbosity): """ Prints *message* to stderr only if the given *operation* is in the list *verbosity*. If "all" is in *verbosity*, all operations are printed. **Parameters** message : str The message to print. operation : str The type of operation being performed. verbosity : [str] or None The list of operations to print *message* for. If "all" is contained in the list, then all operations are printed. If None, no operation is printed. **Returns** None """ if (verbosity is not None) and ((operation in verbosity) or ("all" in verbosity)): print(message, file=stderr) def pmap(func, args, processes=None, callback=lambda *_, **__: None, **kwargs): """pmap(func, args, processes=None, callback=do_nothing, **kwargs) Parallel equivalent of ``map(func, args)``, with the additional ability of providing keyword arguments to func, and a callback function which is applied to each element in the returned list. Unlike map, the output is a non-lazy list. If *processes* is 1, no thread pool is used. **Parameters** func : function The function to map. args : iterable The arguments to map *func* over. processes : int or None, optional The number of processes in the thread pool. If only 1, no thread pool is used to avoid useless overhead. If None, the number is chosen based on your system by :class:`multiprocessing.Pool` (default None). callback : function, optional Function to call on the return value of ``func(arg)`` for each *arg* in *args* (default do_nothing). kwargs : dict Extra keyword arguments are unpacked in each call of *func*. **Returns** results : list A list equivalent to ``[func(x, **kwargs) for x in args]``. 
""" if processes is 1: results = [] for arg in args: result = func(arg, **kwargs) results.append(result) callback(result) return results else: with Pool() if processes is None else Pool(processes) as p: results = [p.apply_async(func, (arg,), kwargs, callback) for arg in args] return [result.get() for result in results] def make_sure_path_exists(path): """ Creates the supplied *path* if it does not exist. Raises *OSError* if the *path* cannot be created. **Parameters** path : str Path to create. **Returns** None """ try: makedirs(path) except OSError: if not isdir(path): raise def get_signal(data): """ Returns all of the values in *data* that are not outliers. **Parameters** data : masked array **Returns** signal : array Non-masked values in *data*. """ return data[~data.mask].data.reshape(-1, data.shape[1]) def get_noise(data): """ Returns all identified outliers in *data*. **Parameters** data : masked array **Returns** noise : array Masked values in *data*. """ return data[data.mask].data.reshape(-1, data.shape[1]) def colvec(X): """ Converts a row-vector *X* into a column-vector. **Parameters** X : array-like, shape = [n_samples] **Returns** out : array-like, shape = [n_samples, 1] """ return resize(X, (X.shape[0], 1)) def rowvec(X): """ Converts a column-vector *X* into a row-vector. **Parameters** X : array-like, shape = [n_samples, 1] **Returns* out : array-like, shape = [n_samples] """ return resize(X, (1, X.shape[0]))[0] def mad(data, axis=None): """ Computes the median absolute deviation of *data* along a given *axis*. See `link <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ for details. 
**Parameters** data : array-like **Returns** mad : number or array-like """ return median(absolute(data - median(data, axis)), axis) _latex_replacements = [ ('\\', '\\\\'), ('{', '\\{'), ('{', '\\}'), ('$', '\\$'), ('&', '\\&'), ('#', '\\#'), ('^', '\\textasciicircum{}'), ('_', '\\textunderscore{}'), ('~', '\\~'), ('%', '\\%'), ('<', '\\textless{}'), ('>', '\\textgreater{}'), ('|', '\\textbar{}') ] def sanitize_latex(string): """ Sanitize a string for input to LaTeX. Replacements taken from `Stack Overflow <http://stackoverflow.com/questions/2627135/how-do-i-sanitize-latex-input>`_ **Parameters** string: str **Returns** sanitized_string: str """ sanitized_string = string for old, new in _latex_replacements: sanitized_string = sanitized_string.replace(old, new) return sanitized_string
astroswego/plotypus
src/plotypus/utils.py
sanitize_latex
python
def sanitize_latex(string): sanitized_string = string for old, new in _latex_replacements: sanitized_string = sanitized_string.replace(old, new) return sanitized_string
Sanitize a string for input to LaTeX. Replacements taken from `Stack Overflow <http://stackoverflow.com/questions/2627135/how-do-i-sanitize-latex-input>`_ **Parameters** string: str **Returns** sanitized_string: str
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/utils.py#L228-L246
null
from os import makedirs from os.path import join, isdir from sys import stderr from multiprocessing import Pool from numpy import absolute, concatenate, median, resize __all__ = [ 'verbose_print', 'pmap', 'make_sure_path_exists', 'get_signal', 'get_noise', 'colvec', 'mad', 'autocorrelation' ] def verbose_print(message, *, operation, verbosity): """ Prints *message* to stderr only if the given *operation* is in the list *verbosity*. If "all" is in *verbosity*, all operations are printed. **Parameters** message : str The message to print. operation : str The type of operation being performed. verbosity : [str] or None The list of operations to print *message* for. If "all" is contained in the list, then all operations are printed. If None, no operation is printed. **Returns** None """ if (verbosity is not None) and ((operation in verbosity) or ("all" in verbosity)): print(message, file=stderr) def pmap(func, args, processes=None, callback=lambda *_, **__: None, **kwargs): """pmap(func, args, processes=None, callback=do_nothing, **kwargs) Parallel equivalent of ``map(func, args)``, with the additional ability of providing keyword arguments to func, and a callback function which is applied to each element in the returned list. Unlike map, the output is a non-lazy list. If *processes* is 1, no thread pool is used. **Parameters** func : function The function to map. args : iterable The arguments to map *func* over. processes : int or None, optional The number of processes in the thread pool. If only 1, no thread pool is used to avoid useless overhead. If None, the number is chosen based on your system by :class:`multiprocessing.Pool` (default None). callback : function, optional Function to call on the return value of ``func(arg)`` for each *arg* in *args* (default do_nothing). kwargs : dict Extra keyword arguments are unpacked in each call of *func*. **Returns** results : list A list equivalent to ``[func(x, **kwargs) for x in args]``. 
""" if processes is 1: results = [] for arg in args: result = func(arg, **kwargs) results.append(result) callback(result) return results else: with Pool() if processes is None else Pool(processes) as p: results = [p.apply_async(func, (arg,), kwargs, callback) for arg in args] return [result.get() for result in results] def make_sure_path_exists(path): """ Creates the supplied *path* if it does not exist. Raises *OSError* if the *path* cannot be created. **Parameters** path : str Path to create. **Returns** None """ try: makedirs(path) except OSError: if not isdir(path): raise def get_signal(data): """ Returns all of the values in *data* that are not outliers. **Parameters** data : masked array **Returns** signal : array Non-masked values in *data*. """ return data[~data.mask].data.reshape(-1, data.shape[1]) def get_noise(data): """ Returns all identified outliers in *data*. **Parameters** data : masked array **Returns** noise : array Masked values in *data*. """ return data[data.mask].data.reshape(-1, data.shape[1]) def colvec(X): """ Converts a row-vector *X* into a column-vector. **Parameters** X : array-like, shape = [n_samples] **Returns** out : array-like, shape = [n_samples, 1] """ return resize(X, (X.shape[0], 1)) def rowvec(X): """ Converts a column-vector *X* into a row-vector. **Parameters** X : array-like, shape = [n_samples, 1] **Returns* out : array-like, shape = [n_samples] """ return resize(X, (1, X.shape[0]))[0] def mad(data, axis=None): """ Computes the median absolute deviation of *data* along a given *axis*. See `link <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ for details. **Parameters** data : array-like **Returns** mad : number or array-like """ return median(absolute(data - median(data, axis)), axis) def autocorrelation(X, lag=1): """ Computes the autocorrelation of *X* with the given *lag*. 
Autocorrelation is simply autocovariance(X) / covariance(X-mean, X-mean), where autocovariance is simply covariance((X-mean)[:-lag], (X-mean)[lag:]). See `link <https://en.wikipedia.org/wiki/Autocorrelation>`_ for details. **Parameters** X : array-like, shape = [n_samples] lag : int, optional Index difference between points being compared (default 1). """ differences = X - X.mean() products = differences * concatenate((differences[lag:], differences[:lag])) return products.sum() / (differences**2).sum() _latex_replacements = [ ('\\', '\\\\'), ('{', '\\{'), ('{', '\\}'), ('$', '\\$'), ('&', '\\&'), ('#', '\\#'), ('^', '\\textasciicircum{}'), ('_', '\\textunderscore{}'), ('~', '\\~'), ('%', '\\%'), ('<', '\\textless{}'), ('>', '\\textgreater{}'), ('|', '\\textbar{}') ]
astroswego/plotypus
src/plotypus/lightcurve.py
make_predictor
python
def make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs): fourier = Fourier(degree_range=fourier_degree, regressor=regressor) \ if use_baart else Fourier() pipeline = Pipeline([('Fourier', fourier), ('Regressor', regressor)]) if use_baart: return pipeline else: params = {'Fourier__degree': list(range(fourier_degree[0], fourier_degree[1]+1))} return Selector(pipeline, params, scoring=scoring, cv=scoring_cv, n_jobs=selector_processes)
make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs) Makes a predictor object for use in :func:`get_lightcurve`. **Parameters** regressor : object with "fit" and "transform" methods, optional Regression object used for solving Fourier matrix (default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``). Selector : class with "fit" and "predict" methods, optional Model selection class used for finding the best fit (default :class:`sklearn.grid_search.GridSearchCV`). selector_processes : positive integer, optional Number of processes to use for *Selector* (default 1). use_baart : boolean, optional If True, ignores *Selector* and uses Baart's Criteria to find the Fourier degree, within the boundaries (default False). fourier_degree : 2-tuple, optional Tuple containing lower and upper bounds on Fourier degree, in that order (default (2, 25)). scoring : str, optional Scoring method to use for *Selector*. This parameter can be: * "r2", in which case use :math:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). **Returns** out : object with "fit" and "predict" methods The created predictor object.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/lightcurve.py#L35-L81
null
""" Light curve fitting and plotting functions. """ import numpy numpy.random.seed(0) from scipy.stats import sem from sys import stderr from math import floor from os import path import plotypus.utils from .utils import (verbose_print, make_sure_path_exists, get_signal, get_noise, colvec, mad) from .periodogram import find_period, Lomb_Scargle, rephase from .preprocessing import Fourier from sklearn.cross_validation import cross_val_score from sklearn.linear_model import LassoLarsIC from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.utils import ConvergenceWarning import warnings warnings.filterwarnings("ignore", category=ConvergenceWarning) import matplotlib import matplotlib.pyplot as plt __all__ = [ 'make_predictor', 'get_lightcurve', 'get_lightcurve_from_file', 'find_outliers', 'plot_lightcurve' ] def get_lightcurve(data, copy=False, name=None, predictor=None, periodogram=Lomb_Scargle, sigma_clipping=mad, scoring='r2', scoring_cv=3, scoring_processes=1, period=None, min_period=0.2, max_period=32, coarse_precision=1e-5, fine_precision=1e-9, period_processes=1, sigma=20, shift=None, min_phase_cover=0.0, min_observations=1, n_phases=100, verbosity=None, **kwargs): """get_lightcurve(data, copy=False, name=None, predictor=None, periodogram=Lomb_Scargle, sigma_clipping=mad, scoring='r2', scoring_cv=3, scoring_processes=1, period=None, min_period=0.2, max_period=32, coarse_precision=1e-5, fine_precision=1e-9, period_processes=1, sigma=20, shift=None, min_phase_cover=0.0, n_phases=100, verbosity=None, **kwargs) Fits a light curve to the given `data` using the specified methods, with default behavior defined for all methods. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array with columns *time*, *magnitude*, and (optional) *error*. *time* should be unphased. name : string or None, optional Name of star being processed. 
predictor : object that has "fit" and "predict" methods, optional Object which fits the light curve obtained from *data* after rephasing (default ``make_predictor(scoring=scoring, scoring_cv=scoring_cv)``). periodogram : function, optional Function which finds one or more *period*\s. If *period* is already provided, the function is not used. Defaults to :func:`plotypus.periodogram.Lomb_Scargle` sigma_clipping : function, optional Function which takes an array and assigns sigma scores to each element. Defaults to :func:`plotypus.utils.mad`. scoring : str, optional Scoring method used by *predictor*. This parameter can be * "r2", in which case use :func:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). scoring_processes : positive integer, optional Number of processes to use for scoring cross validation (default 1). period : number or None, optional Period of oscillation used in the fit. This parameter can be: * None, in which case the period is obtained with the given *periodogram* function (the default). * A single positive number, giving the period to phase *data*. min_period : non-negative number, optional Lower bound on period obtained by *periodogram* (default 0.2). max_period : non-negative number, optional Upper bound on period obtained by *periodogram* (default 32.0). course_precision : positive number, optional Precision used in first period search sweep (default 1e-5). fine_precision : positive number, optional Precision used in second period search sweep (default 1e-9). period_processes : positive integer, optional Number of processes to use for period finding (default 1). sigma : number, optional Upper bound on score obtained by *sigma_clipping* for a point to be considered an inlier. shift : number or None, optional Phase shift to apply to light curve if provided. 
Light curve is shifted such that max light occurs at ``phase[0]`` if None given (default None). min_phase_cover : number on interval [0, 1], optional Fraction of binned light curve that must contain points in order to proceed. If light curve has insufficient coverage, a warning is printed if "outlier" *verbosity* is on, and None is returned (default 0.0). n_phases : positive integer Number of equally spaced phases to predict magnitudes at (default 100) verbosity : list or None, optional Verbosity level. See :func:`plotypus.utils.verbose_print`. **Returns** out : dict Results of the fit in a dictionary. The keys are: * name : str or None The name of the star. * period : number The star's period. * lightcurve : array-like, shape = [n_phases] Magnitudes of fitted light curve sampled at sample phases. * coefficients : array-like, shape = [n_coeffs] Fitted light curve coefficients. * dA_0 : non-negative number Error on mean magnitude. * phased_data : array-like, shape = [n_samples] *data* transformed from temporal to phase space. * model : predictor object The predictor used to fit the light curve. * R2 : number The :math:`R^2` score of the fit. * MSE : number The mean square error of the fit. * degree : positive integer The degree of the Fourier fit. * shift : number The phase shift applied. * coverage : number on interval [0, 1] The light curve coverage. 
**See also** :func:`get_lightcurve_from_file` """ data = numpy.ma.array(data, copy=copy) phases = numpy.linspace(0, 1, n_phases, endpoint=False) # TODO ### # Replace dA_0 with error matrix dA if predictor is None: predictor = make_predictor(scoring=scoring, scoring_cv=scoring_cv) while True: signal = get_signal(data) if len(signal) <= scoring_cv: verbose_print( "{}: length of signal ({}) less than cv folds ({})".format( name, len(signal), scoring_cv), operation="coverage", verbosity=verbosity) return elif len(signal) < min_observations: verbose_print( "{}: length of signal ({}) " "less than min_observations ({})".format( name, len(signal), min_observations), operation="coverage", verbosity=verbosity) return # Find the period of the inliers if period is not None: _period = period else: verbose_print("{}: finding period".format(name), operation="period", verbosity=verbosity) _period = find_period(signal, min_period, max_period, coarse_precision, fine_precision, periodogram, period_processes) verbose_print("{}: using period {}".format(name, _period), operation="period", verbosity=verbosity) phase, mag, *err = rephase(signal, _period).T # TODO ### # Generalize number of bins to function parameter ``coverage_bins``, which # defaults to 100, the current hard-coded behavior # Determine whether there is sufficient phase coverage coverage = numpy.zeros((100)) for p in phase: coverage[int(floor(p*100))] = 1 coverage = sum(coverage)/100 if coverage < min_phase_cover: verbose_print("{}: {} {}".format(name, coverage, min_phase_cover), operation="coverage", verbosity=verbosity) verbose_print("Insufficient phase coverage", operation="outlier", verbosity=verbosity) return # Predict light curve with warnings.catch_warnings(record=True) as w: try: predictor = predictor.fit(colvec(phase), mag) except Warning: # not sure if this should be only in verbose mode print(name, w, file=stderr) return # Reject outliers and repeat the process if there are any if sigma: outliers = 
find_outliers(rephase(data.data, _period), predictor, sigma, sigma_clipping) num_outliers = sum(outliers)[0] if num_outliers == 0 or \ set.issubset(set(numpy.nonzero(outliers.T[0])[0]), set(numpy.nonzero(data.mask.T[0])[0])): data.mask = outliers break if num_outliers > 0: verbose_print("{}: {} outliers".format(name, sum(outliers)[0]), operation="outlier", verbosity=verbosity) data.mask = numpy.ma.mask_or(data.mask, outliers) # Build light curve and optionally shift to max light lightcurve = predictor.predict([[i] for i in phases]) if shift is None: arg_max_light = lightcurve.argmin() lightcurve = numpy.concatenate((lightcurve[arg_max_light:], lightcurve[:arg_max_light])) shift = arg_max_light/len(phases) data.T[0] = rephase(data.data, _period, shift).T[0] # Grab the coefficients from the model coefficients = predictor.named_steps['Regressor'].coef_ \ if isinstance(predictor, Pipeline) \ else predictor.best_estimator_.named_steps['Regressor'].coef_, # compute R^2 and MSE if they haven't already been # (one or zero have been computed, depending on the predictor) estimator = predictor.best_estimator_ \ if hasattr(predictor, 'best_estimator_') \ else predictor get_score = lambda scoring: predictor.best_score_ \ if hasattr(predictor, 'best_score_') \ and predictor.scoring == scoring \ else cross_val_score(estimator, colvec(phase), mag, cv=scoring_cv, scoring=scoring, n_jobs=scoring_processes).mean() return {'name': name, 'period': _period, 'lightcurve': lightcurve, 'coefficients': coefficients[0], 'dA_0': sem(lightcurve), 'phased_data': data, 'model': predictor, 'R2': get_score('r2'), 'MSE': abs(get_score('mean_squared_error')), 'degree': estimator.get_params()['Fourier__degree'], 'shift': shift, 'coverage': coverage} def get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, verbosity=None, **kwargs): """get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs) Fits a light curve to the data contained in *file* using :func:`get_lightcurve`. 
**Parameters** file : str or file File or filename to load data from. use_cols : iterable or None, optional Iterable of columns to read from data file, or None to read all columns (default None). skiprows : number, optional Number of rows to skip at beginning of *file* (default 0) **Returns** out : dict See :func:`get_lightcurve`. """ data = numpy.loadtxt(file, skiprows=skiprows, usecols=use_cols) if len(data) != 0: masked_data = numpy.ma.array(data=data, mask=None, dtype=float) return get_lightcurve(masked_data, *args, verbosity=verbosity, **kwargs) else: verbose_print("{}: file contains no data points".format(file), operation="coverage", verbosity=verbosity) return ## These functions were used briefly and then not maintained. ## Will make comebacks of some form in a later release. ## # def get_lightcurves_from_file(filename, directories, *args, **kwargs): # return [get_lightcurve_from_file(path.join(d, filename), *args, **kwargs) # for d in directories] # # # def single_periods(data, period, min_points=10, copy=False, *args, **kwargs): # data = numpy.ma.array(data, copy=copy) # time, mag, *err = data.T # # tstart, tfinal = numpy.min(time), numpy.max(time) # periods = numpy.arange(tstart, tfinal+period, period) # data_range = ( # data[numpy.logical_and(time>pstart, time<=pend),:] # for pstart, pend in zip(periods[:-1], periods[1:]) # ) # # return ( # get_lightcurve(d, period=period, *args, **kwargs) # for d in data_range # if d.shape[0] > min_points # ) # # # def single_periods_from_file(filename, *args, use_cols=(0, 1, 2), skiprows=0, # **kwargs): # data = numpy.ma.array(data=numpy.loadtxt(filename, usecols=use_cols, # skiprows=skiprows), # mask=None, dtype=float) # return single_periods(data, *args, **kwargs) def find_outliers(data, predictor, sigma, method=mad): """find_outliers(data, predictor, sigma, method=mad) Returns a boolean array indicating the outliers in the given *data* array. 
**Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array containing columns *phase*, *magnitude*, and (optional) *error*. predictor : object that has "fit" and "predict" methods, optional Object which fits the light curve obtained from *data* after rephasing. sigma : number Outlier cutoff criteria. method : function, optional Function to score residuals for outlier detection (default :func:`plotypus.utils.mad`). **Returns** out : array-like, shape = data.shape Boolean array indicating the outliers in the given *data* array. """ phase, mag, *err = data.T residuals = numpy.absolute(predictor.predict(colvec(phase)) - mag) outliers = numpy.logical_and((residuals > err[0]) if err else True, residuals > sigma * method(residuals)) return numpy.tile(numpy.vstack(outliers), data.shape[1]) def plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, sanitize_latex=False, color=True, n_phases=100, err_const=0.005, **kwargs): """plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, color=True, n_phases=100, err_const=0.005, **kwargs) Save a plot of the given *lightcurve* to directory *output*. **Parameters** name : str Name of the star. Used in filename and plot title. lightcurve : array-like, shape = [n_samples] Fitted lightcurve. period : number Period to phase time by. data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array containing columns *time*, *magnitude*, and (optional) *error*. *time* should be unphased. output : str, optional Directory to save plot to (default '.'). legend : boolean, optional Whether or not to display legend on plot (default False). color : boolean, optional Whether or not to display color in plot (default True). n_phases : integer, optional Number of phase points in fit (default 100). err_const : number, optional Constant to use in absence of error (default 0.005). 
**Returns** None """ phases = numpy.linspace(0, 1, n_phases, endpoint=False) ax = plt.gca() ax.invert_yaxis() plt.xlim(0,2) # Plot points used phase, mag, *err = get_signal(data).T error = err[0] if err else mag*err_const inliers = plt.errorbar(numpy.hstack((phase,1+phase)), numpy.hstack((mag, mag)), yerr=numpy.hstack((error, error)), ls='None', ms=.01, mew=.01, capsize=0) # Plot outliers rejected phase, mag, *err = get_noise(data).T error = err[0] if err else mag*err_const outliers = plt.errorbar(numpy.hstack((phase,1+phase)), numpy.hstack((mag, mag)), yerr=numpy.hstack((error, error)), ls='None', marker='o' if color else 'x', ms=.01 if color else 4, mew=.01 if color else 1, capsize=0 if color else 1) # Plot the fitted light curve signal, = plt.plot(numpy.hstack((phases,1+phases)), numpy.hstack((lightcurve, lightcurve)), linewidth=1) if legend: plt.legend([signal, inliers, outliers], ["Light Curve", "Inliers", "Outliers"], loc='best') plt.xlabel('Phase ({0:0.7} day period)'.format(period)) plt.ylabel('Magnitude') plt.title(utils.sanitize_latex(name) if sanitize_latex else name) plt.tight_layout(pad=0.1) make_sure_path_exists(output) plt.savefig(path.join(output, name)) plt.clf()
astroswego/plotypus
src/plotypus/lightcurve.py
get_lightcurve
python
def get_lightcurve(data, copy=False, name=None, predictor=None, periodogram=Lomb_Scargle, sigma_clipping=mad, scoring='r2', scoring_cv=3, scoring_processes=1, period=None, min_period=0.2, max_period=32, coarse_precision=1e-5, fine_precision=1e-9, period_processes=1, sigma=20, shift=None, min_phase_cover=0.0, min_observations=1, n_phases=100, verbosity=None, **kwargs): data = numpy.ma.array(data, copy=copy) phases = numpy.linspace(0, 1, n_phases, endpoint=False) # TODO ### # Replace dA_0 with error matrix dA if predictor is None: predictor = make_predictor(scoring=scoring, scoring_cv=scoring_cv) while True: signal = get_signal(data) if len(signal) <= scoring_cv: verbose_print( "{}: length of signal ({}) less than cv folds ({})".format( name, len(signal), scoring_cv), operation="coverage", verbosity=verbosity) return elif len(signal) < min_observations: verbose_print( "{}: length of signal ({}) " "less than min_observations ({})".format( name, len(signal), min_observations), operation="coverage", verbosity=verbosity) return # Find the period of the inliers if period is not None: _period = period else: verbose_print("{}: finding period".format(name), operation="period", verbosity=verbosity) _period = find_period(signal, min_period, max_period, coarse_precision, fine_precision, periodogram, period_processes) verbose_print("{}: using period {}".format(name, _period), operation="period", verbosity=verbosity) phase, mag, *err = rephase(signal, _period).T # TODO ### # Generalize number of bins to function parameter ``coverage_bins``, which # defaults to 100, the current hard-coded behavior # Determine whether there is sufficient phase coverage coverage = numpy.zeros((100)) for p in phase: coverage[int(floor(p*100))] = 1 coverage = sum(coverage)/100 if coverage < min_phase_cover: verbose_print("{}: {} {}".format(name, coverage, min_phase_cover), operation="coverage", verbosity=verbosity) verbose_print("Insufficient phase coverage", operation="outlier", verbosity=verbosity) 
return # Predict light curve with warnings.catch_warnings(record=True) as w: try: predictor = predictor.fit(colvec(phase), mag) except Warning: # not sure if this should be only in verbose mode print(name, w, file=stderr) return # Reject outliers and repeat the process if there are any if sigma: outliers = find_outliers(rephase(data.data, _period), predictor, sigma, sigma_clipping) num_outliers = sum(outliers)[0] if num_outliers == 0 or \ set.issubset(set(numpy.nonzero(outliers.T[0])[0]), set(numpy.nonzero(data.mask.T[0])[0])): data.mask = outliers break if num_outliers > 0: verbose_print("{}: {} outliers".format(name, sum(outliers)[0]), operation="outlier", verbosity=verbosity) data.mask = numpy.ma.mask_or(data.mask, outliers) # Build light curve and optionally shift to max light lightcurve = predictor.predict([[i] for i in phases]) if shift is None: arg_max_light = lightcurve.argmin() lightcurve = numpy.concatenate((lightcurve[arg_max_light:], lightcurve[:arg_max_light])) shift = arg_max_light/len(phases) data.T[0] = rephase(data.data, _period, shift).T[0] # Grab the coefficients from the model coefficients = predictor.named_steps['Regressor'].coef_ \ if isinstance(predictor, Pipeline) \ else predictor.best_estimator_.named_steps['Regressor'].coef_, # compute R^2 and MSE if they haven't already been # (one or zero have been computed, depending on the predictor) estimator = predictor.best_estimator_ \ if hasattr(predictor, 'best_estimator_') \ else predictor get_score = lambda scoring: predictor.best_score_ \ if hasattr(predictor, 'best_score_') \ and predictor.scoring == scoring \ else cross_val_score(estimator, colvec(phase), mag, cv=scoring_cv, scoring=scoring, n_jobs=scoring_processes).mean() return {'name': name, 'period': _period, 'lightcurve': lightcurve, 'coefficients': coefficients[0], 'dA_0': sem(lightcurve), 'phased_data': data, 'model': predictor, 'R2': get_score('r2'), 'MSE': abs(get_score('mean_squared_error')), 'degree': 
estimator.get_params()['Fourier__degree'], 'shift': shift, 'coverage': coverage}
get_lightcurve(data, copy=False, name=None, predictor=None, periodogram=Lomb_Scargle, sigma_clipping=mad, scoring='r2', scoring_cv=3, scoring_processes=1, period=None, min_period=0.2, max_period=32, coarse_precision=1e-5, fine_precision=1e-9, period_processes=1, sigma=20, shift=None, min_phase_cover=0.0, n_phases=100, verbosity=None, **kwargs) Fits a light curve to the given `data` using the specified methods, with default behavior defined for all methods. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array with columns *time*, *magnitude*, and (optional) *error*. *time* should be unphased. name : string or None, optional Name of star being processed. predictor : object that has "fit" and "predict" methods, optional Object which fits the light curve obtained from *data* after rephasing (default ``make_predictor(scoring=scoring, scoring_cv=scoring_cv)``). periodogram : function, optional Function which finds one or more *period*\s. If *period* is already provided, the function is not used. Defaults to :func:`plotypus.periodogram.Lomb_Scargle` sigma_clipping : function, optional Function which takes an array and assigns sigma scores to each element. Defaults to :func:`plotypus.utils.mad`. scoring : str, optional Scoring method used by *predictor*. This parameter can be * "r2", in which case use :func:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). scoring_processes : positive integer, optional Number of processes to use for scoring cross validation (default 1). period : number or None, optional Period of oscillation used in the fit. This parameter can be: * None, in which case the period is obtained with the given *periodogram* function (the default). * A single positive number, giving the period to phase *data*. 
min_period : non-negative number, optional Lower bound on period obtained by *periodogram* (default 0.2). max_period : non-negative number, optional Upper bound on period obtained by *periodogram* (default 32.0). course_precision : positive number, optional Precision used in first period search sweep (default 1e-5). fine_precision : positive number, optional Precision used in second period search sweep (default 1e-9). period_processes : positive integer, optional Number of processes to use for period finding (default 1). sigma : number, optional Upper bound on score obtained by *sigma_clipping* for a point to be considered an inlier. shift : number or None, optional Phase shift to apply to light curve if provided. Light curve is shifted such that max light occurs at ``phase[0]`` if None given (default None). min_phase_cover : number on interval [0, 1], optional Fraction of binned light curve that must contain points in order to proceed. If light curve has insufficient coverage, a warning is printed if "outlier" *verbosity* is on, and None is returned (default 0.0). n_phases : positive integer Number of equally spaced phases to predict magnitudes at (default 100) verbosity : list or None, optional Verbosity level. See :func:`plotypus.utils.verbose_print`. **Returns** out : dict Results of the fit in a dictionary. The keys are: * name : str or None The name of the star. * period : number The star's period. * lightcurve : array-like, shape = [n_phases] Magnitudes of fitted light curve sampled at sample phases. * coefficients : array-like, shape = [n_coeffs] Fitted light curve coefficients. * dA_0 : non-negative number Error on mean magnitude. * phased_data : array-like, shape = [n_samples] *data* transformed from temporal to phase space. * model : predictor object The predictor used to fit the light curve. * R2 : number The :math:`R^2` score of the fit. * MSE : number The mean square error of the fit. * degree : positive integer The degree of the Fourier fit. 
* shift : number The phase shift applied. * coverage : number on interval [0, 1] The light curve coverage. **See also** :func:`get_lightcurve_from_file`
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/lightcurve.py#L84-L307
[ "def make_predictor(regressor=LassoLarsIC(fit_intercept=False),\n Selector=GridSearchCV, fourier_degree=(2, 25),\n selector_processes=1,\n use_baart=False, scoring='r2', scoring_cv=3,\n **kwargs):\n \"\"\"make_predictor(regressor=LassoLarsIC...
""" Light curve fitting and plotting functions. """ import numpy numpy.random.seed(0) from scipy.stats import sem from sys import stderr from math import floor from os import path import plotypus.utils from .utils import (verbose_print, make_sure_path_exists, get_signal, get_noise, colvec, mad) from .periodogram import find_period, Lomb_Scargle, rephase from .preprocessing import Fourier from sklearn.cross_validation import cross_val_score from sklearn.linear_model import LassoLarsIC from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.utils import ConvergenceWarning import warnings warnings.filterwarnings("ignore", category=ConvergenceWarning) import matplotlib import matplotlib.pyplot as plt __all__ = [ 'make_predictor', 'get_lightcurve', 'get_lightcurve_from_file', 'find_outliers', 'plot_lightcurve' ] def make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs): """make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs) Makes a predictor object for use in :func:`get_lightcurve`. **Parameters** regressor : object with "fit" and "transform" methods, optional Regression object used for solving Fourier matrix (default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``). Selector : class with "fit" and "predict" methods, optional Model selection class used for finding the best fit (default :class:`sklearn.grid_search.GridSearchCV`). selector_processes : positive integer, optional Number of processes to use for *Selector* (default 1). use_baart : boolean, optional If True, ignores *Selector* and uses Baart's Criteria to find the Fourier degree, within the boundaries (default False). 
fourier_degree : 2-tuple, optional Tuple containing lower and upper bounds on Fourier degree, in that order (default (2, 25)). scoring : str, optional Scoring method to use for *Selector*. This parameter can be: * "r2", in which case use :math:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). **Returns** out : object with "fit" and "predict" methods The created predictor object. """ fourier = Fourier(degree_range=fourier_degree, regressor=regressor) \ if use_baart else Fourier() pipeline = Pipeline([('Fourier', fourier), ('Regressor', regressor)]) if use_baart: return pipeline else: params = {'Fourier__degree': list(range(fourier_degree[0], fourier_degree[1]+1))} return Selector(pipeline, params, scoring=scoring, cv=scoring_cv, n_jobs=selector_processes) def get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, verbosity=None, **kwargs): """get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs) Fits a light curve to the data contained in *file* using :func:`get_lightcurve`. **Parameters** file : str or file File or filename to load data from. use_cols : iterable or None, optional Iterable of columns to read from data file, or None to read all columns (default None). skiprows : number, optional Number of rows to skip at beginning of *file* (default 0) **Returns** out : dict See :func:`get_lightcurve`. """ data = numpy.loadtxt(file, skiprows=skiprows, usecols=use_cols) if len(data) != 0: masked_data = numpy.ma.array(data=data, mask=None, dtype=float) return get_lightcurve(masked_data, *args, verbosity=verbosity, **kwargs) else: verbose_print("{}: file contains no data points".format(file), operation="coverage", verbosity=verbosity) return ## These functions were used briefly and then not maintained. ## Will make comebacks of some form in a later release. 
## # def get_lightcurves_from_file(filename, directories, *args, **kwargs): # return [get_lightcurve_from_file(path.join(d, filename), *args, **kwargs) # for d in directories] # # # def single_periods(data, period, min_points=10, copy=False, *args, **kwargs): # data = numpy.ma.array(data, copy=copy) # time, mag, *err = data.T # # tstart, tfinal = numpy.min(time), numpy.max(time) # periods = numpy.arange(tstart, tfinal+period, period) # data_range = ( # data[numpy.logical_and(time>pstart, time<=pend),:] # for pstart, pend in zip(periods[:-1], periods[1:]) # ) # # return ( # get_lightcurve(d, period=period, *args, **kwargs) # for d in data_range # if d.shape[0] > min_points # ) # # # def single_periods_from_file(filename, *args, use_cols=(0, 1, 2), skiprows=0, # **kwargs): # data = numpy.ma.array(data=numpy.loadtxt(filename, usecols=use_cols, # skiprows=skiprows), # mask=None, dtype=float) # return single_periods(data, *args, **kwargs) def find_outliers(data, predictor, sigma, method=mad): """find_outliers(data, predictor, sigma, method=mad) Returns a boolean array indicating the outliers in the given *data* array. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array containing columns *phase*, *magnitude*, and (optional) *error*. predictor : object that has "fit" and "predict" methods, optional Object which fits the light curve obtained from *data* after rephasing. sigma : number Outlier cutoff criteria. method : function, optional Function to score residuals for outlier detection (default :func:`plotypus.utils.mad`). **Returns** out : array-like, shape = data.shape Boolean array indicating the outliers in the given *data* array. 
""" phase, mag, *err = data.T residuals = numpy.absolute(predictor.predict(colvec(phase)) - mag) outliers = numpy.logical_and((residuals > err[0]) if err else True, residuals > sigma * method(residuals)) return numpy.tile(numpy.vstack(outliers), data.shape[1]) def plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, sanitize_latex=False, color=True, n_phases=100, err_const=0.005, **kwargs): """plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, color=True, n_phases=100, err_const=0.005, **kwargs) Save a plot of the given *lightcurve* to directory *output*. **Parameters** name : str Name of the star. Used in filename and plot title. lightcurve : array-like, shape = [n_samples] Fitted lightcurve. period : number Period to phase time by. data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array containing columns *time*, *magnitude*, and (optional) *error*. *time* should be unphased. output : str, optional Directory to save plot to (default '.'). legend : boolean, optional Whether or not to display legend on plot (default False). color : boolean, optional Whether or not to display color in plot (default True). n_phases : integer, optional Number of phase points in fit (default 100). err_const : number, optional Constant to use in absence of error (default 0.005). 
**Returns** None """ phases = numpy.linspace(0, 1, n_phases, endpoint=False) ax = plt.gca() ax.invert_yaxis() plt.xlim(0,2) # Plot points used phase, mag, *err = get_signal(data).T error = err[0] if err else mag*err_const inliers = plt.errorbar(numpy.hstack((phase,1+phase)), numpy.hstack((mag, mag)), yerr=numpy.hstack((error, error)), ls='None', ms=.01, mew=.01, capsize=0) # Plot outliers rejected phase, mag, *err = get_noise(data).T error = err[0] if err else mag*err_const outliers = plt.errorbar(numpy.hstack((phase,1+phase)), numpy.hstack((mag, mag)), yerr=numpy.hstack((error, error)), ls='None', marker='o' if color else 'x', ms=.01 if color else 4, mew=.01 if color else 1, capsize=0 if color else 1) # Plot the fitted light curve signal, = plt.plot(numpy.hstack((phases,1+phases)), numpy.hstack((lightcurve, lightcurve)), linewidth=1) if legend: plt.legend([signal, inliers, outliers], ["Light Curve", "Inliers", "Outliers"], loc='best') plt.xlabel('Phase ({0:0.7} day period)'.format(period)) plt.ylabel('Magnitude') plt.title(utils.sanitize_latex(name) if sanitize_latex else name) plt.tight_layout(pad=0.1) make_sure_path_exists(output) plt.savefig(path.join(output, name)) plt.clf()
astroswego/plotypus
src/plotypus/lightcurve.py
get_lightcurve_from_file
python
def get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, verbosity=None, **kwargs): data = numpy.loadtxt(file, skiprows=skiprows, usecols=use_cols) if len(data) != 0: masked_data = numpy.ma.array(data=data, mask=None, dtype=float) return get_lightcurve(masked_data, *args, verbosity=verbosity, **kwargs) else: verbose_print("{}: file contains no data points".format(file), operation="coverage", verbosity=verbosity) return
get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs) Fits a light curve to the data contained in *file* using :func:`get_lightcurve`. **Parameters** file : str or file File or filename to load data from. use_cols : iterable or None, optional Iterable of columns to read from data file, or None to read all columns (default None). skiprows : number, optional Number of rows to skip at beginning of *file* (default 0) **Returns** out : dict See :func:`get_lightcurve`.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/lightcurve.py#L310-L341
[ "def verbose_print(message, *, operation, verbosity):\n \"\"\"\n Prints *message* to stderr only if the given *operation* is in the list\n *verbosity*. If \"all\" is in *verbosity*, all operations are printed.\n\n **Parameters**\n\n message : str\n The message to print.\n operation : str\n ...
""" Light curve fitting and plotting functions. """ import numpy numpy.random.seed(0) from scipy.stats import sem from sys import stderr from math import floor from os import path import plotypus.utils from .utils import (verbose_print, make_sure_path_exists, get_signal, get_noise, colvec, mad) from .periodogram import find_period, Lomb_Scargle, rephase from .preprocessing import Fourier from sklearn.cross_validation import cross_val_score from sklearn.linear_model import LassoLarsIC from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.utils import ConvergenceWarning import warnings warnings.filterwarnings("ignore", category=ConvergenceWarning) import matplotlib import matplotlib.pyplot as plt __all__ = [ 'make_predictor', 'get_lightcurve', 'get_lightcurve_from_file', 'find_outliers', 'plot_lightcurve' ] def make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs): """make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs) Makes a predictor object for use in :func:`get_lightcurve`. **Parameters** regressor : object with "fit" and "transform" methods, optional Regression object used for solving Fourier matrix (default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``). Selector : class with "fit" and "predict" methods, optional Model selection class used for finding the best fit (default :class:`sklearn.grid_search.GridSearchCV`). selector_processes : positive integer, optional Number of processes to use for *Selector* (default 1). use_baart : boolean, optional If True, ignores *Selector* and uses Baart's Criteria to find the Fourier degree, within the boundaries (default False). 
fourier_degree : 2-tuple, optional Tuple containing lower and upper bounds on Fourier degree, in that order (default (2, 25)). scoring : str, optional Scoring method to use for *Selector*. This parameter can be: * "r2", in which case use :math:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). **Returns** out : object with "fit" and "predict" methods The created predictor object. """ fourier = Fourier(degree_range=fourier_degree, regressor=regressor) \ if use_baart else Fourier() pipeline = Pipeline([('Fourier', fourier), ('Regressor', regressor)]) if use_baart: return pipeline else: params = {'Fourier__degree': list(range(fourier_degree[0], fourier_degree[1]+1))} return Selector(pipeline, params, scoring=scoring, cv=scoring_cv, n_jobs=selector_processes) def get_lightcurve(data, copy=False, name=None, predictor=None, periodogram=Lomb_Scargle, sigma_clipping=mad, scoring='r2', scoring_cv=3, scoring_processes=1, period=None, min_period=0.2, max_period=32, coarse_precision=1e-5, fine_precision=1e-9, period_processes=1, sigma=20, shift=None, min_phase_cover=0.0, min_observations=1, n_phases=100, verbosity=None, **kwargs): """get_lightcurve(data, copy=False, name=None, predictor=None, periodogram=Lomb_Scargle, sigma_clipping=mad, scoring='r2', scoring_cv=3, scoring_processes=1, period=None, min_period=0.2, max_period=32, coarse_precision=1e-5, fine_precision=1e-9, period_processes=1, sigma=20, shift=None, min_phase_cover=0.0, n_phases=100, verbosity=None, **kwargs) Fits a light curve to the given `data` using the specified methods, with default behavior defined for all methods. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array with columns *time*, *magnitude*, and (optional) *error*. *time* should be unphased. name : string or None, optional Name of star being processed. 
predictor : object that has "fit" and "predict" methods, optional Object which fits the light curve obtained from *data* after rephasing (default ``make_predictor(scoring=scoring, scoring_cv=scoring_cv)``). periodogram : function, optional Function which finds one or more *period*\s. If *period* is already provided, the function is not used. Defaults to :func:`plotypus.periodogram.Lomb_Scargle` sigma_clipping : function, optional Function which takes an array and assigns sigma scores to each element. Defaults to :func:`plotypus.utils.mad`. scoring : str, optional Scoring method used by *predictor*. This parameter can be * "r2", in which case use :func:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). scoring_processes : positive integer, optional Number of processes to use for scoring cross validation (default 1). period : number or None, optional Period of oscillation used in the fit. This parameter can be: * None, in which case the period is obtained with the given *periodogram* function (the default). * A single positive number, giving the period to phase *data*. min_period : non-negative number, optional Lower bound on period obtained by *periodogram* (default 0.2). max_period : non-negative number, optional Upper bound on period obtained by *periodogram* (default 32.0). course_precision : positive number, optional Precision used in first period search sweep (default 1e-5). fine_precision : positive number, optional Precision used in second period search sweep (default 1e-9). period_processes : positive integer, optional Number of processes to use for period finding (default 1). sigma : number, optional Upper bound on score obtained by *sigma_clipping* for a point to be considered an inlier. shift : number or None, optional Phase shift to apply to light curve if provided. 
Light curve is shifted such that max light occurs at ``phase[0]`` if None given (default None). min_phase_cover : number on interval [0, 1], optional Fraction of binned light curve that must contain points in order to proceed. If light curve has insufficient coverage, a warning is printed if "outlier" *verbosity* is on, and None is returned (default 0.0). n_phases : positive integer Number of equally spaced phases to predict magnitudes at (default 100) verbosity : list or None, optional Verbosity level. See :func:`plotypus.utils.verbose_print`. **Returns** out : dict Results of the fit in a dictionary. The keys are: * name : str or None The name of the star. * period : number The star's period. * lightcurve : array-like, shape = [n_phases] Magnitudes of fitted light curve sampled at sample phases. * coefficients : array-like, shape = [n_coeffs] Fitted light curve coefficients. * dA_0 : non-negative number Error on mean magnitude. * phased_data : array-like, shape = [n_samples] *data* transformed from temporal to phase space. * model : predictor object The predictor used to fit the light curve. * R2 : number The :math:`R^2` score of the fit. * MSE : number The mean square error of the fit. * degree : positive integer The degree of the Fourier fit. * shift : number The phase shift applied. * coverage : number on interval [0, 1] The light curve coverage. 
**See also** :func:`get_lightcurve_from_file` """ data = numpy.ma.array(data, copy=copy) phases = numpy.linspace(0, 1, n_phases, endpoint=False) # TODO ### # Replace dA_0 with error matrix dA if predictor is None: predictor = make_predictor(scoring=scoring, scoring_cv=scoring_cv) while True: signal = get_signal(data) if len(signal) <= scoring_cv: verbose_print( "{}: length of signal ({}) less than cv folds ({})".format( name, len(signal), scoring_cv), operation="coverage", verbosity=verbosity) return elif len(signal) < min_observations: verbose_print( "{}: length of signal ({}) " "less than min_observations ({})".format( name, len(signal), min_observations), operation="coverage", verbosity=verbosity) return # Find the period of the inliers if period is not None: _period = period else: verbose_print("{}: finding period".format(name), operation="period", verbosity=verbosity) _period = find_period(signal, min_period, max_period, coarse_precision, fine_precision, periodogram, period_processes) verbose_print("{}: using period {}".format(name, _period), operation="period", verbosity=verbosity) phase, mag, *err = rephase(signal, _period).T # TODO ### # Generalize number of bins to function parameter ``coverage_bins``, which # defaults to 100, the current hard-coded behavior # Determine whether there is sufficient phase coverage coverage = numpy.zeros((100)) for p in phase: coverage[int(floor(p*100))] = 1 coverage = sum(coverage)/100 if coverage < min_phase_cover: verbose_print("{}: {} {}".format(name, coverage, min_phase_cover), operation="coverage", verbosity=verbosity) verbose_print("Insufficient phase coverage", operation="outlier", verbosity=verbosity) return # Predict light curve with warnings.catch_warnings(record=True) as w: try: predictor = predictor.fit(colvec(phase), mag) except Warning: # not sure if this should be only in verbose mode print(name, w, file=stderr) return # Reject outliers and repeat the process if there are any if sigma: outliers = 
find_outliers(rephase(data.data, _period), predictor, sigma, sigma_clipping) num_outliers = sum(outliers)[0] if num_outliers == 0 or \ set.issubset(set(numpy.nonzero(outliers.T[0])[0]), set(numpy.nonzero(data.mask.T[0])[0])): data.mask = outliers break if num_outliers > 0: verbose_print("{}: {} outliers".format(name, sum(outliers)[0]), operation="outlier", verbosity=verbosity) data.mask = numpy.ma.mask_or(data.mask, outliers) # Build light curve and optionally shift to max light lightcurve = predictor.predict([[i] for i in phases]) if shift is None: arg_max_light = lightcurve.argmin() lightcurve = numpy.concatenate((lightcurve[arg_max_light:], lightcurve[:arg_max_light])) shift = arg_max_light/len(phases) data.T[0] = rephase(data.data, _period, shift).T[0] # Grab the coefficients from the model coefficients = predictor.named_steps['Regressor'].coef_ \ if isinstance(predictor, Pipeline) \ else predictor.best_estimator_.named_steps['Regressor'].coef_, # compute R^2 and MSE if they haven't already been # (one or zero have been computed, depending on the predictor) estimator = predictor.best_estimator_ \ if hasattr(predictor, 'best_estimator_') \ else predictor get_score = lambda scoring: predictor.best_score_ \ if hasattr(predictor, 'best_score_') \ and predictor.scoring == scoring \ else cross_val_score(estimator, colvec(phase), mag, cv=scoring_cv, scoring=scoring, n_jobs=scoring_processes).mean() return {'name': name, 'period': _period, 'lightcurve': lightcurve, 'coefficients': coefficients[0], 'dA_0': sem(lightcurve), 'phased_data': data, 'model': predictor, 'R2': get_score('r2'), 'MSE': abs(get_score('mean_squared_error')), 'degree': estimator.get_params()['Fourier__degree'], 'shift': shift, 'coverage': coverage} ## These functions were used briefly and then not maintained. ## Will make comebacks of some form in a later release. 
## # def get_lightcurves_from_file(filename, directories, *args, **kwargs): # return [get_lightcurve_from_file(path.join(d, filename), *args, **kwargs) # for d in directories] # # # def single_periods(data, period, min_points=10, copy=False, *args, **kwargs): # data = numpy.ma.array(data, copy=copy) # time, mag, *err = data.T # # tstart, tfinal = numpy.min(time), numpy.max(time) # periods = numpy.arange(tstart, tfinal+period, period) # data_range = ( # data[numpy.logical_and(time>pstart, time<=pend),:] # for pstart, pend in zip(periods[:-1], periods[1:]) # ) # # return ( # get_lightcurve(d, period=period, *args, **kwargs) # for d in data_range # if d.shape[0] > min_points # ) # # # def single_periods_from_file(filename, *args, use_cols=(0, 1, 2), skiprows=0, # **kwargs): # data = numpy.ma.array(data=numpy.loadtxt(filename, usecols=use_cols, # skiprows=skiprows), # mask=None, dtype=float) # return single_periods(data, *args, **kwargs) def find_outliers(data, predictor, sigma, method=mad): """find_outliers(data, predictor, sigma, method=mad) Returns a boolean array indicating the outliers in the given *data* array. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array containing columns *phase*, *magnitude*, and (optional) *error*. predictor : object that has "fit" and "predict" methods, optional Object which fits the light curve obtained from *data* after rephasing. sigma : number Outlier cutoff criteria. method : function, optional Function to score residuals for outlier detection (default :func:`plotypus.utils.mad`). **Returns** out : array-like, shape = data.shape Boolean array indicating the outliers in the given *data* array. 
""" phase, mag, *err = data.T residuals = numpy.absolute(predictor.predict(colvec(phase)) - mag) outliers = numpy.logical_and((residuals > err[0]) if err else True, residuals > sigma * method(residuals)) return numpy.tile(numpy.vstack(outliers), data.shape[1]) def plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, sanitize_latex=False, color=True, n_phases=100, err_const=0.005, **kwargs): """plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, color=True, n_phases=100, err_const=0.005, **kwargs) Save a plot of the given *lightcurve* to directory *output*. **Parameters** name : str Name of the star. Used in filename and plot title. lightcurve : array-like, shape = [n_samples] Fitted lightcurve. period : number Period to phase time by. data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array containing columns *time*, *magnitude*, and (optional) *error*. *time* should be unphased. output : str, optional Directory to save plot to (default '.'). legend : boolean, optional Whether or not to display legend on plot (default False). color : boolean, optional Whether or not to display color in plot (default True). n_phases : integer, optional Number of phase points in fit (default 100). err_const : number, optional Constant to use in absence of error (default 0.005). 
**Returns** None """ phases = numpy.linspace(0, 1, n_phases, endpoint=False) ax = plt.gca() ax.invert_yaxis() plt.xlim(0,2) # Plot points used phase, mag, *err = get_signal(data).T error = err[0] if err else mag*err_const inliers = plt.errorbar(numpy.hstack((phase,1+phase)), numpy.hstack((mag, mag)), yerr=numpy.hstack((error, error)), ls='None', ms=.01, mew=.01, capsize=0) # Plot outliers rejected phase, mag, *err = get_noise(data).T error = err[0] if err else mag*err_const outliers = plt.errorbar(numpy.hstack((phase,1+phase)), numpy.hstack((mag, mag)), yerr=numpy.hstack((error, error)), ls='None', marker='o' if color else 'x', ms=.01 if color else 4, mew=.01 if color else 1, capsize=0 if color else 1) # Plot the fitted light curve signal, = plt.plot(numpy.hstack((phases,1+phases)), numpy.hstack((lightcurve, lightcurve)), linewidth=1) if legend: plt.legend([signal, inliers, outliers], ["Light Curve", "Inliers", "Outliers"], loc='best') plt.xlabel('Phase ({0:0.7} day period)'.format(period)) plt.ylabel('Magnitude') plt.title(utils.sanitize_latex(name) if sanitize_latex else name) plt.tight_layout(pad=0.1) make_sure_path_exists(output) plt.savefig(path.join(output, name)) plt.clf()
astroswego/plotypus
src/plotypus/lightcurve.py
find_outliers
python
def find_outliers(data, predictor, sigma, method=mad): phase, mag, *err = data.T residuals = numpy.absolute(predictor.predict(colvec(phase)) - mag) outliers = numpy.logical_and((residuals > err[0]) if err else True, residuals > sigma * method(residuals)) return numpy.tile(numpy.vstack(outliers), data.shape[1])
find_outliers(data, predictor, sigma, method=mad) Returns a boolean array indicating the outliers in the given *data* array. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array containing columns *phase*, *magnitude*, and (optional) *error*. predictor : object that has "fit" and "predict" methods, optional Object which fits the light curve obtained from *data* after rephasing. sigma : number Outlier cutoff criteria. method : function, optional Function to score residuals for outlier detection (default :func:`plotypus.utils.mad`). **Returns** out : array-like, shape = data.shape Boolean array indicating the outliers in the given *data* array.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/lightcurve.py#L379-L408
[ "def colvec(X):\n \"\"\"\n Converts a row-vector *X* into a column-vector.\n\n **Parameters**\n\n X : array-like, shape = [n_samples]\n\n **Returns**\n\n out : array-like, shape = [n_samples, 1]\n \"\"\"\n return resize(X, (X.shape[0], 1))\n", "def mad(data, axis=None):\n \"\"\"\n Co...
""" Light curve fitting and plotting functions. """ import numpy numpy.random.seed(0) from scipy.stats import sem from sys import stderr from math import floor from os import path import plotypus.utils from .utils import (verbose_print, make_sure_path_exists, get_signal, get_noise, colvec, mad) from .periodogram import find_period, Lomb_Scargle, rephase from .preprocessing import Fourier from sklearn.cross_validation import cross_val_score from sklearn.linear_model import LassoLarsIC from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.utils import ConvergenceWarning import warnings warnings.filterwarnings("ignore", category=ConvergenceWarning) import matplotlib import matplotlib.pyplot as plt __all__ = [ 'make_predictor', 'get_lightcurve', 'get_lightcurve_from_file', 'find_outliers', 'plot_lightcurve' ] def make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs): """make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs) Makes a predictor object for use in :func:`get_lightcurve`. **Parameters** regressor : object with "fit" and "transform" methods, optional Regression object used for solving Fourier matrix (default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``). Selector : class with "fit" and "predict" methods, optional Model selection class used for finding the best fit (default :class:`sklearn.grid_search.GridSearchCV`). selector_processes : positive integer, optional Number of processes to use for *Selector* (default 1). use_baart : boolean, optional If True, ignores *Selector* and uses Baart's Criteria to find the Fourier degree, within the boundaries (default False). 
fourier_degree : 2-tuple, optional Tuple containing lower and upper bounds on Fourier degree, in that order (default (2, 25)). scoring : str, optional Scoring method to use for *Selector*. This parameter can be: * "r2", in which case use :math:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). **Returns** out : object with "fit" and "predict" methods The created predictor object. """ fourier = Fourier(degree_range=fourier_degree, regressor=regressor) \ if use_baart else Fourier() pipeline = Pipeline([('Fourier', fourier), ('Regressor', regressor)]) if use_baart: return pipeline else: params = {'Fourier__degree': list(range(fourier_degree[0], fourier_degree[1]+1))} return Selector(pipeline, params, scoring=scoring, cv=scoring_cv, n_jobs=selector_processes) def get_lightcurve(data, copy=False, name=None, predictor=None, periodogram=Lomb_Scargle, sigma_clipping=mad, scoring='r2', scoring_cv=3, scoring_processes=1, period=None, min_period=0.2, max_period=32, coarse_precision=1e-5, fine_precision=1e-9, period_processes=1, sigma=20, shift=None, min_phase_cover=0.0, min_observations=1, n_phases=100, verbosity=None, **kwargs): """get_lightcurve(data, copy=False, name=None, predictor=None, periodogram=Lomb_Scargle, sigma_clipping=mad, scoring='r2', scoring_cv=3, scoring_processes=1, period=None, min_period=0.2, max_period=32, coarse_precision=1e-5, fine_precision=1e-9, period_processes=1, sigma=20, shift=None, min_phase_cover=0.0, n_phases=100, verbosity=None, **kwargs) Fits a light curve to the given `data` using the specified methods, with default behavior defined for all methods. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array with columns *time*, *magnitude*, and (optional) *error*. *time* should be unphased. name : string or None, optional Name of star being processed. 
predictor : object that has "fit" and "predict" methods, optional Object which fits the light curve obtained from *data* after rephasing (default ``make_predictor(scoring=scoring, scoring_cv=scoring_cv)``). periodogram : function, optional Function which finds one or more *period*\s. If *period* is already provided, the function is not used. Defaults to :func:`plotypus.periodogram.Lomb_Scargle` sigma_clipping : function, optional Function which takes an array and assigns sigma scores to each element. Defaults to :func:`plotypus.utils.mad`. scoring : str, optional Scoring method used by *predictor*. This parameter can be * "r2", in which case use :func:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). scoring_processes : positive integer, optional Number of processes to use for scoring cross validation (default 1). period : number or None, optional Period of oscillation used in the fit. This parameter can be: * None, in which case the period is obtained with the given *periodogram* function (the default). * A single positive number, giving the period to phase *data*. min_period : non-negative number, optional Lower bound on period obtained by *periodogram* (default 0.2). max_period : non-negative number, optional Upper bound on period obtained by *periodogram* (default 32.0). course_precision : positive number, optional Precision used in first period search sweep (default 1e-5). fine_precision : positive number, optional Precision used in second period search sweep (default 1e-9). period_processes : positive integer, optional Number of processes to use for period finding (default 1). sigma : number, optional Upper bound on score obtained by *sigma_clipping* for a point to be considered an inlier. shift : number or None, optional Phase shift to apply to light curve if provided. 
Light curve is shifted such that max light occurs at ``phase[0]`` if None given (default None). min_phase_cover : number on interval [0, 1], optional Fraction of binned light curve that must contain points in order to proceed. If light curve has insufficient coverage, a warning is printed if "outlier" *verbosity* is on, and None is returned (default 0.0). n_phases : positive integer Number of equally spaced phases to predict magnitudes at (default 100) verbosity : list or None, optional Verbosity level. See :func:`plotypus.utils.verbose_print`. **Returns** out : dict Results of the fit in a dictionary. The keys are: * name : str or None The name of the star. * period : number The star's period. * lightcurve : array-like, shape = [n_phases] Magnitudes of fitted light curve sampled at sample phases. * coefficients : array-like, shape = [n_coeffs] Fitted light curve coefficients. * dA_0 : non-negative number Error on mean magnitude. * phased_data : array-like, shape = [n_samples] *data* transformed from temporal to phase space. * model : predictor object The predictor used to fit the light curve. * R2 : number The :math:`R^2` score of the fit. * MSE : number The mean square error of the fit. * degree : positive integer The degree of the Fourier fit. * shift : number The phase shift applied. * coverage : number on interval [0, 1] The light curve coverage. 
**See also** :func:`get_lightcurve_from_file` """ data = numpy.ma.array(data, copy=copy) phases = numpy.linspace(0, 1, n_phases, endpoint=False) # TODO ### # Replace dA_0 with error matrix dA if predictor is None: predictor = make_predictor(scoring=scoring, scoring_cv=scoring_cv) while True: signal = get_signal(data) if len(signal) <= scoring_cv: verbose_print( "{}: length of signal ({}) less than cv folds ({})".format( name, len(signal), scoring_cv), operation="coverage", verbosity=verbosity) return elif len(signal) < min_observations: verbose_print( "{}: length of signal ({}) " "less than min_observations ({})".format( name, len(signal), min_observations), operation="coverage", verbosity=verbosity) return # Find the period of the inliers if period is not None: _period = period else: verbose_print("{}: finding period".format(name), operation="period", verbosity=verbosity) _period = find_period(signal, min_period, max_period, coarse_precision, fine_precision, periodogram, period_processes) verbose_print("{}: using period {}".format(name, _period), operation="period", verbosity=verbosity) phase, mag, *err = rephase(signal, _period).T # TODO ### # Generalize number of bins to function parameter ``coverage_bins``, which # defaults to 100, the current hard-coded behavior # Determine whether there is sufficient phase coverage coverage = numpy.zeros((100)) for p in phase: coverage[int(floor(p*100))] = 1 coverage = sum(coverage)/100 if coverage < min_phase_cover: verbose_print("{}: {} {}".format(name, coverage, min_phase_cover), operation="coverage", verbosity=verbosity) verbose_print("Insufficient phase coverage", operation="outlier", verbosity=verbosity) return # Predict light curve with warnings.catch_warnings(record=True) as w: try: predictor = predictor.fit(colvec(phase), mag) except Warning: # not sure if this should be only in verbose mode print(name, w, file=stderr) return # Reject outliers and repeat the process if there are any if sigma: outliers = 
find_outliers(rephase(data.data, _period), predictor, sigma, sigma_clipping) num_outliers = sum(outliers)[0] if num_outliers == 0 or \ set.issubset(set(numpy.nonzero(outliers.T[0])[0]), set(numpy.nonzero(data.mask.T[0])[0])): data.mask = outliers break if num_outliers > 0: verbose_print("{}: {} outliers".format(name, sum(outliers)[0]), operation="outlier", verbosity=verbosity) data.mask = numpy.ma.mask_or(data.mask, outliers) # Build light curve and optionally shift to max light lightcurve = predictor.predict([[i] for i in phases]) if shift is None: arg_max_light = lightcurve.argmin() lightcurve = numpy.concatenate((lightcurve[arg_max_light:], lightcurve[:arg_max_light])) shift = arg_max_light/len(phases) data.T[0] = rephase(data.data, _period, shift).T[0] # Grab the coefficients from the model coefficients = predictor.named_steps['Regressor'].coef_ \ if isinstance(predictor, Pipeline) \ else predictor.best_estimator_.named_steps['Regressor'].coef_, # compute R^2 and MSE if they haven't already been # (one or zero have been computed, depending on the predictor) estimator = predictor.best_estimator_ \ if hasattr(predictor, 'best_estimator_') \ else predictor get_score = lambda scoring: predictor.best_score_ \ if hasattr(predictor, 'best_score_') \ and predictor.scoring == scoring \ else cross_val_score(estimator, colvec(phase), mag, cv=scoring_cv, scoring=scoring, n_jobs=scoring_processes).mean() return {'name': name, 'period': _period, 'lightcurve': lightcurve, 'coefficients': coefficients[0], 'dA_0': sem(lightcurve), 'phased_data': data, 'model': predictor, 'R2': get_score('r2'), 'MSE': abs(get_score('mean_squared_error')), 'degree': estimator.get_params()['Fourier__degree'], 'shift': shift, 'coverage': coverage} def get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, verbosity=None, **kwargs): """get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs) Fits a light curve to the data contained in *file* using :func:`get_lightcurve`. 
**Parameters** file : str or file File or filename to load data from. use_cols : iterable or None, optional Iterable of columns to read from data file, or None to read all columns (default None). skiprows : number, optional Number of rows to skip at beginning of *file* (default 0) **Returns** out : dict See :func:`get_lightcurve`. """ data = numpy.loadtxt(file, skiprows=skiprows, usecols=use_cols) if len(data) != 0: masked_data = numpy.ma.array(data=data, mask=None, dtype=float) return get_lightcurve(masked_data, *args, verbosity=verbosity, **kwargs) else: verbose_print("{}: file contains no data points".format(file), operation="coverage", verbosity=verbosity) return ## These functions were used briefly and then not maintained. ## Will make comebacks of some form in a later release. ## # def get_lightcurves_from_file(filename, directories, *args, **kwargs): # return [get_lightcurve_from_file(path.join(d, filename), *args, **kwargs) # for d in directories] # # # def single_periods(data, period, min_points=10, copy=False, *args, **kwargs): # data = numpy.ma.array(data, copy=copy) # time, mag, *err = data.T # # tstart, tfinal = numpy.min(time), numpy.max(time) # periods = numpy.arange(tstart, tfinal+period, period) # data_range = ( # data[numpy.logical_and(time>pstart, time<=pend),:] # for pstart, pend in zip(periods[:-1], periods[1:]) # ) # # return ( # get_lightcurve(d, period=period, *args, **kwargs) # for d in data_range # if d.shape[0] > min_points # ) # # # def single_periods_from_file(filename, *args, use_cols=(0, 1, 2), skiprows=0, # **kwargs): # data = numpy.ma.array(data=numpy.loadtxt(filename, usecols=use_cols, # skiprows=skiprows), # mask=None, dtype=float) # return single_periods(data, *args, **kwargs) def plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, sanitize_latex=False, color=True, n_phases=100, err_const=0.005, **kwargs): """plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, color=True, n_phases=100, 
err_const=0.005, **kwargs) Save a plot of the given *lightcurve* to directory *output*. **Parameters** name : str Name of the star. Used in filename and plot title. lightcurve : array-like, shape = [n_samples] Fitted lightcurve. period : number Period to phase time by. data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array containing columns *time*, *magnitude*, and (optional) *error*. *time* should be unphased. output : str, optional Directory to save plot to (default '.'). legend : boolean, optional Whether or not to display legend on plot (default False). color : boolean, optional Whether or not to display color in plot (default True). n_phases : integer, optional Number of phase points in fit (default 100). err_const : number, optional Constant to use in absence of error (default 0.005). **Returns** None """ phases = numpy.linspace(0, 1, n_phases, endpoint=False) ax = plt.gca() ax.invert_yaxis() plt.xlim(0,2) # Plot points used phase, mag, *err = get_signal(data).T error = err[0] if err else mag*err_const inliers = plt.errorbar(numpy.hstack((phase,1+phase)), numpy.hstack((mag, mag)), yerr=numpy.hstack((error, error)), ls='None', ms=.01, mew=.01, capsize=0) # Plot outliers rejected phase, mag, *err = get_noise(data).T error = err[0] if err else mag*err_const outliers = plt.errorbar(numpy.hstack((phase,1+phase)), numpy.hstack((mag, mag)), yerr=numpy.hstack((error, error)), ls='None', marker='o' if color else 'x', ms=.01 if color else 4, mew=.01 if color else 1, capsize=0 if color else 1) # Plot the fitted light curve signal, = plt.plot(numpy.hstack((phases,1+phases)), numpy.hstack((lightcurve, lightcurve)), linewidth=1) if legend: plt.legend([signal, inliers, outliers], ["Light Curve", "Inliers", "Outliers"], loc='best') plt.xlabel('Phase ({0:0.7} day period)'.format(period)) plt.ylabel('Magnitude') plt.title(utils.sanitize_latex(name) if sanitize_latex else name) plt.tight_layout(pad=0.1) make_sure_path_exists(output) 
plt.savefig(path.join(output, name)) plt.clf()
astroswego/plotypus
src/plotypus/lightcurve.py
plot_lightcurve
python
def plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, sanitize_latex=False, color=True, n_phases=100, err_const=0.005, **kwargs): phases = numpy.linspace(0, 1, n_phases, endpoint=False) ax = plt.gca() ax.invert_yaxis() plt.xlim(0,2) # Plot points used phase, mag, *err = get_signal(data).T error = err[0] if err else mag*err_const inliers = plt.errorbar(numpy.hstack((phase,1+phase)), numpy.hstack((mag, mag)), yerr=numpy.hstack((error, error)), ls='None', ms=.01, mew=.01, capsize=0) # Plot outliers rejected phase, mag, *err = get_noise(data).T error = err[0] if err else mag*err_const outliers = plt.errorbar(numpy.hstack((phase,1+phase)), numpy.hstack((mag, mag)), yerr=numpy.hstack((error, error)), ls='None', marker='o' if color else 'x', ms=.01 if color else 4, mew=.01 if color else 1, capsize=0 if color else 1) # Plot the fitted light curve signal, = plt.plot(numpy.hstack((phases,1+phases)), numpy.hstack((lightcurve, lightcurve)), linewidth=1) if legend: plt.legend([signal, inliers, outliers], ["Light Curve", "Inliers", "Outliers"], loc='best') plt.xlabel('Phase ({0:0.7} day period)'.format(period)) plt.ylabel('Magnitude') plt.title(utils.sanitize_latex(name) if sanitize_latex else name) plt.tight_layout(pad=0.1) make_sure_path_exists(output) plt.savefig(path.join(output, name)) plt.clf()
plot_lightcurve(name, lightcurve, period, data, output='.', legend=False, color=True, n_phases=100, err_const=0.005, **kwargs) Save a plot of the given *lightcurve* to directory *output*. **Parameters** name : str Name of the star. Used in filename and plot title. lightcurve : array-like, shape = [n_samples] Fitted lightcurve. period : number Period to phase time by. data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array containing columns *time*, *magnitude*, and (optional) *error*. *time* should be unphased. output : str, optional Directory to save plot to (default '.'). legend : boolean, optional Whether or not to display legend on plot (default False). color : boolean, optional Whether or not to display color in plot (default True). n_phases : integer, optional Number of phase points in fit (default 100). err_const : number, optional Constant to use in absence of error (default 0.005). **Returns** None
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/lightcurve.py#L411-L492
[ "def get_signal(data):\n \"\"\"\n Returns all of the values in *data* that are not outliers.\n\n **Parameters**\n\n data : masked array\n\n **Returns**\n\n signal : array\n Non-masked values in *data*.\n \"\"\"\n return data[~data.mask].data.reshape(-1, data.shape[1])\n", "def get_n...
""" Light curve fitting and plotting functions. """ import numpy numpy.random.seed(0) from scipy.stats import sem from sys import stderr from math import floor from os import path import plotypus.utils from .utils import (verbose_print, make_sure_path_exists, get_signal, get_noise, colvec, mad) from .periodogram import find_period, Lomb_Scargle, rephase from .preprocessing import Fourier from sklearn.cross_validation import cross_val_score from sklearn.linear_model import LassoLarsIC from sklearn.pipeline import Pipeline from sklearn.grid_search import GridSearchCV from sklearn.utils import ConvergenceWarning import warnings warnings.filterwarnings("ignore", category=ConvergenceWarning) import matplotlib import matplotlib.pyplot as plt __all__ = [ 'make_predictor', 'get_lightcurve', 'get_lightcurve_from_file', 'find_outliers', 'plot_lightcurve' ] def make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs): """make_predictor(regressor=LassoLarsIC(fit_intercept=False), Selector=GridSearchCV, fourier_degree=(2, 25), selector_processes=1, use_baart=False, scoring='r2', scoring_cv=3, **kwargs) Makes a predictor object for use in :func:`get_lightcurve`. **Parameters** regressor : object with "fit" and "transform" methods, optional Regression object used for solving Fourier matrix (default ``sklearn.linear_model.LassoLarsIC(fit_intercept=False)``). Selector : class with "fit" and "predict" methods, optional Model selection class used for finding the best fit (default :class:`sklearn.grid_search.GridSearchCV`). selector_processes : positive integer, optional Number of processes to use for *Selector* (default 1). use_baart : boolean, optional If True, ignores *Selector* and uses Baart's Criteria to find the Fourier degree, within the boundaries (default False). 
fourier_degree : 2-tuple, optional Tuple containing lower and upper bounds on Fourier degree, in that order (default (2, 25)). scoring : str, optional Scoring method to use for *Selector*. This parameter can be: * "r2", in which case use :math:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). **Returns** out : object with "fit" and "predict" methods The created predictor object. """ fourier = Fourier(degree_range=fourier_degree, regressor=regressor) \ if use_baart else Fourier() pipeline = Pipeline([('Fourier', fourier), ('Regressor', regressor)]) if use_baart: return pipeline else: params = {'Fourier__degree': list(range(fourier_degree[0], fourier_degree[1]+1))} return Selector(pipeline, params, scoring=scoring, cv=scoring_cv, n_jobs=selector_processes) def get_lightcurve(data, copy=False, name=None, predictor=None, periodogram=Lomb_Scargle, sigma_clipping=mad, scoring='r2', scoring_cv=3, scoring_processes=1, period=None, min_period=0.2, max_period=32, coarse_precision=1e-5, fine_precision=1e-9, period_processes=1, sigma=20, shift=None, min_phase_cover=0.0, min_observations=1, n_phases=100, verbosity=None, **kwargs): """get_lightcurve(data, copy=False, name=None, predictor=None, periodogram=Lomb_Scargle, sigma_clipping=mad, scoring='r2', scoring_cv=3, scoring_processes=1, period=None, min_period=0.2, max_period=32, coarse_precision=1e-5, fine_precision=1e-9, period_processes=1, sigma=20, shift=None, min_phase_cover=0.0, n_phases=100, verbosity=None, **kwargs) Fits a light curve to the given `data` using the specified methods, with default behavior defined for all methods. **Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array with columns *time*, *magnitude*, and (optional) *error*. *time* should be unphased. name : string or None, optional Name of star being processed. 
predictor : object that has "fit" and "predict" methods, optional Object which fits the light curve obtained from *data* after rephasing (default ``make_predictor(scoring=scoring, scoring_cv=scoring_cv)``). periodogram : function, optional Function which finds one or more *period*\s. If *period* is already provided, the function is not used. Defaults to :func:`plotypus.periodogram.Lomb_Scargle` sigma_clipping : function, optional Function which takes an array and assigns sigma scores to each element. Defaults to :func:`plotypus.utils.mad`. scoring : str, optional Scoring method used by *predictor*. This parameter can be * "r2", in which case use :func:`R^2` (the default) * "mse", in which case use mean square error scoring_cv : positive integer, optional Number of cross validation folds used in scoring (default 3). scoring_processes : positive integer, optional Number of processes to use for scoring cross validation (default 1). period : number or None, optional Period of oscillation used in the fit. This parameter can be: * None, in which case the period is obtained with the given *periodogram* function (the default). * A single positive number, giving the period to phase *data*. min_period : non-negative number, optional Lower bound on period obtained by *periodogram* (default 0.2). max_period : non-negative number, optional Upper bound on period obtained by *periodogram* (default 32.0). course_precision : positive number, optional Precision used in first period search sweep (default 1e-5). fine_precision : positive number, optional Precision used in second period search sweep (default 1e-9). period_processes : positive integer, optional Number of processes to use for period finding (default 1). sigma : number, optional Upper bound on score obtained by *sigma_clipping* for a point to be considered an inlier. shift : number or None, optional Phase shift to apply to light curve if provided. 
Light curve is shifted such that max light occurs at ``phase[0]`` if None given (default None). min_phase_cover : number on interval [0, 1], optional Fraction of binned light curve that must contain points in order to proceed. If light curve has insufficient coverage, a warning is printed if "outlier" *verbosity* is on, and None is returned (default 0.0). n_phases : positive integer Number of equally spaced phases to predict magnitudes at (default 100) verbosity : list or None, optional Verbosity level. See :func:`plotypus.utils.verbose_print`. **Returns** out : dict Results of the fit in a dictionary. The keys are: * name : str or None The name of the star. * period : number The star's period. * lightcurve : array-like, shape = [n_phases] Magnitudes of fitted light curve sampled at sample phases. * coefficients : array-like, shape = [n_coeffs] Fitted light curve coefficients. * dA_0 : non-negative number Error on mean magnitude. * phased_data : array-like, shape = [n_samples] *data* transformed from temporal to phase space. * model : predictor object The predictor used to fit the light curve. * R2 : number The :math:`R^2` score of the fit. * MSE : number The mean square error of the fit. * degree : positive integer The degree of the Fourier fit. * shift : number The phase shift applied. * coverage : number on interval [0, 1] The light curve coverage. 
**See also** :func:`get_lightcurve_from_file` """ data = numpy.ma.array(data, copy=copy) phases = numpy.linspace(0, 1, n_phases, endpoint=False) # TODO ### # Replace dA_0 with error matrix dA if predictor is None: predictor = make_predictor(scoring=scoring, scoring_cv=scoring_cv) while True: signal = get_signal(data) if len(signal) <= scoring_cv: verbose_print( "{}: length of signal ({}) less than cv folds ({})".format( name, len(signal), scoring_cv), operation="coverage", verbosity=verbosity) return elif len(signal) < min_observations: verbose_print( "{}: length of signal ({}) " "less than min_observations ({})".format( name, len(signal), min_observations), operation="coverage", verbosity=verbosity) return # Find the period of the inliers if period is not None: _period = period else: verbose_print("{}: finding period".format(name), operation="period", verbosity=verbosity) _period = find_period(signal, min_period, max_period, coarse_precision, fine_precision, periodogram, period_processes) verbose_print("{}: using period {}".format(name, _period), operation="period", verbosity=verbosity) phase, mag, *err = rephase(signal, _period).T # TODO ### # Generalize number of bins to function parameter ``coverage_bins``, which # defaults to 100, the current hard-coded behavior # Determine whether there is sufficient phase coverage coverage = numpy.zeros((100)) for p in phase: coverage[int(floor(p*100))] = 1 coverage = sum(coverage)/100 if coverage < min_phase_cover: verbose_print("{}: {} {}".format(name, coverage, min_phase_cover), operation="coverage", verbosity=verbosity) verbose_print("Insufficient phase coverage", operation="outlier", verbosity=verbosity) return # Predict light curve with warnings.catch_warnings(record=True) as w: try: predictor = predictor.fit(colvec(phase), mag) except Warning: # not sure if this should be only in verbose mode print(name, w, file=stderr) return # Reject outliers and repeat the process if there are any if sigma: outliers = 
find_outliers(rephase(data.data, _period), predictor, sigma, sigma_clipping) num_outliers = sum(outliers)[0] if num_outliers == 0 or \ set.issubset(set(numpy.nonzero(outliers.T[0])[0]), set(numpy.nonzero(data.mask.T[0])[0])): data.mask = outliers break if num_outliers > 0: verbose_print("{}: {} outliers".format(name, sum(outliers)[0]), operation="outlier", verbosity=verbosity) data.mask = numpy.ma.mask_or(data.mask, outliers) # Build light curve and optionally shift to max light lightcurve = predictor.predict([[i] for i in phases]) if shift is None: arg_max_light = lightcurve.argmin() lightcurve = numpy.concatenate((lightcurve[arg_max_light:], lightcurve[:arg_max_light])) shift = arg_max_light/len(phases) data.T[0] = rephase(data.data, _period, shift).T[0] # Grab the coefficients from the model coefficients = predictor.named_steps['Regressor'].coef_ \ if isinstance(predictor, Pipeline) \ else predictor.best_estimator_.named_steps['Regressor'].coef_, # compute R^2 and MSE if they haven't already been # (one or zero have been computed, depending on the predictor) estimator = predictor.best_estimator_ \ if hasattr(predictor, 'best_estimator_') \ else predictor get_score = lambda scoring: predictor.best_score_ \ if hasattr(predictor, 'best_score_') \ and predictor.scoring == scoring \ else cross_val_score(estimator, colvec(phase), mag, cv=scoring_cv, scoring=scoring, n_jobs=scoring_processes).mean() return {'name': name, 'period': _period, 'lightcurve': lightcurve, 'coefficients': coefficients[0], 'dA_0': sem(lightcurve), 'phased_data': data, 'model': predictor, 'R2': get_score('r2'), 'MSE': abs(get_score('mean_squared_error')), 'degree': estimator.get_params()['Fourier__degree'], 'shift': shift, 'coverage': coverage} def get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, verbosity=None, **kwargs): """get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs) Fits a light curve to the data contained in *file* using :func:`get_lightcurve`. 
**Parameters** file : str or file File or filename to load data from. use_cols : iterable or None, optional Iterable of columns to read from data file, or None to read all columns (default None). skiprows : number, optional Number of rows to skip at beginning of *file* (default 0) **Returns** out : dict See :func:`get_lightcurve`. """ data = numpy.loadtxt(file, skiprows=skiprows, usecols=use_cols) if len(data) != 0: masked_data = numpy.ma.array(data=data, mask=None, dtype=float) return get_lightcurve(masked_data, *args, verbosity=verbosity, **kwargs) else: verbose_print("{}: file contains no data points".format(file), operation="coverage", verbosity=verbosity) return ## These functions were used briefly and then not maintained. ## Will make comebacks of some form in a later release. ## # def get_lightcurves_from_file(filename, directories, *args, **kwargs): # return [get_lightcurve_from_file(path.join(d, filename), *args, **kwargs) # for d in directories] # # # def single_periods(data, period, min_points=10, copy=False, *args, **kwargs): # data = numpy.ma.array(data, copy=copy) # time, mag, *err = data.T # # tstart, tfinal = numpy.min(time), numpy.max(time) # periods = numpy.arange(tstart, tfinal+period, period) # data_range = ( # data[numpy.logical_and(time>pstart, time<=pend),:] # for pstart, pend in zip(periods[:-1], periods[1:]) # ) # # return ( # get_lightcurve(d, period=period, *args, **kwargs) # for d in data_range # if d.shape[0] > min_points # ) # # # def single_periods_from_file(filename, *args, use_cols=(0, 1, 2), skiprows=0, # **kwargs): # data = numpy.ma.array(data=numpy.loadtxt(filename, usecols=use_cols, # skiprows=skiprows), # mask=None, dtype=float) # return single_periods(data, *args, **kwargs) def find_outliers(data, predictor, sigma, method=mad): """find_outliers(data, predictor, sigma, method=mad) Returns a boolean array indicating the outliers in the given *data* array. 
**Parameters** data : array-like, shape = [n_samples, 2] or [n_samples, 3] Photometry array containing columns *phase*, *magnitude*, and (optional) *error*. predictor : object that has "fit" and "predict" methods, optional Object which fits the light curve obtained from *data* after rephasing. sigma : number Outlier cutoff criteria. method : function, optional Function to score residuals for outlier detection (default :func:`plotypus.utils.mad`). **Returns** out : array-like, shape = data.shape Boolean array indicating the outliers in the given *data* array. """ phase, mag, *err = data.T residuals = numpy.absolute(predictor.predict(colvec(phase)) - mag) outliers = numpy.logical_and((residuals > err[0]) if err else True, residuals > sigma * method(residuals)) return numpy.tile(numpy.vstack(outliers), data.shape[1])
astroswego/plotypus
src/plotypus/plotypus.py
process_star
python
def process_star(filename, output, *, extension, star_name, period, shift, parameters, period_label, shift_label, **kwargs): if star_name is None: basename = path.basename(filename) if basename.endswith(extension): star_name = basename[:-len(extension)] else: # file has wrong extension return if parameters is not None: if period is None: try: period = parameters[period_label][star_name] except KeyError: pass if shift is None: try: shift = parameters.loc[shift_label][star_name] except KeyError: pass result = get_lightcurve_from_file(filename, name=star_name, period=period, shift=shift, **kwargs) if result is None: return if output is not None: plot_lightcurve(star_name, result['lightcurve'], result['period'], result['phased_data'], output=output, **kwargs) return result
Processes a star's lightcurve, prints its coefficients, and saves its plotted lightcurve to a file. Returns the result of get_lightcurve.
train
https://github.com/astroswego/plotypus/blob/b1162194ca1d4f6c00e79afe3e6fb40f0eaffcb9/src/plotypus/plotypus.py#L312-L345
[ "def get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0,\n verbosity=None,\n **kwargs):\n \"\"\"get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs)\n\n Fits a light curve to the data contained in *file* using\n :fun...
import numpy from numpy import std from sys import exit, stdin, stdout, stderr from os import path, listdir from argparse import ArgumentError, ArgumentParser, SUPPRESS from pandas import read_table from sklearn.linear_model import (LassoCV, LassoLarsCV, LassoLarsIC, LinearRegression, RidgeCV, ElasticNetCV) from sklearn.grid_search import GridSearchCV from matplotlib import rc_params_from_file from functools import partial from itertools import chain, repeat import plotypus.lightcurve from plotypus.lightcurve import (make_predictor, get_lightcurve_from_file, plot_lightcurve) from plotypus.periodogram import Lomb_Scargle, conditional_entropy import plotypus from plotypus.preprocessing import Fourier from plotypus.utils import mad, pmap, verbose_print from plotypus.resources import matplotlibrc import pkg_resources # part of setuptools __version__ = pkg_resources.require("plotypus")[0].version def get_args(): parser = ArgumentParser() general_group = parser.add_argument_group('General') param_group = parser.add_argument_group('Star Parameters') parallel_group = parser.add_argument_group('Parallel') period_group = parser.add_argument_group('Periodogram') fourier_group = parser.add_argument_group('Fourier') outlier_group = parser.add_argument_group('Outlier Detection') # regression_group = parser.add_argument_group('Regression') parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=__version__)) general_group.add_argument('-i', '--input', type=str, default=None, help='location of stellar observations ' '(default = stdin)') general_group.add_argument('-o', '--output', type=str, default=None, help='location of plots, or nothing if no plots are to be generated ' '(default = None)') general_group.add_argument('-n', '--star-name', type=str, default=None, help='name of star ' '(default = name of input file)') general_group.add_argument('-f', '--format', type=str, default='%.5f', help='format specifier for output table') 
general_group.add_argument('--output-sep', type=str, default='\t', help='column separator string in output table ' '(default = TAB)') general_group.add_argument('--no-header', action='store_true', help='suppress header row in table output') general_group.add_argument('--sanitize-latex', action='store_true', help='enable to sanitize star names for LaTeX formatting') general_group.add_argument('--legend', action='store_true', help='whether legends should be put on the output plots ' '(default = False)') general_group.add_argument('--extension', type=str, default='.dat', metavar='EXT', help='extension which follows a star\'s name in data filenames ' '(default = ".dat")') general_group.add_argument('--skiprows', type=int, default=0, help='number of rows at the head of each file to skip') general_group.add_argument('--use-cols', type=int, nargs='+', default=SUPPRESS, help='columns to use from data file ' '(default = 0 1 2)') general_group.add_argument('-s', '--scoring', type=str, choices=['MSE', 'R2'], default=SUPPRESS, help='scoring metric to use ' '(default = "R2")') general_group.add_argument('--scoring-cv', type=int, default=SUPPRESS, metavar='N', help='number of folds in the scoring regularization_cv validation ' '(default = 3)') general_group.add_argument('--shift', type=float, default=None, help='phase shift to apply to each light curve, or shift to max ' 'light if None given ' '(default = None)') general_group.add_argument('--phase-points', type=int, default=100, metavar='N', help='number of phase points to output ' '(default = 100)') general_group.add_argument('--min-phase-cover', type=float, default=SUPPRESS, metavar='COVER', help='minimum fraction of phases that must have points ' '(default = 0)') general_group.add_argument('--min-observations', type=int, default=1, metavar='N', help='minimum number of observation needed to avoid skipping a star ' '(default = 1)') general_group.add_argument('--matplotlibrc', type=str, default=matplotlibrc, metavar='RC', 
help='matplotlibrc file to use for formatting plots ' '(default file is in plotypus.resources.matplotlibrc)') general_group.add_argument('-v', '--verbosity', type=str, action='append', default=None, choices=['all', 'coverage', 'outlier', 'period'], metavar='OPERATION', help='specifies an operation to print verbose output for, or ' '"all" to print all verbose output ' '(default = None)') param_group.add_argument('--parameters', type=str, default=None, metavar='FILE', help='file containing table of parameters such as period and shift ' '(default = None)') param_group.add_argument('--param-sep', type=str, default="\\s+", help='string or regex to use as column separator when reading ' 'parameters file ' '(default = any whitespace)') param_group.add_argument('--period-label', type=str, default='Period', metavar='LABEL', help='title of period column in parameters file ' '(default = Period)') param_group.add_argument('--shift-label', type=str, default='Shift', metavar='LABEL', help='title of shift column in parameters file ' '(default = Shift)') parallel_group.add_argument('--star-processes', type=int, default=1, metavar='N', help='number of stars to process in parallel ' '(default = 1)') parallel_group.add_argument('--selector-processes', type=int, default=SUPPRESS, metavar='N', help='number of processes to use for each selector ' '(default depends on selector used)') parallel_group.add_argument('--scoring-processes', type=int, default=SUPPRESS, metavar='N', help='number of processes to use for scoring, if not done by selector ' '(default = 1)') parallel_group.add_argument('--period-processes', type=int, default=1, metavar='N', help='number of periods to process in parallel ' '(default = 1)') period_group.add_argument('--period', type=float, default=None, help='period to use for all stars ' '(default = None)') period_group.add_argument('--min-period', type=float, default=SUPPRESS, metavar='P', help='minimum period of each star ' '(default = 0.2)') 
period_group.add_argument('--max-period', type=float, default=SUPPRESS, metavar='P', help='maximum period of each star ' '(default = 32.0)') period_group.add_argument('--coarse-precision', type=float, default=SUPPRESS, help='level of granularity on first pass ' '(default = 0.00001)') period_group.add_argument('--fine-precision', type=float, default=SUPPRESS, help='level of granularity on second pass ' '(default = 0.000000001)') period_group.add_argument('--periodogram', type=str, choices=["Lomb_Scargle", "conditional_entropy"], default="Lomb_Scargle", help='method for determining period ' '(default = Lomb_Scargle)') fourier_group.add_argument('-d', '--fourier-degree', type=int, nargs=2, default=(2, 20), metavar=('MIN', 'MAX'), help='range of degrees of fourier fits to use ' '(default = 2 20)') fourier_group.add_argument('-r', '--regressor', choices=['LassoCV', 'LassoLarsCV', 'LassoLarsIC', 'OLS', 'RidgeCV', 'ElasticNetCV'], default='LassoLarsIC', help='type of regressor to use ' '(default = "Lasso")') fourier_group.add_argument('--selector', choices=['Baart', 'GridSearch'], default='GridSearch', help='type of model selector to use ' '(default = "GridSearch")') fourier_group.add_argument('--series-form', type=str, default='cos', choices=['sin', 'cos'], help='form of Fourier series to use in coefficient output, ' 'does not affect the fit ' '(default = "cos")') fourier_group.add_argument('--max-iter', type=int, default=1000, metavar='N', help='maximum number of iterations in the regularization path ' '(default = 1000)') fourier_group.add_argument('--regularization-cv', type=int, default=None, metavar='N', help='number of folds used in regularization regularization_cv validation ' '(default = 3)') outlier_group.add_argument('--sigma', type=float, default=SUPPRESS, help='rejection criterion for outliers ' '(default = 20)') outlier_group.add_argument('--sigma-clipping', type=str, choices=["std", "mad"], default="mad", help='sigma clipping metric to use ' '(default = 
"mad")') args = parser.parse_args() if args.output is not None: rcParams = rc_params_from_file(fname=args.matplotlibrc, fail_on_error=args.output) plotypus.lightcurve.matplotlib.rcParams = rcParams regressor_choices = { "LassoCV" : LassoCV(max_iter=args.max_iter, cv=args.regularization_cv, fit_intercept=False), "LassoLarsCV" : LassoLarsCV(max_iter=args.max_iter, cv=args.regularization_cv, fit_intercept=False), "LassoLarsIC" : LassoLarsIC(max_iter=args.max_iter, fit_intercept=False), "OLS" : LinearRegression(fit_intercept=False), "RidgeCV" : RidgeCV(cv=args.regularization_cv, fit_intercept=False), "ElasticNetCV" : ElasticNetCV(max_iter=args.max_iter, cv=args.regularization_cv, fit_intercept=False) } selector_choices = { "Baart" : None, "GridSearch" : GridSearchCV } periodogram_choices = { "Lomb_Scargle" : Lomb_Scargle, "conditional_entropy" : conditional_entropy } sigma_clipping_choices = { "std" : std, "mad" : mad } if hasattr(args, 'scoring'): scoring_choices = { 'R2' : 'r2', 'MSE' : 'mean_squared_error' } args.scoring = scoring_choices[args.scoring] args.regressor = regressor_choices[args.regressor] Selector = selector_choices[args.selector] or GridSearchCV args.periodogram = periodogram_choices[args.periodogram] args.sigma_clipping = sigma_clipping_choices[args.sigma_clipping] args.predictor = make_predictor(Selector=Selector, use_baart=(args.selector == 'Baart'), **vars(args)) args.phases = numpy.arange(0, 1, 1/args.phase_points) if args.parameters is not None: args.parameters = read_table(args.parameters, args.param_sep, index_col=0, engine='python') return args def main(): args = get_args() min_degree, max_degree = args.fourier_degree filenames = list(map(lambda x: x.strip(), _get_files(args.input))) filepaths = map(lambda filename: filename if path.isfile(filename) else path.join(args.input, filename), filenames) # a dict containing all options which can be pickled, because # all parameters to pmap must be picklable picklable_args = {k: vars(args)[k] for k 
in vars(args) if k not in {'input'}} sep = args.output_sep if not args.no_header: # print file header print(*['Name', 'Period', 'Shift', 'Coverage', 'Inliers', 'Outliers', 'R^2', 'MSE', 'MaxDegree', 'Params', 'A_0', 'dA_0', sep.join(map(('A_{0}' + sep + 'Phi_{0}').format, range(1, max_degree+1))), sep.join(map(('R_{0}1' + sep + 'phi_{0}1').format, range(2, max_degree+1))), sep.join(map('Phase{}'.format, range(args.phase_points)))], sep=sep) printer = lambda result: _print_star(result, max_degree, args.series_form, args.format, sep) \ if result is not None else None pmap(process_star, filepaths, callback=printer, processes=args.star_processes, **picklable_args) def _print_star(result, max_degree, form, fmt, sep): if result is None: return # function which formats every number in a sequence according to fmt format_all = partial(map, lambda x: fmt % x) # count inliers and outliers points = result['phased_data'][:,0].size outliers = numpy.ma.count_masked(result['phased_data'][:, 0]) inliers = points - outliers # get fourier coefficients and compute ratios coefs = Fourier.phase_shifted_coefficients(result['coefficients'], shift=result['shift'], form=form) _coefs = numpy.concatenate(([coefs[0]], [result['dA_0']], coefs[1:])) fourier_ratios = Fourier.fourier_ratios(coefs) # create the vectors of zeroes coef_zeros = repeat('0', times=(2*max_degree + 1 - len(coefs))) ratio_zeros = repeat('0', times=(2*(max_degree - 1) - len(fourier_ratios))) max_degree = numpy.trim_zeros(coefs[1::2], 'b').size n_params = numpy.count_nonzero(coefs[1::2]) # print the entry for the star with tabs as separators # and itertools.chain to separate the different results into a # continuous list which is then unpacked print(*chain(*[[result['name']], map(str, [result['period'], result['shift'], result['coverage'], inliers, outliers, result['R2'], result['MSE'], max_degree, n_params]), # coefficients and fourier ratios with trailing zeros # formatted defined by the user-provided fmt string 
format_all(_coefs), coef_zeros, format_all(fourier_ratios), ratio_zeros, format_all(result['lightcurve'])]), sep=sep) def _get_files(input): if input is None: return stdin elif input[0] == "@": with open(input[1:], 'r') as f: return map(lambda x: x.strip(), f.readlines()) elif path.isfile(input): return [input] elif path.isdir(input): return sorted(listdir(input)) else: raise FileNotFoundError('file {} not found'.format(input)) if __name__ == "__main__": exit(main())
robertpeteuil/aws-shortcuts
awss/debg.py
init
python
def init(deb1, deb2=False): global DEBUG # pylint: disable=global-statement global DEBUGALL # pylint: disable=global-statement DEBUG = deb1 DEBUGALL = deb2
Initialize DEBUG and DEBUGALL. Allows other modules to set DEBUG and DEBUGALL, so their call to dprint or dprintx generate output. Args: deb1 (bool): value of DEBUG to set deb2 (bool): optional - value of DEBUGALL to set, defaults to False.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/debg.py#L39-L54
null
"""Debug print functions that execute if debug mode initialized. The debug print functions only print if one of the debug-modes was set by a previous call to this module's init() function. There are two debug-modes: DEBUG allows calls to the dprint function to print DEBUGALL allows calls to the dprintx function to print License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from awss.colors import C_NORM, C_TI DEBUG = False DEBUGALL = False def dprint(item1, item2=""): """Print Text if DEBUG set. Args: item1 (str): item to print item2 (str): optional 2nd item to print """ if DEBUG: print(item1, "%s%s%s" % (C_TI, item2, C_NORM)) def dprintx(passeditem, special=False): """Print Text if DEBUGALL set, optionally with PrettyPrint. Args: passeditem (str): item to print special (bool): determines if item prints with PrettyPrint or regular print. """ if DEBUGALL: if special: from pprint import pprint pprint(passeditem) else: print("%s%s%s" % (C_TI, passeditem, C_NORM))
robertpeteuil/aws-shortcuts
awss/debg.py
dprintx
python
def dprintx(passeditem, special=False): if DEBUGALL: if special: from pprint import pprint pprint(passeditem) else: print("%s%s%s" % (C_TI, passeditem, C_NORM))
Print Text if DEBUGALL set, optionally with PrettyPrint. Args: passeditem (str): item to print special (bool): determines if item prints with PrettyPrint or regular print.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/debg.py#L69-L83
null
"""Debug print functions that execute if debug mode initialized. The debug print functions only print if one of the debug-modes was set by a previous call to this module's init() function. There are two debug-modes: DEBUG allows calls to the dprint function to print DEBUGALL allows calls to the dprintx function to print License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from awss.colors import C_NORM, C_TI DEBUG = False DEBUGALL = False def init(deb1, deb2=False): """Initialize DEBUG and DEBUGALL. Allows other modules to set DEBUG and DEBUGALL, so their call to dprint or dprintx generate output. Args: deb1 (bool): value of DEBUG to set deb2 (bool): optional - value of DEBUGALL to set, defaults to False. """ global DEBUG # pylint: disable=global-statement global DEBUGALL # pylint: disable=global-statement DEBUG = deb1 DEBUGALL = deb2 def dprint(item1, item2=""): """Print Text if DEBUG set. Args: item1 (str): item to print item2 (str): optional 2nd item to print """ if DEBUG: print(item1, "%s%s%s" % (C_TI, item2, C_NORM))
robertpeteuil/aws-shortcuts
awss/awsc.py
get_inst_info
python
def get_inst_info(qry_string): qry_prefix = "EC2C.describe_instances(" qry_real = qry_prefix + qry_string + ")" qry_results = eval(qry_real) # pylint: disable=eval-used return qry_results
Get details for instances that match the qry_string. Execute a query against the AWS EC2 client object, that is based on the contents of qry_string. Args: qry_string (str): the query to be used against the aws ec2 client. Returns: qry_results (dict): raw information returned from AWS.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/awsc.py#L53-L68
null
"""Communicate with AWS EC2 to get data and interact with instances. Functions for retrieving data for queried instances, retrieving the name of the image of an instance (AMI Name), and for starting or stopping instances. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ import boto3 EC2C = "" EC2R = "" def init(): # pragma: no cover """Attach global vars EC2C, and EC2R to the AWS service. Must be called before any other functions in this module will work in production mode. To allow testing on CI servers without AWS credentials, this assignment is done in this function instead of the module itself - as the boto3 methods below require AWS credentials on the host. """ global EC2C # pylint: disable=global-statement global EC2R # pylint: disable=global-statement EC2C = boto3.client('ec2') EC2R = boto3.resource('ec2') def get_all_aminames(i_info): """Get Image_Name for each instance in i_info. Args: i_info (dict): information on instances and details. Returns: i_info (dict): i_info is returned with the aminame added for each instance. 
""" for i in i_info: try: # pylint: disable=maybe-no-member i_info[i]['aminame'] = EC2R.Image(i_info[i]['ami']).name except AttributeError: i_info[i]['aminame'] = "Unknown" return i_info def get_one_aminame(inst_img_id): """Get Image_Name for the image_id specified. Args: inst_img_id (str): image_id to get name value from. Returns: aminame (str): name of the image. """ try: aminame = EC2R.Image(inst_img_id).name except AttributeError: aminame = "Unknown" return aminame def startstop(inst_id, cmdtodo): """Start or Stop the Specified Instance. Args: inst_id (str): instance-id to perform command against cmdtodo (str): command to perform (start or stop) Returns: response (dict): reponse returned from AWS after performing specified action. """ tar_inst = EC2R.Instance(inst_id) thecmd = getattr(tar_inst, cmdtodo) response = thecmd() return response
robertpeteuil/aws-shortcuts
awss/awsc.py
get_all_aminames
python
def get_all_aminames(i_info): for i in i_info: try: # pylint: disable=maybe-no-member i_info[i]['aminame'] = EC2R.Image(i_info[i]['ami']).name except AttributeError: i_info[i]['aminame'] = "Unknown" return i_info
Get Image_Name for each instance in i_info. Args: i_info (dict): information on instances and details. Returns: i_info (dict): i_info is returned with the aminame added for each instance.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/awsc.py#L71-L87
null
"""Communicate with AWS EC2 to get data and interact with instances. Functions for retrieving data for queried instances, retrieving the name of the image of an instance (AMI Name), and for starting or stopping instances. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ import boto3 EC2C = "" EC2R = "" def init(): # pragma: no cover """Attach global vars EC2C, and EC2R to the AWS service. Must be called before any other functions in this module will work in production mode. To allow testing on CI servers without AWS credentials, this assignment is done in this function instead of the module itself - as the boto3 methods below require AWS credentials on the host. """ global EC2C # pylint: disable=global-statement global EC2R # pylint: disable=global-statement EC2C = boto3.client('ec2') EC2R = boto3.resource('ec2') def get_inst_info(qry_string): """Get details for instances that match the qry_string. Execute a query against the AWS EC2 client object, that is based on the contents of qry_string. Args: qry_string (str): the query to be used against the aws ec2 client. Returns: qry_results (dict): raw information returned from AWS. 
""" qry_prefix = "EC2C.describe_instances(" qry_real = qry_prefix + qry_string + ")" qry_results = eval(qry_real) # pylint: disable=eval-used return qry_results def get_one_aminame(inst_img_id): """Get Image_Name for the image_id specified. Args: inst_img_id (str): image_id to get name value from. Returns: aminame (str): name of the image. """ try: aminame = EC2R.Image(inst_img_id).name except AttributeError: aminame = "Unknown" return aminame def startstop(inst_id, cmdtodo): """Start or Stop the Specified Instance. Args: inst_id (str): instance-id to perform command against cmdtodo (str): command to perform (start or stop) Returns: response (dict): reponse returned from AWS after performing specified action. """ tar_inst = EC2R.Instance(inst_id) thecmd = getattr(tar_inst, cmdtodo) response = thecmd() return response
robertpeteuil/aws-shortcuts
awss/awsc.py
get_one_aminame
python
def get_one_aminame(inst_img_id): try: aminame = EC2R.Image(inst_img_id).name except AttributeError: aminame = "Unknown" return aminame
Get Image_Name for the image_id specified. Args: inst_img_id (str): image_id to get name value from. Returns: aminame (str): name of the image.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/awsc.py#L90-L103
null
"""Communicate with AWS EC2 to get data and interact with instances. Functions for retrieving data for queried instances, retrieving the name of the image of an instance (AMI Name), and for starting or stopping instances. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ import boto3 EC2C = "" EC2R = "" def init(): # pragma: no cover """Attach global vars EC2C, and EC2R to the AWS service. Must be called before any other functions in this module will work in production mode. To allow testing on CI servers without AWS credentials, this assignment is done in this function instead of the module itself - as the boto3 methods below require AWS credentials on the host. """ global EC2C # pylint: disable=global-statement global EC2R # pylint: disable=global-statement EC2C = boto3.client('ec2') EC2R = boto3.resource('ec2') def get_inst_info(qry_string): """Get details for instances that match the qry_string. Execute a query against the AWS EC2 client object, that is based on the contents of qry_string. Args: qry_string (str): the query to be used against the aws ec2 client. Returns: qry_results (dict): raw information returned from AWS. 
""" qry_prefix = "EC2C.describe_instances(" qry_real = qry_prefix + qry_string + ")" qry_results = eval(qry_real) # pylint: disable=eval-used return qry_results def get_all_aminames(i_info): """Get Image_Name for each instance in i_info. Args: i_info (dict): information on instances and details. Returns: i_info (dict): i_info is returned with the aminame added for each instance. """ for i in i_info: try: # pylint: disable=maybe-no-member i_info[i]['aminame'] = EC2R.Image(i_info[i]['ami']).name except AttributeError: i_info[i]['aminame'] = "Unknown" return i_info def startstop(inst_id, cmdtodo): """Start or Stop the Specified Instance. Args: inst_id (str): instance-id to perform command against cmdtodo (str): command to perform (start or stop) Returns: response (dict): reponse returned from AWS after performing specified action. """ tar_inst = EC2R.Instance(inst_id) thecmd = getattr(tar_inst, cmdtodo) response = thecmd() return response
robertpeteuil/aws-shortcuts
awss/awsc.py
startstop
python
def startstop(inst_id, cmdtodo): tar_inst = EC2R.Instance(inst_id) thecmd = getattr(tar_inst, cmdtodo) response = thecmd() return response
Start or Stop the Specified Instance. Args: inst_id (str): instance-id to perform command against cmdtodo (str): command to perform (start or stop) Returns: response (dict): reponse returned from AWS after performing specified action.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/awsc.py#L106-L120
null
"""Communicate with AWS EC2 to get data and interact with instances. Functions for retrieving data for queried instances, retrieving the name of the image of an instance (AMI Name), and for starting or stopping instances. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ import boto3 EC2C = "" EC2R = "" def init(): # pragma: no cover """Attach global vars EC2C, and EC2R to the AWS service. Must be called before any other functions in this module will work in production mode. To allow testing on CI servers without AWS credentials, this assignment is done in this function instead of the module itself - as the boto3 methods below require AWS credentials on the host. """ global EC2C # pylint: disable=global-statement global EC2R # pylint: disable=global-statement EC2C = boto3.client('ec2') EC2R = boto3.resource('ec2') def get_inst_info(qry_string): """Get details for instances that match the qry_string. Execute a query against the AWS EC2 client object, that is based on the contents of qry_string. Args: qry_string (str): the query to be used against the aws ec2 client. Returns: qry_results (dict): raw information returned from AWS. 
""" qry_prefix = "EC2C.describe_instances(" qry_real = qry_prefix + qry_string + ")" qry_results = eval(qry_real) # pylint: disable=eval-used return qry_results def get_all_aminames(i_info): """Get Image_Name for each instance in i_info. Args: i_info (dict): information on instances and details. Returns: i_info (dict): i_info is returned with the aminame added for each instance. """ for i in i_info: try: # pylint: disable=maybe-no-member i_info[i]['aminame'] = EC2R.Image(i_info[i]['ami']).name except AttributeError: i_info[i]['aminame'] = "Unknown" return i_info def get_one_aminame(inst_img_id): """Get Image_Name for the image_id specified. Args: inst_img_id (str): image_id to get name value from. Returns: aminame (str): name of the image. """ try: aminame = EC2R.Image(inst_img_id).name except AttributeError: aminame = "Unknown" return aminame
robertpeteuil/aws-shortcuts
awss/core.py
main
python
def main():
    """Collect user args and dispatch to the chosen command.

    Builds the argument parser, derives the debug levels from the
    repeated ``-d`` flag, initialises the AWS layer and debug
    output, then invokes the handler function the chosen
    sub-parser attached to the parsed options.
    """
    options = parser_setup().parse_args()
    show_debug = bool(options.debug > 0)
    show_debug_all = bool(options.debug > 1)
    awsc.init()
    debg.init(show_debug, show_debug_all)
    print(C_NORM)
    # set_defaults(func=...) in parser_setup picked the handler
    options.func(options)
    sys.exit()
Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L42-L61
[ "def init(): # pragma: no cover\n \"\"\"Attach global vars EC2C, and EC2R to the AWS service.\n\n Must be called before any other functions in this module\n will work in production mode.\n\n To allow testing on CI servers without AWS credentials,\n this assignment is done in this function instead of...
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. 
""" qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. 
Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. 
Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. 
Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
parser_setup
python
def parser_setup():
    """Create ArgumentParser object to parse command line arguments.

    One sub-parser per command (list, start, stop, ssh) holds the
    command-specific arguments and records its handler via
    ``set_defaults(func=...)``.

    Returns:
        parser (object): containing ArgumentParser data and methods.

    Raises:
        SystemExit: if the user enters invalid args.

    """
    def _add_target_args(sub, id_help):
        # NAME positional and -i/--id are shared by every command;
        # added first so help ordering matches across commands.
        sub.add_argument('instname', nargs='?', metavar='NAME',
                         help='specify instance by name')
        sub.add_argument('-i', '--id', action="store", help=id_help)

    def _add_debug_arg(sub):
        # Hidden, repeatable debug flag common to every command;
        # added last so it never reorders the visible help.
        sub.add_argument('-d', '--debug', action="count", default=0,
                         help=argparse.SUPPRESS)

    parser = argparse.ArgumentParser(
        description="Control AWS instances from the command line with:"
                    " list, start, stop or ssh.",
        prog='awss',
        usage="\tawss {command} [ 'NAME' ] [ '-i ID' ] [ OPTIONS ]"
              "\n\t{command} = list | start | stop | ssh")
    parser.add_argument('-v', '--version', action="version",
                        version="awss {0}".format(__version__))
    subparsers = parser.add_subparsers(
        title="For additional help on command parameters",
        dest='command',
        description="type 'awss {command} -h', where {command} is:"
                    " list, start, stop or ssh")

    # LIST command
    parser_list = subparsers.add_parser(
        'list',
        description="List AWS instances from the command line. "
                    "'awss list' will list instances specified using"
                    " combinations of NAME, instance-id and"
                    " current-state. If no specifications are given, "
                    " all instances will be listed. ex: 'awss list"
                    " TEST -r' will list instances named 'TEST' that"
                    " are currently running.",
        usage="\tawss list [none] [NAME] [-i ID] [-r] [-s] [OPTIONS]")
    _add_target_args(parser_list, 'specify instance by id')
    parser_list.add_argument('-s', '--stopped', action='store_const',
                             dest="inst_state", const="stopped",
                             help='list stopped instances')
    parser_list.add_argument('-r', '--running', action='store_const',
                             dest="inst_state", const="running",
                             help='list running instances')
    _add_debug_arg(parser_list)
    parser_list.set_defaults(func=cmd_list)

    # START command
    parser_start = subparsers.add_parser(
        'start', usage="\tawss start [NAME] [-i ID] [-h]",
        description="Start an AWS instance from the command line.")
    _add_target_args(parser_start, 'specify instance-id')
    _add_debug_arg(parser_start)
    parser_start.set_defaults(func=cmd_startstop)

    # STOP command
    parser_stop = subparsers.add_parser(
        'stop', usage="\tawss stop [NAME] [-i ID] [-h]",
        description="Stop an AWS instance from the command line.")
    _add_target_args(parser_stop, 'specify instance-id')
    _add_debug_arg(parser_stop)
    parser_stop.set_defaults(func=cmd_startstop)

    # SSH command
    parser_ssh = subparsers.add_parser(
        'ssh', usage="\tawss ssh [NAME] [-i ID] [-u USER] [-p] [-h]",
        description="Connect to an AWS instance via ssh.")
    _add_target_args(parser_ssh, 'specify instance-id')
    parser_ssh.add_argument('-u', '--user', action="store",
                            help='override default username for ssh')
    parser_ssh.add_argument('-p', '--nopem', action="store_true",
                            default=False,
                            help='connect without PEM key')
    _add_debug_arg(parser_ssh)
    parser_ssh.set_defaults(func=cmd_ssh)

    return parser
Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L64-L157
null
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. 
""" (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. 
Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. 
Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. """ i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. 
param_str (str): the title to display before the list. """ qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. 
""" if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. 
Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. 
Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
cmd_list
python
def cmd_list(options): (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str))
Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L160-L173
[ "def get_all_aminames(i_info):\n \"\"\"Get Image_Name for each instance in i_info.\n\n Args:\n i_info (dict): information on instances and details.\n Returns:\n i_info (dict): i_info is returned with the aminame\n added for each instance.\n\n \"\"\"\n for i in i_in...
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. Args: options (object): contains args and data from parser. 
""" statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. 
""" qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. 
Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. 
Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. 
Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
cmd_startstop
python
def cmd_startstop(options): statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM))
Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. Args: options (object): contains args and data from parser.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L176-L201
[ "def startstop(inst_id, cmdtodo):\n \"\"\"Start or Stop the Specified Instance.\n\n Args:\n inst_id (str): instance-id to perform command against\n cmdtodo (str): command to perform (start or stop)\n Returns:\n response (dict): reponse returned from AWS after\n ...
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. 
The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. 
Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. """ i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. 
param_str (str): the title to display before the list. """ qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. 
""" if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. 
Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. 
Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
cmd_ssh
python
def cmd_ssh(options): import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True)
Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L204-L242
[ "def get_one_aminame(inst_img_id):\n \"\"\"Get Image_Name for the image_id specified.\n\n Args:\n inst_img_id (str): image_id to get name value from.\n Returns:\n aminame (str): name of the image.\n\n \"\"\"\n try:\n aminame = EC2R.Image(inst_img_id).name\n except AttributeErr...
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. 
param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. """ i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. 
""" qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. 
Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. 
Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. 
Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
cmd_ssh_user
python
def cmd_ssh_user(tar_aminame, inst_name): if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username
Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L245-L265
[ "def dprint(item1, item2=\"\"):\n \"\"\"Print Text if DEBUG set.\n\n Args:\n item1 (str): item to print\n item2 (str): optional 2nd item to print\n\n \"\"\"\n if DEBUG:\n print(item1, \"%s%s%s\" % (C_TI, item2, C_NORM))\n" ]
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. """ i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. 
Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. """ qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. 
param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. 
Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. 
""" valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
gather_data
python
def gather_data(options): (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str)
Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L268-L288
[ "def get_inst_info(qry_string):\n \"\"\"Get details for instances that match the qry_string.\n\n Execute a query against the AWS EC2 client object, that is\n based on the contents of qry_string.\n\n Args:\n qry_string (str): the query to be used against the aws ec2 client.\n Returns:\n ...
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. 
""" qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. 
Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. 
Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. 
Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
process_results
python
def process_results(qry_results): i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info
Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L291-L316
[ "def dprint(item1, item2=\"\"):\n \"\"\"Print Text if DEBUG set.\n\n Args:\n item1 (str): item to print\n item2 (str): optional 2nd item to print\n\n \"\"\"\n if DEBUG:\n print(item1, \"%s%s%s\" % (C_TI, item2, C_NORM))\n", "def process_tags(inst_tags):\n \"\"\"Create dict of i...
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. 
Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. """ qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. 
param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. 
Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. 
""" valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
process_tags
python
def process_tags(inst_tags): tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict
Create dict of instance tags as only name:value pairs.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L319-L324
null
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. """ qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, 
param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". 
format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. 
Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
qry_create
python
def qry_create(options): qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str)
Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L327-L373
[ "def dprint(item1, item2=\"\"):\n \"\"\"Print Text if DEBUG set.\n\n Args:\n item1 (str): item to print\n item2 (str): optional 2nd item to print\n\n \"\"\"\n if DEBUG:\n print(item1, \"%s%s%s\" % (C_TI, item2, C_NORM))\n", "def dprintx(passeditem, special=False):\n \"\"\"Print...
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. 
numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified. 
""" qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. 
Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
qry_helper
python
def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str)
Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L376-L400
null
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. 
""" qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". 
format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. 
Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
list_instances
python
def list_instances(i_info, param_str, numbered=False): print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True)
Display a list of all instances and their details. Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L403-L431
[ "def dprintx(passeditem, special=False):\n \"\"\"Print Text if DEBUGALL set, optionally with PrettyPrint.\n\n Args:\n passeditem (str): item to print\n special (bool): determines if item prints with PrettyPrint\n or regular print.\n\n \"\"\"\n if DEBUGALL:\n i...
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. 
""" qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. 
""" if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. 
Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
list_tags
python
def list_tags(tags): tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer)
Print tags in dict so they allign with listing above.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L434-L455
null
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. 
""" qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. 
Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. 
Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". 
format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
determine_inst
python
def determine_inst(i_info, param_str, command): qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx)
Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. Raises: SystemExit: if no instances are match parameters specified.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L458-L489
[ "def user_picklist(i_info, command):\n \"\"\"Display list of instances matching args and ask user to select target.\n\n Instance list displayed and user asked to enter the number corresponding\n to the desired target instance, or '0' to abort.\n\n Args:\n i_info (dict): information on instances a...
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. 
""" qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. 
Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. 
""" valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
user_picklist
python
def user_picklist(i_info, command):
    """Show a numbered instance list and prompt until a valid pick is made.

    Fetches AMI names for display, prints the numbered listing, then loops
    on user input until user_entry() accepts the choice (or aborts).

    Args:
        i_info (dict): information on instances and details.
        command (str): command specified on the command line.
    Returns:
        tar_idx (int): the dictionary index number of the targeted instance.
    """
    awsc.get_all_aminames(i_info)
    list_instances(i_info, "", True)
    prompt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})"
              " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI,
                                           command, len(i_info)))
    while True:
        raw_reply = obtain_input(prompt)
        try:
            reply_num = int(raw_reply)
        except ValueError:
            # sentinel: out-of-range value forces the invalid-entry path
            reply_num = 999
        (tar_idx, accepted) = user_entry(reply_num, len(i_info), command)
        if accepted:
            return tar_idx
Display a list of instances matching args and ask the user to select a target. The instance list is displayed and the user is asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L492-L518
[ "def get_all_aminames(i_info):\n \"\"\"Get Image_Name for each instance in i_info.\n\n Args:\n i_info (dict): information on instances and details.\n Returns:\n i_info (dict): i_info is returned with the aminame\n added for each instance.\n\n \"\"\"\n for i in i_in...
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. 
""" qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. 
Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. 
Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) def user_entry(entry_int, num_inst, command): """Validate user entry and returns index and validity flag. Processes the user entry and take the appropriate action: abort if '0' entered, set validity flag and index is valid entry, else return invalid index and the still unset validity flag. Args: entry_int (int): a number entered or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx(int): the dictionary index number of the targeted instance valid_entry (bool): specifies if entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list it triggers the "abort" option offered to the user. """ valid_entry = False if not entry_int: print("{}aborting{} - {} instance\n". format(C_ERR, C_NORM, command)) sys.exit() elif entry_int >= 1 and entry_int <= num_inst: entry_idx = entry_int - 1 valid_entry = True else: print("{}Invalid entry:{} enter a number between 1" " and {}.".format(C_ERR, C_NORM, num_inst)) entry_idx = entry_int return (entry_idx, valid_entry) if __name__ == '__main__': main()
robertpeteuil/aws-shortcuts
awss/core.py
user_entry
python
def user_entry(entry_int, num_inst, command):
    """Validate a user's menu choice and return (index, validity flag).

    Takes the appropriate action for the entry: aborts the program when
    '0' was entered, returns a zero-based index with the flag set when
    the entry is in range, otherwise reports the error and returns the
    raw entry with the flag still unset.

    Args:
        entry_int (int): a number entered, or 999 if a non-int was entered.
        num_inst (int): the largest valid number that can be entered.
        command (str): program command to display in prompt.
    Returns:
        entry_idx (int): the dictionary index number of the targeted instance.
        valid_entry (bool): specifies whether entry_idx is valid.
    Raises:
        SystemExit: when the user enters 0 to take the "abort" option.
    """
    if not entry_int:
        print("{}aborting{} - {} instance\n".
              format(C_ERR, C_NORM, command))
        sys.exit()
    if 1 <= entry_int <= num_inst:
        # convert the 1-based menu number to a 0-based dict index
        return (entry_int - 1, True)
    print("{}Invalid entry:{} enter a number between 1"
          " and {}.".format(C_ERR, C_NORM, num_inst))
    return (entry_int, False)
Validate user entry and return index and validity flag. Process the user entry and take the appropriate action: abort if '0' is entered; set the validity flag and index if the entry is valid; otherwise return the invalid index and the still-unset validity flag. Args: entry_int (int): a number entered, or 999 if a non-int was entered. num_inst (int): the largest valid number that can be entered. command (str): program command to display in prompt. Returns: entry_idx (int): the dictionary index number of the targeted instance. valid_entry (bool): specifies whether entry_idx is valid. Raises: SystemExit: if the user enters 0 when they are choosing from the list, it triggers the "abort" option offered to the user.
train
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L526-L557
null
"""Control and connect to AWS EC2 instances from command line. The AWS Shortcuts (awss) library is a CLI utility allowing listing, starting, stopping and connecting to AWS EC2 instances by Name or ID. License: AWSS - Control and connect to AWS EC2 instances from command line Copyright (C) 2017-2018 Robert Peteuil This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. URL: https://github.com/robertpeteuil/aws-shortcuts Author: Robert Peteuil """ from __future__ import print_function from builtins import input from builtins import range import argparse import sys import operator import awss.awsc as awsc import awss.debg as debg from awss.colors import C_NORM, C_HEAD2, C_TI, C_WARN, C_ERR, C_STAT __version__ = '0.9.13' def main(): """Collect user args and call command funct. Collect command line args and setup environment then call function for command specified in args. """ parser = parser_setup() options = parser.parse_args() debug = bool(options.debug > 0) debugall = bool(options.debug > 1) awsc.init() debg.init(debug, debugall) print(C_NORM) options.func(options) sys.exit() def parser_setup(): """Create ArgumentParser object to parse command line arguments. Returns: parser (object): containing ArgumentParser data and methods. Raises: SystemExit: if the user enters invalid args. 
""" parser = argparse.ArgumentParser(description="Control AWS instances from" " the command line with: list, start," " stop or ssh.", prog='awss', usage="\tawss {command} [ 'NAME' ] " "[ '-i ID' ] [ OPTIONS ]\n\t{command} =" " list | start | stop | ssh") parser.add_argument('-v', '--version', action="version", version="awss {0}".format(__version__)) subparsers = parser.add_subparsers(title="For additional help on" " command parameters", dest='command', description="type 'awss {command} -h'," " where {command} is: list, start," " stop or ssh") # Parser for LIST command parser_list = subparsers.add_parser('list', description="List AWS " "instances from the command line. " "'awss list' will list instances" " specified using combinations of " "NAME, instance-id and current-state." " If no specifications are given, " " all instances will be listed." " ex: 'awss list TEST " "-r' will list instances named 'TEST'" " that are currently running.", usage="\tawss list [none] [NAME] [-i " "ID] [-r] [-s] [OPTIONS]") parser_list.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_list.add_argument('-i', '--id', action="store", help='specify instance by id') parser_list.add_argument('-s', '--stopped', action='store_const', dest="inst_state", const="stopped", help='list stopped instances') parser_list.add_argument('-r', '--running', action='store_const', dest="inst_state", const="running", help='list running instances') parser_list.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_list.set_defaults(func=cmd_list) # Parser for START command parser_start = subparsers.add_parser('start', usage="\tawss start [NAME]" " [-i ID] [-h]", description="Start an AWS instance" " from the command line.") parser_start.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_start.add_argument('-i', '--id', action="store", help='specify instance-id') parser_start.add_argument('-d', 
'--debug', action="count", default=0, help=argparse.SUPPRESS) parser_start.set_defaults(func=cmd_startstop) # Parser for STOP command parser_stop = subparsers.add_parser('stop', usage="\tawss stop [NAME]" " [-i ID] [-h]", description="Stop an AWS instance" " from the command line.") parser_stop.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_stop.add_argument('-i', '--id', action="store", help='specify instance-id') parser_stop.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_stop.set_defaults(func=cmd_startstop) # Parser for SSH command parser_ssh = subparsers.add_parser('ssh', usage="\tawss ssh [NAME]" " [-i ID] [-u USER] [-p] [-h]", description="Connect to an AWS i" "nstance via ssh.") parser_ssh.add_argument('instname', nargs='?', metavar='NAME', help='specify instance by name') parser_ssh.add_argument('-i', '--id', action="store", help='specify instance-id') parser_ssh.add_argument('-u', '--user', action="store", help='override default username for ssh') parser_ssh.add_argument('-p', '--nopem', action="store_true", default=False, help='connect without PEM key') parser_ssh.add_argument('-d', '--debug', action="count", default=0, help=argparse.SUPPRESS) parser_ssh.set_defaults(func=cmd_ssh) return parser def cmd_list(options): """Gather data for instances matching args and call display func. Args: options (object): contains args and data from parser. """ (i_info, param_str) = gather_data(options) if i_info: awsc.get_all_aminames(i_info) param_str = "Instance List - " + param_str + "\n" list_instances(i_info, param_str) else: print("No instances found with parameters: {}".format(param_str)) def cmd_startstop(options): """Start or Stop the specified instance. Finds instances that match args and instance-state expected by the command. Then, the target instance is determined, the action is performed on the instance, and the eturn information is displayed. 
Args: options (object): contains args and data from parser. """ statelu = {"start": "stopped", "stop": "running"} options.inst_state = statelu[options.command] debg.dprint("toggle set state: ", options.inst_state) (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) response = awsc.startstop(tar_inst, options.command) responselu = {"start": "StartingInstances", "stop": "StoppingInstances"} filt = responselu[options.command] resp = {} state_term = ('CurrentState', 'PreviousState') for i, j in enumerate(state_term): resp[i] = response["{0}".format(filt)][0]["{0}".format(j)]['Name'] print("Current State: {}{}{} - Previous State: {}{}{}\n". format(C_STAT[resp[0]], resp[0], C_NORM, C_STAT[resp[1]], resp[1], C_NORM)) def cmd_ssh(options): """Connect to the specified instance via ssh. Finds instances that match the user specified args that are also in the 'running' state. The target instance is determined, the required connection information is retreived (IP, key and ssh user-name), then an 'ssh' connection is made to the instance. Args: options (object): contains args and data from parser """ import os import subprocess from os.path import expanduser options.inst_state = "running" (i_info, param_str) = gather_data(options) (tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command) home_dir = expanduser("~") if options.user is None: tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami']) options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name']) else: debg.dprint("LoginUser set by user: ", options.user) os_spec = {"nt": ["powershell plink", "\\", "ppk"]} c_itm = os_spec.get(os.name, ["ssh", "/", "pem"]) cmd_ssh_run = c_itm[0] if not options.nopem: cmd_ssh_run += (" -i {0}{1}.aws{1}{2}.{3}". format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])) else: debg.dprint("Connect string: ", "ssh {}@{}". 
format(options.user, i_info[tar_idx]['pub_dns_name'])) cmd_ssh_run += " {0}@{1}".format(options.user, i_info[tar_idx]['pub_dns_name']) print(cmd_ssh_run) subprocess.call(cmd_ssh_run, shell=True) def cmd_ssh_user(tar_aminame, inst_name): """Calculate instance login-username based on image-name. Args: tar_aminame (str): name of the image instance created with. inst_name (str): name of the instance. Returns: username (str): name for ssh based on AMI-name. """ if tar_aminame == "Unknown": tar_aminame = inst_name # first 5 chars of AMI-name can be anywhere in AMI-Name userlu = {"ubunt": "ubuntu", "debia": "admin", "fedor": "root", "cento": "centos", "openb": "root"} usertemp = ['name'] + [value for key, value in list(userlu.items()) if key in tar_aminame.lower()] usertemp = dict(zip(usertemp[::2], usertemp[1::2])) username = usertemp.get('name', 'ec2-user') debg.dprint("loginuser Calculated: ", username) return username def gather_data(options): """Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list. """ (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str) def process_results(qry_results): """Generate dictionary of results from query. Decodes the large dict recturned from the AWS query. Args: qry_results (dict): results from awsc.get_inst_info Returns: i_info (dict): information on instances and details. 
""" i_info = {} for i, j in enumerate(qry_results['Reservations']): i_info[i] = {'id': j['Instances'][0]['InstanceId']} i_info[i]['state'] = j['Instances'][0]['State']['Name'] i_info[i]['ami'] = j['Instances'][0]['ImageId'] i_info[i]['ssh_key'] = j['Instances'][0]['KeyName'] i_info[i]['pub_dns_name'] = j['Instances'][0]['PublicDnsName'] try: i_info[i]['tag'] = process_tags(j['Instances'][0]['Tags']) except KeyError: i_info[i]['tag'] = {"Name": ""} debg.dprint("numInstances: ", len(i_info)) debg.dprintx("Details except AMI-name") debg.dprintx(i_info, True) return i_info def process_tags(inst_tags): """Create dict of instance tags as only name:value pairs.""" tag_dict = {} for k in range(len(inst_tags)): tag_dict[inst_tags[k]['Key']] = inst_tags[k]['Value'] return tag_dict def qry_create(options): """Create query from the args specified and command chosen. Creates a query string that incorporates the args in the options object, and creates the title for the 'list' function. Args: options (object): contains args and data from parser Returns: qry_string (str): the query to be used against the aws ec2 client. param_str (str): the title to display before the list. 
""" qry_string = filt_end = param_str = "" filt_st = "Filters=[" param_str_default = "All" if options.id: qry_string += "InstanceIds=['%s']" % (options.id) param_str += "id: '%s'" % (options.id) param_str_default = "" if options.instname: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str) filt_end = "]" param_str_default = "" qry_string += filt_st + ("{'Name': 'tag:Name', 'Values': ['%s']}" % (options.instname)) param_str += "name: '%s'" % (options.instname) if options.inst_state: (qry_string, param_str) = qry_helper(bool(options.id), qry_string, param_str, bool(options.instname), filt_st) qry_string += ("{'Name': 'instance-state-name'," "'Values': ['%s']}" % (options.inst_state)) param_str += "state: '%s'" % (options.inst_state) filt_end = "]" param_str_default = "" qry_string += filt_end param_str += param_str_default debg.dprintx("\nQuery String") debg.dprintx(qry_string, True) debg.dprint("param_str: ", param_str) return(qry_string, param_str) def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str) def list_instances(i_info, param_str, numbered=False): """Display a list of all instances and their details. 
Iterates through all the instances in the dict, and displays information for each instance. Args: i_info (dict): information on instances and details. param_str (str): the title to display before the list. numbered (bool): optional - indicates wheter the list should be displayed with numbers before each instance. This is used when called from user_picklist. """ print(param_str) for i in i_info: if numbered: print("Instance {}#{}{}".format(C_WARN, i + 1, C_NORM)) print(" {6}Name: {1}{3:<22}{1}ID: {0}{4:<20}{1:<18}Status: {2}{5}{1}". format(C_TI, C_NORM, C_STAT[i_info[i]['state']], i_info[i]['tag']['Name'], i_info[i]['id'], i_info[i]['state'], C_HEAD2)) print(" AMI: {0}{2:<23}{1}AMI Name: {0}{3:.41}{1}". format(C_TI, C_NORM, i_info[i]['ami'], i_info[i]['aminame'])) list_tags(i_info[i]['tag']) debg.dprintx("All Data") debg.dprintx(i_info, True) def list_tags(tags): """Print tags in dict so they allign with listing above.""" tags_sorted = sorted(list(tags.items()), key=operator.itemgetter(0)) tag_sec_spacer = "" c = 1 ignored_keys = ["Name", "aws:ec2spot:fleet-request-id"] pad_col = {1: 38, 2: 49} for k, v in tags_sorted: # if k != "Name": if k not in ignored_keys: if c < 3: padamt = pad_col[c] sys.stdout.write(" {2}{0}:{3} {1}". format(k, v, C_HEAD2, C_NORM).ljust(padamt)) c += 1 tag_sec_spacer = "\n" else: sys.stdout.write("{2}{0}:{3} {1}\n".format(k, v, C_HEAD2, C_NORM)) c = 1 tag_sec_spacer = "" print(tag_sec_spacer) def determine_inst(i_info, param_str, command): """Determine the instance-id of the target instance. Inspect the number of instance-ids collected and take the appropriate action: exit if no ids, return if single id, and call user_picklist function if multiple ids exist. Args: i_info (dict): information and details for instances. param_str (str): the title to display in the listing. command (str): command specified on the command line. Returns: tar_inst (str): the AWS instance-id of the target. 
Raises: SystemExit: if no instances are match parameters specified. """ qty_instances = len(i_info) if not qty_instances: print("No instances found with parameters: {}".format(param_str)) sys.exit(1) if qty_instances > 1: print("{} instances match these parameters:".format(qty_instances)) tar_idx = user_picklist(i_info, command) else: tar_idx = 0 tar_inst = i_info[tar_idx]['id'] print("{0}{3}ing{1} instance id {2}{4}{1}". format(C_STAT[command], C_NORM, C_TI, command, tar_inst)) return (tar_inst, tar_idx) def user_picklist(i_info, command): """Display list of instances matching args and ask user to select target. Instance list displayed and user asked to enter the number corresponding to the desired target instance, or '0' to abort. Args: i_info (dict): information on instances and details. command (str): command specified on the command line. Returns: tar_idx (int): the dictionary index number of the targeted instance. """ valid_entry = False awsc.get_all_aminames(i_info) list_instances(i_info, "", True) msg_txt = ("Enter {0}#{1} of instance to {3} ({0}1{1}-{0}{4}{1})" " [{2}0 aborts{1}]: ".format(C_WARN, C_NORM, C_TI, command, len(i_info))) while not valid_entry: entry_raw = obtain_input(msg_txt) try: entry_int = int(entry_raw) except ValueError: entry_int = 999 (tar_idx, valid_entry) = user_entry(entry_int, len(i_info), command) return tar_idx def obtain_input(message_text): # pragma: no cover """Perform input command as a function so it can be mocked.""" return (input(message_text)) if __name__ == '__main__': main()
5monkeys/content-io
cio/backends/base.py
CacheBackend.get
python
def get(self, uri): cache_key = self._build_cache_key(uri) value = self._get(cache_key) if value is not None: return self._decode_node(uri, value)
Return node for uri or None if not exists: {uri: x, content: y}
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L26-L34
[ "def _build_cache_key(self, uri):\n \"\"\"\n Build sha1 hex cache key to handle key length and whitespace to be compatible with Memcached\n \"\"\"\n key = uri.clone(ext=None, version=None)\n\n if six.PY3:\n key = key.encode('utf-8')\n\n return sha1(key).hexdigest()\n", "def _get(self, key...
class CacheBackend(BaseBackend): NONE = '__None__' def get_many(self, uris): """ Return request uri map of found nodes as dicts: {requested_uri: {uri: x, content: y}} """ cache_keys = dict((self._build_cache_key(uri), uri) for uri in uris) result = self._get_many(cache_keys) nodes = {} for cache_key in result: uri = cache_keys[cache_key] value = result[cache_key] node = self._decode_node(uri, value) if node: nodes[uri] = node return nodes def set(self, uri, content): """ Cache node content for uri. No return. """ key, value = self._prepare_node(uri, content) self._set(key, value) def set_many(self, nodes): """ Takes nodes dict {uri: content, ...} as argument. No return. """ data = self._prepare_nodes(nodes) self._set_many(data) def delete(self, uri): """ Remove node uri from cache. No return. """ cache_key = self._build_cache_key(uri) self._delete(cache_key) def delete_many(self, uris): """ Remove many nodes from cache. No return. """ cache_keys = (self._build_cache_key(uri) for uri in uris) self._delete_many(cache_keys) def clear(self): """ Removes all nodes from cache """ raise NotImplementedError # pragma: no cover def _build_cache_key(self, uri): """ Build sha1 hex cache key to handle key length and whitespace to be compatible with Memcached """ key = uri.clone(ext=None, version=None) if six.PY3: key = key.encode('utf-8') return sha1(key).hexdigest() def _get(self, key): raise NotImplementedError # pragma: no cover def _get_many(self, keys): raise NotImplementedError # pragma: no cover def _set(self, key, value): raise NotImplementedError # pragma: no cover def _set_many(self, data): raise NotImplementedError # pragma: no cover def _delete(self, key): raise NotImplementedError # pragma: no cover def _delete_many(self, keys): raise NotImplementedError # pragma: no cover def _encode_content(self, uri, content): """ Encode/pack node uri and content in a way that the cache backend are able to persist. 
""" return uri, content def _decode_content(self, content): """ Decode/unpack cached node to uri and unicode content. """ uri, content = content return uri, content def _decode_node(self, uri, content): _uri, _content = self._decode_content(content) if _uri: _uri = URI(_uri) if uri.ext in (None, _uri.ext): # Validate plugin return { 'uri': _uri, 'content': _content } def _prepare_node(self, uri, content): key = self._build_cache_key(uri) value = self._encode_content(uri, content) return key, value def _prepare_nodes(self, nodes): return dict(self._prepare_node(uri, content) for uri, content in six.iteritems(nodes))
5monkeys/content-io
cio/backends/base.py
CacheBackend.get_many
python
def get_many(self, uris): cache_keys = dict((self._build_cache_key(uri), uri) for uri in uris) result = self._get_many(cache_keys) nodes = {} for cache_key in result: uri = cache_keys[cache_key] value = result[cache_key] node = self._decode_node(uri, value) if node: nodes[uri] = node return nodes
Return request uri map of found nodes as dicts: {requested_uri: {uri: x, content: y}}
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L36-L50
[ "def _get_many(self, keys):\n raise NotImplementedError # pragma: no cover\n", "def _decode_node(self, uri, content):\n _uri, _content = self._decode_content(content)\n if _uri:\n _uri = URI(_uri)\n if uri.ext in (None, _uri.ext): # Validate plugin\n return {\n '...
class CacheBackend(BaseBackend): NONE = '__None__' def get(self, uri): """ Return node for uri or None if not exists: {uri: x, content: y} """ cache_key = self._build_cache_key(uri) value = self._get(cache_key) if value is not None: return self._decode_node(uri, value) def set(self, uri, content): """ Cache node content for uri. No return. """ key, value = self._prepare_node(uri, content) self._set(key, value) def set_many(self, nodes): """ Takes nodes dict {uri: content, ...} as argument. No return. """ data = self._prepare_nodes(nodes) self._set_many(data) def delete(self, uri): """ Remove node uri from cache. No return. """ cache_key = self._build_cache_key(uri) self._delete(cache_key) def delete_many(self, uris): """ Remove many nodes from cache. No return. """ cache_keys = (self._build_cache_key(uri) for uri in uris) self._delete_many(cache_keys) def clear(self): """ Removes all nodes from cache """ raise NotImplementedError # pragma: no cover def _build_cache_key(self, uri): """ Build sha1 hex cache key to handle key length and whitespace to be compatible with Memcached """ key = uri.clone(ext=None, version=None) if six.PY3: key = key.encode('utf-8') return sha1(key).hexdigest() def _get(self, key): raise NotImplementedError # pragma: no cover def _get_many(self, keys): raise NotImplementedError # pragma: no cover def _set(self, key, value): raise NotImplementedError # pragma: no cover def _set_many(self, data): raise NotImplementedError # pragma: no cover def _delete(self, key): raise NotImplementedError # pragma: no cover def _delete_many(self, keys): raise NotImplementedError # pragma: no cover def _encode_content(self, uri, content): """ Encode/pack node uri and content in a way that the cache backend are able to persist. """ return uri, content def _decode_content(self, content): """ Decode/unpack cached node to uri and unicode content. 
""" uri, content = content return uri, content def _decode_node(self, uri, content): _uri, _content = self._decode_content(content) if _uri: _uri = URI(_uri) if uri.ext in (None, _uri.ext): # Validate plugin return { 'uri': _uri, 'content': _content } def _prepare_node(self, uri, content): key = self._build_cache_key(uri) value = self._encode_content(uri, content) return key, value def _prepare_nodes(self, nodes): return dict(self._prepare_node(uri, content) for uri, content in six.iteritems(nodes))
5monkeys/content-io
cio/backends/base.py
CacheBackend.set
python
def set(self, uri, content): key, value = self._prepare_node(uri, content) self._set(key, value)
Cache node content for uri. No return.
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L52-L58
[ "def _set(self, key, value):\n raise NotImplementedError # pragma: no cover\n", "def _prepare_node(self, uri, content):\n key = self._build_cache_key(uri)\n value = self._encode_content(uri, content)\n return key, value\n" ]
class CacheBackend(BaseBackend): NONE = '__None__' def get(self, uri): """ Return node for uri or None if not exists: {uri: x, content: y} """ cache_key = self._build_cache_key(uri) value = self._get(cache_key) if value is not None: return self._decode_node(uri, value) def get_many(self, uris): """ Return request uri map of found nodes as dicts: {requested_uri: {uri: x, content: y}} """ cache_keys = dict((self._build_cache_key(uri), uri) for uri in uris) result = self._get_many(cache_keys) nodes = {} for cache_key in result: uri = cache_keys[cache_key] value = result[cache_key] node = self._decode_node(uri, value) if node: nodes[uri] = node return nodes def set_many(self, nodes): """ Takes nodes dict {uri: content, ...} as argument. No return. """ data = self._prepare_nodes(nodes) self._set_many(data) def delete(self, uri): """ Remove node uri from cache. No return. """ cache_key = self._build_cache_key(uri) self._delete(cache_key) def delete_many(self, uris): """ Remove many nodes from cache. No return. 
""" cache_keys = (self._build_cache_key(uri) for uri in uris) self._delete_many(cache_keys) def clear(self): """ Removes all nodes from cache """ raise NotImplementedError # pragma: no cover def _build_cache_key(self, uri): """ Build sha1 hex cache key to handle key length and whitespace to be compatible with Memcached """ key = uri.clone(ext=None, version=None) if six.PY3: key = key.encode('utf-8') return sha1(key).hexdigest() def _get(self, key): raise NotImplementedError # pragma: no cover def _get_many(self, keys): raise NotImplementedError # pragma: no cover def _set(self, key, value): raise NotImplementedError # pragma: no cover def _set_many(self, data): raise NotImplementedError # pragma: no cover def _delete(self, key): raise NotImplementedError # pragma: no cover def _delete_many(self, keys): raise NotImplementedError # pragma: no cover def _encode_content(self, uri, content): """ Encode/pack node uri and content in a way that the cache backend are able to persist. """ return uri, content def _decode_content(self, content): """ Decode/unpack cached node to uri and unicode content. """ uri, content = content return uri, content def _decode_node(self, uri, content): _uri, _content = self._decode_content(content) if _uri: _uri = URI(_uri) if uri.ext in (None, _uri.ext): # Validate plugin return { 'uri': _uri, 'content': _content } def _prepare_node(self, uri, content): key = self._build_cache_key(uri) value = self._encode_content(uri, content) return key, value def _prepare_nodes(self, nodes): return dict(self._prepare_node(uri, content) for uri, content in six.iteritems(nodes))
5monkeys/content-io
cio/backends/base.py
CacheBackend.set_many
python
def set_many(self, nodes): data = self._prepare_nodes(nodes) self._set_many(data)
Takes nodes dict {uri: content, ...} as argument. No return.
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L60-L66
[ "def _set_many(self, data):\n raise NotImplementedError # pragma: no cover\n", "def _prepare_nodes(self, nodes):\n return dict(self._prepare_node(uri, content) for uri, content in six.iteritems(nodes))\n" ]
class CacheBackend(BaseBackend): NONE = '__None__' def get(self, uri): """ Return node for uri or None if not exists: {uri: x, content: y} """ cache_key = self._build_cache_key(uri) value = self._get(cache_key) if value is not None: return self._decode_node(uri, value) def get_many(self, uris): """ Return request uri map of found nodes as dicts: {requested_uri: {uri: x, content: y}} """ cache_keys = dict((self._build_cache_key(uri), uri) for uri in uris) result = self._get_many(cache_keys) nodes = {} for cache_key in result: uri = cache_keys[cache_key] value = result[cache_key] node = self._decode_node(uri, value) if node: nodes[uri] = node return nodes def set(self, uri, content): """ Cache node content for uri. No return. """ key, value = self._prepare_node(uri, content) self._set(key, value) def delete(self, uri): """ Remove node uri from cache. No return. """ cache_key = self._build_cache_key(uri) self._delete(cache_key) def delete_many(self, uris): """ Remove many nodes from cache. No return. 
""" cache_keys = (self._build_cache_key(uri) for uri in uris) self._delete_many(cache_keys) def clear(self): """ Removes all nodes from cache """ raise NotImplementedError # pragma: no cover def _build_cache_key(self, uri): """ Build sha1 hex cache key to handle key length and whitespace to be compatible with Memcached """ key = uri.clone(ext=None, version=None) if six.PY3: key = key.encode('utf-8') return sha1(key).hexdigest() def _get(self, key): raise NotImplementedError # pragma: no cover def _get_many(self, keys): raise NotImplementedError # pragma: no cover def _set(self, key, value): raise NotImplementedError # pragma: no cover def _set_many(self, data): raise NotImplementedError # pragma: no cover def _delete(self, key): raise NotImplementedError # pragma: no cover def _delete_many(self, keys): raise NotImplementedError # pragma: no cover def _encode_content(self, uri, content): """ Encode/pack node uri and content in a way that the cache backend are able to persist. """ return uri, content def _decode_content(self, content): """ Decode/unpack cached node to uri and unicode content. """ uri, content = content return uri, content def _decode_node(self, uri, content): _uri, _content = self._decode_content(content) if _uri: _uri = URI(_uri) if uri.ext in (None, _uri.ext): # Validate plugin return { 'uri': _uri, 'content': _content } def _prepare_node(self, uri, content): key = self._build_cache_key(uri) value = self._encode_content(uri, content) return key, value def _prepare_nodes(self, nodes): return dict(self._prepare_node(uri, content) for uri, content in six.iteritems(nodes))
5monkeys/content-io
cio/backends/base.py
CacheBackend.delete
python
def delete(self, uri): cache_key = self._build_cache_key(uri) self._delete(cache_key)
Remove node uri from cache. No return.
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L68-L74
[ "def _build_cache_key(self, uri):\n \"\"\"\n Build sha1 hex cache key to handle key length and whitespace to be compatible with Memcached\n \"\"\"\n key = uri.clone(ext=None, version=None)\n\n if six.PY3:\n key = key.encode('utf-8')\n\n return sha1(key).hexdigest()\n", "def _delete(self, ...
class CacheBackend(BaseBackend): NONE = '__None__' def get(self, uri): """ Return node for uri or None if not exists: {uri: x, content: y} """ cache_key = self._build_cache_key(uri) value = self._get(cache_key) if value is not None: return self._decode_node(uri, value) def get_many(self, uris): """ Return request uri map of found nodes as dicts: {requested_uri: {uri: x, content: y}} """ cache_keys = dict((self._build_cache_key(uri), uri) for uri in uris) result = self._get_many(cache_keys) nodes = {} for cache_key in result: uri = cache_keys[cache_key] value = result[cache_key] node = self._decode_node(uri, value) if node: nodes[uri] = node return nodes def set(self, uri, content): """ Cache node content for uri. No return. """ key, value = self._prepare_node(uri, content) self._set(key, value) def set_many(self, nodes): """ Takes nodes dict {uri: content, ...} as argument. No return. """ data = self._prepare_nodes(nodes) self._set_many(data) def delete_many(self, uris): """ Remove many nodes from cache. No return. 
""" cache_keys = (self._build_cache_key(uri) for uri in uris) self._delete_many(cache_keys) def clear(self): """ Removes all nodes from cache """ raise NotImplementedError # pragma: no cover def _build_cache_key(self, uri): """ Build sha1 hex cache key to handle key length and whitespace to be compatible with Memcached """ key = uri.clone(ext=None, version=None) if six.PY3: key = key.encode('utf-8') return sha1(key).hexdigest() def _get(self, key): raise NotImplementedError # pragma: no cover def _get_many(self, keys): raise NotImplementedError # pragma: no cover def _set(self, key, value): raise NotImplementedError # pragma: no cover def _set_many(self, data): raise NotImplementedError # pragma: no cover def _delete(self, key): raise NotImplementedError # pragma: no cover def _delete_many(self, keys): raise NotImplementedError # pragma: no cover def _encode_content(self, uri, content): """ Encode/pack node uri and content in a way that the cache backend are able to persist. """ return uri, content def _decode_content(self, content): """ Decode/unpack cached node to uri and unicode content. """ uri, content = content return uri, content def _decode_node(self, uri, content): _uri, _content = self._decode_content(content) if _uri: _uri = URI(_uri) if uri.ext in (None, _uri.ext): # Validate plugin return { 'uri': _uri, 'content': _content } def _prepare_node(self, uri, content): key = self._build_cache_key(uri) value = self._encode_content(uri, content) return key, value def _prepare_nodes(self, nodes): return dict(self._prepare_node(uri, content) for uri, content in six.iteritems(nodes))
5monkeys/content-io
cio/backends/base.py
CacheBackend.delete_many
python
def delete_many(self, uris): cache_keys = (self._build_cache_key(uri) for uri in uris) self._delete_many(cache_keys)
Remove many nodes from cache. No return.
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L76-L82
[ "def _delete_many(self, keys):\n raise NotImplementedError # pragma: no cover\n" ]
class CacheBackend(BaseBackend): NONE = '__None__' def get(self, uri): """ Return node for uri or None if not exists: {uri: x, content: y} """ cache_key = self._build_cache_key(uri) value = self._get(cache_key) if value is not None: return self._decode_node(uri, value) def get_many(self, uris): """ Return request uri map of found nodes as dicts: {requested_uri: {uri: x, content: y}} """ cache_keys = dict((self._build_cache_key(uri), uri) for uri in uris) result = self._get_many(cache_keys) nodes = {} for cache_key in result: uri = cache_keys[cache_key] value = result[cache_key] node = self._decode_node(uri, value) if node: nodes[uri] = node return nodes def set(self, uri, content): """ Cache node content for uri. No return. """ key, value = self._prepare_node(uri, content) self._set(key, value) def set_many(self, nodes): """ Takes nodes dict {uri: content, ...} as argument. No return. """ data = self._prepare_nodes(nodes) self._set_many(data) def delete(self, uri): """ Remove node uri from cache. No return. """ cache_key = self._build_cache_key(uri) self._delete(cache_key) def clear(self): """ Removes all nodes from cache """ raise NotImplementedError # pragma: no cover def _build_cache_key(self, uri): """ Build sha1 hex cache key to handle key length and whitespace to be compatible with Memcached """ key = uri.clone(ext=None, version=None) if six.PY3: key = key.encode('utf-8') return sha1(key).hexdigest() def _get(self, key): raise NotImplementedError # pragma: no cover def _get_many(self, keys): raise NotImplementedError # pragma: no cover def _set(self, key, value): raise NotImplementedError # pragma: no cover def _set_many(self, data): raise NotImplementedError # pragma: no cover def _delete(self, key): raise NotImplementedError # pragma: no cover def _delete_many(self, keys): raise NotImplementedError # pragma: no cover def _encode_content(self, uri, content): """ Encode/pack node uri and content in a way that the cache backend are able to persist. 
""" return uri, content def _decode_content(self, content): """ Decode/unpack cached node to uri and unicode content. """ uri, content = content return uri, content def _decode_node(self, uri, content): _uri, _content = self._decode_content(content) if _uri: _uri = URI(_uri) if uri.ext in (None, _uri.ext): # Validate plugin return { 'uri': _uri, 'content': _content } def _prepare_node(self, uri, content): key = self._build_cache_key(uri) value = self._encode_content(uri, content) return key, value def _prepare_nodes(self, nodes): return dict(self._prepare_node(uri, content) for uri, content in six.iteritems(nodes))
5monkeys/content-io
cio/backends/base.py
CacheBackend._build_cache_key
python
def _build_cache_key(self, uri): key = uri.clone(ext=None, version=None) if six.PY3: key = key.encode('utf-8') return sha1(key).hexdigest()
Build sha1 hex cache key to handle key length and whitespace to be compatible with Memcached
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L90-L99
null
class CacheBackend(BaseBackend): NONE = '__None__' def get(self, uri): """ Return node for uri or None if not exists: {uri: x, content: y} """ cache_key = self._build_cache_key(uri) value = self._get(cache_key) if value is not None: return self._decode_node(uri, value) def get_many(self, uris): """ Return request uri map of found nodes as dicts: {requested_uri: {uri: x, content: y}} """ cache_keys = dict((self._build_cache_key(uri), uri) for uri in uris) result = self._get_many(cache_keys) nodes = {} for cache_key in result: uri = cache_keys[cache_key] value = result[cache_key] node = self._decode_node(uri, value) if node: nodes[uri] = node return nodes def set(self, uri, content): """ Cache node content for uri. No return. """ key, value = self._prepare_node(uri, content) self._set(key, value) def set_many(self, nodes): """ Takes nodes dict {uri: content, ...} as argument. No return. """ data = self._prepare_nodes(nodes) self._set_many(data) def delete(self, uri): """ Remove node uri from cache. No return. """ cache_key = self._build_cache_key(uri) self._delete(cache_key) def delete_many(self, uris): """ Remove many nodes from cache. No return. """ cache_keys = (self._build_cache_key(uri) for uri in uris) self._delete_many(cache_keys) def clear(self): """ Removes all nodes from cache """ raise NotImplementedError # pragma: no cover def _get(self, key): raise NotImplementedError # pragma: no cover def _get_many(self, keys): raise NotImplementedError # pragma: no cover def _set(self, key, value): raise NotImplementedError # pragma: no cover def _set_many(self, data): raise NotImplementedError # pragma: no cover def _delete(self, key): raise NotImplementedError # pragma: no cover def _delete_many(self, keys): raise NotImplementedError # pragma: no cover def _encode_content(self, uri, content): """ Encode/pack node uri and content in a way that the cache backend are able to persist. 
""" return uri, content def _decode_content(self, content): """ Decode/unpack cached node to uri and unicode content. """ uri, content = content return uri, content def _decode_node(self, uri, content): _uri, _content = self._decode_content(content) if _uri: _uri = URI(_uri) if uri.ext in (None, _uri.ext): # Validate plugin return { 'uri': _uri, 'content': _content } def _prepare_node(self, uri, content): key = self._build_cache_key(uri) value = self._encode_content(uri, content) return key, value def _prepare_nodes(self, nodes): return dict(self._prepare_node(uri, content) for uri, content in six.iteritems(nodes))
5monkeys/content-io
cio/backends/base.py
DatabaseBackend.get_many
python
def get_many(self, uris): nodes = {} for uri in uris: try: node = self.get(uri) except NodeDoesNotExist: continue else: nodes[uri] = node return nodes
Simple implementation, could be better implemented by backend not hitting db for every uri.
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L217-L232
[ "def get(self, uri):\n node = self._get(uri)\n return self._serialize(uri, node)\n" ]
class DatabaseBackend(StorageBackend): def get(self, uri): node = self._get(uri) return self._serialize(uri, node) def set(self, uri, content, **meta): """ Dispatches private update/create handlers """ try: node = self._update(uri, content, **meta) created = False except NodeDoesNotExist: node = self._create(uri, content, **meta) created = True return self._serialize(uri, node), created def delete(self, uri): node = None try: _node = self._get(uri) except NodeDoesNotExist: logger.warn('Tried to delete non existing node from storage: "%s"', uri) else: node = self._serialize(uri, _node) self._delete(_node) return node def delete_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ deleted_nodes = {} for uri in uris: node = self.delete(uri) if node: deleted_nodes[uri] = node return deleted_nodes def _get(self, uri): raise NotImplementedError # pragma: no cover def _create(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _update(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _delete(self, node): raise NotImplementedError # pragma: no cover def _build_key(self, uri): """ Build node identifying key for base uri. """ return uri.clone(ext=None, version=None) def _serialize(self, uri, node): """ Serialize node result as dict """ meta = self._decode_meta(node['meta'], is_published=bool(node['is_published'])) return { 'uri': uri.clone(ext=node['plugin'], version=node['version']), 'content': node['content'], 'meta': meta } def _decode_meta(self, meta, **extra): """ Decode and load underlying meta structure to dict and apply optional extra values. """ _meta = json.loads(meta) if meta else {} _meta.update(extra) return _meta def _encode_meta(self, meta): """ Encode meta dict for underlying persistence. """ return json.dumps(meta) if meta else None def _merge_meta(self, encoded_meta, meta): """ Merge new meta dict into encoded meta. 
Returns new encoded meta. """ new_meta = None if meta: _meta = self._decode_meta(encoded_meta) for key, value in six.iteritems(meta): if value is None: _meta.pop(key, None) else: _meta[key] = value new_meta = self._encode_meta(_meta) return new_meta def _get_next_version(self, revisions): """ Calculates new version number based on existing numeric ones. """ versions = [0] for v in revisions: if v.isdigit(): versions.append(int(v)) return six.text_type(sorted(versions)[-1] + 1)
5monkeys/content-io
cio/backends/base.py
DatabaseBackend.set
python
def set(self, uri, content, **meta): try: node = self._update(uri, content, **meta) created = False except NodeDoesNotExist: node = self._create(uri, content, **meta) created = True return self._serialize(uri, node), created
Dispatches private update/create handlers
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L234-L244
[ "def _create(self, uri, content, **meta):\n raise NotImplementedError # pragma: no cover\n", "def _update(self, uri, content, **meta):\n raise NotImplementedError # pragma: no cover\n", "def _serialize(self, uri, node):\n \"\"\"\n Serialize node result as dict\n \"\"\"\n meta = self._decode_...
class DatabaseBackend(StorageBackend): def get(self, uri): node = self._get(uri) return self._serialize(uri, node) def get_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ nodes = {} for uri in uris: try: node = self.get(uri) except NodeDoesNotExist: continue else: nodes[uri] = node return nodes def delete(self, uri): node = None try: _node = self._get(uri) except NodeDoesNotExist: logger.warn('Tried to delete non existing node from storage: "%s"', uri) else: node = self._serialize(uri, _node) self._delete(_node) return node def delete_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ deleted_nodes = {} for uri in uris: node = self.delete(uri) if node: deleted_nodes[uri] = node return deleted_nodes def _get(self, uri): raise NotImplementedError # pragma: no cover def _create(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _update(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _delete(self, node): raise NotImplementedError # pragma: no cover def _build_key(self, uri): """ Build node identifying key for base uri. """ return uri.clone(ext=None, version=None) def _serialize(self, uri, node): """ Serialize node result as dict """ meta = self._decode_meta(node['meta'], is_published=bool(node['is_published'])) return { 'uri': uri.clone(ext=node['plugin'], version=node['version']), 'content': node['content'], 'meta': meta } def _decode_meta(self, meta, **extra): """ Decode and load underlying meta structure to dict and apply optional extra values. """ _meta = json.loads(meta) if meta else {} _meta.update(extra) return _meta def _encode_meta(self, meta): """ Encode meta dict for underlying persistence. """ return json.dumps(meta) if meta else None def _merge_meta(self, encoded_meta, meta): """ Merge new meta dict into encoded meta. Returns new encoded meta. 
""" new_meta = None if meta: _meta = self._decode_meta(encoded_meta) for key, value in six.iteritems(meta): if value is None: _meta.pop(key, None) else: _meta[key] = value new_meta = self._encode_meta(_meta) return new_meta def _get_next_version(self, revisions): """ Calculates new version number based on existing numeric ones. """ versions = [0] for v in revisions: if v.isdigit(): versions.append(int(v)) return six.text_type(sorted(versions)[-1] + 1)
5monkeys/content-io
cio/backends/base.py
DatabaseBackend.delete_many
python
def delete_many(self, uris): deleted_nodes = {} for uri in uris: node = self.delete(uri) if node: deleted_nodes[uri] = node return deleted_nodes
Simple implementation, could be better implemented by backend not hitting db for every uri.
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L257-L269
[ "def delete(self, uri):\n node = None\n try:\n _node = self._get(uri)\n except NodeDoesNotExist:\n logger.warn('Tried to delete non existing node from storage: \"%s\"', uri)\n else:\n node = self._serialize(uri, _node)\n self._delete(_node)\n return node\n" ]
class DatabaseBackend(StorageBackend): def get(self, uri): node = self._get(uri) return self._serialize(uri, node) def get_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ nodes = {} for uri in uris: try: node = self.get(uri) except NodeDoesNotExist: continue else: nodes[uri] = node return nodes def set(self, uri, content, **meta): """ Dispatches private update/create handlers """ try: node = self._update(uri, content, **meta) created = False except NodeDoesNotExist: node = self._create(uri, content, **meta) created = True return self._serialize(uri, node), created def delete(self, uri): node = None try: _node = self._get(uri) except NodeDoesNotExist: logger.warn('Tried to delete non existing node from storage: "%s"', uri) else: node = self._serialize(uri, _node) self._delete(_node) return node def _get(self, uri): raise NotImplementedError # pragma: no cover def _create(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _update(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _delete(self, node): raise NotImplementedError # pragma: no cover def _build_key(self, uri): """ Build node identifying key for base uri. """ return uri.clone(ext=None, version=None) def _serialize(self, uri, node): """ Serialize node result as dict """ meta = self._decode_meta(node['meta'], is_published=bool(node['is_published'])) return { 'uri': uri.clone(ext=node['plugin'], version=node['version']), 'content': node['content'], 'meta': meta } def _decode_meta(self, meta, **extra): """ Decode and load underlying meta structure to dict and apply optional extra values. """ _meta = json.loads(meta) if meta else {} _meta.update(extra) return _meta def _encode_meta(self, meta): """ Encode meta dict for underlying persistence. """ return json.dumps(meta) if meta else None def _merge_meta(self, encoded_meta, meta): """ Merge new meta dict into encoded meta. 
Returns new encoded meta. """ new_meta = None if meta: _meta = self._decode_meta(encoded_meta) for key, value in six.iteritems(meta): if value is None: _meta.pop(key, None) else: _meta[key] = value new_meta = self._encode_meta(_meta) return new_meta def _get_next_version(self, revisions): """ Calculates new version number based on existing numeric ones. """ versions = [0] for v in revisions: if v.isdigit(): versions.append(int(v)) return six.text_type(sorted(versions)[-1] + 1)
5monkeys/content-io
cio/backends/base.py
DatabaseBackend._serialize
python
def _serialize(self, uri, node): meta = self._decode_meta(node['meta'], is_published=bool(node['is_published'])) return { 'uri': uri.clone(ext=node['plugin'], version=node['version']), 'content': node['content'], 'meta': meta }
Serialize node result as dict
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L289-L298
[ "def _decode_meta(self, meta, **extra):\n \"\"\"\n Decode and load underlying meta structure to dict and apply optional extra values.\n \"\"\"\n _meta = json.loads(meta) if meta else {}\n _meta.update(extra)\n return _meta\n" ]
class DatabaseBackend(StorageBackend): def get(self, uri): node = self._get(uri) return self._serialize(uri, node) def get_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ nodes = {} for uri in uris: try: node = self.get(uri) except NodeDoesNotExist: continue else: nodes[uri] = node return nodes def set(self, uri, content, **meta): """ Dispatches private update/create handlers """ try: node = self._update(uri, content, **meta) created = False except NodeDoesNotExist: node = self._create(uri, content, **meta) created = True return self._serialize(uri, node), created def delete(self, uri): node = None try: _node = self._get(uri) except NodeDoesNotExist: logger.warn('Tried to delete non existing node from storage: "%s"', uri) else: node = self._serialize(uri, _node) self._delete(_node) return node def delete_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ deleted_nodes = {} for uri in uris: node = self.delete(uri) if node: deleted_nodes[uri] = node return deleted_nodes def _get(self, uri): raise NotImplementedError # pragma: no cover def _create(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _update(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _delete(self, node): raise NotImplementedError # pragma: no cover def _build_key(self, uri): """ Build node identifying key for base uri. """ return uri.clone(ext=None, version=None) def _decode_meta(self, meta, **extra): """ Decode and load underlying meta structure to dict and apply optional extra values. """ _meta = json.loads(meta) if meta else {} _meta.update(extra) return _meta def _encode_meta(self, meta): """ Encode meta dict for underlying persistence. """ return json.dumps(meta) if meta else None def _merge_meta(self, encoded_meta, meta): """ Merge new meta dict into encoded meta. Returns new encoded meta. 
""" new_meta = None if meta: _meta = self._decode_meta(encoded_meta) for key, value in six.iteritems(meta): if value is None: _meta.pop(key, None) else: _meta[key] = value new_meta = self._encode_meta(_meta) return new_meta def _get_next_version(self, revisions): """ Calculates new version number based on existing numeric ones. """ versions = [0] for v in revisions: if v.isdigit(): versions.append(int(v)) return six.text_type(sorted(versions)[-1] + 1)
5monkeys/content-io
cio/backends/base.py
DatabaseBackend._decode_meta
python
def _decode_meta(self, meta, **extra): _meta = json.loads(meta) if meta else {} _meta.update(extra) return _meta
Decode and load underlying meta structure to dict and apply optional extra values.
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L300-L306
null
class DatabaseBackend(StorageBackend): def get(self, uri): node = self._get(uri) return self._serialize(uri, node) def get_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ nodes = {} for uri in uris: try: node = self.get(uri) except NodeDoesNotExist: continue else: nodes[uri] = node return nodes def set(self, uri, content, **meta): """ Dispatches private update/create handlers """ try: node = self._update(uri, content, **meta) created = False except NodeDoesNotExist: node = self._create(uri, content, **meta) created = True return self._serialize(uri, node), created def delete(self, uri): node = None try: _node = self._get(uri) except NodeDoesNotExist: logger.warn('Tried to delete non existing node from storage: "%s"', uri) else: node = self._serialize(uri, _node) self._delete(_node) return node def delete_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ deleted_nodes = {} for uri in uris: node = self.delete(uri) if node: deleted_nodes[uri] = node return deleted_nodes def _get(self, uri): raise NotImplementedError # pragma: no cover def _create(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _update(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _delete(self, node): raise NotImplementedError # pragma: no cover def _build_key(self, uri): """ Build node identifying key for base uri. """ return uri.clone(ext=None, version=None) def _serialize(self, uri, node): """ Serialize node result as dict """ meta = self._decode_meta(node['meta'], is_published=bool(node['is_published'])) return { 'uri': uri.clone(ext=node['plugin'], version=node['version']), 'content': node['content'], 'meta': meta } def _encode_meta(self, meta): """ Encode meta dict for underlying persistence. 
""" return json.dumps(meta) if meta else None def _merge_meta(self, encoded_meta, meta): """ Merge new meta dict into encoded meta. Returns new encoded meta. """ new_meta = None if meta: _meta = self._decode_meta(encoded_meta) for key, value in six.iteritems(meta): if value is None: _meta.pop(key, None) else: _meta[key] = value new_meta = self._encode_meta(_meta) return new_meta def _get_next_version(self, revisions): """ Calculates new version number based on existing numeric ones. """ versions = [0] for v in revisions: if v.isdigit(): versions.append(int(v)) return six.text_type(sorted(versions)[-1] + 1)
5monkeys/content-io
cio/backends/base.py
DatabaseBackend._merge_meta
python
def _merge_meta(self, encoded_meta, meta): new_meta = None if meta: _meta = self._decode_meta(encoded_meta) for key, value in six.iteritems(meta): if value is None: _meta.pop(key, None) else: _meta[key] = value new_meta = self._encode_meta(_meta) return new_meta
Merge new meta dict into encoded meta. Returns new encoded meta.
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L314-L329
[ "def _decode_meta(self, meta, **extra):\n \"\"\"\n Decode and load underlying meta structure to dict and apply optional extra values.\n \"\"\"\n _meta = json.loads(meta) if meta else {}\n _meta.update(extra)\n return _meta\n", "def _encode_meta(self, meta):\n \"\"\"\n Encode meta dict for ...
class DatabaseBackend(StorageBackend): def get(self, uri): node = self._get(uri) return self._serialize(uri, node) def get_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ nodes = {} for uri in uris: try: node = self.get(uri) except NodeDoesNotExist: continue else: nodes[uri] = node return nodes def set(self, uri, content, **meta): """ Dispatches private update/create handlers """ try: node = self._update(uri, content, **meta) created = False except NodeDoesNotExist: node = self._create(uri, content, **meta) created = True return self._serialize(uri, node), created def delete(self, uri): node = None try: _node = self._get(uri) except NodeDoesNotExist: logger.warn('Tried to delete non existing node from storage: "%s"', uri) else: node = self._serialize(uri, _node) self._delete(_node) return node def delete_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ deleted_nodes = {} for uri in uris: node = self.delete(uri) if node: deleted_nodes[uri] = node return deleted_nodes def _get(self, uri): raise NotImplementedError # pragma: no cover def _create(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _update(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _delete(self, node): raise NotImplementedError # pragma: no cover def _build_key(self, uri): """ Build node identifying key for base uri. """ return uri.clone(ext=None, version=None) def _serialize(self, uri, node): """ Serialize node result as dict """ meta = self._decode_meta(node['meta'], is_published=bool(node['is_published'])) return { 'uri': uri.clone(ext=node['plugin'], version=node['version']), 'content': node['content'], 'meta': meta } def _decode_meta(self, meta, **extra): """ Decode and load underlying meta structure to dict and apply optional extra values. 
""" _meta = json.loads(meta) if meta else {} _meta.update(extra) return _meta def _encode_meta(self, meta): """ Encode meta dict for underlying persistence. """ return json.dumps(meta) if meta else None def _get_next_version(self, revisions): """ Calculates new version number based on existing numeric ones. """ versions = [0] for v in revisions: if v.isdigit(): versions.append(int(v)) return six.text_type(sorted(versions)[-1] + 1)
5monkeys/content-io
cio/backends/base.py
DatabaseBackend._get_next_version
python
def _get_next_version(self, revisions): versions = [0] for v in revisions: if v.isdigit(): versions.append(int(v)) return six.text_type(sorted(versions)[-1] + 1)
Calculates new version number based on existing numeric ones.
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L331-L339
null
class DatabaseBackend(StorageBackend): def get(self, uri): node = self._get(uri) return self._serialize(uri, node) def get_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ nodes = {} for uri in uris: try: node = self.get(uri) except NodeDoesNotExist: continue else: nodes[uri] = node return nodes def set(self, uri, content, **meta): """ Dispatches private update/create handlers """ try: node = self._update(uri, content, **meta) created = False except NodeDoesNotExist: node = self._create(uri, content, **meta) created = True return self._serialize(uri, node), created def delete(self, uri): node = None try: _node = self._get(uri) except NodeDoesNotExist: logger.warn('Tried to delete non existing node from storage: "%s"', uri) else: node = self._serialize(uri, _node) self._delete(_node) return node def delete_many(self, uris): """ Simple implementation, could be better implemented by backend not hitting db for every uri. """ deleted_nodes = {} for uri in uris: node = self.delete(uri) if node: deleted_nodes[uri] = node return deleted_nodes def _get(self, uri): raise NotImplementedError # pragma: no cover def _create(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _update(self, uri, content, **meta): raise NotImplementedError # pragma: no cover def _delete(self, node): raise NotImplementedError # pragma: no cover def _build_key(self, uri): """ Build node identifying key for base uri. """ return uri.clone(ext=None, version=None) def _serialize(self, uri, node): """ Serialize node result as dict """ meta = self._decode_meta(node['meta'], is_published=bool(node['is_published'])) return { 'uri': uri.clone(ext=node['plugin'], version=node['version']), 'content': node['content'], 'meta': meta } def _decode_meta(self, meta, **extra): """ Decode and load underlying meta structure to dict and apply optional extra values. 
""" _meta = json.loads(meta) if meta else {} _meta.update(extra) return _meta def _encode_meta(self, meta): """ Encode meta dict for underlying persistence. """ return json.dumps(meta) if meta else None def _merge_meta(self, encoded_meta, meta): """ Merge new meta dict into encoded meta. Returns new encoded meta. """ new_meta = None if meta: _meta = self._decode_meta(encoded_meta) for key, value in six.iteritems(meta): if value is None: _meta.pop(key, None) else: _meta[key] = value new_meta = self._encode_meta(_meta) return new_meta
5monkeys/content-io
cio/pipeline/pipes/base.py
BasePipe.materialize_node
python
def materialize_node(self, node, uri, content, meta=None): node.uri = uri node.content = content node.meta = meta if meta is not None else {}
Set node uri and content from backend
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/pipeline/pipes/base.py#L34-L40
null
class BasePipe(object): """ Optional implementable pipe methods: def get_request(self, request): pass def get_response(self, response): return response def set_request(self, request): pass def set_response(self, response): return response def delete_request(self, request): pass def delete_response(self, response): return response def publish_request(self, request): pass def publish_response(self, response): return response """
5monkeys/content-io
cio/node.py
Node.namespace_uri
python
def namespace_uri(self): try: return next( iter(filter(lambda uri: URI(uri).namespace, self._uri)) ) except StopIteration: return None
Finds and returns first applied URI of this node that has a namespace. :return str: uri
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/node.py#L70-L81
null
class Node(object): _formatter = ContentFormatter() def __init__(self, uri, content=None, **meta): self.env = env.state self._uri = [uri, URI(uri)] self._content = [content] self.meta = meta def __repr__(self): return '<Node: %s>' % self.uri def __bytes__(self): content = self.render() if isinstance(content, six.text_type): content = content.encode('utf-8') return content or b'' def __unicode__(self): return self.render() or '' __str__ = __bytes__ if six.PY2 else __unicode__ def render(self, **context): if self.content is not None: if context: return self._formatter.format(self.content, **context) else: return self.content def get_uri(self): return self._uri[-1] def set_uri(self, uri): if uri != self.get_uri(): self._uri.append(URI(uri)) uri = property(get_uri, set_uri) def get_content(self): return self._content[-1] def set_content(self, content): if content != self.get_content(): self._content.append(content) content = property(get_content, set_content) @property def initial(self): return self._content[0] @property def initial_uri(self): return self._uri[0] @property def for_json(self): return { 'uri': six.text_type(self.uri), 'content': self.content, 'meta': self.meta if self.meta is not None else {} }
5monkeys/content-io
cio/utils/formatters.py
ContentFormatter._brace_key
python
def _brace_key(self, key): if isinstance(key, six.integer_types): t = str key = t(key) else: t = type(key) return t(u'{') + key + t(u'}')
key: 'x' -> '{x}'
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/utils/formatters.py#L52-L61
null
class ContentFormatter(Formatter): """ ContentFormatter uses string formatting as a template engine, not raising key/index/value errors, and keeps braces and variable-like parts in place. """ def get_value(self, key, args, kwargs): try: return super(ContentFormatter, self).get_value(key, args, kwargs) except (IndexError, KeyError): if (PY26 or six.PY3) and key == u'\0': # PY26: Handle of non-indexed variable -> Turn null byte into {} return type(key)(u'{}') else: # PY27: Not a context variable -> Keep braces return self._brace_key(key) def convert_field(self, value, conversion): if conversion and isinstance(value, six.string_types) and value[0] == u'{' and value[-1] == u'}': # Value is wrapped with braces and therefore not a context variable -> Keep conversion as value return self._inject_conversion(value, conversion) else: return super(ContentFormatter, self).convert_field(value, conversion) def format_field(self, value, format_spec): try: return super(ContentFormatter, self).format_field(value, format_spec) except ValueError: # Unable to format value and therefore not a context variable -> Keep format_spec as value return self._inject_format_spec(value, format_spec) def parse(self, format_string): if PY26 or six.PY3: # PY26 does not support non-indexed variables -> Place null byte for later removal # PY3 does not like mixing non-indexed and indexed variables to we disable them here too. 
format_string = format_string.replace('{}', '{\0}') parsed_bits = super(ContentFormatter, self).parse(format_string) # Double braces are treated as escaped -> re-duplicate when parsed return self._escape(parsed_bits) def get_field(self, field_name, args, kwargs): return super(ContentFormatter, self).get_field(field_name, args, kwargs) def _inject_conversion(self, value, conversion): """ value: '{x}', conversion: 's' -> '{x!s}' """ t = type(value) return value[:-1] + t(u'!') + conversion + t(u'}') def _inject_format_spec(self, value, format_spec): """ value: '{x}', format_spec: 'f' -> '{x:f}' """ t = type(value) return value[:-1] + t(u':') + format_spec + t(u'}') def _escape(self, bits): """ value: 'foobar {' -> 'foobar {{' value: 'x}' -> 'x}}' """ # for value, field_name, format_spec, conversion in bits: while True: try: value, field_name, format_spec, conversion = next(bits) if value: end = value[-1] if end in (u'{', u'}'): value += end yield value, field_name, format_spec, conversion except StopIteration: break
5monkeys/content-io
cio/utils/formatters.py
ContentFormatter._inject_conversion
python
def _inject_conversion(self, value, conversion): t = type(value) return value[:-1] + t(u'!') + conversion + t(u'}')
value: '{x}', conversion: 's' -> '{x!s}'
train
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/utils/formatters.py#L63-L68
null
class ContentFormatter(Formatter): """ ContentFormatter uses string formatting as a template engine, not raising key/index/value errors, and keeps braces and variable-like parts in place. """ def get_value(self, key, args, kwargs): try: return super(ContentFormatter, self).get_value(key, args, kwargs) except (IndexError, KeyError): if (PY26 or six.PY3) and key == u'\0': # PY26: Handle of non-indexed variable -> Turn null byte into {} return type(key)(u'{}') else: # PY27: Not a context variable -> Keep braces return self._brace_key(key) def convert_field(self, value, conversion): if conversion and isinstance(value, six.string_types) and value[0] == u'{' and value[-1] == u'}': # Value is wrapped with braces and therefore not a context variable -> Keep conversion as value return self._inject_conversion(value, conversion) else: return super(ContentFormatter, self).convert_field(value, conversion) def format_field(self, value, format_spec): try: return super(ContentFormatter, self).format_field(value, format_spec) except ValueError: # Unable to format value and therefore not a context variable -> Keep format_spec as value return self._inject_format_spec(value, format_spec) def parse(self, format_string): if PY26 or six.PY3: # PY26 does not support non-indexed variables -> Place null byte for later removal # PY3 does not like mixing non-indexed and indexed variables to we disable them here too. 
format_string = format_string.replace('{}', '{\0}') parsed_bits = super(ContentFormatter, self).parse(format_string) # Double braces are treated as escaped -> re-duplicate when parsed return self._escape(parsed_bits) def get_field(self, field_name, args, kwargs): return super(ContentFormatter, self).get_field(field_name, args, kwargs) def _brace_key(self, key): """ key: 'x' -> '{x}' """ if isinstance(key, six.integer_types): t = str key = t(key) else: t = type(key) return t(u'{') + key + t(u'}') def _inject_format_spec(self, value, format_spec): """ value: '{x}', format_spec: 'f' -> '{x:f}' """ t = type(value) return value[:-1] + t(u':') + format_spec + t(u'}') def _escape(self, bits): """ value: 'foobar {' -> 'foobar {{' value: 'x}' -> 'x}}' """ # for value, field_name, format_spec, conversion in bits: while True: try: value, field_name, format_spec, conversion = next(bits) if value: end = value[-1] if end in (u'{', u'}'): value += end yield value, field_name, format_spec, conversion except StopIteration: break