repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
django-cumulus/django-cumulus | cumulus/storage.py | get_content_type | python | def get_content_type(name, content):
if hasattr(content, "content_type"):
content_type = content.content_type
else:
mime_type, encoding = mimetypes.guess_type(name)
content_type = mime_type
return content_type | Checks if the content_type is already set.
Otherwise uses the mimetypes library to guess. | train | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/storage.py#L42-L52 | null | import mimetypes
import pyrax
import re
import warnings
from gzip import GzipFile
import hmac
from time import time
try:
from haslib import sha1 as sha
except:
import sha
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from django.core.files.storage import Storage
from django.core.files.base import File, ContentFile
try:
from django.utils.deconstruct import deconstructible
except ImportError:
# Make a no-op decorator to avoid errors
def deconstructible(*args, **kwargs):
def decorator(klass):
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
from cumulus.authentication import Auth
from cumulus.settings import CUMULUS
HEADER_PATTERNS = tuple((re.compile(p), h) for p, h in CUMULUS.get("HEADERS", {}))
def get_headers(name, content_type):
headers = {"Content-Type": content_type}
# gzip the file if its of the right content type
if content_type in CUMULUS.get("GZIP_CONTENT_TYPES", []):
headers["Content-Encoding"] = "gzip"
if CUMULUS["HEADERS"]:
for pattern, pattern_headers in HEADER_PATTERNS:
if pattern.match(name):
headers.update(pattern_headers.copy())
return headers
def sync_headers(cloud_obj, headers=None, header_patterns=HEADER_PATTERNS):
"""
Overwrites the given cloud_obj's headers with the ones given as ``headers`
and adds additional headers as defined in the HEADERS setting depending on
the cloud_obj's file name.
"""
if headers is None:
headers = {}
# don't set headers on directories
content_type = getattr(cloud_obj, "content_type", None)
if content_type == "application/directory":
return
matched_headers = {}
for pattern, pattern_headers in header_patterns:
if pattern.match(cloud_obj.name):
matched_headers.update(pattern_headers.copy())
# preserve headers already set
matched_headers.update(cloud_obj.headers)
# explicitly set headers overwrite matches and already set headers
matched_headers.update(headers)
if matched_headers != cloud_obj.headers:
cloud_obj.headers = matched_headers
cloud_obj.sync_metadata()
def get_gzipped_contents(input_file):
"""
Returns a gzipped version of a previously opened file's buffer.
"""
zbuf = StringIO()
zfile = GzipFile(mode="wb", compresslevel=6, fileobj=zbuf)
zfile.write(input_file.read())
zfile.close()
return ContentFile(zbuf.getvalue())
@deconstructible
class CumulusStorage(Auth, Storage):
"""
Custom storage for Cumulus.
"""
default_quick_listdir = True
container_name = CUMULUS["CONTAINER"]
container_uri = CUMULUS["CONTAINER_URI"]
container_ssl_uri = CUMULUS["CONTAINER_SSL_URI"]
ttl = CUMULUS["TTL"]
file_ttl = CUMULUS["FILE_TTL"]
use_ssl = CUMULUS["USE_SSL"]
public = CUMULUS['PUBLIC']
x_meta_temp_url_key = CUMULUS['X_ACCOUNT_META_TEMP_URL_KEY']
x_storage_url = CUMULUS['X_STORAGE_URL']
x_temp_url_timeout = CUMULUS['X_TEMP_URL_TIMEOUT']
base_url = CUMULUS['X_TEMP_URL_BASE']
def _open(self, name, mode="rb"):
"""
Returns the CumulusStorageFile.
"""
return ContentFile(self._get_object(name).get())
def _save(self, name, content):
"""
Uses the Cumulus service to write ``content`` to a remote
file (called ``name``).
"""
content_type = get_content_type(name, content.file)
headers = get_headers(name, content_type)
if self.use_pyrax:
if headers.get("Content-Encoding") == "gzip":
content = get_gzipped_contents(content)
self.connection.store_object(container=self.container_name,
obj_name=name,
data=content.read(),
content_type=content_type,
content_encoding=headers.get("Content-Encoding", None),
ttl=self.file_ttl,
etag=None)
# set headers/object metadata
self.connection.set_object_metadata(container=self.container_name,
obj=name,
metadata=headers,
prefix='',
clear=True)
else:
# TODO gzipped content when using swift client
self.connection.put_object(self.container_name, name,
content, headers=headers)
return name
def delete(self, name):
"""
Deletes the specified file from the storage system.
Deleting a model doesn't delete associated files: bit.ly/12s6Oox
"""
try:
self.connection.delete_object(self.container_name, name)
except pyrax.exceptions.ClientException as exc:
if exc.http_status == 404:
pass
else:
raise
except pyrax.exceptions.NoSuchObject:
pass
def exists(self, name):
"""
Returns True if a file referenced by the given name already
exists in the storage system, or False if the name is
available for a new file.
"""
return bool(self._get_object(name))
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
file_object = self._get_object(name)
if file_object:
return file_object.total_bytes
else:
return 0
def url(self, name):
"""
Returns an absolute URL where the content of each file can be
accessed directly by a web browser.
"""
if self.public:
return u"{0}/{1}".format(self.container_url, name)
else:
return self._get_temp_url(name)
def _get_temp_url(self, name):
"""
Returns an absolute, temporary URL where the file's contents can be
accessed directly by a web browser.
"""
method = 'GET'
expires = int(time() + self.x_temp_url_timeout)
key = self.x_meta_temp_url_key
path = u'{0}/{1}/{2}'.format(self.x_storage_url, self.container_name, name)
hmac_body = u'{0}\n{1}\n{2}'.format(method, expires, path)
sig = hmac.new(key, hmac_body, sha).hexdigest()
return u'{0}{1}?temp_url_sig={2}&temp_url_expires={3}'.format(self.base_url, path, sig,
expires)
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple;
the first being an empty list of directories (not available
for quick-listing), the second being a list of filenames.
If the list of directories is required, use the full_listdir method.
"""
files = []
if path and not path.endswith("/"):
path = u"{0}/".format(path)
path_len = len(path)
for name in [x["name"] for x in
self.connection.get_container(self.container_name, full_listing=True)[1]]:
files.append(name[path_len:])
return ([], files)
def full_listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple
of lists; the first item being directories, the second item
being files.
"""
dirs = set()
files = []
if path and not path.endswith("/"):
path = u"{0}/".format(path)
path_len = len(path)
for name in [x["name"] for x in
self.connection.get_container(self.container_name, full_listing=True)[1]]:
name = name[path_len:]
slash = name[1:-1].find("/") + 1
if slash:
dirs.add(name[:slash])
elif name:
files.append(name)
dirs = list(dirs)
dirs.sort()
return (dirs, files)
class CumulusStaticStorage(CumulusStorage):
"""
Subclasses CumulusStorage to automatically set the container
to the one specified in CUMULUS["STATIC_CONTAINER"]. This provides
the ability to specify a separate storage backend for Django's
collectstatic command.
To use, make sure CUMULUS["STATIC_CONTAINER"] is set to something other
than CUMULUS["CONTAINER"]. Then, tell Django's staticfiles app by setting
STATICFILES_STORAGE = "cumulus.storage.CumulusStaticStorage".
"""
container_name = CUMULUS["STATIC_CONTAINER"]
container_uri = CUMULUS["STATIC_CONTAINER_URI"]
container_ssl_uri = CUMULUS["STATIC_CONTAINER_SSL_URI"]
class ThreadSafeCumulusStorage(CumulusStorage):
"""
Extends CumulusStorage to make it mostly thread safe.
As long as you do not pass container or cloud objects between
threads, you will be thread safe.
Uses one connection/container per thread.
"""
def __init__(self, *args, **kwargs):
super(ThreadSafeCumulusStorage, self).__init__(*args, **kwargs)
import threading
self.local_cache = threading.local()
def _get_connection(self):
if not hasattr(self.local_cache, "connection"):
super(ThreadSafeSwiftclientStorage, self)._get_connection()
self.local_cache.connection = connection
return self.local_cache.connection
connection = property(_get_connection, CumulusStorage._set_connection)
def _get_container(self):
if not hasattr(self.local_cache, "container"):
container = self.connection.create_container(self.container_name)
self.local_cache.container = container
return self.local_cache.container
container = property(_get_container, CumulusStorage._set_container)
class SwiftclientStorage(CumulusStorage):
def __init__(self, *args, **kwargs):
warnings.warn("SwiftclientStorage is deprecated and will be removed in django-cumulus==1.3: \
Use CumulusStorage instead.", DeprecationWarning)
super(SwiftclientStorage, self).__init__()
class SwiftclientStaticStorage(CumulusStaticStorage):
def __init__(self, *args, **kwargs):
warnings.warn("SwiftclientStaticStorage is deprecated and will be removed in django-cumulus==1.3: \
Use CumulusStaticStorage instead.", DeprecationWarning)
super(SwiftclientStaticStorage, self).__init__()
class ThreadSafeSwiftclientStorage(ThreadSafeCumulusStorage):
def __init__(self, *args, **kwargs):
warnings.warn("ThreadSafeSwiftclientStorage is deprecated and will be removed in django-cumulus==1.3: \
Use ThreadSafeCumulusStorage instead.", DeprecationWarning)
super(ThreadSafeSwiftclientStorage, self).__init__()
|
django-cumulus/django-cumulus | cumulus/storage.py | sync_headers | python | def sync_headers(cloud_obj, headers=None, header_patterns=HEADER_PATTERNS):
if headers is None:
headers = {}
# don't set headers on directories
content_type = getattr(cloud_obj, "content_type", None)
if content_type == "application/directory":
return
matched_headers = {}
for pattern, pattern_headers in header_patterns:
if pattern.match(cloud_obj.name):
matched_headers.update(pattern_headers.copy())
# preserve headers already set
matched_headers.update(cloud_obj.headers)
# explicitly set headers overwrite matches and already set headers
matched_headers.update(headers)
if matched_headers != cloud_obj.headers:
cloud_obj.headers = matched_headers
cloud_obj.sync_metadata() | Overwrites the given cloud_obj's headers with the ones given as ``headers`
and adds additional headers as defined in the HEADERS setting depending on
the cloud_obj's file name. | train | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/storage.py#L67-L90 | null | import mimetypes
import pyrax
import re
import warnings
from gzip import GzipFile
import hmac
from time import time
try:
from haslib import sha1 as sha
except:
import sha
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from django.core.files.storage import Storage
from django.core.files.base import File, ContentFile
try:
from django.utils.deconstruct import deconstructible
except ImportError:
# Make a no-op decorator to avoid errors
def deconstructible(*args, **kwargs):
def decorator(klass):
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
from cumulus.authentication import Auth
from cumulus.settings import CUMULUS
HEADER_PATTERNS = tuple((re.compile(p), h) for p, h in CUMULUS.get("HEADERS", {}))
def get_content_type(name, content):
"""
Checks if the content_type is already set.
Otherwise uses the mimetypes library to guess.
"""
if hasattr(content, "content_type"):
content_type = content.content_type
else:
mime_type, encoding = mimetypes.guess_type(name)
content_type = mime_type
return content_type
def get_headers(name, content_type):
headers = {"Content-Type": content_type}
# gzip the file if its of the right content type
if content_type in CUMULUS.get("GZIP_CONTENT_TYPES", []):
headers["Content-Encoding"] = "gzip"
if CUMULUS["HEADERS"]:
for pattern, pattern_headers in HEADER_PATTERNS:
if pattern.match(name):
headers.update(pattern_headers.copy())
return headers
def get_gzipped_contents(input_file):
"""
Returns a gzipped version of a previously opened file's buffer.
"""
zbuf = StringIO()
zfile = GzipFile(mode="wb", compresslevel=6, fileobj=zbuf)
zfile.write(input_file.read())
zfile.close()
return ContentFile(zbuf.getvalue())
@deconstructible
class CumulusStorage(Auth, Storage):
"""
Custom storage for Cumulus.
"""
default_quick_listdir = True
container_name = CUMULUS["CONTAINER"]
container_uri = CUMULUS["CONTAINER_URI"]
container_ssl_uri = CUMULUS["CONTAINER_SSL_URI"]
ttl = CUMULUS["TTL"]
file_ttl = CUMULUS["FILE_TTL"]
use_ssl = CUMULUS["USE_SSL"]
public = CUMULUS['PUBLIC']
x_meta_temp_url_key = CUMULUS['X_ACCOUNT_META_TEMP_URL_KEY']
x_storage_url = CUMULUS['X_STORAGE_URL']
x_temp_url_timeout = CUMULUS['X_TEMP_URL_TIMEOUT']
base_url = CUMULUS['X_TEMP_URL_BASE']
def _open(self, name, mode="rb"):
"""
Returns the CumulusStorageFile.
"""
return ContentFile(self._get_object(name).get())
def _save(self, name, content):
"""
Uses the Cumulus service to write ``content`` to a remote
file (called ``name``).
"""
content_type = get_content_type(name, content.file)
headers = get_headers(name, content_type)
if self.use_pyrax:
if headers.get("Content-Encoding") == "gzip":
content = get_gzipped_contents(content)
self.connection.store_object(container=self.container_name,
obj_name=name,
data=content.read(),
content_type=content_type,
content_encoding=headers.get("Content-Encoding", None),
ttl=self.file_ttl,
etag=None)
# set headers/object metadata
self.connection.set_object_metadata(container=self.container_name,
obj=name,
metadata=headers,
prefix='',
clear=True)
else:
# TODO gzipped content when using swift client
self.connection.put_object(self.container_name, name,
content, headers=headers)
return name
def delete(self, name):
"""
Deletes the specified file from the storage system.
Deleting a model doesn't delete associated files: bit.ly/12s6Oox
"""
try:
self.connection.delete_object(self.container_name, name)
except pyrax.exceptions.ClientException as exc:
if exc.http_status == 404:
pass
else:
raise
except pyrax.exceptions.NoSuchObject:
pass
def exists(self, name):
"""
Returns True if a file referenced by the given name already
exists in the storage system, or False if the name is
available for a new file.
"""
return bool(self._get_object(name))
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
file_object = self._get_object(name)
if file_object:
return file_object.total_bytes
else:
return 0
def url(self, name):
"""
Returns an absolute URL where the content of each file can be
accessed directly by a web browser.
"""
if self.public:
return u"{0}/{1}".format(self.container_url, name)
else:
return self._get_temp_url(name)
def _get_temp_url(self, name):
"""
Returns an absolute, temporary URL where the file's contents can be
accessed directly by a web browser.
"""
method = 'GET'
expires = int(time() + self.x_temp_url_timeout)
key = self.x_meta_temp_url_key
path = u'{0}/{1}/{2}'.format(self.x_storage_url, self.container_name, name)
hmac_body = u'{0}\n{1}\n{2}'.format(method, expires, path)
sig = hmac.new(key, hmac_body, sha).hexdigest()
return u'{0}{1}?temp_url_sig={2}&temp_url_expires={3}'.format(self.base_url, path, sig,
expires)
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple;
the first being an empty list of directories (not available
for quick-listing), the second being a list of filenames.
If the list of directories is required, use the full_listdir method.
"""
files = []
if path and not path.endswith("/"):
path = u"{0}/".format(path)
path_len = len(path)
for name in [x["name"] for x in
self.connection.get_container(self.container_name, full_listing=True)[1]]:
files.append(name[path_len:])
return ([], files)
def full_listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple
of lists; the first item being directories, the second item
being files.
"""
dirs = set()
files = []
if path and not path.endswith("/"):
path = u"{0}/".format(path)
path_len = len(path)
for name in [x["name"] for x in
self.connection.get_container(self.container_name, full_listing=True)[1]]:
name = name[path_len:]
slash = name[1:-1].find("/") + 1
if slash:
dirs.add(name[:slash])
elif name:
files.append(name)
dirs = list(dirs)
dirs.sort()
return (dirs, files)
class CumulusStaticStorage(CumulusStorage):
"""
Subclasses CumulusStorage to automatically set the container
to the one specified in CUMULUS["STATIC_CONTAINER"]. This provides
the ability to specify a separate storage backend for Django's
collectstatic command.
To use, make sure CUMULUS["STATIC_CONTAINER"] is set to something other
than CUMULUS["CONTAINER"]. Then, tell Django's staticfiles app by setting
STATICFILES_STORAGE = "cumulus.storage.CumulusStaticStorage".
"""
container_name = CUMULUS["STATIC_CONTAINER"]
container_uri = CUMULUS["STATIC_CONTAINER_URI"]
container_ssl_uri = CUMULUS["STATIC_CONTAINER_SSL_URI"]
class ThreadSafeCumulusStorage(CumulusStorage):
"""
Extends CumulusStorage to make it mostly thread safe.
As long as you do not pass container or cloud objects between
threads, you will be thread safe.
Uses one connection/container per thread.
"""
def __init__(self, *args, **kwargs):
super(ThreadSafeCumulusStorage, self).__init__(*args, **kwargs)
import threading
self.local_cache = threading.local()
def _get_connection(self):
if not hasattr(self.local_cache, "connection"):
super(ThreadSafeSwiftclientStorage, self)._get_connection()
self.local_cache.connection = connection
return self.local_cache.connection
connection = property(_get_connection, CumulusStorage._set_connection)
def _get_container(self):
if not hasattr(self.local_cache, "container"):
container = self.connection.create_container(self.container_name)
self.local_cache.container = container
return self.local_cache.container
container = property(_get_container, CumulusStorage._set_container)
class SwiftclientStorage(CumulusStorage):
def __init__(self, *args, **kwargs):
warnings.warn("SwiftclientStorage is deprecated and will be removed in django-cumulus==1.3: \
Use CumulusStorage instead.", DeprecationWarning)
super(SwiftclientStorage, self).__init__()
class SwiftclientStaticStorage(CumulusStaticStorage):
def __init__(self, *args, **kwargs):
warnings.warn("SwiftclientStaticStorage is deprecated and will be removed in django-cumulus==1.3: \
Use CumulusStaticStorage instead.", DeprecationWarning)
super(SwiftclientStaticStorage, self).__init__()
class ThreadSafeSwiftclientStorage(ThreadSafeCumulusStorage):
def __init__(self, *args, **kwargs):
warnings.warn("ThreadSafeSwiftclientStorage is deprecated and will be removed in django-cumulus==1.3: \
Use ThreadSafeCumulusStorage instead.", DeprecationWarning)
super(ThreadSafeSwiftclientStorage, self).__init__()
|
django-cumulus/django-cumulus | cumulus/storage.py | get_gzipped_contents | python | def get_gzipped_contents(input_file):
zbuf = StringIO()
zfile = GzipFile(mode="wb", compresslevel=6, fileobj=zbuf)
zfile.write(input_file.read())
zfile.close()
return ContentFile(zbuf.getvalue()) | Returns a gzipped version of a previously opened file's buffer. | train | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/storage.py#L93-L101 | null | import mimetypes
import pyrax
import re
import warnings
from gzip import GzipFile
import hmac
from time import time
try:
from haslib import sha1 as sha
except:
import sha
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from django.core.files.storage import Storage
from django.core.files.base import File, ContentFile
try:
from django.utils.deconstruct import deconstructible
except ImportError:
# Make a no-op decorator to avoid errors
def deconstructible(*args, **kwargs):
def decorator(klass):
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
from cumulus.authentication import Auth
from cumulus.settings import CUMULUS
HEADER_PATTERNS = tuple((re.compile(p), h) for p, h in CUMULUS.get("HEADERS", {}))
def get_content_type(name, content):
"""
Checks if the content_type is already set.
Otherwise uses the mimetypes library to guess.
"""
if hasattr(content, "content_type"):
content_type = content.content_type
else:
mime_type, encoding = mimetypes.guess_type(name)
content_type = mime_type
return content_type
def get_headers(name, content_type):
headers = {"Content-Type": content_type}
# gzip the file if its of the right content type
if content_type in CUMULUS.get("GZIP_CONTENT_TYPES", []):
headers["Content-Encoding"] = "gzip"
if CUMULUS["HEADERS"]:
for pattern, pattern_headers in HEADER_PATTERNS:
if pattern.match(name):
headers.update(pattern_headers.copy())
return headers
def sync_headers(cloud_obj, headers=None, header_patterns=HEADER_PATTERNS):
"""
Overwrites the given cloud_obj's headers with the ones given as ``headers`
and adds additional headers as defined in the HEADERS setting depending on
the cloud_obj's file name.
"""
if headers is None:
headers = {}
# don't set headers on directories
content_type = getattr(cloud_obj, "content_type", None)
if content_type == "application/directory":
return
matched_headers = {}
for pattern, pattern_headers in header_patterns:
if pattern.match(cloud_obj.name):
matched_headers.update(pattern_headers.copy())
# preserve headers already set
matched_headers.update(cloud_obj.headers)
# explicitly set headers overwrite matches and already set headers
matched_headers.update(headers)
if matched_headers != cloud_obj.headers:
cloud_obj.headers = matched_headers
cloud_obj.sync_metadata()
@deconstructible
class CumulusStorage(Auth, Storage):
"""
Custom storage for Cumulus.
"""
default_quick_listdir = True
container_name = CUMULUS["CONTAINER"]
container_uri = CUMULUS["CONTAINER_URI"]
container_ssl_uri = CUMULUS["CONTAINER_SSL_URI"]
ttl = CUMULUS["TTL"]
file_ttl = CUMULUS["FILE_TTL"]
use_ssl = CUMULUS["USE_SSL"]
public = CUMULUS['PUBLIC']
x_meta_temp_url_key = CUMULUS['X_ACCOUNT_META_TEMP_URL_KEY']
x_storage_url = CUMULUS['X_STORAGE_URL']
x_temp_url_timeout = CUMULUS['X_TEMP_URL_TIMEOUT']
base_url = CUMULUS['X_TEMP_URL_BASE']
def _open(self, name, mode="rb"):
"""
Returns the CumulusStorageFile.
"""
return ContentFile(self._get_object(name).get())
def _save(self, name, content):
"""
Uses the Cumulus service to write ``content`` to a remote
file (called ``name``).
"""
content_type = get_content_type(name, content.file)
headers = get_headers(name, content_type)
if self.use_pyrax:
if headers.get("Content-Encoding") == "gzip":
content = get_gzipped_contents(content)
self.connection.store_object(container=self.container_name,
obj_name=name,
data=content.read(),
content_type=content_type,
content_encoding=headers.get("Content-Encoding", None),
ttl=self.file_ttl,
etag=None)
# set headers/object metadata
self.connection.set_object_metadata(container=self.container_name,
obj=name,
metadata=headers,
prefix='',
clear=True)
else:
# TODO gzipped content when using swift client
self.connection.put_object(self.container_name, name,
content, headers=headers)
return name
def delete(self, name):
"""
Deletes the specified file from the storage system.
Deleting a model doesn't delete associated files: bit.ly/12s6Oox
"""
try:
self.connection.delete_object(self.container_name, name)
except pyrax.exceptions.ClientException as exc:
if exc.http_status == 404:
pass
else:
raise
except pyrax.exceptions.NoSuchObject:
pass
def exists(self, name):
"""
Returns True if a file referenced by the given name already
exists in the storage system, or False if the name is
available for a new file.
"""
return bool(self._get_object(name))
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
file_object = self._get_object(name)
if file_object:
return file_object.total_bytes
else:
return 0
def url(self, name):
"""
Returns an absolute URL where the content of each file can be
accessed directly by a web browser.
"""
if self.public:
return u"{0}/{1}".format(self.container_url, name)
else:
return self._get_temp_url(name)
def _get_temp_url(self, name):
"""
Returns an absolute, temporary URL where the file's contents can be
accessed directly by a web browser.
"""
method = 'GET'
expires = int(time() + self.x_temp_url_timeout)
key = self.x_meta_temp_url_key
path = u'{0}/{1}/{2}'.format(self.x_storage_url, self.container_name, name)
hmac_body = u'{0}\n{1}\n{2}'.format(method, expires, path)
sig = hmac.new(key, hmac_body, sha).hexdigest()
return u'{0}{1}?temp_url_sig={2}&temp_url_expires={3}'.format(self.base_url, path, sig,
expires)
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple;
the first being an empty list of directories (not available
for quick-listing), the second being a list of filenames.
If the list of directories is required, use the full_listdir method.
"""
files = []
if path and not path.endswith("/"):
path = u"{0}/".format(path)
path_len = len(path)
for name in [x["name"] for x in
self.connection.get_container(self.container_name, full_listing=True)[1]]:
files.append(name[path_len:])
return ([], files)
def full_listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple
of lists; the first item being directories, the second item
being files.
"""
dirs = set()
files = []
if path and not path.endswith("/"):
path = u"{0}/".format(path)
path_len = len(path)
for name in [x["name"] for x in
self.connection.get_container(self.container_name, full_listing=True)[1]]:
name = name[path_len:]
slash = name[1:-1].find("/") + 1
if slash:
dirs.add(name[:slash])
elif name:
files.append(name)
dirs = list(dirs)
dirs.sort()
return (dirs, files)
class CumulusStaticStorage(CumulusStorage):
"""
Subclasses CumulusStorage to automatically set the container
to the one specified in CUMULUS["STATIC_CONTAINER"]. This provides
the ability to specify a separate storage backend for Django's
collectstatic command.
To use, make sure CUMULUS["STATIC_CONTAINER"] is set to something other
than CUMULUS["CONTAINER"]. Then, tell Django's staticfiles app by setting
STATICFILES_STORAGE = "cumulus.storage.CumulusStaticStorage".
"""
container_name = CUMULUS["STATIC_CONTAINER"]
container_uri = CUMULUS["STATIC_CONTAINER_URI"]
container_ssl_uri = CUMULUS["STATIC_CONTAINER_SSL_URI"]
class ThreadSafeCumulusStorage(CumulusStorage):
"""
Extends CumulusStorage to make it mostly thread safe.
As long as you do not pass container or cloud objects between
threads, you will be thread safe.
Uses one connection/container per thread.
"""
def __init__(self, *args, **kwargs):
super(ThreadSafeCumulusStorage, self).__init__(*args, **kwargs)
import threading
self.local_cache = threading.local()
def _get_connection(self):
if not hasattr(self.local_cache, "connection"):
super(ThreadSafeSwiftclientStorage, self)._get_connection()
self.local_cache.connection = connection
return self.local_cache.connection
connection = property(_get_connection, CumulusStorage._set_connection)
def _get_container(self):
if not hasattr(self.local_cache, "container"):
container = self.connection.create_container(self.container_name)
self.local_cache.container = container
return self.local_cache.container
container = property(_get_container, CumulusStorage._set_container)
class SwiftclientStorage(CumulusStorage):
def __init__(self, *args, **kwargs):
warnings.warn("SwiftclientStorage is deprecated and will be removed in django-cumulus==1.3: \
Use CumulusStorage instead.", DeprecationWarning)
super(SwiftclientStorage, self).__init__()
class SwiftclientStaticStorage(CumulusStaticStorage):
def __init__(self, *args, **kwargs):
warnings.warn("SwiftclientStaticStorage is deprecated and will be removed in django-cumulus==1.3: \
Use CumulusStaticStorage instead.", DeprecationWarning)
super(SwiftclientStaticStorage, self).__init__()
class ThreadSafeSwiftclientStorage(ThreadSafeCumulusStorage):
def __init__(self, *args, **kwargs):
warnings.warn("ThreadSafeSwiftclientStorage is deprecated and will be removed in django-cumulus==1.3: \
Use ThreadSafeCumulusStorage instead.", DeprecationWarning)
super(ThreadSafeSwiftclientStorage, self).__init__()
|
django-cumulus/django-cumulus | cumulus/authentication.py | Auth._get_container | python | def _get_container(self):
if not hasattr(self, "_container"):
if self.use_pyrax:
self._container = self.connection.create_container(self.container_name)
else:
self._container = None
return self._container | Gets or creates the container. | train | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/authentication.py#L101-L110 | null | class Auth(object):
connection_kwargs = {}
use_pyrax = CUMULUS["USE_PYRAX"]
use_snet = CUMULUS["SERVICENET"]
region = CUMULUS["REGION"]
username = CUMULUS["USERNAME"]
api_key = CUMULUS["API_KEY"]
auth_url = CUMULUS["AUTH_URL"]
auth_tenant_id = CUMULUS["AUTH_TENANT_ID"]
auth_tenant_name = CUMULUS["AUTH_TENANT_NAME"]
auth_version = CUMULUS["AUTH_VERSION"]
pyrax_identity_type = CUMULUS["PYRAX_IDENTITY_TYPE"]
def __init__(self, username=None, api_key=None, container=None,
connection_kwargs=None, container_uri=None):
"""
Initializes the settings for the connection and container.
"""
if username is not None:
self.username = username
if api_key is not None:
self.api_key = api_key
if container is not None:
self.container_name = container
if connection_kwargs is not None:
self.connection_kwargs = connection_kwargs
# connect
if self.use_pyrax:
self.pyrax = pyrax
if self.pyrax_identity_type:
self.pyrax.set_setting("identity_type", self.pyrax_identity_type)
if self.auth_url:
self.pyrax.set_setting("auth_endpoint", self.auth_url)
if self.auth_tenant_id:
self.pyrax.set_setting("tenant_id", self.auth_tenant_id)
self.pyrax.set_setting("region", self.region)
try:
self.pyrax.set_credentials(self.username, self.api_key)
except (Error, PyraxException, RequestException) as e:
logging.warning('Error in pyrax.set_credentials, %s: %s', e.__class__.__name__, str(e))
except Exception as e:
logging.exception(
"""Pyrax Connect Error in `django_cumulus.cumulus.authentication.Auth`::
self.pyrax.set_credentials(self.username, self.api_key)
""")
# else:
# headers = {"X-Container-Read": ".r:*"}
# self._connection.post_container(self.container_name, headers=headers)
def _get_connection(self):
if not hasattr(self, "_connection"):
if self.use_pyrax:
public = not self.use_snet # invert
self._connection = pyrax.connect_to_cloudfiles(public=public)
elif swiftclient:
self._connection = swiftclient.Connection(
authurl=self.auth_url,
user=self.username,
key=self.api_key,
snet=self.use_snet,
auth_version=self.auth_version,
tenant_name=self.auth_tenant_name,
)
else:
raise NotImplementedError("Cloud connection is not correctly configured.")
return self._connection
def _set_connection(self, value):
self._connection = value
connection = property(_get_connection, _set_connection)
def __getstate__(self):
"""
Return a picklable representation of the storage.
"""
return {
"username": self.username,
"api_key": self.api_key,
"container_name": self.container_name,
"use_snet": self.use_snet,
"connection_kwargs": self.connection_kwargs
}
def _set_container(self, container):
"""
Sets the container (and, if needed, the configured TTL on it), making
the container publicly available.
"""
if self.use_pyrax:
if container.cdn_ttl != self.ttl or not container.cdn_enabled:
container.make_public(ttl=self.ttl)
if hasattr(self, "_container_public_uri"):
delattr(self, "_container_public_uri")
self._container = container
container = property(_get_container, _set_container)
def get_cname(self, uri):
if not CUMULUS['CNAMES'] or uri not in CUMULUS['CNAMES']:
return uri
return CUMULUS['CNAMES'][uri]
@cached_property
def container_cdn_ssl_uri(self):
if self.container_ssl_uri:
uri = self.container_ssl_uri
else:
uri = self.container.cdn_ssl_uri
return self.get_cname(uri)
@cached_property
def container_cdn_uri(self):
if self.container_uri:
uri = self.container_uri
else:
uri = self.container.cdn_uri
return self.get_cname(uri)
@property
def container_url(self):
if self.use_ssl:
return self.container_cdn_ssl_uri
else:
return self.container_cdn_uri
def _get_object(self, name):
"""
Helper function to retrieve the requested Object.
"""
if self.use_pyrax:
try:
return self.container.get_object(name)
except pyrax.exceptions.NoSuchObject:
return None
elif swiftclient:
try:
return self.container.get_object(name)
except swiftclient.exceptions.ClientException:
return None
else:
return self.container.get_object(name)
|
django-cumulus/django-cumulus | cumulus/authentication.py | Auth._set_container | python | def _set_container(self, container):
if self.use_pyrax:
if container.cdn_ttl != self.ttl or not container.cdn_enabled:
container.make_public(ttl=self.ttl)
if hasattr(self, "_container_public_uri"):
delattr(self, "_container_public_uri")
self._container = container | Sets the container (and, if needed, the configured TTL on it), making
the container publicly available. | train | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/authentication.py#L112-L122 | null | class Auth(object):
connection_kwargs = {}
use_pyrax = CUMULUS["USE_PYRAX"]
use_snet = CUMULUS["SERVICENET"]
region = CUMULUS["REGION"]
username = CUMULUS["USERNAME"]
api_key = CUMULUS["API_KEY"]
auth_url = CUMULUS["AUTH_URL"]
auth_tenant_id = CUMULUS["AUTH_TENANT_ID"]
auth_tenant_name = CUMULUS["AUTH_TENANT_NAME"]
auth_version = CUMULUS["AUTH_VERSION"]
pyrax_identity_type = CUMULUS["PYRAX_IDENTITY_TYPE"]
def __init__(self, username=None, api_key=None, container=None,
connection_kwargs=None, container_uri=None):
"""
Initializes the settings for the connection and container.
"""
if username is not None:
self.username = username
if api_key is not None:
self.api_key = api_key
if container is not None:
self.container_name = container
if connection_kwargs is not None:
self.connection_kwargs = connection_kwargs
# connect
if self.use_pyrax:
self.pyrax = pyrax
if self.pyrax_identity_type:
self.pyrax.set_setting("identity_type", self.pyrax_identity_type)
if self.auth_url:
self.pyrax.set_setting("auth_endpoint", self.auth_url)
if self.auth_tenant_id:
self.pyrax.set_setting("tenant_id", self.auth_tenant_id)
self.pyrax.set_setting("region", self.region)
try:
self.pyrax.set_credentials(self.username, self.api_key)
except (Error, PyraxException, RequestException) as e:
logging.warning('Error in pyrax.set_credentials, %s: %s', e.__class__.__name__, str(e))
except Exception as e:
logging.exception(
"""Pyrax Connect Error in `django_cumulus.cumulus.authentication.Auth`::
self.pyrax.set_credentials(self.username, self.api_key)
""")
# else:
# headers = {"X-Container-Read": ".r:*"}
# self._connection.post_container(self.container_name, headers=headers)
def _get_connection(self):
if not hasattr(self, "_connection"):
if self.use_pyrax:
public = not self.use_snet # invert
self._connection = pyrax.connect_to_cloudfiles(public=public)
elif swiftclient:
self._connection = swiftclient.Connection(
authurl=self.auth_url,
user=self.username,
key=self.api_key,
snet=self.use_snet,
auth_version=self.auth_version,
tenant_name=self.auth_tenant_name,
)
else:
raise NotImplementedError("Cloud connection is not correctly configured.")
return self._connection
def _set_connection(self, value):
self._connection = value
connection = property(_get_connection, _set_connection)
def __getstate__(self):
"""
Return a picklable representation of the storage.
"""
return {
"username": self.username,
"api_key": self.api_key,
"container_name": self.container_name,
"use_snet": self.use_snet,
"connection_kwargs": self.connection_kwargs
}
def _get_container(self):
"""
Gets or creates the container.
"""
if not hasattr(self, "_container"):
if self.use_pyrax:
self._container = self.connection.create_container(self.container_name)
else:
self._container = None
return self._container
container = property(_get_container, _set_container)
def get_cname(self, uri):
if not CUMULUS['CNAMES'] or uri not in CUMULUS['CNAMES']:
return uri
return CUMULUS['CNAMES'][uri]
@cached_property
def container_cdn_ssl_uri(self):
if self.container_ssl_uri:
uri = self.container_ssl_uri
else:
uri = self.container.cdn_ssl_uri
return self.get_cname(uri)
@cached_property
def container_cdn_uri(self):
if self.container_uri:
uri = self.container_uri
else:
uri = self.container.cdn_uri
return self.get_cname(uri)
@property
def container_url(self):
if self.use_ssl:
return self.container_cdn_ssl_uri
else:
return self.container_cdn_uri
def _get_object(self, name):
"""
Helper function to retrieve the requested Object.
"""
if self.use_pyrax:
try:
return self.container.get_object(name)
except pyrax.exceptions.NoSuchObject:
return None
elif swiftclient:
try:
return self.container.get_object(name)
except swiftclient.exceptions.ClientException:
return None
else:
return self.container.get_object(name)
|
django-cumulus/django-cumulus | cumulus/authentication.py | Auth._get_object | python | def _get_object(self, name):
if self.use_pyrax:
try:
return self.container.get_object(name)
except pyrax.exceptions.NoSuchObject:
return None
elif swiftclient:
try:
return self.container.get_object(name)
except swiftclient.exceptions.ClientException:
return None
else:
return self.container.get_object(name) | Helper function to retrieve the requested Object. | train | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/authentication.py#L157-L172 | null | class Auth(object):
connection_kwargs = {}
use_pyrax = CUMULUS["USE_PYRAX"]
use_snet = CUMULUS["SERVICENET"]
region = CUMULUS["REGION"]
username = CUMULUS["USERNAME"]
api_key = CUMULUS["API_KEY"]
auth_url = CUMULUS["AUTH_URL"]
auth_tenant_id = CUMULUS["AUTH_TENANT_ID"]
auth_tenant_name = CUMULUS["AUTH_TENANT_NAME"]
auth_version = CUMULUS["AUTH_VERSION"]
pyrax_identity_type = CUMULUS["PYRAX_IDENTITY_TYPE"]
def __init__(self, username=None, api_key=None, container=None,
connection_kwargs=None, container_uri=None):
"""
Initializes the settings for the connection and container.
"""
if username is not None:
self.username = username
if api_key is not None:
self.api_key = api_key
if container is not None:
self.container_name = container
if connection_kwargs is not None:
self.connection_kwargs = connection_kwargs
# connect
if self.use_pyrax:
self.pyrax = pyrax
if self.pyrax_identity_type:
self.pyrax.set_setting("identity_type", self.pyrax_identity_type)
if self.auth_url:
self.pyrax.set_setting("auth_endpoint", self.auth_url)
if self.auth_tenant_id:
self.pyrax.set_setting("tenant_id", self.auth_tenant_id)
self.pyrax.set_setting("region", self.region)
try:
self.pyrax.set_credentials(self.username, self.api_key)
except (Error, PyraxException, RequestException) as e:
logging.warning('Error in pyrax.set_credentials, %s: %s', e.__class__.__name__, str(e))
except Exception as e:
logging.exception(
"""Pyrax Connect Error in `django_cumulus.cumulus.authentication.Auth`::
self.pyrax.set_credentials(self.username, self.api_key)
""")
# else:
# headers = {"X-Container-Read": ".r:*"}
# self._connection.post_container(self.container_name, headers=headers)
def _get_connection(self):
if not hasattr(self, "_connection"):
if self.use_pyrax:
public = not self.use_snet # invert
self._connection = pyrax.connect_to_cloudfiles(public=public)
elif swiftclient:
self._connection = swiftclient.Connection(
authurl=self.auth_url,
user=self.username,
key=self.api_key,
snet=self.use_snet,
auth_version=self.auth_version,
tenant_name=self.auth_tenant_name,
)
else:
raise NotImplementedError("Cloud connection is not correctly configured.")
return self._connection
def _set_connection(self, value):
self._connection = value
connection = property(_get_connection, _set_connection)
def __getstate__(self):
"""
Return a picklable representation of the storage.
"""
return {
"username": self.username,
"api_key": self.api_key,
"container_name": self.container_name,
"use_snet": self.use_snet,
"connection_kwargs": self.connection_kwargs
}
def _get_container(self):
"""
Gets or creates the container.
"""
if not hasattr(self, "_container"):
if self.use_pyrax:
self._container = self.connection.create_container(self.container_name)
else:
self._container = None
return self._container
def _set_container(self, container):
"""
Sets the container (and, if needed, the configured TTL on it), making
the container publicly available.
"""
if self.use_pyrax:
if container.cdn_ttl != self.ttl or not container.cdn_enabled:
container.make_public(ttl=self.ttl)
if hasattr(self, "_container_public_uri"):
delattr(self, "_container_public_uri")
self._container = container
container = property(_get_container, _set_container)
def get_cname(self, uri):
if not CUMULUS['CNAMES'] or uri not in CUMULUS['CNAMES']:
return uri
return CUMULUS['CNAMES'][uri]
@cached_property
def container_cdn_ssl_uri(self):
if self.container_ssl_uri:
uri = self.container_ssl_uri
else:
uri = self.container.cdn_ssl_uri
return self.get_cname(uri)
@cached_property
def container_cdn_uri(self):
if self.container_uri:
uri = self.container_uri
else:
uri = self.container.cdn_uri
return self.get_cname(uri)
@property
def container_url(self):
if self.use_ssl:
return self.container_cdn_ssl_uri
else:
return self.container_cdn_uri
|
django-cumulus/django-cumulus | cumulus/context_processors.py | cdn_url | python | def cdn_url(request):
cdn_url, ssl_url = _get_container_urls(CumulusStorage())
static_url = settings.STATIC_URL
return {
"CDN_URL": cdn_url + static_url,
"CDN_SSL_URL": ssl_url + static_url,
} | A context processor that exposes the full CDN URL in templates. | train | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/context_processors.py#L19-L29 | [
"def _get_container_urls(swiftclient_storage):\n cdn_url = swiftclient_storage.container.cdn_uri\n ssl_url = swiftclient_storage.container.cdn_ssl_uri\n\n return cdn_url, ssl_url\n"
] | from urlparse import urlparse
from django.conf import settings
from cumulus.storage import CumulusStorage, CumulusStaticStorage
def _is_ssl_uri(uri):
return urlparse(uri).scheme == "https"
def _get_container_urls(swiftclient_storage):
cdn_url = swiftclient_storage.container.cdn_uri
ssl_url = swiftclient_storage.container.cdn_ssl_uri
return cdn_url, ssl_url
def static_cdn_url(request):
"""
A context processor that exposes the full static CDN URL
as static URL in templates.
"""
cdn_url, ssl_url = _get_container_urls(CumulusStaticStorage())
static_url = settings.STATIC_URL
return {
"STATIC_URL": cdn_url + static_url,
"STATIC_SSL_URL": ssl_url + static_url,
"LOCAL_STATIC_URL": static_url,
}
|
django-cumulus/django-cumulus | cumulus/context_processors.py | static_cdn_url | python | def static_cdn_url(request):
cdn_url, ssl_url = _get_container_urls(CumulusStaticStorage())
static_url = settings.STATIC_URL
return {
"STATIC_URL": cdn_url + static_url,
"STATIC_SSL_URL": ssl_url + static_url,
"LOCAL_STATIC_URL": static_url,
} | A context processor that exposes the full static CDN URL
as static URL in templates. | train | https://github.com/django-cumulus/django-cumulus/blob/64feb07b857af28f226be4899e875c29405e261d/cumulus/context_processors.py#L32-L44 | [
"def _get_container_urls(swiftclient_storage):\n cdn_url = swiftclient_storage.container.cdn_uri\n ssl_url = swiftclient_storage.container.cdn_ssl_uri\n\n return cdn_url, ssl_url\n"
] | from urlparse import urlparse
from django.conf import settings
from cumulus.storage import CumulusStorage, CumulusStaticStorage
def _is_ssl_uri(uri):
return urlparse(uri).scheme == "https"
def _get_container_urls(swiftclient_storage):
cdn_url = swiftclient_storage.container.cdn_uri
ssl_url = swiftclient_storage.container.cdn_ssl_uri
return cdn_url, ssl_url
def cdn_url(request):
"""
A context processor that exposes the full CDN URL in templates.
"""
cdn_url, ssl_url = _get_container_urls(CumulusStorage())
static_url = settings.STATIC_URL
return {
"CDN_URL": cdn_url + static_url,
"CDN_SSL_URL": ssl_url + static_url,
}
|
marrow/cinje | cinje/inline/comment.py | Comment.match | python | def match(self, context, line):
stripped = line.stripped
return stripped.startswith('#') and not stripped.startswith('#{') | Match lines prefixed with a hash ("#") mark that don't look like text. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/inline/comment.py#L18-L21 | null | class Comment(object):
"""Line comment handler.
This handles not emitting double-hash comments and has a high priority to prevent other processing of
commented-out lines.
Syntax:
# <comment>
## <hidden comment>
"""
priority = -90
def __call__(self, context):
"""Emit comments into the final code that aren't marked as hidden/private."""
try:
line = context.input.next()
except StopIteration:
return
if not line.stripped.startswith('##'):
yield line
|
marrow/cinje | cinje/util.py | stream | python | def stream(input, encoding=None, errors='strict'):
input = (i for i in input if i) # Omits `None` (empty wrappers) and empty chunks.
if encoding: # Automatically, and iteratively, encode the text if requested.
input = iterencode(input, encoding, errors=errors)
return input | Safely iterate a template generator, ignoring ``None`` values and optionally stream encoding.
Used internally by ``cinje.flatten``, this allows for easy use of a template generator as a WSGI body. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/util.py#L71-L82 | null | # encoding: utf-8
from __future__ import unicode_literals
"""Convienent utilities."""
# ## Imports
import sys
from codecs import iterencode
from inspect import isfunction, isclass
from operator import methodcaller
from collections import deque, namedtuple, Sized, Iterable
from pkg_resources import iter_entry_points
from xml.sax.saxutils import quoteattr
try: # pragma: no cover
from html.parser import HTMLParser
except ImportError: # pragma: no cover
from HTMLParser import HTMLParser
# ## Python Cross-Compatibility
#
# These allow us to detect relevant version differences for code generation, and overcome some of the minor
# differences in labels between Python 2 and Python 3 compatible runtimes.
#
# The differences, in practice, are minor, and are easily overcome through a small block of version-dependant
# code. Handily, even built-in labels are not sacrosanct; they can be easily assigned to and re-mapped.
#
try: # Python 2
from types import StringTypes as stringy
try:
from cStringIO import StringIO
except: # pragma: no cover
from StringIO import StringIO # This never really happens. Still, nice to be defensive.
bytes = str
str = unicode
py = 2
reduce = reduce
except: # Python 3
from io import StringIO
stringy = str
bytes = bytes
str = str
py = 3
# There are some additional complications for the Pypy runtime.
try:
from sys import pypy_version_info
pypy = True
except ImportError:
pypy = False
# ## Type Definitions
# A tuple representing a single step of fancy iteration.
Iteration = namedtuple('Iteration', ['first', 'last', 'index', 'total', 'value'])
# ## Simple Utility Functions
def stream(input, encoding=None, errors='strict'):
"""Safely iterate a template generator, ignoring ``None`` values and optionally stream encoding.
Used internally by ``cinje.flatten``, this allows for easy use of a template generator as a WSGI body.
"""
input = (i for i in input if i) # Omits `None` (empty wrappers) and empty chunks.
if encoding: # Automatically, and iteratively, encode the text if requested.
input = iterencode(input, encoding, errors=errors)
return input
def flatten(input, file=None, encoding=None, errors='strict'):
"""Return a flattened representation of a cinje chunk stream.
This has several modes of operation. If no `file` argument is given, output will be returned as a string.
The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
returned. The type of string written will be determined by `encoding`, just as the return value is when not
writing to a file-like object. The `errors` argument is passed through when encoding.
We can highly recommend using the various stremaing IO containers available in the
[`io`](https://docs.python.org/3/library/io.html) module, though
[`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
"""
input = stream(input, encoding, errors)
if file is None: # Exit early if we're not writing to a file.
return b''.join(input) if encoding else ''.join(input)
counter = 0
for chunk in input:
file.write(chunk)
counter += len(chunk)
return counter
def fragment(string, name="anonymous", **context):
"""Translate a template fragment into a callable function.
**Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.
Only one function may be declared, either manually, or automatically. If automatic defintition is chosen the
resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
"""
if isinstance(string, bytes):
string = string.decode('utf-8')
if ": def" in string or ":def" in string:
code = string.encode('utf8').decode('cinje')
name = None
else:
code = ": def {name}\n\n{string}".format(
name = name,
string = string,
).encode('utf8').decode('cinje')
environ = dict(context)
exec(code, environ)
if name is None: # We need to dig it out of the `__tmpl__` list.
if __debug__ and not environ.get('__tmpl__', None):
raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
"\n\n" + code)
return environ[environ['__tmpl__'][-1]] # Super secret sauce: you _can_ define more than one function...
return environ[name]
def interruptable(iterable):
"""Allow easy catching of a generator interrupting operation when using "yield from"."""
for i in iterable:
if i is None:
return
yield i
def iterate(obj):
"""Loop over an iterable and track progress, including first and last state.
On each iteration yield an Iteration named tuple with the first and last flags, current element index, total
iterable length (if possible to acquire), and value, in that order.
for iteration in iterate(something):
iteration.value # Do something.
You can unpack these safely:
for first, last, index, total, value in iterate(something):
pass
If you want to unpack the values you are iterating across, you can by wrapping the nested unpacking in parenthesis:
for first, last, index, total, (foo, bar, baz) in iterate(something):
pass
Even if the length of the iterable can't be reliably determined this function will still capture the "last" state
of the final loop iteration. (Basically: this works with generators.)
This process is about 10x slower than simple enumeration on CPython 3.4, so only use it where you actually need to
track state. Use `enumerate()` elsewhere.
"""
global next, Iteration
next = next
Iteration = Iteration
total = len(obj) if isinstance(obj, Sized) else None
iterator = iter(obj)
first = True
last = False
i = 0
try:
value = next(iterator)
except StopIteration:
return
while True:
try:
next_value = next(iterator)
except StopIteration:
last = True
yield Iteration(first, last, i, total, value)
if last: return
value = next_value
i += 1
first = False
def xmlargs(_source=None, **values):
from cinje.helpers import bless
# Optimize by binding these names to the local scope, saving a lookup on each call.
global str, Iterable, stringy
str = str
Iterable = Iterable
stringy = stringy
ejoin = " ".join
parts = []
pappend = parts.append
# If a data source is provided it overrides the keyword arguments which are treated as defaults.
if _source:
values.update(_source)
for k in sorted(values):
# We technically allow non-string values for keys. They're just converted to strings first.
key = str(k).rstrip('_').replace('__', ':').replace('_', '-')
value = values[k]
# We skip empty, None, False, and other falsy values other than zero.
if k[0] == '_' or (not value and (value is False or value != 0)): # False == 0, so, uh, work around that.
continue
if value is True: # For explicitly True values, we don't have a value for the attribute.
pappend(key)
continue
# Non-string iterables (such as lists, sets, tuples, etc.) are treated as space-separated strings.
if isinstance(value, Iterable) and not isinstance(value, stringy):
value = ejoin(str(i) for i in value)
pappend(key + "=" + quoteattr(str(value)))
return bless(" " + ejoin(parts)) if parts else ''
def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
"""Chunkify and "tag" a block of text into plain text and code sections.
The first delimeter is blank to represent text sections, and keep the indexes aligned with the tags.
Values are yielded in the form (tag, text).
"""
skipping = 0 # How many closing parenthesis will we need to skip?
start = None # Starting position of current match.
last = 0
i = 0
text = line.line
while i < len(text):
if start is not None:
if text[i] == '{':
skipping += 1
elif text[i] == '}':
if skipping:
skipping -= 1
else:
yield line.clone(kind=mapping[text[start-2:start]], line=text[start:i])
start = None
last = i = i + 1
continue
elif text[i:i+2] in mapping:
if last is not None and last != i:
yield line.clone(kind=mapping[None], line=text[last:i])
last = None
start = i = i + 2
continue
i += 1
if last < len(text):
yield line.clone(kind=mapping[None], line=text[last:])
def ensure_buffer(context, separate=True):
if 'text' in context.flag or 'buffer' not in context.flag:
return
if separate: yield Line(0, "")
yield Line(0, "_buffer = []")
if not pypy:
yield Line(0, "__w, __ws = _buffer.extend, _buffer.append")
yield Line(0, "")
context.flag.add('text')
# ## Common Classes
class Line(object):
"""A rich description for a line of input, allowing for annotation."""
__slots__ = ('number', 'line', 'scope', 'kind', 'continued')
def __init__(self, number, line, scope=None, kind=None):
if isinstance(line, bytes):
line = line.decode('utf-8')
self.number = number
self.line = line
self.scope = scope
self.kind = kind
self.continued = self.stripped.endswith('\\')
if not kind: self.process()
super(Line, self).__init__()
def process(self):
if self.stripped.startswith('#') and not self.stripped.startswith('#{'):
self.kind = 'comment'
elif self.stripped.startswith(':'):
self.kind = 'code'
self.line = self.stripped[1:].lstrip()
else:
self.kind = 'text'
@property
def stripped(self):
return self.line.strip()
@property
def partitioned(self):
prefix, _, remainder = self.stripped.partition(' ')
return prefix.rstrip(), remainder.lstrip()
def __repr__(self):
return '{0.__class__.__name__}({0.number}, {0.kind}, "{0.stripped}")'.format(self)
def __bytes__(self):
return str(self).encode('utf8')
def __str__(self):
if self.scope is None:
return self.line
return '\t' * self.scope + self.line.lstrip()
if py == 2: # pragma: no cover
__unicode__ = __str__
__str__ = __bytes__
del __bytes__
def clone(self, **kw):
values = dict(
number = self.number,
line = self.line,
scope = self.scope,
kind = self.kind,
)
values.update(kw)
instance = self.__class__(**values)
return instance
class Lines(object):
"""Iterate input lines of source, with the ability to push lines back."""
__slots__ = ['Line', 'source', 'buffer']
def __init__(self, input=None, Line=Line):
self.Line = Line
if input is None:
self.source = None
self.buffer = deque()
elif hasattr(input, 'readlines'):
self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.readlines()))
self.buffer = deque(self.source)
else:
self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.split('\n')))
self.buffer = deque(self.source)
super(Lines, self).__init__()
@property
def count(self):
return len(self.buffer)
def __len__(self):
return self.count
def __repr__(self):
return 'Lines({0.count})'.format(self)
def __iter__(self):
return self
def __next__(self):
return self.next()
def __str__(self):
return "\n".join(str(i) for i in self)
def next(self):
if not self.buffer:
raise StopIteration()
return self.buffer.popleft()
def peek(self):
return self.buffer[0] if self.buffer else None
def push(self, *lines):
self.buffer.extendleft((i if isinstance(i, self.Line) else self.Line(self.buffer[0].number if self.buffer else 0, i)) for i in reversed(lines))
def reset(self):
self.buffer = deque(self.source)
def append(self, *lines):
self.buffer.extend((i if isinstance(i, self.Line) else self.Line(self.buffer[-1].number if self.buffer else 0, i)) for i in lines)
class Context(object):
"""The processing context for translating cinje source into Python source.
This is the primary entry point for translation.
"""
__slots__ = ('input', 'scope', 'flag', '_handler', 'templates', 'handlers', 'mapping')
def __init__(self, input):
self.input = Lines(input.decode('utf8') if isinstance(input, bytes) else input)
self.scope = 0
self.flag = set()
self._handler = []
self.handlers = []
self.templates = []
self.mapping = None
for translator in map(methodcaller('load'), iter_entry_points('cinje.translator')):
self.handlers.append(translator)
def __repr__(self):
return "Context({!r}, {}, {})".format(self.input, self.scope, self.flag)
def prepare(self):
"""Prepare the ordered list of transformers and reset context state to initial."""
self.scope = 0
self.mapping = deque([0])
self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)]
@property
def stream(self):
"""The workhorse of cinje: transform input lines and emit output lines.
After constructing an instance with a set of input lines iterate this property to generate the template.
"""
if 'init' not in self.flag:
root = True
self.prepare()
else:
root = False
# Track which lines were generated in response to which lines of source code.
# The end result is that there is one entry here for every line emitted, each integer representing the source
# line number that triggered it. If any lines are returned with missing line numbers, they're inferred from
# the last entry already in the list.
# Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. this updates
# the head of a linked list; the whole thing needs to be reversed to make sense.
mapping = self.mapping
for line in self.input:
handler = self.classify(line)
if line.kind == 'code' and line.stripped == 'end': # Exit the current child scope.
return
assert handler, "Unable to identify handler for line; this should be impossible!"
self.input.push(line) # Put it back so it can be consumed by the handler.
for line in handler(self): # This re-indents the code to match, if missing explicit scope.
if root: mapping.appendleft(line.number or mapping[0]) # Track source line number.
if line.scope is None:
line = line.clone(scope=self.scope)
yield line
def classify(self, line):
"""Identify the correct handler for a given line of input."""
for handler in self._handler:
if handler.match(self, line):
return handler
class Pipe(object):
"""An object representing a pipe-able callable, optionally with preserved arguments.
Using this you can custruct custom subclasses (define a method named "callable") or use it as a decorator:
@Pipe
def s(text):
return str(text)
"""
__slots__ = ('callable', 'args', 'kwargs')
def __init__(self, callable, *args, **kw):
super(Pipe, self).__init__()
self.callable = callable
self.args = args if args else ()
self.kwargs = kw if kw else {}
def __repr__(self):
return "Pipe({self.callable!r}{0}{1})".format(
(', ' + ', '.join(repr(i) for i in self.args)) if self.args else '',
(', ' + ', '.join("{0}={1!r}".format(i, j) for i, j in self.kwargs.items())) if self.kwargs else '',
self = self,
)
def __ror__(self, other):
"""The main machinery of the Pipe, calling the chosen callable with the recorded arguments."""
return self.callable(*(self.args + (other, )), **self.kwargs)
def __call__(self, *args, **kw):
"""Allow for the preserved args and kwargs to be updated, returning a mutated copy.
This allows for usage with arguments, as in the following example:
"Hello!" | encode('utf8')
This also allows for easy construction of custom mutated copies for use later, a la:
utf8 = encode('utf8')
"Hello!" | utf8
"""
return self.__class__(self.callable, *args, **kw)
# ## Tag Stripper
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.strict = False
self.convert_charrefs = True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
|
marrow/cinje | cinje/util.py | flatten | python | def flatten(input, file=None, encoding=None, errors='strict'):
input = stream(input, encoding, errors)
if file is None: # Exit early if we're not writing to a file.
return b''.join(input) if encoding else ''.join(input)
counter = 0
for chunk in input:
file.write(chunk)
counter += len(chunk)
return counter | Return a flattened representation of a cinje chunk stream.
This has several modes of operation. If no `file` argument is given, output will be returned as a string.
The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
returned. The type of string written will be determined by `encoding`, just as the return value is when not
writing to a file-like object. The `errors` argument is passed through when encoding.
We can highly recommend using the various stremaing IO containers available in the
[`io`](https://docs.python.org/3/library/io.html) module, though
[`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/util.py#L85-L111 | [
"def stream(input, encoding=None, errors='strict'):\n\t\"\"\"Safely iterate a template generator, ignoring ``None`` values and optionally stream encoding.\n\n\tUsed internally by ``cinje.flatten``, this allows for easy use of a template generator as a WSGI body.\n\t\"\"\"\n\n\tinput = (i for i in input if i) # Omits `None` (empty wrappers) and empty chunks.\n\n\tif encoding: # Automatically, and iteratively, encode the text if requested.\n\t\tinput = iterencode(input, encoding, errors=errors)\n\n\treturn input\n"
] | # encoding: utf-8
from __future__ import unicode_literals
"""Convienent utilities."""
# ## Imports
import sys
from codecs import iterencode
from inspect import isfunction, isclass
from operator import methodcaller
from collections import deque, namedtuple, Sized, Iterable
from pkg_resources import iter_entry_points
from xml.sax.saxutils import quoteattr
try: # pragma: no cover
from html.parser import HTMLParser
except ImportError: # pragma: no cover
from HTMLParser import HTMLParser
# ## Python Cross-Compatibility
#
# These allow us to detect relevant version differences for code generation, and overcome some of the minor
# differences in labels between Python 2 and Python 3 compatible runtimes.
#
# The differences, in practice, are minor, and are easily overcome through a small block of version-dependant
# code. Handily, even built-in labels are not sacrosanct; they can be easily assigned to and re-mapped.
#
try: # Python 2
from types import StringTypes as stringy
try:
from cStringIO import StringIO
except: # pragma: no cover
from StringIO import StringIO # This never really happens. Still, nice to be defensive.
bytes = str
str = unicode
py = 2
reduce = reduce
except: # Python 3
from io import StringIO
stringy = str
bytes = bytes
str = str
py = 3
# There are some additional complications for the Pypy runtime.
try:
from sys import pypy_version_info
pypy = True
except ImportError:
pypy = False
# ## Type Definitions
# A tuple representing a single step of fancy iteration.
Iteration = namedtuple('Iteration', ['first', 'last', 'index', 'total', 'value'])
# ## Simple Utility Functions
def stream(input, encoding=None, errors='strict'):
"""Safely iterate a template generator, ignoring ``None`` values and optionally stream encoding.
Used internally by ``cinje.flatten``, this allows for easy use of a template generator as a WSGI body.
"""
input = (i for i in input if i) # Omits `None` (empty wrappers) and empty chunks.
if encoding: # Automatically, and iteratively, encode the text if requested.
input = iterencode(input, encoding, errors=errors)
return input
def flatten(input, file=None, encoding=None, errors='strict'):
"""Return a flattened representation of a cinje chunk stream.
This has several modes of operation. If no `file` argument is given, output will be returned as a string.
The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
returned. The type of string written will be determined by `encoding`, just as the return value is when not
writing to a file-like object. The `errors` argument is passed through when encoding.
We can highly recommend using the various stremaing IO containers available in the
[`io`](https://docs.python.org/3/library/io.html) module, though
[`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
"""
input = stream(input, encoding, errors)
if file is None: # Exit early if we're not writing to a file.
return b''.join(input) if encoding else ''.join(input)
counter = 0
for chunk in input:
file.write(chunk)
counter += len(chunk)
return counter
def fragment(string, name="anonymous", **context):
"""Translate a template fragment into a callable function.
**Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.
Only one function may be declared, either manually, or automatically. If automatic defintition is chosen the
resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
"""
if isinstance(string, bytes):
string = string.decode('utf-8')
if ": def" in string or ":def" in string:
code = string.encode('utf8').decode('cinje')
name = None
else:
code = ": def {name}\n\n{string}".format(
name = name,
string = string,
).encode('utf8').decode('cinje')
environ = dict(context)
exec(code, environ)
if name is None: # We need to dig it out of the `__tmpl__` list.
if __debug__ and not environ.get('__tmpl__', None):
raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
"\n\n" + code)
return environ[environ['__tmpl__'][-1]] # Super secret sauce: you _can_ define more than one function...
return environ[name]
def interruptable(iterable):
"""Allow easy catching of a generator interrupting operation when using "yield from"."""
for i in iterable:
if i is None:
return
yield i
def iterate(obj):
"""Loop over an iterable and track progress, including first and last state.
On each iteration yield an Iteration named tuple with the first and last flags, current element index, total
iterable length (if possible to acquire), and value, in that order.
for iteration in iterate(something):
iteration.value # Do something.
You can unpack these safely:
for first, last, index, total, value in iterate(something):
pass
If you want to unpack the values you are iterating across, you can by wrapping the nested unpacking in parenthesis:
for first, last, index, total, (foo, bar, baz) in iterate(something):
pass
Even if the length of the iterable can't be reliably determined this function will still capture the "last" state
of the final loop iteration. (Basically: this works with generators.)
This process is about 10x slower than simple enumeration on CPython 3.4, so only use it where you actually need to
track state. Use `enumerate()` elsewhere.
"""
global next, Iteration
next = next
Iteration = Iteration
total = len(obj) if isinstance(obj, Sized) else None
iterator = iter(obj)
first = True
last = False
i = 0
try:
value = next(iterator)
except StopIteration:
return
while True:
try:
next_value = next(iterator)
except StopIteration:
last = True
yield Iteration(first, last, i, total, value)
if last: return
value = next_value
i += 1
first = False
def xmlargs(_source=None, **values):
from cinje.helpers import bless
# Optimize by binding these names to the local scope, saving a lookup on each call.
global str, Iterable, stringy
str = str
Iterable = Iterable
stringy = stringy
ejoin = " ".join
parts = []
pappend = parts.append
# If a data source is provided it overrides the keyword arguments which are treated as defaults.
if _source:
values.update(_source)
for k in sorted(values):
# We technically allow non-string values for keys. They're just converted to strings first.
key = str(k).rstrip('_').replace('__', ':').replace('_', '-')
value = values[k]
# We skip empty, None, False, and other falsy values other than zero.
if k[0] == '_' or (not value and (value is False or value != 0)): # False == 0, so, uh, work around that.
continue
if value is True: # For explicitly True values, we don't have a value for the attribute.
pappend(key)
continue
# Non-string iterables (such as lists, sets, tuples, etc.) are treated as space-separated strings.
if isinstance(value, Iterable) and not isinstance(value, stringy):
value = ejoin(str(i) for i in value)
pappend(key + "=" + quoteattr(str(value)))
return bless(" " + ejoin(parts)) if parts else ''
def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
"""Chunkify and "tag" a block of text into plain text and code sections.
The first delimeter is blank to represent text sections, and keep the indexes aligned with the tags.
Values are yielded in the form (tag, text).
"""
skipping = 0 # How many closing parenthesis will we need to skip?
start = None # Starting position of current match.
last = 0
i = 0
text = line.line
while i < len(text):
if start is not None:
if text[i] == '{':
skipping += 1
elif text[i] == '}':
if skipping:
skipping -= 1
else:
yield line.clone(kind=mapping[text[start-2:start]], line=text[start:i])
start = None
last = i = i + 1
continue
elif text[i:i+2] in mapping:
if last is not None and last != i:
yield line.clone(kind=mapping[None], line=text[last:i])
last = None
start = i = i + 2
continue
i += 1
if last < len(text):
yield line.clone(kind=mapping[None], line=text[last:])
def ensure_buffer(context, separate=True):
if 'text' in context.flag or 'buffer' not in context.flag:
return
if separate: yield Line(0, "")
yield Line(0, "_buffer = []")
if not pypy:
yield Line(0, "__w, __ws = _buffer.extend, _buffer.append")
yield Line(0, "")
context.flag.add('text')
# ## Common Classes
class Line(object):
"""A rich description for a line of input, allowing for annotation."""
__slots__ = ('number', 'line', 'scope', 'kind', 'continued')
def __init__(self, number, line, scope=None, kind=None):
if isinstance(line, bytes):
line = line.decode('utf-8')
self.number = number
self.line = line
self.scope = scope
self.kind = kind
self.continued = self.stripped.endswith('\\')
if not kind: self.process()
super(Line, self).__init__()
def process(self):
if self.stripped.startswith('#') and not self.stripped.startswith('#{'):
self.kind = 'comment'
elif self.stripped.startswith(':'):
self.kind = 'code'
self.line = self.stripped[1:].lstrip()
else:
self.kind = 'text'
@property
def stripped(self):
return self.line.strip()
@property
def partitioned(self):
prefix, _, remainder = self.stripped.partition(' ')
return prefix.rstrip(), remainder.lstrip()
def __repr__(self):
return '{0.__class__.__name__}({0.number}, {0.kind}, "{0.stripped}")'.format(self)
def __bytes__(self):
return str(self).encode('utf8')
def __str__(self):
if self.scope is None:
return self.line
return '\t' * self.scope + self.line.lstrip()
if py == 2: # pragma: no cover
__unicode__ = __str__
__str__ = __bytes__
del __bytes__
def clone(self, **kw):
values = dict(
number = self.number,
line = self.line,
scope = self.scope,
kind = self.kind,
)
values.update(kw)
instance = self.__class__(**values)
return instance
class Lines(object):
"""Iterate input lines of source, with the ability to push lines back."""
__slots__ = ['Line', 'source', 'buffer']
def __init__(self, input=None, Line=Line):
self.Line = Line
if input is None:
self.source = None
self.buffer = deque()
elif hasattr(input, 'readlines'):
self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.readlines()))
self.buffer = deque(self.source)
else:
self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.split('\n')))
self.buffer = deque(self.source)
super(Lines, self).__init__()
@property
def count(self):
return len(self.buffer)
def __len__(self):
return self.count
def __repr__(self):
return 'Lines({0.count})'.format(self)
def __iter__(self):
return self
def __next__(self):
return self.next()
def __str__(self):
return "\n".join(str(i) for i in self)
def next(self):
if not self.buffer:
raise StopIteration()
return self.buffer.popleft()
def peek(self):
return self.buffer[0] if self.buffer else None
def push(self, *lines):
self.buffer.extendleft((i if isinstance(i, self.Line) else self.Line(self.buffer[0].number if self.buffer else 0, i)) for i in reversed(lines))
def reset(self):
self.buffer = deque(self.source)
def append(self, *lines):
self.buffer.extend((i if isinstance(i, self.Line) else self.Line(self.buffer[-1].number if self.buffer else 0, i)) for i in lines)
class Context(object):
"""The processing context for translating cinje source into Python source.
This is the primary entry point for translation.
"""
__slots__ = ('input', 'scope', 'flag', '_handler', 'templates', 'handlers', 'mapping')
def __init__(self, input):
self.input = Lines(input.decode('utf8') if isinstance(input, bytes) else input)
self.scope = 0
self.flag = set()
self._handler = []
self.handlers = []
self.templates = []
self.mapping = None
for translator in map(methodcaller('load'), iter_entry_points('cinje.translator')):
self.handlers.append(translator)
def __repr__(self):
return "Context({!r}, {}, {})".format(self.input, self.scope, self.flag)
def prepare(self):
"""Prepare the ordered list of transformers and reset context state to initial."""
self.scope = 0
self.mapping = deque([0])
self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)]
@property
def stream(self):
"""The workhorse of cinje: transform input lines and emit output lines.
After constructing an instance with a set of input lines iterate this property to generate the template.
"""
if 'init' not in self.flag:
root = True
self.prepare()
else:
root = False
# Track which lines were generated in response to which lines of source code.
# The end result is that there is one entry here for every line emitted, each integer representing the source
# line number that triggered it. If any lines are returned with missing line numbers, they're inferred from
# the last entry already in the list.
# Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. this updates
# the head of a linked list; the whole thing needs to be reversed to make sense.
mapping = self.mapping
for line in self.input:
handler = self.classify(line)
if line.kind == 'code' and line.stripped == 'end': # Exit the current child scope.
return
assert handler, "Unable to identify handler for line; this should be impossible!"
self.input.push(line) # Put it back so it can be consumed by the handler.
for line in handler(self): # This re-indents the code to match, if missing explicit scope.
if root: mapping.appendleft(line.number or mapping[0]) # Track source line number.
if line.scope is None:
line = line.clone(scope=self.scope)
yield line
def classify(self, line):
"""Identify the correct handler for a given line of input."""
for handler in self._handler:
if handler.match(self, line):
return handler
class Pipe(object):
"""An object representing a pipe-able callable, optionally with preserved arguments.
Using this you can custruct custom subclasses (define a method named "callable") or use it as a decorator:
@Pipe
def s(text):
return str(text)
"""
__slots__ = ('callable', 'args', 'kwargs')
def __init__(self, callable, *args, **kw):
super(Pipe, self).__init__()
self.callable = callable
self.args = args if args else ()
self.kwargs = kw if kw else {}
def __repr__(self):
return "Pipe({self.callable!r}{0}{1})".format(
(', ' + ', '.join(repr(i) for i in self.args)) if self.args else '',
(', ' + ', '.join("{0}={1!r}".format(i, j) for i, j in self.kwargs.items())) if self.kwargs else '',
self = self,
)
def __ror__(self, other):
"""The main machinery of the Pipe, calling the chosen callable with the recorded arguments."""
return self.callable(*(self.args + (other, )), **self.kwargs)
def __call__(self, *args, **kw):
"""Allow for the preserved args and kwargs to be updated, returning a mutated copy.
This allows for usage with arguments, as in the following example:
"Hello!" | encode('utf8')
This also allows for easy construction of custom mutated copies for use later, a la:
utf8 = encode('utf8')
"Hello!" | utf8
"""
return self.__class__(self.callable, *args, **kw)
# ## Tag Stripper
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.strict = False
self.convert_charrefs = True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
|
marrow/cinje | cinje/util.py | fragment | python | def fragment(string, name="anonymous", **context):
if isinstance(string, bytes):
string = string.decode('utf-8')
if ": def" in string or ":def" in string:
code = string.encode('utf8').decode('cinje')
name = None
else:
code = ": def {name}\n\n{string}".format(
name = name,
string = string,
).encode('utf8').decode('cinje')
environ = dict(context)
exec(code, environ)
if name is None: # We need to dig it out of the `__tmpl__` list.
if __debug__ and not environ.get('__tmpl__', None):
raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
"\n\n" + code)
return environ[environ['__tmpl__'][-1]] # Super secret sauce: you _can_ define more than one function...
return environ[name] | Translate a template fragment into a callable function.
**Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.
Only one function may be declared, either manually, or automatically. If automatic defintition is chosen the
resulting function takes no arguments. Additional keyword arguments are passed through as global variables. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/util.py#L114-L146 | null | # encoding: utf-8
from __future__ import unicode_literals
"""Convienent utilities."""
# ## Imports
import sys
from codecs import iterencode
from inspect import isfunction, isclass
from operator import methodcaller
from collections import deque, namedtuple, Sized, Iterable
from pkg_resources import iter_entry_points
from xml.sax.saxutils import quoteattr
try: # pragma: no cover
from html.parser import HTMLParser
except ImportError: # pragma: no cover
from HTMLParser import HTMLParser
# ## Python Cross-Compatibility
#
# These allow us to detect relevant version differences for code generation, and overcome some of the minor
# differences in labels between Python 2 and Python 3 compatible runtimes.
#
# The differences, in practice, are minor, and are easily overcome through a small block of version-dependant
# code. Handily, even built-in labels are not sacrosanct; they can be easily assigned to and re-mapped.
#
try: # Python 2
from types import StringTypes as stringy
try:
from cStringIO import StringIO
except: # pragma: no cover
from StringIO import StringIO # This never really happens. Still, nice to be defensive.
bytes = str
str = unicode
py = 2
reduce = reduce
except: # Python 3
from io import StringIO
stringy = str
bytes = bytes
str = str
py = 3
# There are some additional complications for the Pypy runtime.
try:
from sys import pypy_version_info
pypy = True
except ImportError:
pypy = False
# ## Type Definitions
# A tuple representing a single step of fancy iteration.
Iteration = namedtuple('Iteration', ['first', 'last', 'index', 'total', 'value'])
# ## Simple Utility Functions
def stream(input, encoding=None, errors='strict'):
"""Safely iterate a template generator, ignoring ``None`` values and optionally stream encoding.
Used internally by ``cinje.flatten``, this allows for easy use of a template generator as a WSGI body.
"""
input = (i for i in input if i) # Omits `None` (empty wrappers) and empty chunks.
if encoding: # Automatically, and iteratively, encode the text if requested.
input = iterencode(input, encoding, errors=errors)
return input
def flatten(input, file=None, encoding=None, errors='strict'):
"""Return a flattened representation of a cinje chunk stream.
This has several modes of operation. If no `file` argument is given, output will be returned as a string.
The type of string will be determined by the presence of an `encoding`; if one is given the returned value is a
binary string, otherwise the native unicode representation. If a `file` is present, chunks will be written
iteratively through repeated calls to `file.write()`, and the amount of data (characters or bytes) written
returned. The type of string written will be determined by `encoding`, just as the return value is when not
writing to a file-like object. The `errors` argument is passed through when encoding.
We can highly recommend using the various stremaing IO containers available in the
[`io`](https://docs.python.org/3/library/io.html) module, though
[`tempfile`](https://docs.python.org/3/library/tempfile.html) classes are also quite useful.
"""
input = stream(input, encoding, errors)
if file is None: # Exit early if we're not writing to a file.
return b''.join(input) if encoding else ''.join(input)
counter = 0
for chunk in input:
file.write(chunk)
counter += len(chunk)
return counter
def fragment(string, name="anonymous", **context):
"""Translate a template fragment into a callable function.
**Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.
Only one function may be declared, either manually, or automatically. If automatic defintition is chosen the
resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
"""
if isinstance(string, bytes):
string = string.decode('utf-8')
if ": def" in string or ":def" in string:
code = string.encode('utf8').decode('cinje')
name = None
else:
code = ": def {name}\n\n{string}".format(
name = name,
string = string,
).encode('utf8').decode('cinje')
environ = dict(context)
exec(code, environ)
if name is None: # We need to dig it out of the `__tmpl__` list.
if __debug__ and not environ.get('__tmpl__', None):
raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
"\n\n" + code)
return environ[environ['__tmpl__'][-1]] # Super secret sauce: you _can_ define more than one function...
return environ[name]
def interruptable(iterable):
"""Allow easy catching of a generator interrupting operation when using "yield from"."""
for i in iterable:
if i is None:
return
yield i
def iterate(obj):
"""Loop over an iterable and track progress, including first and last state.
On each iteration yield an Iteration named tuple with the first and last flags, current element index, total
iterable length (if possible to acquire), and value, in that order.
for iteration in iterate(something):
iteration.value # Do something.
You can unpack these safely:
for first, last, index, total, value in iterate(something):
pass
If you want to unpack the values you are iterating across, you can by wrapping the nested unpacking in parenthesis:
for first, last, index, total, (foo, bar, baz) in iterate(something):
pass
Even if the length of the iterable can't be reliably determined this function will still capture the "last" state
of the final loop iteration. (Basically: this works with generators.)
This process is about 10x slower than simple enumeration on CPython 3.4, so only use it where you actually need to
track state. Use `enumerate()` elsewhere.
"""
global next, Iteration
next = next
Iteration = Iteration
total = len(obj) if isinstance(obj, Sized) else None
iterator = iter(obj)
first = True
last = False
i = 0
try:
value = next(iterator)
except StopIteration:
return
while True:
try:
next_value = next(iterator)
except StopIteration:
last = True
yield Iteration(first, last, i, total, value)
if last: return
value = next_value
i += 1
first = False
def xmlargs(_source=None, **values):
from cinje.helpers import bless
# Optimize by binding these names to the local scope, saving a lookup on each call.
global str, Iterable, stringy
str = str
Iterable = Iterable
stringy = stringy
ejoin = " ".join
parts = []
pappend = parts.append
# If a data source is provided it overrides the keyword arguments which are treated as defaults.
if _source:
values.update(_source)
for k in sorted(values):
# We technically allow non-string values for keys. They're just converted to strings first.
key = str(k).rstrip('_').replace('__', ':').replace('_', '-')
value = values[k]
# We skip empty, None, False, and other falsy values other than zero.
if k[0] == '_' or (not value and (value is False or value != 0)): # False == 0, so, uh, work around that.
continue
if value is True: # For explicitly True values, we don't have a value for the attribute.
pappend(key)
continue
# Non-string iterables (such as lists, sets, tuples, etc.) are treated as space-separated strings.
if isinstance(value, Iterable) and not isinstance(value, stringy):
value = ejoin(str(i) for i in value)
pappend(key + "=" + quoteattr(str(value)))
return bless(" " + ejoin(parts)) if parts else ''
def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
"""Chunkify and "tag" a block of text into plain text and code sections.
The first delimeter is blank to represent text sections, and keep the indexes aligned with the tags.
Values are yielded in the form (tag, text).
"""
skipping = 0 # How many closing parenthesis will we need to skip?
start = None # Starting position of current match.
last = 0
i = 0
text = line.line
while i < len(text):
if start is not None:
if text[i] == '{':
skipping += 1
elif text[i] == '}':
if skipping:
skipping -= 1
else:
yield line.clone(kind=mapping[text[start-2:start]], line=text[start:i])
start = None
last = i = i + 1
continue
elif text[i:i+2] in mapping:
if last is not None and last != i:
yield line.clone(kind=mapping[None], line=text[last:i])
last = None
start = i = i + 2
continue
i += 1
if last < len(text):
yield line.clone(kind=mapping[None], line=text[last:])
def ensure_buffer(context, separate=True):
if 'text' in context.flag or 'buffer' not in context.flag:
return
if separate: yield Line(0, "")
yield Line(0, "_buffer = []")
if not pypy:
yield Line(0, "__w, __ws = _buffer.extend, _buffer.append")
yield Line(0, "")
context.flag.add('text')
# ## Common Classes
class Line(object):
"""A rich description for a line of input, allowing for annotation."""
__slots__ = ('number', 'line', 'scope', 'kind', 'continued')
def __init__(self, number, line, scope=None, kind=None):
if isinstance(line, bytes):
line = line.decode('utf-8')
self.number = number
self.line = line
self.scope = scope
self.kind = kind
self.continued = self.stripped.endswith('\\')
if not kind: self.process()
super(Line, self).__init__()
def process(self):
if self.stripped.startswith('#') and not self.stripped.startswith('#{'):
self.kind = 'comment'
elif self.stripped.startswith(':'):
self.kind = 'code'
self.line = self.stripped[1:].lstrip()
else:
self.kind = 'text'
@property
def stripped(self):
return self.line.strip()
@property
def partitioned(self):
prefix, _, remainder = self.stripped.partition(' ')
return prefix.rstrip(), remainder.lstrip()
def __repr__(self):
return '{0.__class__.__name__}({0.number}, {0.kind}, "{0.stripped}")'.format(self)
def __bytes__(self):
return str(self).encode('utf8')
def __str__(self):
if self.scope is None:
return self.line
return '\t' * self.scope + self.line.lstrip()
if py == 2: # pragma: no cover
__unicode__ = __str__
__str__ = __bytes__
del __bytes__
def clone(self, **kw):
values = dict(
number = self.number,
line = self.line,
scope = self.scope,
kind = self.kind,
)
values.update(kw)
instance = self.__class__(**values)
return instance
class Lines(object):
"""Iterate input lines of source, with the ability to push lines back."""
__slots__ = ['Line', 'source', 'buffer']
def __init__(self, input=None, Line=Line):
self.Line = Line
if input is None:
self.source = None
self.buffer = deque()
elif hasattr(input, 'readlines'):
self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.readlines()))
self.buffer = deque(self.source)
else:
self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.split('\n')))
self.buffer = deque(self.source)
super(Lines, self).__init__()
@property
def count(self):
return len(self.buffer)
def __len__(self):
return self.count
def __repr__(self):
return 'Lines({0.count})'.format(self)
def __iter__(self):
return self
def __next__(self):
return self.next()
def __str__(self):
return "\n".join(str(i) for i in self)
def next(self):
if not self.buffer:
raise StopIteration()
return self.buffer.popleft()
def peek(self):
return self.buffer[0] if self.buffer else None
def push(self, *lines):
self.buffer.extendleft((i if isinstance(i, self.Line) else self.Line(self.buffer[0].number if self.buffer else 0, i)) for i in reversed(lines))
def reset(self):
self.buffer = deque(self.source)
def append(self, *lines):
self.buffer.extend((i if isinstance(i, self.Line) else self.Line(self.buffer[-1].number if self.buffer else 0, i)) for i in lines)
class Context(object):
"""The processing context for translating cinje source into Python source.
This is the primary entry point for translation.
"""
__slots__ = ('input', 'scope', 'flag', '_handler', 'templates', 'handlers', 'mapping')
def __init__(self, input):
self.input = Lines(input.decode('utf8') if isinstance(input, bytes) else input)
self.scope = 0
self.flag = set()
self._handler = []
self.handlers = []
self.templates = []
self.mapping = None
for translator in map(methodcaller('load'), iter_entry_points('cinje.translator')):
self.handlers.append(translator)
def __repr__(self):
return "Context({!r}, {}, {})".format(self.input, self.scope, self.flag)
def prepare(self):
"""Prepare the ordered list of transformers and reset context state to initial."""
self.scope = 0
self.mapping = deque([0])
self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)]
@property
def stream(self):
"""The workhorse of cinje: transform input lines and emit output lines.
After constructing an instance with a set of input lines iterate this property to generate the template.
"""
if 'init' not in self.flag:
root = True
self.prepare()
else:
root = False
# Track which lines were generated in response to which lines of source code.
# The end result is that there is one entry here for every line emitted, each integer representing the source
# line number that triggered it. If any lines are returned with missing line numbers, they're inferred from
# the last entry already in the list.
# Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. this updates
# the head of a linked list; the whole thing needs to be reversed to make sense.
mapping = self.mapping
for line in self.input:
handler = self.classify(line)
if line.kind == 'code' and line.stripped == 'end': # Exit the current child scope.
return
assert handler, "Unable to identify handler for line; this should be impossible!"
self.input.push(line) # Put it back so it can be consumed by the handler.
for line in handler(self): # This re-indents the code to match, if missing explicit scope.
if root: mapping.appendleft(line.number or mapping[0]) # Track source line number.
if line.scope is None:
line = line.clone(scope=self.scope)
yield line
def classify(self, line):
"""Identify the correct handler for a given line of input."""
for handler in self._handler:
if handler.match(self, line):
return handler
class Pipe(object):
"""An object representing a pipe-able callable, optionally with preserved arguments.
Using this you can custruct custom subclasses (define a method named "callable") or use it as a decorator:
@Pipe
def s(text):
return str(text)
"""
__slots__ = ('callable', 'args', 'kwargs')
def __init__(self, callable, *args, **kw):
super(Pipe, self).__init__()
self.callable = callable
self.args = args if args else ()
self.kwargs = kw if kw else {}
def __repr__(self):
return "Pipe({self.callable!r}{0}{1})".format(
(', ' + ', '.join(repr(i) for i in self.args)) if self.args else '',
(', ' + ', '.join("{0}={1!r}".format(i, j) for i, j in self.kwargs.items())) if self.kwargs else '',
self = self,
)
def __ror__(self, other):
"""The main machinery of the Pipe, calling the chosen callable with the recorded arguments."""
return self.callable(*(self.args + (other, )), **self.kwargs)
def __call__(self, *args, **kw):
"""Allow for the preserved args and kwargs to be updated, returning a mutated copy.
This allows for usage with arguments, as in the following example:
"Hello!" | encode('utf8')
This also allows for easy construction of custom mutated copies for use later, a la:
utf8 = encode('utf8')
"Hello!" | utf8
"""
return self.__class__(self.callable, *args, **kw)
# ## Tag Stripper
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.strict = False
self.convert_charrefs = True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
|
marrow/cinje | cinje/util.py | iterate | python | def iterate(obj):
global next, Iteration
next = next
Iteration = Iteration
total = len(obj) if isinstance(obj, Sized) else None
iterator = iter(obj)
first = True
last = False
i = 0
try:
value = next(iterator)
except StopIteration:
return
while True:
try:
next_value = next(iterator)
except StopIteration:
last = True
yield Iteration(first, last, i, total, value)
if last: return
value = next_value
i += 1
first = False | Loop over an iterable and track progress, including first and last state.
On each iteration yield an Iteration named tuple with the first and last flags, current element index, total
iterable length (if possible to acquire), and value, in that order.
for iteration in iterate(something):
iteration.value # Do something.
You can unpack these safely:
for first, last, index, total, value in iterate(something):
pass
If you want to unpack the values you are iterating across, you can by wrapping the nested unpacking in parenthesis:
for first, last, index, total, (foo, bar, baz) in iterate(something):
pass
Even if the length of the iterable can't be reliably determined this function will still capture the "last" state
of the final loop iteration. (Basically: this works with generators.)
This process is about 10x slower than simple enumeration on CPython 3.4, so only use it where you actually need to
track state. Use `enumerate()` elsewhere. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/util.py#L159-L211 | null | # encoding: utf-8
from __future__ import unicode_literals
"""Convienent utilities."""
# ## Imports
import sys
from codecs import iterencode
from inspect import isfunction, isclass
from operator import methodcaller
from collections import deque, namedtuple, Sized, Iterable
from pkg_resources import iter_entry_points
from xml.sax.saxutils import quoteattr
try: # pragma: no cover
from html.parser import HTMLParser
except ImportError: # pragma: no cover
from HTMLParser import HTMLParser
# ## Python Cross-Compatibility
#
# These allow us to detect relevant version differences for code generation, and overcome some of the minor
# differences in labels between Python 2 and Python 3 compatible runtimes.
#
# The differences, in practice, are minor, and are easily overcome through a small block of version-dependant
# code. Handily, even built-in labels are not sacrosanct; they can be easily assigned to and re-mapped.
#
try: # Python 2
from types import StringTypes as stringy
try:
from cStringIO import StringIO
except: # pragma: no cover
from StringIO import StringIO # This never really happens. Still, nice to be defensive.
bytes = str
str = unicode
py = 2
reduce = reduce
except: # Python 3
from io import StringIO
stringy = str
bytes = bytes
str = str
py = 3
# There are some additional complications for the Pypy runtime.
try:
from sys import pypy_version_info
pypy = True
except ImportError:
pypy = False
# ## Type Definitions
# A tuple representing a single step of fancy iteration.
Iteration = namedtuple('Iteration', ['first', 'last', 'index', 'total', 'value'])
# ## Simple Utility Functions
def stream(input, encoding=None, errors='strict'):
	"""Safely iterate a template generator, ignoring ``None`` values and optionally stream encoding.

	Used internally by ``cinje.flatten``; this also makes a template generator directly usable as a WSGI body.
	"""
	# Drop None (empty wrappers) and empty chunks up front.
	chunks = (piece for piece in input if piece)

	if not encoding:
		return chunks

	# Encode lazily, chunk by chunk, when a target encoding was requested.
	return iterencode(chunks, encoding, errors=errors)


def flatten(input, file=None, encoding=None, errors='strict'):
	"""Return a flattened representation of a cinje chunk stream.

	With no ``file`` argument the result is returned as a single string: binary when an
	``encoding`` is given, native unicode otherwise.  With a ``file`` the chunks are written
	iteratively via repeated ``file.write()`` calls and the amount of data written (characters
	or bytes, matching the presence of ``encoding``) is returned.  ``errors`` is passed
	through to the encoder.  The streaming IO containers in the ``io`` module (and the
	``tempfile`` classes) pair well with the ``file`` mode.
	"""
	chunks = stream(input, encoding, errors)

	if file is not None:
		# Streaming mode: write each chunk and report the total amount written.
		written = 0

		for piece in chunks:
			file.write(piece)
			written += len(piece)

		return written

	# In-memory mode: join into a single binary or unicode string.
	joiner = b'' if encoding else ''
	return joiner.join(chunks)
def fragment(string, name="anonymous", **context):
	"""Translate a template fragment into a callable function.

	**Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.

	Only one function may be declared, either manually, or automatically. If automatic definition is chosen the
	resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
	"""
	# Accept bytes as well as text source.
	if isinstance(string, bytes):
		string = string.decode('utf-8')

	# If the fragment already declares a function, translate it as-is; otherwise wrap the
	# whole fragment in an automatically generated function named *name*.
	if ": def" in string or ":def" in string:
		# The 'cinje' text codec (registered on package import) performs the translation.
		code = string.encode('utf8').decode('cinje')
		name = None  # The real name must be discovered from the translated module below.
	else:
		code = ": def {name}\n\n{string}".format(
				name = name,
				string = string,
			).encode('utf8').decode('cinje')

	# Execute the generated Python source, with the extra keyword arguments as its globals.
	environ = dict(context)
	exec(code, environ)

	if name is None:  # We need to dig it out of the `__tmpl__` list.
		# Translated modules record declared template names in ``__tmpl__``;
		# guarded by __debug__, so this check disappears under ``python -O``.
		if __debug__ and not environ.get('__tmpl__', None):
			raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
					"\n\n" + code)

		return environ[environ['__tmpl__'][-1]]  # Super secret sauce: you _can_ define more than one function...

	return environ[name]
def interruptable(iterable):
	"""Allow easy catching of a generator interrupting operation when using "yield from".

	Items are yielded until the first ``None`` is encountered, which stops iteration.
	Other falsy values (``0``, ``False``, ``''``) pass through untouched.
	"""
	for item in iterable:
		if item is None:
			break  # A None sentinel interrupts iteration.

		yield item
def iterate(obj):
	"""Loop over an iterable and track progress, including first and last state.

	On each iteration yield an ``Iteration`` named tuple carrying the first and last flags, the
	current element index, the total iterable length (when it can be determined), and the value,
	in that order.  The tuples unpack safely:

		for first, last, index, total, value in iterate(something):
			pass

	Even when the length of the iterable can't be reliably determined, the "last" state of the
	final iteration is still captured via one-element lookahead, so this works with generators.
	The bookkeeping is roughly an order of magnitude slower than simple enumeration, so only use
	it where you actually need the extra state; use ``enumerate()`` elsewhere.
	"""
	# Bind hot names to true locals.  (The previous ``global next, Iteration`` declarations did
	# the opposite of their stated intent: they re-bound the names at *module* scope, shadowing
	# the ``next`` builtin and saving no lookups at all.)
	_next = next
	_Iteration = Iteration

	# Sized containers can report their length up front; generators cannot.
	total = len(obj) if isinstance(obj, Sized) else None
	iterator = iter(obj)
	first = True
	index = 0

	try:
		value = _next(iterator)
	except StopIteration:
		return  # Empty iterable: yield nothing at all.

	while True:
		# Look ahead one element so the final iteration can be flagged as last.
		try:
			following = _next(iterator)
			last = False
		except StopIteration:
			following = None
			last = True

		yield _Iteration(first, last, index, total, value)

		if last:
			return

		value = following
		index += 1
		first = False
def xmlargs(_source=None, **values):
	"""Build a string of XML/HTML attributes from keyword arguments.

	:param _source: optional mapping whose entries override the keyword arguments.
	:param values: default attribute values; keys are normalized (trailing ``_`` stripped,
		``__`` becomes ``:``, ``_`` becomes ``-``) and emitted in sorted order.
	:returns: a space-prefixed attribute string wrapped by ``bless``, or ``''`` when no
		attributes survive filtering.
	"""
	from cinje.helpers import bless  # Imported lazily, presumably to avoid a circular import — TODO confirm.

	# Bind hot names to true locals.  (The previous ``global str, Iterable, stringy``
	# declarations re-bound the names at *module* scope instead, saving nothing.)
	_str = str
	_Iterable = Iterable
	_stringy = stringy
	ejoin = " ".join
	parts = []
	pappend = parts.append

	# If a data source is provided it overrides the keyword arguments, which are treated as defaults.
	if _source:
		values.update(_source)

	for k in sorted(values):
		# We technically allow non-string keys; they are converted to strings first.
		key = _str(k).rstrip('_').replace('__', ':').replace('_', '-')
		value = values[k]

		# Skip private ('_'-prefixed) keys and falsy values other than zero.
		# (False == 0 in Python, so the test is phrased to keep 0 while dropping False/None/''/[].)
		if k[0] == '_' or (not value and (value is False or value != 0)):
			continue

		if value is True:  # Explicitly True: emit a bare (value-less) attribute.
			pappend(key)
			continue

		# Non-string iterables (lists, sets, tuples, ...) become space-separated strings.
		if isinstance(value, _Iterable) and not isinstance(value, _stringy):
			value = ejoin(_str(i) for i in value)

		pappend(key + "=" + quoteattr(_str(value)))

	return bless(" " + ejoin(parts)) if parts else ''
def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
	"""Chunkify and "tag" a block of text into plain text and code sections.

	The first delimiter is blank to represent text sections, and keep the indexes aligned with the tags.

	Values are yielded in the form (tag, text).
	"""
	# NOTE: ``mapping`` is a mutable default argument; it is only read, never
	# mutated, so this is safe — but do not add mutation without copying first.
	skipping = 0  # How many closing parenthesis will we need to skip?
	start = None  # Starting position of current match.
	last = 0  # Index just past the end of the previously emitted chunk; None while inside an expression.
	i = 0  # Current scan position.
	text = line.line

	while i < len(text):
		if start is not None:  # Currently inside a ${...}-style expression.
			if text[i] == '{':
				skipping += 1  # Nested opening brace: its closer must not end the chunk.
			elif text[i] == '}':
				if skipping:
					skipping -= 1
				else:
					# End of the expression: emit it tagged by its two-character opening prefix.
					yield line.clone(kind=mapping[text[start-2:start]], line=text[start:i])
					start = None
					last = i = i + 1
					continue
		elif text[i:i+2] in mapping:  # An opening delimiter such as '${' or '#{'.
			if last is not None and last != i:
				# Emit the plain-text run preceding the delimiter first.
				yield line.clone(kind=mapping[None], line=text[last:i])

			last = None
			start = i = i + 2  # Skip past the two-character delimiter.
			continue

		i += 1

	# Trailing plain text after the final expression, if any.
	# NOTE(review): if the text ends inside an unterminated expression, ``last``
	# is still None here and ``None < len(text)`` raises TypeError on Python 3 —
	# confirm whether unterminated delimiters can reach this point.
	if last < len(text):
		yield line.clone(kind=mapping[None], line=text[last:])
def ensure_buffer(context, separate=True):
	"""Yield the preamble lines that initialize the text buffer, at most once per context.

	:param context: the translation Context whose ``flag`` set tracks buffer state.
	:param separate: when True, emit a leading blank line before the preamble.
	"""
	# Bail out if the buffer preamble was already emitted ('text' flagged) or if
	# this context does not use buffered output at all ('buffer' flag absent).
	if 'text' in context.flag or 'buffer' not in context.flag:
		return

	if separate: yield Line(0, "")  # Optional blank separator before the preamble.

	yield Line(0, "_buffer = []")

	if not pypy:
		# Pre-bind the bound methods on CPython; presumably skipped on pypy
		# because the JIT removes the benefit — TODO confirm.
		yield Line(0, "__w, __ws = _buffer.extend, _buffer.append")

	yield Line(0, "")

	context.flag.add('text')  # Mark the buffer as initialized for this context.
# ## Common Classes
class Line(object):
	"""A rich description for a line of input, allowing for annotation."""

	__slots__ = ('number', 'line', 'scope', 'kind', 'continued')

	def __init__(self, number, line, scope=None, kind=None):
		"""Capture one line of template source.

		:param number: 1-based source line number (0 for synthesized lines).
		:param line: the text of the line; bytes are decoded as UTF-8.
		:param scope: indentation depth, or None to inherit the context scope later.
		:param kind: 'text', 'code', or 'comment'; classified automatically when omitted.
		"""
		if isinstance(line, bytes):
			line = line.decode('utf-8')

		self.number = number
		self.line = line
		self.scope = scope
		self.kind = kind
		# A trailing backslash marks an explicit line continuation.
		self.continued = self.stripped.endswith('\\')

		if not kind: self.process()

		super(Line, self).__init__()

	def process(self):
		# Classify the line: '#...' is a comment (but '#{' opens an expression),
		# a leading ':' marks template code (and is stripped off), anything else is text.
		if self.stripped.startswith('#') and not self.stripped.startswith('#{'):
			self.kind = 'comment'
		elif self.stripped.startswith(':'):
			self.kind = 'code'
			self.line = self.stripped[1:].lstrip()
		else:
			self.kind = 'text'

	@property
	def stripped(self):
		"""The line text with surrounding whitespace removed."""
		return self.line.strip()

	@property
	def partitioned(self):
		"""The first word and the remainder of the line, as a 2-tuple."""
		prefix, _, remainder = self.stripped.partition(' ')
		return prefix.rstrip(), remainder.lstrip()

	def __repr__(self):
		return '{0.__class__.__name__}({0.number}, {0.kind}, "{0.stripped}")'.format(self)

	def __bytes__(self):
		return str(self).encode('utf8')

	def __str__(self):
		# Re-indent with tabs according to the recorded scope, when one is set.
		if self.scope is None:
			return self.line

		return '\t' * self.scope + self.line.lstrip()

	if py == 2:  # pragma: no cover
		# On Python 2 the text method must be __unicode__ and __str__ must return bytes.
		__unicode__ = __str__
		__str__ = __bytes__
		del __bytes__

	def clone(self, **kw):
		"""Return a copy of this line with the given attributes replaced.

		``continued`` is recomputed by ``__init__``; passing ``kind`` skips re-classification.
		"""
		values = dict(
				number = self.number,
				line = self.line,
				scope = self.scope,
				kind = self.kind,
			)

		values.update(kw)

		instance = self.__class__(**values)

		return instance
class Lines(object):
	"""Iterate input lines of source, with the ability to push lines back."""

	__slots__ = ['Line', 'source', 'buffer']

	def __init__(self, input=None, Line=Line):
		"""Wrap a file-like object or string as a pushback-capable line iterator.

		:param input: anything with ``readlines``, a string split on newlines, or None for an empty buffer.
		:param Line: the line factory class; defaults to cinje's Line.
		"""
		self.Line = Line

		if input is None:
			self.source = None
			self.buffer = deque()
		elif hasattr(input, 'readlines'):
			self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.readlines()))
			self.buffer = deque(self.source)
		else:
			self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.split('\n')))
			self.buffer = deque(self.source)

		super(Lines, self).__init__()

	@property
	def count(self):
		"""The number of lines remaining in the buffer."""
		return len(self.buffer)

	def __len__(self):
		return self.count

	def __repr__(self):
		return 'Lines({0.count})'.format(self)

	def __iter__(self):
		return self

	def __next__(self):  # Python 3 iterator protocol; delegates to next().
		return self.next()

	def __str__(self):
		# NOTE(review): this is consuming — joining iterates (and empties) the buffer.
		return "\n".join(str(i) for i in self)

	def next(self):
		"""Pop and return the next buffered line; raises StopIteration when exhausted."""
		if not self.buffer:
			raise StopIteration()

		return self.buffer.popleft()

	def peek(self):
		"""Return the next line without consuming it, or None when exhausted."""
		return self.buffer[0] if self.buffer else None

	def push(self, *lines):
		"""Prepend lines to the buffer, preserving their given order.

		Bare strings are wrapped in ``self.Line``.  NOTE(review): the wrapper's line
		number is taken lazily from the current head of the buffer, which shifts as
		``extendleft`` inserts each item — the numbering of multiple pushed strings
		depends on this evaluation order; do not eagerly materialize the generator.
		"""
		self.buffer.extendleft((i if isinstance(i, self.Line) else self.Line(self.buffer[0].number if self.buffer else 0, i)) for i in reversed(lines))

	def reset(self):
		"""Restore the buffer to the full original source."""
		self.buffer = deque(self.source)

	def append(self, *lines):
		"""Add lines to the end of the buffer, wrapping bare strings in ``self.Line``."""
		self.buffer.extend((i if isinstance(i, self.Line) else self.Line(self.buffer[-1].number if self.buffer else 0, i)) for i in lines)
class Context(object):
	"""The processing context for translating cinje source into Python source.

	This is the primary entry point for translation.
	"""

	__slots__ = ('input', 'scope', 'flag', '_handler', 'templates', 'handlers', 'mapping')

	def __init__(self, input):
		"""Wrap template source (text, or UTF-8 encoded bytes) for translation."""
		self.input = Lines(input.decode('utf8') if isinstance(input, bytes) else input)
		self.scope = 0  # Current indentation depth for emitted lines.
		self.flag = set()  # Free-form state flags shared between the transformation handlers.
		self._handler = []  # Instantiated handlers, populated by prepare().
		self.handlers = []  # Handler classes discovered below via entry points.
		self.templates = []  # Populated elsewhere; presumably names of declared templates — TODO confirm.
		self.mapping = None  # Reversed generated-line -> source-line mapping; see the notes in `stream`.

		# Discover translation handlers registered under the 'cinje.translator' entry point.
		for translator in map(methodcaller('load'), iter_entry_points('cinje.translator')):
			self.handlers.append(translator)

	def __repr__(self):
		return "Context({!r}, {}, {})".format(self.input, self.scope, self.flag)

	def prepare(self):
		"""Prepare the ordered list of transformers and reset context state to initial."""
		self.scope = 0
		self.mapping = deque([0])
		# One instance of each registered handler class, ordered by declared priority.
		self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)]

	@property
	def stream(self):
		"""The workhorse of cinje: transform input lines and emit output lines.

		After constructing an instance with a set of input lines iterate this property to generate the template.
		"""
		# 'init' not yet flagged: treat this as the root invocation and reset state.
		# (The flag is presumably set by a handler for nested use — TODO confirm.)
		if 'init' not in self.flag:
			root = True
			self.prepare()
		else:
			root = False

		# Track which lines were generated in response to which lines of source code.
		# The end result is that there is one entry here for every line emitted, each integer representing the source
		# line number that triggered it. If any lines are returned with missing line numbers, they're inferred from
		# the last entry already in the list.
		# Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. this updates
		# the head of a linked list; the whole thing needs to be reversed to make sense.
		mapping = self.mapping

		for line in self.input:
			handler = self.classify(line)

			if line.kind == 'code' and line.stripped == 'end':  # Exit the current child scope.
				return

			assert handler, "Unable to identify handler for line; this should be impossible!"

			self.input.push(line)  # Put it back so it can be consumed by the handler.

			for line in handler(self):  # This re-indents the code to match, if missing explicit scope.
				if root: mapping.appendleft(line.number or mapping[0])  # Track source line number.

				if line.scope is None:
					line = line.clone(scope=self.scope)

				yield line

	def classify(self, line):
		"""Identify the correct handler for a given line of input."""
		for handler in self._handler:
			if handler.match(self, line):
				return handler
class Pipe(object):
	"""An object representing a pipe-able callable, optionally with preserved arguments.

	Construct custom subclasses (define a method named "callable") or use it as a decorator:

		@Pipe
		def s(text):
			return str(text)
	"""

	__slots__ = ('callable', 'args', 'kwargs')

	def __init__(self, callable, *args, **kw):
		super(Pipe, self).__init__()

		self.callable = callable
		self.args = args or ()  # Preserved positional arguments, prepended on use.
		self.kwargs = kw or {}  # Preserved keyword arguments.

	def __repr__(self):
		positional = (', ' + ', '.join(repr(arg) for arg in self.args)) if self.args else ''
		keyword = (', ' + ', '.join("{0}={1!r}".format(key, val) for key, val in self.kwargs.items())) if self.kwargs else ''

		return "Pipe({self.callable!r}{0}{1})".format(positional, keyword, self=self)

	def __ror__(self, other):
		"""The main machinery of the Pipe: invoke the callable with the piped value appended to the preserved arguments."""
		arguments = self.args + (other, )
		return self.callable(*arguments, **self.kwargs)

	def __call__(self, *args, **kw):
		"""Return a mutated copy with the preserved args and kwargs replaced.

		This allows usage with arguments:

			"Hello!" | encode('utf8')

		And easy construction of reusable, partially-applied pipes:

			utf8 = encode('utf8')
			"Hello!" | utf8
		"""
		return self.__class__(self.callable, *args, **kw)
# ## Tag Stripper
class MLStripper(HTMLParser):
	"""An HTMLParser that discards markup and accumulates only character data.

	``HTMLParser.__init__`` is deliberately not called: the attributes it would
	set are assigned by hand so the class also works with the Python 2 parser,
	which is an old-style class.
	"""

	def __init__(self):
		self.reset()  # Initialize base parser state without calling __init__.
		self.strict = False  # Accepted for 3.2/3.3 compatibility; otherwise unused.
		self.convert_charrefs = True  # Decode entities so they survive stripping as text.
		self.fed = []  # Accumulated character-data chunks.

	def handle_data(self, d):
		self.fed.append(d)

	def get_data(self):
		return ''.join(self.fed)


def strip_tags(html):
	"""Return *html* with all markup removed, preserving only character data."""
	s = MLStripper()
	s.feed(html)
	# Fix: without close() the parser may hold trailing text (or pending
	# character references) in its internal buffer, silently dropping it.
	s.close()
	return s.get_data()
|
marrow/cinje | cinje/util.py | chunk | python | def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
skipping = 0 # How many closing parenthesis will we need to skip?
start = None # Starting position of current match.
last = 0
i = 0
text = line.line
while i < len(text):
if start is not None:
if text[i] == '{':
skipping += 1
elif text[i] == '}':
if skipping:
skipping -= 1
else:
yield line.clone(kind=mapping[text[start-2:start]], line=text[start:i])
start = None
last = i = i + 1
continue
elif text[i:i+2] in mapping:
if last is not None and last != i:
yield line.clone(kind=mapping[None], line=text[last:i])
last = None
start = i = i + 2
continue
i += 1
if last < len(text):
yield line.clone(kind=mapping[None], line=text[last:]) | Chunkify and "tag" a block of text into plain text and code sections.
The first delimiter is blank to represent text sections, and keep the indexes aligned with the tags.
Values are yielded in the form (tag, text). | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/util.py#L253-L294 | [
"def clone(self, **kw):\n\tvalues = dict(\n\t\t\tnumber = self.number,\n\t\t\tline = self.line,\n\t\t\tscope = self.scope,\n\t\t\tkind = self.kind,\n\t\t)\n\n\tvalues.update(kw)\n\n\tinstance = self.__class__(**values)\n\n\treturn instance\n"
] | # encoding: utf-8
from __future__ import unicode_literals
"""Convienent utilities."""
# ## Imports
import sys
from codecs import iterencode
from inspect import isfunction, isclass
from operator import methodcaller
from collections import deque, namedtuple, Sized, Iterable
from pkg_resources import iter_entry_points
from xml.sax.saxutils import quoteattr
try: # pragma: no cover
from html.parser import HTMLParser
except ImportError: # pragma: no cover
from HTMLParser import HTMLParser
# ## Python Cross-Compatibility
#
# These allow us to detect relevant version differences for code generation, and overcome some of the minor
# differences in labels between Python 2 and Python 3 compatible runtimes.
#
# The differences, in practice, are minor, and are easily overcome through a small block of version-dependent
# code. Handily, even built-in labels are not sacrosanct; they can be easily assigned to and re-mapped.
#
try: # Python 2
from types import StringTypes as stringy
try:
from cStringIO import StringIO
except: # pragma: no cover
from StringIO import StringIO # This never really happens. Still, nice to be defensive.
bytes = str
str = unicode
py = 2
reduce = reduce
except: # Python 3
from io import StringIO
stringy = str
bytes = bytes
str = str
py = 3
# There are some additional complications for the Pypy runtime.
try:
from sys import pypy_version_info
pypy = True
except ImportError:
pypy = False
# ## Type Definitions
# A tuple representing a single step of fancy iteration.
Iteration = namedtuple('Iteration', ['first', 'last', 'index', 'total', 'value'])
# ## Simple Utility Functions
def stream(input, encoding=None, errors='strict'):
	"""Safely iterate a template generator, ignoring ``None`` values and optionally stream encoding.

	Used internally by ``cinje.flatten``; this also makes a template generator directly usable as a WSGI body.
	"""
	# Drop None (empty wrappers) and empty chunks up front.
	chunks = (piece for piece in input if piece)

	if not encoding:
		return chunks

	# Encode lazily, chunk by chunk, when a target encoding was requested.
	return iterencode(chunks, encoding, errors=errors)


def flatten(input, file=None, encoding=None, errors='strict'):
	"""Return a flattened representation of a cinje chunk stream.

	With no ``file`` argument the result is returned as a single string: binary when an
	``encoding`` is given, native unicode otherwise.  With a ``file`` the chunks are written
	iteratively via repeated ``file.write()`` calls and the amount of data written (characters
	or bytes, matching the presence of ``encoding``) is returned.  ``errors`` is passed
	through to the encoder.  The streaming IO containers in the ``io`` module (and the
	``tempfile`` classes) pair well with the ``file`` mode.
	"""
	chunks = stream(input, encoding, errors)

	if file is not None:
		# Streaming mode: write each chunk and report the total amount written.
		written = 0

		for piece in chunks:
			file.write(piece)
			written += len(piece)

		return written

	# In-memory mode: join into a single binary or unicode string.
	joiner = b'' if encoding else ''
	return joiner.join(chunks)
def fragment(string, name="anonymous", **context):
	"""Translate a template fragment into a callable function.

	**Note:** Use of this function is discouraged everywhere except tests, as no caching is implemented at this time.

	Only one function may be declared, either manually, or automatically. If automatic definition is chosen the
	resulting function takes no arguments. Additional keyword arguments are passed through as global variables.
	"""
	# Accept bytes as well as text source.
	if isinstance(string, bytes):
		string = string.decode('utf-8')

	# If the fragment already declares a function, translate it as-is; otherwise wrap the
	# whole fragment in an automatically generated function named *name*.
	if ": def" in string or ":def" in string:
		# The 'cinje' text codec (registered on package import) performs the translation.
		code = string.encode('utf8').decode('cinje')
		name = None  # The real name must be discovered from the translated module below.
	else:
		code = ": def {name}\n\n{string}".format(
				name = name,
				string = string,
			).encode('utf8').decode('cinje')

	# Execute the generated Python source, with the extra keyword arguments as its globals.
	environ = dict(context)
	exec(code, environ)

	if name is None:  # We need to dig it out of the `__tmpl__` list.
		# Translated modules record declared template names in ``__tmpl__``;
		# guarded by __debug__, so this check disappears under ``python -O``.
		if __debug__ and not environ.get('__tmpl__', None):
			raise RuntimeError("Template fragment does not contain a function: " + repr(environ.get('__tmpl__', None)) + \
					"\n\n" + code)

		return environ[environ['__tmpl__'][-1]]  # Super secret sauce: you _can_ define more than one function...

	return environ[name]
def interruptable(iterable):
	"""Allow easy catching of a generator interrupting operation when using "yield from".

	Items are yielded until the first ``None`` is encountered, which stops iteration.
	Other falsy values (``0``, ``False``, ``''``) pass through untouched.
	"""
	for item in iterable:
		if item is None:
			break  # A None sentinel interrupts iteration.

		yield item
def iterate(obj):
	"""Loop over an iterable and track progress, including first and last state.

	On each iteration yield an ``Iteration`` named tuple carrying the first and last flags, the
	current element index, the total iterable length (when it can be determined), and the value,
	in that order.  The tuples unpack safely:

		for first, last, index, total, value in iterate(something):
			pass

	Even when the length of the iterable can't be reliably determined, the "last" state of the
	final iteration is still captured via one-element lookahead, so this works with generators.
	The bookkeeping is roughly an order of magnitude slower than simple enumeration, so only use
	it where you actually need the extra state; use ``enumerate()`` elsewhere.
	"""
	# Bind hot names to true locals.  (The previous ``global next, Iteration`` declarations did
	# the opposite of their stated intent: they re-bound the names at *module* scope, shadowing
	# the ``next`` builtin and saving no lookups at all.)
	_next = next
	_Iteration = Iteration

	# Sized containers can report their length up front; generators cannot.
	total = len(obj) if isinstance(obj, Sized) else None
	iterator = iter(obj)
	first = True
	index = 0

	try:
		value = _next(iterator)
	except StopIteration:
		return  # Empty iterable: yield nothing at all.

	while True:
		# Look ahead one element so the final iteration can be flagged as last.
		try:
			following = _next(iterator)
			last = False
		except StopIteration:
			following = None
			last = True

		yield _Iteration(first, last, index, total, value)

		if last:
			return

		value = following
		index += 1
		first = False
def xmlargs(_source=None, **values):
	"""Build a string of XML/HTML attributes from keyword arguments.

	:param _source: optional mapping whose entries override the keyword arguments.
	:param values: default attribute values; keys are normalized (trailing ``_`` stripped,
		``__`` becomes ``:``, ``_`` becomes ``-``) and emitted in sorted order.
	:returns: a space-prefixed attribute string wrapped by ``bless``, or ``''`` when no
		attributes survive filtering.
	"""
	from cinje.helpers import bless  # Imported lazily, presumably to avoid a circular import — TODO confirm.

	# Bind hot names to true locals.  (The previous ``global str, Iterable, stringy``
	# declarations re-bound the names at *module* scope instead, saving nothing.)
	_str = str
	_Iterable = Iterable
	_stringy = stringy
	ejoin = " ".join
	parts = []
	pappend = parts.append

	# If a data source is provided it overrides the keyword arguments, which are treated as defaults.
	if _source:
		values.update(_source)

	for k in sorted(values):
		# We technically allow non-string keys; they are converted to strings first.
		key = _str(k).rstrip('_').replace('__', ':').replace('_', '-')
		value = values[k]

		# Skip private ('_'-prefixed) keys and falsy values other than zero.
		# (False == 0 in Python, so the test is phrased to keep 0 while dropping False/None/''/[].)
		if k[0] == '_' or (not value and (value is False or value != 0)):
			continue

		if value is True:  # Explicitly True: emit a bare (value-less) attribute.
			pappend(key)
			continue

		# Non-string iterables (lists, sets, tuples, ...) become space-separated strings.
		if isinstance(value, _Iterable) and not isinstance(value, _stringy):
			value = ejoin(_str(i) for i in value)

		pappend(key + "=" + quoteattr(_str(value)))

	return bless(" " + ejoin(parts)) if parts else ''
def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):
	"""Chunkify and "tag" a block of text into plain text and code sections.

	The first delimiter is blank to represent text sections, and keep the indexes aligned with the tags.

	Values are yielded in the form (tag, text).
	"""
	# NOTE: ``mapping`` is a mutable default argument; it is only read, never
	# mutated, so this is safe — but do not add mutation without copying first.
	skipping = 0  # How many closing parenthesis will we need to skip?
	start = None  # Starting position of current match.
	last = 0  # Index just past the end of the previously emitted chunk; None while inside an expression.
	i = 0  # Current scan position.
	text = line.line

	while i < len(text):
		if start is not None:  # Currently inside a ${...}-style expression.
			if text[i] == '{':
				skipping += 1  # Nested opening brace: its closer must not end the chunk.
			elif text[i] == '}':
				if skipping:
					skipping -= 1
				else:
					# End of the expression: emit it tagged by its two-character opening prefix.
					yield line.clone(kind=mapping[text[start-2:start]], line=text[start:i])
					start = None
					last = i = i + 1
					continue
		elif text[i:i+2] in mapping:  # An opening delimiter such as '${' or '#{'.
			if last is not None and last != i:
				# Emit the plain-text run preceding the delimiter first.
				yield line.clone(kind=mapping[None], line=text[last:i])

			last = None
			start = i = i + 2  # Skip past the two-character delimiter.
			continue

		i += 1

	# Trailing plain text after the final expression, if any.
	# NOTE(review): if the text ends inside an unterminated expression, ``last``
	# is still None here and ``None < len(text)`` raises TypeError on Python 3 —
	# confirm whether unterminated delimiters can reach this point.
	if last < len(text):
		yield line.clone(kind=mapping[None], line=text[last:])
def ensure_buffer(context, separate=True):
	"""Yield the preamble lines that initialize the text buffer, at most once per context.

	:param context: the translation Context whose ``flag`` set tracks buffer state.
	:param separate: when True, emit a leading blank line before the preamble.
	"""
	# Bail out if the buffer preamble was already emitted ('text' flagged) or if
	# this context does not use buffered output at all ('buffer' flag absent).
	if 'text' in context.flag or 'buffer' not in context.flag:
		return

	if separate: yield Line(0, "")  # Optional blank separator before the preamble.

	yield Line(0, "_buffer = []")

	if not pypy:
		# Pre-bind the bound methods on CPython; presumably skipped on pypy
		# because the JIT removes the benefit — TODO confirm.
		yield Line(0, "__w, __ws = _buffer.extend, _buffer.append")

	yield Line(0, "")

	context.flag.add('text')  # Mark the buffer as initialized for this context.
# ## Common Classes
class Line(object):
	"""A rich description for a line of input, allowing for annotation."""

	__slots__ = ('number', 'line', 'scope', 'kind', 'continued')

	def __init__(self, number, line, scope=None, kind=None):
		"""Capture one line of template source.

		:param number: 1-based source line number (0 for synthesized lines).
		:param line: the text of the line; bytes are decoded as UTF-8.
		:param scope: indentation depth, or None to inherit the context scope later.
		:param kind: 'text', 'code', or 'comment'; classified automatically when omitted.
		"""
		if isinstance(line, bytes):
			line = line.decode('utf-8')

		self.number = number
		self.line = line
		self.scope = scope
		self.kind = kind
		# A trailing backslash marks an explicit line continuation.
		self.continued = self.stripped.endswith('\\')

		if not kind: self.process()

		super(Line, self).__init__()

	def process(self):
		# Classify the line: '#...' is a comment (but '#{' opens an expression),
		# a leading ':' marks template code (and is stripped off), anything else is text.
		if self.stripped.startswith('#') and not self.stripped.startswith('#{'):
			self.kind = 'comment'
		elif self.stripped.startswith(':'):
			self.kind = 'code'
			self.line = self.stripped[1:].lstrip()
		else:
			self.kind = 'text'

	@property
	def stripped(self):
		"""The line text with surrounding whitespace removed."""
		return self.line.strip()

	@property
	def partitioned(self):
		"""The first word and the remainder of the line, as a 2-tuple."""
		prefix, _, remainder = self.stripped.partition(' ')
		return prefix.rstrip(), remainder.lstrip()

	def __repr__(self):
		return '{0.__class__.__name__}({0.number}, {0.kind}, "{0.stripped}")'.format(self)

	def __bytes__(self):
		return str(self).encode('utf8')

	def __str__(self):
		# Re-indent with tabs according to the recorded scope, when one is set.
		if self.scope is None:
			return self.line

		return '\t' * self.scope + self.line.lstrip()

	if py == 2:  # pragma: no cover
		# On Python 2 the text method must be __unicode__ and __str__ must return bytes.
		__unicode__ = __str__
		__str__ = __bytes__
		del __bytes__

	def clone(self, **kw):
		"""Return a copy of this line with the given attributes replaced.

		``continued`` is recomputed by ``__init__``; passing ``kind`` skips re-classification.
		"""
		values = dict(
				number = self.number,
				line = self.line,
				scope = self.scope,
				kind = self.kind,
			)

		values.update(kw)

		instance = self.__class__(**values)

		return instance
class Lines(object):
	"""Iterate input lines of source, with the ability to push lines back."""

	__slots__ = ['Line', 'source', 'buffer']

	def __init__(self, input=None, Line=Line):
		"""Wrap a file-like object or string as a pushback-capable line iterator.

		:param input: anything with ``readlines``, a string split on newlines, or None for an empty buffer.
		:param Line: the line factory class; defaults to cinje's Line.
		"""
		self.Line = Line

		if input is None:
			self.source = None
			self.buffer = deque()
		elif hasattr(input, 'readlines'):
			self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.readlines()))
			self.buffer = deque(self.source)
		else:
			self.source = list(self.Line(i + 1, j) for i, j in enumerate(input.split('\n')))
			self.buffer = deque(self.source)

		super(Lines, self).__init__()

	@property
	def count(self):
		"""The number of lines remaining in the buffer."""
		return len(self.buffer)

	def __len__(self):
		return self.count

	def __repr__(self):
		return 'Lines({0.count})'.format(self)

	def __iter__(self):
		return self

	def __next__(self):  # Python 3 iterator protocol; delegates to next().
		return self.next()

	def __str__(self):
		# NOTE(review): this is consuming — joining iterates (and empties) the buffer.
		return "\n".join(str(i) for i in self)

	def next(self):
		"""Pop and return the next buffered line; raises StopIteration when exhausted."""
		if not self.buffer:
			raise StopIteration()

		return self.buffer.popleft()

	def peek(self):
		"""Return the next line without consuming it, or None when exhausted."""
		return self.buffer[0] if self.buffer else None

	def push(self, *lines):
		"""Prepend lines to the buffer, preserving their given order.

		Bare strings are wrapped in ``self.Line``.  NOTE(review): the wrapper's line
		number is taken lazily from the current head of the buffer, which shifts as
		``extendleft`` inserts each item — the numbering of multiple pushed strings
		depends on this evaluation order; do not eagerly materialize the generator.
		"""
		self.buffer.extendleft((i if isinstance(i, self.Line) else self.Line(self.buffer[0].number if self.buffer else 0, i)) for i in reversed(lines))

	def reset(self):
		"""Restore the buffer to the full original source."""
		self.buffer = deque(self.source)

	def append(self, *lines):
		"""Add lines to the end of the buffer, wrapping bare strings in ``self.Line``."""
		self.buffer.extend((i if isinstance(i, self.Line) else self.Line(self.buffer[-1].number if self.buffer else 0, i)) for i in lines)
class Context(object):
	"""The processing context for translating cinje source into Python source.

	This is the primary entry point for translation.
	"""

	__slots__ = ('input', 'scope', 'flag', '_handler', 'templates', 'handlers', 'mapping')

	def __init__(self, input):
		"""Wrap template source (text, or UTF-8 encoded bytes) for translation."""
		self.input = Lines(input.decode('utf8') if isinstance(input, bytes) else input)
		self.scope = 0  # Current indentation depth for emitted lines.
		self.flag = set()  # Free-form state flags shared between the transformation handlers.
		self._handler = []  # Instantiated handlers, populated by prepare().
		self.handlers = []  # Handler classes discovered below via entry points.
		self.templates = []  # Populated elsewhere; presumably names of declared templates — TODO confirm.
		self.mapping = None  # Reversed generated-line -> source-line mapping; see the notes in `stream`.

		# Discover translation handlers registered under the 'cinje.translator' entry point.
		for translator in map(methodcaller('load'), iter_entry_points('cinje.translator')):
			self.handlers.append(translator)

	def __repr__(self):
		return "Context({!r}, {}, {})".format(self.input, self.scope, self.flag)

	def prepare(self):
		"""Prepare the ordered list of transformers and reset context state to initial."""
		self.scope = 0
		self.mapping = deque([0])
		# One instance of each registered handler class, ordered by declared priority.
		self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)]

	@property
	def stream(self):
		"""The workhorse of cinje: transform input lines and emit output lines.

		After constructing an instance with a set of input lines iterate this property to generate the template.
		"""
		# 'init' not yet flagged: treat this as the root invocation and reset state.
		# (The flag is presumably set by a handler for nested use — TODO confirm.)
		if 'init' not in self.flag:
			root = True
			self.prepare()
		else:
			root = False

		# Track which lines were generated in response to which lines of source code.
		# The end result is that there is one entry here for every line emitted, each integer representing the source
		# line number that triggered it. If any lines are returned with missing line numbers, they're inferred from
		# the last entry already in the list.
		# Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. this updates
		# the head of a linked list; the whole thing needs to be reversed to make sense.
		mapping = self.mapping

		for line in self.input:
			handler = self.classify(line)

			if line.kind == 'code' and line.stripped == 'end':  # Exit the current child scope.
				return

			assert handler, "Unable to identify handler for line; this should be impossible!"

			self.input.push(line)  # Put it back so it can be consumed by the handler.

			for line in handler(self):  # This re-indents the code to match, if missing explicit scope.
				if root: mapping.appendleft(line.number or mapping[0])  # Track source line number.

				if line.scope is None:
					line = line.clone(scope=self.scope)

				yield line

	def classify(self, line):
		"""Identify the correct handler for a given line of input."""
		for handler in self._handler:
			if handler.match(self, line):
				return handler
class Pipe(object):
	"""An object representing a pipe-able callable, optionally with preserved arguments.

	Construct custom subclasses (define a method named "callable") or use it as a decorator:

		@Pipe
		def s(text):
			return str(text)
	"""

	__slots__ = ('callable', 'args', 'kwargs')

	def __init__(self, callable, *args, **kw):
		super(Pipe, self).__init__()

		self.callable = callable
		self.args = args or ()  # Preserved positional arguments, prepended on use.
		self.kwargs = kw or {}  # Preserved keyword arguments.

	def __repr__(self):
		positional = (', ' + ', '.join(repr(arg) for arg in self.args)) if self.args else ''
		keyword = (', ' + ', '.join("{0}={1!r}".format(key, val) for key, val in self.kwargs.items())) if self.kwargs else ''

		return "Pipe({self.callable!r}{0}{1})".format(positional, keyword, self=self)

	def __ror__(self, other):
		"""The main machinery of the Pipe: invoke the callable with the piped value appended to the preserved arguments."""
		arguments = self.args + (other, )
		return self.callable(*arguments, **self.kwargs)

	def __call__(self, *args, **kw):
		"""Return a mutated copy with the preserved args and kwargs replaced.

		This allows usage with arguments:

			"Hello!" | encode('utf8')

		And easy construction of reusable, partially-applied pipes:

			utf8 = encode('utf8')
			"Hello!" | utf8
		"""
		return self.__class__(self.callable, *args, **kw)
# ## Tag Stripper
class MLStripper(HTMLParser):
	"""An HTMLParser that discards markup and accumulates only character data.

	``HTMLParser.__init__`` is deliberately not called: the attributes it would
	set are assigned by hand so the class also works with the Python 2 parser,
	which is an old-style class.
	"""

	def __init__(self):
		self.reset()  # Initialize base parser state without calling __init__.
		self.strict = False  # Accepted for 3.2/3.3 compatibility; otherwise unused.
		self.convert_charrefs = True  # Decode entities so they survive stripping as text.
		self.fed = []  # Accumulated character-data chunks.

	def handle_data(self, d):
		self.fed.append(d)

	def get_data(self):
		return ''.join(self.fed)


def strip_tags(html):
	"""Return *html* with all markup removed, preserving only character data."""
	s = MLStripper()
	s.feed(html)
	# Fix: without close() the parser may hold trailing text (or pending
	# character references) in its internal buffer, silently dropping it.
	s.close()
	return s.get_data()
|
marrow/cinje | cinje/util.py | Context.prepare | python | def prepare(self):
self.scope = 0
self.mapping = deque([0])
self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)] | Prepare the ordered list of transformers and reset context state to initial. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/util.py#L468-L472 | null | class Context(object):
"""The processing context for translating cinje source into Python source.
This is the primary entry point for translation.
"""
__slots__ = ('input', 'scope', 'flag', '_handler', 'templates', 'handlers', 'mapping')
def __init__(self, input):
self.input = Lines(input.decode('utf8') if isinstance(input, bytes) else input)
self.scope = 0
self.flag = set()
self._handler = []
self.handlers = []
self.templates = []
self.mapping = None
for translator in map(methodcaller('load'), iter_entry_points('cinje.translator')):
self.handlers.append(translator)
def __repr__(self):
return "Context({!r}, {}, {})".format(self.input, self.scope, self.flag)
@property
def stream(self):
"""The workhorse of cinje: transform input lines and emit output lines.
After constructing an instance with a set of input lines iterate this property to generate the template.
"""
if 'init' not in self.flag:
root = True
self.prepare()
else:
root = False
# Track which lines were generated in response to which lines of source code.
# The end result is that there is one entry here for every line emitted, each integer representing the source
# line number that triggered it. If any lines are returned with missing line numbers, they're inferred from
# the last entry already in the list.
# Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. this updates
# the head of a linked list; the whole thing needs to be reversed to make sense.
mapping = self.mapping
for line in self.input:
handler = self.classify(line)
if line.kind == 'code' and line.stripped == 'end': # Exit the current child scope.
return
assert handler, "Unable to identify handler for line; this should be impossible!"
self.input.push(line) # Put it back so it can be consumed by the handler.
for line in handler(self): # This re-indents the code to match, if missing explicit scope.
if root: mapping.appendleft(line.number or mapping[0]) # Track source line number.
if line.scope is None:
line = line.clone(scope=self.scope)
yield line
def classify(self, line):
"""Identify the correct handler for a given line of input."""
for handler in self._handler:
if handler.match(self, line):
return handler
|
marrow/cinje | cinje/util.py | Context.stream | python | def stream(self):
if 'init' not in self.flag:
root = True
self.prepare()
else:
root = False
# Track which lines were generated in response to which lines of source code.
# The end result is that there is one entry here for every line emitted, each integer representing the source
# line number that triggered it. If any lines are returned with missing line numbers, they're inferred from
# the last entry already in the list.
# Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. this updates
# the head of a linked list; the whole thing needs to be reversed to make sense.
mapping = self.mapping
for line in self.input:
handler = self.classify(line)
if line.kind == 'code' and line.stripped == 'end': # Exit the current child scope.
return
assert handler, "Unable to identify handler for line; this should be impossible!"
self.input.push(line) # Put it back so it can be consumed by the handler.
for line in handler(self): # This re-indents the code to match, if missing explicit scope.
if root: mapping.appendleft(line.number or mapping[0]) # Track source line number.
if line.scope is None:
line = line.clone(scope=self.scope)
yield line | The workhorse of cinje: transform input lines and emit output lines.
After constructing an instance with a set of input lines iterate this property to generate the template. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/util.py#L475-L511 | [
"def prepare(self):\n\t\"\"\"Prepare the ordered list of transformers and reset context state to initial.\"\"\"\n\tself.scope = 0\n\tself.mapping = deque([0])\n\tself._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)]\n",
"def classify(self, line):\n\t\"\"\"Identify the correct handler for a given line of input.\"\"\"\n\n\tfor handler in self._handler:\n\t\tif handler.match(self, line):\n\t\t\treturn handler\n"
] | class Context(object):
"""The processing context for translating cinje source into Python source.
This is the primary entry point for translation.
"""
__slots__ = ('input', 'scope', 'flag', '_handler', 'templates', 'handlers', 'mapping')
def __init__(self, input):
    """Wrap the raw template *input* (text or UTF-8 bytes) for translation."""

    # Decode bytes up front so downstream processing always sees text lines.
    self.input = Lines(input.decode('utf8') if isinstance(input, bytes) else input)
    self.scope = 0  # Current indentation depth of emitted code.
    self.flag = set()  # Translator state flags, e.g. 'init', 'text', 'dirty'.
    self._handler = []  # Instantiated handlers; populated by prepare().
    self.handlers = []  # Handler classes discovered below.
    self.templates = []  # Names of template functions generated so far.
    self.mapping = None  # Output-to-source line mapping; built in prepare().

    # Discover registered line translators via the 'cinje.translator' entry point.
    for translator in map(methodcaller('load'), iter_entry_points('cinje.translator')):
        self.handlers.append(translator)
def __repr__(self):
    # Debugging aid: shows the wrapped input, current scope depth, and flags.
    return "Context({!r}, {}, {})".format(self.input, self.scope, self.flag)
def prepare(self):
    """Reset translation state and instantiate handlers in priority order."""

    self.scope = 0
    self.mapping = deque([0])

    ordered = sorted(self.handlers, key=lambda candidate: candidate.priority)
    self._handler = [handler_class() for handler_class in ordered]
@property
def stream(self):
    """The workhorse of cinje: transform input lines and emit output lines.

    After constructing an instance with a set of input lines iterate this property to generate the template.
    """
    # Only the first (root) invocation prepares state; nested handlers
    # re-enter this property with 'init' already flagged.
    if 'init' not in self.flag:
        root = True
        self.prepare()
    else:
        root = False

    # Track which lines were generated in response to which lines of source code.
    # The end result is that there is one entry here for every line emitted, each integer representing the source
    # line number that triggered it.  If any lines are returned with missing line numbers, they're inferred from
    # the last entry already in the list.
    # Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge (updating the
    # head of a linked list); the whole thing needs to be reversed to make sense.
    mapping = self.mapping

    for line in self.input:
        handler = self.classify(line)

        if line.kind == 'code' and line.stripped == 'end':  # Exit the current child scope.
            return

        assert handler, "Unable to identify handler for line; this should be impossible!"

        self.input.push(line)  # Put it back so it can be consumed by the handler.

        for line in handler(self):  # This re-indents the code to match, if missing explicit scope.
            if root: mapping.appendleft(line.number or mapping[0])  # Track source line number.

            if line.scope is None:
                line = line.clone(scope=self.scope)

            yield line
def classify(self, line):
    """Return the first registered handler that claims *line*, or None."""

    return next(
            (candidate for candidate in self._handler if candidate.match(self, line)),
            None,
        )
|
marrow/cinje | cinje/util.py | Context.classify | python | def classify(self, line):
for handler in self._handler:
if handler.match(self, line):
return handler | Identify the correct handler for a given line of input. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/util.py#L513-L518 | null | class Context(object):
"""The processing context for translating cinje source into Python source.
This is the primary entry point for translation.
"""
__slots__ = ('input', 'scope', 'flag', '_handler', 'templates', 'handlers', 'mapping')
def __init__(self, input):
self.input = Lines(input.decode('utf8') if isinstance(input, bytes) else input)
self.scope = 0
self.flag = set()
self._handler = []
self.handlers = []
self.templates = []
self.mapping = None
for translator in map(methodcaller('load'), iter_entry_points('cinje.translator')):
self.handlers.append(translator)
def __repr__(self):
return "Context({!r}, {}, {})".format(self.input, self.scope, self.flag)
def prepare(self):
"""Prepare the ordered list of transformers and reset context state to initial."""
self.scope = 0
self.mapping = deque([0])
self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)]
@property
def stream(self):
"""The workhorse of cinje: transform input lines and emit output lines.
After constructing an instance with a set of input lines iterate this property to generate the template.
"""
if 'init' not in self.flag:
root = True
self.prepare()
else:
root = False
# Track which lines were generated in response to which lines of source code.
# The end result is that there is one entry here for every line emitted, each integer representing the source
# line number that triggered it. If any lines are returned with missing line numbers, they're inferred from
# the last entry already in the list.
# Fun fact: this list is backwards; we optimize by using a deque and appending to the left edge. this updates
# the head of a linked list; the whole thing needs to be reversed to make sense.
mapping = self.mapping
for line in self.input:
handler = self.classify(line)
if line.kind == 'code' and line.stripped == 'end': # Exit the current child scope.
return
assert handler, "Unable to identify handler for line; this should be impossible!"
self.input.push(line) # Put it back so it can be consumed by the handler.
for line in handler(self): # This re-indents the code to match, if missing explicit scope.
if root: mapping.appendleft(line.number or mapping[0]) # Track source line number.
if line.scope is None:
line = line.clone(scope=self.scope)
yield line
def classify(self, line):
"""Identify the correct handler for a given line of input."""
for handler in self._handler:
if handler.match(self, line):
return handler
|
marrow/cinje | cinje/block/module.py | red | python | def red(numbers):
line = 0
deltas = []
for value in numbers:
deltas.append(value - line)
line = value
return b64encode(compress(b''.join(chr(i).encode('latin1') for i in deltas))).decode('latin1') | Encode the deltas to reduce entropy. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/block/module.py#L12-L22 | null | # encoding: utf-8
from __future__ import unicode_literals
from zlib import compress
from base64 import b64encode
from collections import deque
from ..util import py, Line
def red(numbers):
    """Encode the deltas to reduce entropy."""

    previous = 0
    deltas = []

    # Store successive differences; for (mostly) ascending line numbers these
    # are small values which compress far better than the absolute numbers.
    for current in numbers:
        deltas.append(current - previous)
        previous = current

    packed = b''.join(chr(delta).encode('latin1') for delta in deltas)

    return b64encode(compress(packed)).decode('latin1')
class Module(object):
    """Module handler.

    This is the initial scope, and the highest priority to ensure its processing of the preamble happens first.
    """

    priority = -100

    def match(self, context, line):
        # Claim every line until module processing has begun ('init' is set).
        return 'init' not in context.flag

    def __call__(self, context):
        input = context.input

        context.flag.add('init')
        context.flag.add('buffer')

        imported = False  # NOTE(review): never read below; looks vestigial.

        # Pass through leading comment/blank preamble lines verbatim, except
        # "##" translator comments and encoding declarations.
        for line in input:
            if not line.stripped or line.stripped[0] == '#':
                if not line.stripped.startswith('##') and 'coding:' not in line.stripped:
                    yield line
                continue

            input.push(line)  # We're out of the preamble, so put that line back and stop.
            break

        # After any existing preamble, but before other imports, we inject our own.
        if py == 2:
            yield Line(0, 'from __future__ import unicode_literals')
            yield Line(0, '')

        yield Line(0, 'import cinje')
        yield Line(0, 'from cinje.helpers import escape as _escape, bless as _bless, iterate, xmlargs as _args, _interrupt, _json')
        yield Line(0, '')
        yield Line(0, '')
        yield Line(0, '__tmpl__ = [] # Exported template functions.')
        yield Line(0, '')

        # Translate the remainder of the module body.
        for i in context.stream:
            yield i

        # Export the names of any template functions generated at module scope.
        if context.templates:
            yield Line(0, '')
            yield Line(0, '__tmpl__.extend(["' + '", "'.join(context.templates) + '"])')
            context.templates = []

        # Snapshot the line number mapping.
        mapping = deque(context.mapping)
        mapping.reverse()

        yield Line(0, '')

        # Plain mapping only in development; the compressed form always.
        if __debug__:
            yield Line(0, '__mapping__ = [' + ','.join(str(i) for i in mapping) + ']')

        yield Line(0, '__gzmapping__ = b"' + red(mapping).replace('"', '\"') + '"')

        context.flag.remove('init')
|
marrow/cinje | cinje/block/generic.py | Generic.match | python | def match(self, context, line):
return line.kind == 'code' and line.partitioned[0] in self._both | Match code lines prefixed with a variety of keywords. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/block/generic.py#L56-L59 | null | class Generic(object):
"""Block-level passthrough. Blocks must be terminated by ": end" markers.
Support is included for chains of blocks of the expected types, without requiring ": end" markers between them.
This block-level transformer handles: "if", "elif", and "else" conditional scopes; "while" and "for" loops,
including the optional "else" clause to "for"; "with" context managers; and the exception management machinery of
"try", "except", "finally", and "else". (Any given intermediary component is optional, of course.)
Syntax::
: if ...
: elif ...
: else
: end
: while ...
: end
: for ...
: else
: end
: with ...
: end
: try
: except ...
: finally
: else
: end
Single-line conditionals and loops are not allowed, and the declaration should not include a trailing colon.
"""
priority = 50
_keywords = (
'if',
'while',
'for',
'with',
'try',
)
_continuation = (
'elif',
'else',
'except',
'finally',
)
_both = _keywords + _continuation
def match(self, context, line):
    """Match code lines prefixed with a variety of keywords."""
    # _both covers opening keywords (if/while/for/with/try) as well as
    # continuations (elif/else/except/finally).
    return line.kind == 'code' and line.partitioned[0] in self._both
def __call__(self, context):
    """Process conditional declarations."""

    input = context.input

    try:
        declaration = input.next()
    except StopIteration:
        return

    stripped = declaration.stripped
    prefix, _ = declaration.partitioned

    if prefix in self._continuation:  # We're handling an alternate section...
        # Continuations (elif/else/except/finally) dedent to the parent scope.
        yield declaration.clone(line=stripped + ':', scope=context.scope - 1)
        return  # We're done here.

    yield declaration.clone(line=stripped + ':')

    context.scope += 1

    # Translate the nested block until its ": end" marker terminates it.
    for i in context.stream:
        yield i

    context.scope -= 1
|
marrow/cinje | cinje/inline/flush.py | flush_template | python | def flush_template(context, declaration=None, reconstruct=True):
if declaration is None:
declaration = Line(0, '')
if {'text', 'dirty'}.issubset(context.flag):
yield declaration.clone(line='yield "".join(_buffer)')
context.flag.remove('text') # This will force a new buffer to be constructed.
context.flag.remove('dirty')
if reconstruct:
for i in ensure_buffer(context):
yield i
if declaration.stripped == 'yield':
yield declaration | Emit the code needed to flush the buffer.
Will only emit the yield and clear if the buffer is known to be dirty. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/inline/flush.py#L6-L26 | [
"def ensure_buffer(context, separate=True):\n\tif 'text' in context.flag or 'buffer' not in context.flag:\n\t\treturn\n\n\tif separate: yield Line(0, \"\")\n\tyield Line(0, \"_buffer = []\")\n\n\tif not pypy:\n\t\tyield Line(0, \"__w, __ws = _buffer.extend, _buffer.append\")\n\n\tyield Line(0, \"\")\n\n\tcontext.flag.add('text')\n",
"def clone(self, **kw):\n\tvalues = dict(\n\t\t\tnumber = self.number,\n\t\t\tline = self.line,\n\t\t\tscope = self.scope,\n\t\t\tkind = self.kind,\n\t\t)\n\n\tvalues.update(kw)\n\n\tinstance = self.__class__(**values)\n\n\treturn instance\n"
] | # encoding: utf-8
from ..util import Line, ensure_buffer
def flush_template(context, declaration=None, reconstruct=True):
    """Emit the code needed to flush the buffer.

    Will only emit the yield and clear if the buffer is known to be dirty.
    """

    if declaration is None:
        declaration = Line(0, '')

    # Flush only when a text buffer exists *and* content was written to it.
    if 'text' in context.flag and 'dirty' in context.flag:
        yield declaration.clone(line='yield "".join(_buffer)')

        # Dropping these flags forces construction of a fresh buffer later.
        context.flag.discard('text')
        context.flag.discard('dirty')

    if reconstruct:
        for generated in ensure_buffer(context):
            yield generated

    if declaration.stripped == 'yield':
        yield declaration
class Flush(object):
    """Allow mid-stream flushing of the template buffer.

    Matches the ": flush" command (and bare ": yield" for wrapper templates).
    Flushing sections of a page to the client early enables pre-loading of
    CSS, JavaScript, images, etc., and a more responsive experience during
    longer server-side operations.

    Only emits the yield-and-clear when a buffer exists and is known to be
    "dirty" by the translator; unlike most buffer-related commands this will
    not create a missing buffer.
    """

    priority = 25

    def match(self, context, line):
        """Match a bare "flush" (or "yield") command line."""
        return line.kind == 'code' and line.stripped in {"flush", "yield"}

    def __call__(self, context):
        try:
            declaration = context.input.next()
        except StopIteration:
            return

        return flush_template(context, declaration)
|
marrow/cinje | cinje/block/function.py | Function._optimize | python | def _optimize(self, context, argspec):
argspec = argspec.strip()
optimization = ", ".join(i + "=" + i for i in self.OPTIMIZE)
split = None
prefix = ''
suffix = ''
if argspec:
matches = list(self.STARARGS.finditer(argspec))
if matches:
split = matches[-1].span()[1] # Inject after, a la "*args>_<", as we're positional-only arguments.
if split != len(argspec):
prefix = ', ' if argspec[split] == ',' else ''
suffix = '' if argspec[split] == ',' else ', '
else: # Ok, we can do this a different way…
matches = list(self.STARSTARARGS.finditer(argspec))
prefix = ', *, '
suffix = ', '
if matches:
split = matches[-1].span()[0] # Inject before, a la ">_<**kwargs". We're positional-only arguments.
if split == 0:
prefix = '*, '
else:
suffix = ''
else:
split = len(argspec)
suffix = ''
else:
prefix = '*, '
if split is None:
return prefix + optimization + suffix
return argspec[:split] + prefix + optimization + suffix + argspec[split:] | Inject speedup shortcut bindings into the argument specification for a function.
This assigns these labels to the local scope, avoiding a cascade through to globals(), saving time.
This also has some unfortunate side-effects for using these sentinels in argument default values! | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/block/function.py#L33-L76 | null | class Function(object):
"""Proces function declarations within templates.
Syntax:
: def <name> <arguments>
: end
"""
priority = -50
# Patterns to search for bare *, *args, or **kwargs declarations.
STARARGS = re.compile(r'(^|,\s*)\*([^*\s,]+|\s*,|$)')
STARSTARARGS = re.compile(r'(^|,\s*)\*\*\S+')
# Automatically add these as keyword-only scope assignments.
OPTIMIZE = ['_escape', '_bless', '_args']
def match(self, context, line):
    """Match code lines using the "def" keyword."""
    return line.kind == 'code' and line.partitioned[0] == 'def'
def _optimize(self, context, argspec):
    """Inject speedup shortcut bindings into the argument specification for a function.

    This assigns these labels to the local scope, avoiding a cascade through to globals(), saving time.

    This also has some unfortunate side-effects for using these sentinels in argument default values!
    """

    argspec = argspec.strip()
    optimization = ", ".join(i + "=" + i for i in self.OPTIMIZE)  # e.g. "_escape=_escape, _bless=_bless, _args=_args"
    split = None
    prefix = ''
    suffix = ''

    if argspec:
        matches = list(self.STARARGS.finditer(argspec))

        if matches:
            split = matches[-1].span()[1]  # Inject after, a la "*args>_<", as we're positional-only arguments.
            if split != len(argspec):
                prefix = ', ' if argspec[split] == ',' else ''
                suffix = '' if argspec[split] == ',' else ', '

        else:  # Ok, we can do this a different way…
            matches = list(self.STARSTARARGS.finditer(argspec))
            prefix = ', *, '
            suffix = ', '

            if matches:
                split = matches[-1].span()[0]  # Inject before, a la ">_<**kwargs". We're positional-only arguments.
                if split == 0:
                    prefix = '*, '
                else:
                    suffix = ''
            else:
                split = len(argspec)
                suffix = ''

    else:
        prefix = '*, '

    if split is None:
        # Only reachable for an empty argspec: the injection IS the argspec.
        return prefix + optimization + suffix

    return argspec[:split] + prefix + optimization + suffix + argspec[split:]
def __call__(self, context):
    input = context.input

    try:
        declaration = input.next()
    except StopIteration:
        return

    line = declaration.partitioned[1]  # We don't care about the "def".
    line, _, annotation = line.rpartition('->')

    if annotation and not line:  # Swap the values back.
        line = annotation
        annotation = ''

    name, _, line = line.partition(' ')  # Split the function name.

    argspec = line.rstrip()
    name = name.strip()
    annotation = annotation.lstrip()

    added_flags = []
    removed_flags = []

    # A "-> flag !flag" annotation toggles translator flags for the duration
    # of this template function; "!"-prefixed flags are temporarily removed.
    if annotation:
        for flag in (i.lower().strip() for i in annotation.split()):
            if not flag.strip('!'): continue  # Handle standalone exclamation marks.

            if flag[0] == '!':
                flag = flag[1:]
                if flag in context.flag:
                    context.flag.remove(flag)
                    removed_flags.append(flag)
                continue

            if flag not in context.flag:
                context.flag.add(flag)
                added_flags.append(flag)

    if py == 3 and not pypy:
        argspec = self._optimize(context, argspec)

    # Reconstruct the line.
    line = 'def ' + name + '(' + argspec + '):'

    # yield declaration.clone(line='@cinje.Function.prepare')  # This lets us do some work before and after runtime.
    yield declaration.clone(line=line)

    context.scope += 1

    for i in ensure_buffer(context, False):
        yield i

    # Translate the function body.
    for i in context.stream:
        yield i

    if 'using' in context.flag:  # Clean up that we were using things.
        context.flag.remove('using')

    if 'text' in context.flag:
        context.templates.append(name)

    for i in flush_template(context, reconstruct=False):  # Handle the final buffer yield if any content was generated.
        yield i

    if 'text' in context.flag:
        context.flag.remove('text')

    # Restore any flags toggled by the annotation.
    for flag in added_flags:
        if flag in context.flag:
            context.flag.remove(flag)

    for flag in removed_flags:
        if flag not in context.flag:
            context.flag.add(flag)

    context.scope -= 1
|
marrow/cinje | cinje/inline/text.py | Text.wrap | python | def wrap(scope, lines, format=BARE_FORMAT):
for line in iterate(lines):
prefix = suffix = ''
if line.first and line.last:
prefix = format.single.prefix
suffix = format.single.suffix
else:
prefix = format.multiple.prefix if line.first else format.intra.prefix
suffix = format.multiple.suffix if line.last else format.intra.suffix
yield line.value.clone(line=prefix + line.value.stripped + suffix, scope=scope + (0 if line.first else format.indent)) | Wrap a stream of lines in armour.
Takes a stream of lines, for example, the following single line:
Line(1, "Lorem ipsum dolor.")
Or the following multiple lines:
Line(1, "Lorem ipsum")
Line(2, "dolor")
Line(3, "sit amet.")
Provides a generator of wrapped lines. For a single line, the following format is utilized:
{format.single.prefix}{line.stripped}{format.single.suffix}
In the above multi-line example, the following format would be utilized:
{format.multiple.prefix}{line[1].stripped}{format.intra.suffix}
{format.intra.prefix}{line[2].stripped}{format.intra.suffix}
{format.intra.prefix}{line[3].stripped}{format.multiple.suffix} | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/inline/text.py#L36-L70 | [
"def iterate(obj):\n\t\"\"\"Loop over an iterable and track progress, including first and last state.\n\n\tOn each iteration yield an Iteration named tuple with the first and last flags, current element index, total\n\titerable length (if possible to acquire), and value, in that order.\n\n\t\tfor iteration in iterate(something):\n\t\t\titeration.value # Do something.\n\n\tYou can unpack these safely:\n\n\t\tfor first, last, index, total, value in iterate(something):\n\t\t\tpass\n\n\tIf you want to unpack the values you are iterating across, you can by wrapping the nested unpacking in parenthesis:\n\n\t\tfor first, last, index, total, (foo, bar, baz) in iterate(something):\n\t\t\tpass\n\n\tEven if the length of the iterable can't be reliably determined this function will still capture the \"last\" state\n\tof the final loop iteration. (Basically: this works with generators.)\n\n\tThis process is about 10x slower than simple enumeration on CPython 3.4, so only use it where you actually need to\n\ttrack state. Use `enumerate()` elsewhere.\n\t\"\"\"\n\n\tglobal next, Iteration\n\tnext = next\n\tIteration = Iteration\n\n\ttotal = len(obj) if isinstance(obj, Sized) else None\n\titerator = iter(obj)\n\tfirst = True\n\tlast = False\n\ti = 0\n\n\ttry:\n\t\tvalue = next(iterator)\n\texcept StopIteration:\n\t\treturn\n\n\twhile True:\n\t\ttry:\n\t\t\tnext_value = next(iterator)\n\t\texcept StopIteration:\n\t\t\tlast = True\n\n\t\tyield Iteration(first, last, i, total, value)\n\t\tif last: return\n\n\t\tvalue = next_value\n\t\ti += 1\n\t\tfirst = False\n"
] | class Text(object):
"""Identify and process contiguous blocks of template text."""
UNBUFFERED = UNBUFFERED_FORMAT
BUFFERED = BUFFERED_FORMAT
priority = -25
def match(self, context, line):
    """Identify if a line to be processed can be processed by this transformer."""
    return line.kind == 'text'  # This is common enough to short-circuit.
@staticmethod
def wrap(scope, lines, format=BARE_FORMAT):
    """Wrap a stream of lines in armour.

    Takes a stream of lines, for example, the following single line:

        Line(1, "Lorem ipsum dolor.")

    Or the following multiple lines:

        Line(1, "Lorem ipsum")
        Line(2, "dolor")
        Line(3, "sit amet.")

    Provides a generator of wrapped lines.  For a single line, the following format is utilized:

        {format.single.prefix}{line.stripped}{format.single.suffix}

    In the above multi-line example, the following format would be utilized:

        {format.multiple.prefix}{line[1].stripped}{format.intra.suffix}
        {format.intra.prefix}{line[2].stripped}{format.intra.suffix}
        {format.intra.prefix}{line[3].stripped}{format.multiple.suffix}
    """
    for line in iterate(lines):
        prefix = suffix = ''

        if line.first and line.last:
            prefix = format.single.prefix
            suffix = format.single.suffix
        else:
            prefix = format.multiple.prefix if line.first else format.intra.prefix
            suffix = format.multiple.suffix if line.last else format.intra.suffix

        # Continuation lines are indented by format.indent relative to the first.
        yield line.value.clone(line=prefix + line.value.stripped + suffix, scope=scope + (0 if line.first else format.indent))
@staticmethod
def gather(input):
    """Collect contiguous lines of text, preserving line numbers."""

    try:
        line = input.next()
    except StopIteration:
        return

    lead = True  # True until the first non-blank text line has been emitted.
    buffer = []  # Blank lines held back until further text follows them.

    # Gather contiguous (uninterrupted) lines of template text.
    while line.kind == 'text':
        # Normalize trailing whitespace; a trailing backslash suppresses the newline.
        value = line.line.rstrip().rstrip('\\') + ('' if line.continued else '\n')

        if lead and line.stripped:
            yield Line(line.number, value)
            lead = False
        elif not lead:
            if line.stripped:
                # Flush buffered blank lines now that text follows them.
                for buf in buffer:
                    yield buf
                buffer = []
                yield Line(line.number, value)
            else:
                buffer.append(Line(line.number, value))

        try:
            line = input.next()
        except StopIteration:
            line = None
            break

    if line:
        input.push(line)  # Put the last line back, as it won't be a text line.
def process(self, context, lines):
    """Chop up individual lines into static and dynamic parts.

    Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
    chunk types.

    The processor protocol here requires the method to accept values by yielding resulting lines while accepting
    sent chunks.  Deferral of multiple chunks is possible by yielding None.  The processor will be sent None to
    be given a chance to yield a final line and perform any clean-up.
    """

    handler = None  # A (kind, generator) pair for the active chunk processor.

    for line in lines:
        for chunk in chunk_(line):
            if 'strip' in context.flag:
                chunk.line = chunk.stripped

            if not chunk.line: continue  # Eliminate empty chunks, i.e. trailing text segments, ${}, etc.

            if not handler or handler[0] != chunk.kind:
                # Chunk kind changed; finalize the previous processor, if any.
                if handler:
                    try:
                        result = next(handler[1])
                    except StopIteration:
                        result = None
                    if result: yield result

                handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context)
                handler = (chunk.kind, handler)

                try:
                    next(handler[1])  # We fast-forward to the first yield.
                except StopIteration:
                    return

            result = handler[1].send(chunk)  # Send the handler the next contiguous chunk.
            if result: yield result

            if __debug__:  # In development mode we skip the contiguous chunk compaction optimization.
                handler = (None, handler[1])

    # Clean up the final iteration.
    if handler:
        try:
            result = next(handler[1])
        except StopIteration:
            return

        if result: yield result
def process_text(self, kind, context):
    """Combine multiple lines of bare text and emit as a Python string literal."""

    result = None

    while True:
        chunk = yield None  # Defer output until the run of text ends.

        if chunk is None:  # Finalization signal from process().
            if result:
                yield result.clone(line=repr(result.line))
            return

        if not result:
            result = chunk
            continue

        result.line += chunk.line  # Append contiguous lines together.
        # TODO: Preserve line number range().
def process_generic(self, kind, context):
    """Transform otherwise unhandled kinds of chunks by calling an underscore prefixed function by that name."""

    result = None

    while True:
        chunk = yield result

        if chunk is None:  # Finalization signal; nothing is buffered here.
            return

        # E.g. a 'json' chunk becomes _json(<expression>).
        result = chunk.clone(line='_' + kind + '(' + chunk.line + ')')
def process_format(self, kind, context):
    """Handle transforming format string + arguments into Python code."""

    result = None

    while True:
        chunk = yield result

        if chunk is None:
            return

        # We need to split the expression defining the format string from the values to pass when formatting.
        # We want to allow any Python expression, so we'll need to piggyback on Python's own parser in order
        # to exploit the currently available syntax.  Apologies, this is probably the scariest thing in here.
        split = -1
        line = chunk.line

        try:
            ast.parse(line)
        except SyntaxError as e:  # We expect this, and catch it.  It'll have exploded after the first expr.
            split = line.rfind(' ', 0, e.offset)

        result = chunk.clone(line='_bless(' + line[:split].rstrip() + ').format(' + line[split:].lstrip() + ')')
def __call__(self, context):
    # Make sure we have a buffer to write to, if we're operating in buffered mode.
    for i in ensure_buffer(context):
        yield i

    dirty = False

    # Pipeline: collect text lines, split into chunks, then armour for output.
    lines = self.gather(context.input)
    lines = self.process(context, lines)
    lines = self.wrap(context.scope, lines, self.BUFFERED if 'buffer' in context.flag else self.UNBUFFERED)

    # Armour the lines as appropriate and emit them as generated.
    for line in lines:
        dirty = True
        yield line

    if dirty and 'text' in context.flag and 'dirty' not in context.flag:
        context.flag.add('dirty')
|
marrow/cinje | cinje/inline/text.py | Text.gather | python | def gather(input):
try:
line = input.next()
except StopIteration:
return
lead = True
buffer = []
# Gather contiguous (uninterrupted) lines of template text.
while line.kind == 'text':
value = line.line.rstrip().rstrip('\\') + ('' if line.continued else '\n')
if lead and line.stripped:
yield Line(line.number, value)
lead = False
elif not lead:
if line.stripped:
for buf in buffer:
yield buf
buffer = []
yield Line(line.number, value)
else:
buffer.append(Line(line.number, value))
try:
line = input.next()
except StopIteration:
line = None
break
if line:
input.push(line) | Collect contiguous lines of text, preserving line numbers. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/inline/text.py#L73-L110 | null | class Text(object):
"""Identify and process contiguous blocks of template text."""
UNBUFFERED = UNBUFFERED_FORMAT
BUFFERED = BUFFERED_FORMAT
priority = -25
def match(self, context, line):
"""Identify if a line to be processed can be processed by this transformer."""
return line.kind == 'text' # This is common enough to short-circuit.
@staticmethod
def wrap(scope, lines, format=BARE_FORMAT):
"""Wrap a stream of lines in armour.
Takes a stream of lines, for example, the following single line:
Line(1, "Lorem ipsum dolor.")
Or the following multiple lines:
Line(1, "Lorem ipsum")
Line(2, "dolor")
Line(3, "sit amet.")
Provides a generator of wrapped lines. For a single line, the following format is utilized:
{format.single.prefix}{line.stripped}{format.single.suffix}
In the above multi-line example, the following format would be utilized:
{format.multiple.prefix}{line[1].stripped}{format.intra.suffix}
{format.intra.prefix}{line[2].stripped}{format.intra.suffix}
{format.intra.prefix}{line[3].stripped}{format.multiple.suffix}
"""
for line in iterate(lines):
prefix = suffix = ''
if line.first and line.last:
prefix = format.single.prefix
suffix = format.single.suffix
else:
prefix = format.multiple.prefix if line.first else format.intra.prefix
suffix = format.multiple.suffix if line.last else format.intra.suffix
yield line.value.clone(line=prefix + line.value.stripped + suffix, scope=scope + (0 if line.first else format.indent))
@staticmethod
def gather(input):
"""Collect contiguous lines of text, preserving line numbers."""
try:
line = input.next()
except StopIteration:
return
lead = True
buffer = []
# Gather contiguous (uninterrupted) lines of template text.
while line.kind == 'text':
value = line.line.rstrip().rstrip('\\') + ('' if line.continued else '\n')
if lead and line.stripped:
yield Line(line.number, value)
lead = False
elif not lead:
if line.stripped:
for buf in buffer:
yield buf
buffer = []
yield Line(line.number, value)
else:
buffer.append(Line(line.number, value))
try:
line = input.next()
except StopIteration:
line = None
break
if line:
input.push(line) # Put the last line back, as it won't be a text line.
def process(self, context, lines):
"""Chop up individual lines into static and dynamic parts.
Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
chunk types.
The processor protocol here requires the method to accept values by yielding resulting lines while accepting
sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to
be given a chance to yield a final line and perform any clean-up.
"""
handler = None
for line in lines:
for chunk in chunk_(line):
if 'strip' in context.flag:
chunk.line = chunk.stripped
if not chunk.line: continue # Eliminate empty chunks, i.e. trailing text segments, ${}, etc.
if not handler or handler[0] != chunk.kind:
if handler:
try:
result = next(handler[1])
except StopIteration:
result = None
if result: yield result
handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context)
handler = (chunk.kind, handler)
try:
next(handler[1]) # We fast-forward to the first yield.
except StopIteration:
return
result = handler[1].send(chunk) # Send the handler the next contiguous chunk.
if result: yield result
if __debug__: # In development mode we skip the contiguous chunk compaction optimization.
handler = (None, handler[1])
# Clean up the final iteration.
if handler:
try:
result = next(handler[1])
except StopIteration:
return
if result: yield result
def process_text(self, kind, context):
"""Combine multiple lines of bare text and emit as a Python string literal."""
result = None
while True:
chunk = yield None
if chunk is None:
if result:
yield result.clone(line=repr(result.line))
return
if not result:
result = chunk
continue
result.line += chunk.line # Append contiguous lines together.
# TODO: Preserve line number range().
def process_generic(self, kind, context):
"""Transform otherwise unhandled kinds of chunks by calling an underscore prefixed function by that name."""
result = None
while True:
chunk = yield result
if chunk is None:
return
result = chunk.clone(line='_' + kind + '(' + chunk.line + ')')
def process_format(self, kind, context):
"""Handle transforming format string + arguments into Python code."""
result = None
while True:
chunk = yield result
if chunk is None:
return
# We need to split the expression defining the format string from the values to pass when formatting.
# We want to allow any Python expression, so we'll need to piggyback on Python's own parser in order
# to exploit the currently available syntax. Apologies, this is probably the scariest thing in here.
split = -1
line = chunk.line
try:
ast.parse(line)
except SyntaxError as e: # We expect this, and catch it. It'll have exploded after the first expr.
split = line.rfind(' ', 0, e.offset)
result = chunk.clone(line='_bless(' + line[:split].rstrip() + ').format(' + line[split:].lstrip() + ')')
def __call__(self, context):
# Make sure we have a buffer to write to, if we're operating in buffered mode.
for i in ensure_buffer(context):
yield i
dirty = False
lines = self.gather(context.input)
lines = self.process(context, lines)
lines = self.wrap(context.scope, lines, self.BUFFERED if 'buffer' in context.flag else self.UNBUFFERED)
# Armour the lines as appropriate and emit them as generated.
for line in lines:
dirty = True
yield line
if dirty and 'text' in context.flag and 'dirty' not in context.flag:
context.flag.add('dirty')
|
marrow/cinje | cinje/inline/text.py | Text.process | python | def process(self, context, lines):
handler = None
for line in lines:
for chunk in chunk_(line):
if 'strip' in context.flag:
chunk.line = chunk.stripped
if not chunk.line: continue # Eliminate empty chunks, i.e. trailing text segments, ${}, etc.
if not handler or handler[0] != chunk.kind:
if handler:
try:
result = next(handler[1])
except StopIteration:
result = None
if result: yield result
handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context)
handler = (chunk.kind, handler)
try:
next(handler[1]) # We fast-forward to the first yield.
except StopIteration:
return
result = handler[1].send(chunk) # Send the handler the next contiguous chunk.
if result: yield result
if __debug__: # In development mode we skip the contiguous chunk compaction optimization.
handler = (None, handler[1])
# Clean up the final iteration.
if handler:
try:
result = next(handler[1])
except StopIteration:
return
if result: yield result | Chop up individual lines into static and dynamic parts.
Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
chunk types.
The processor protocol here requires the method to accept values by yielding resulting lines while accepting
sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to
be given a chance to yield a final line and perform any clean-up. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/inline/text.py#L112-L162 | [
"def chunk(line, mapping={None: 'text', '${': 'escape', '#{': 'bless', '&{': 'args', '%{': 'format', '@{': 'json'}):\n\t\"\"\"Chunkify and \"tag\" a block of text into plain text and code sections.\n\n\tThe first delimeter is blank to represent text sections, and keep the indexes aligned with the tags.\n\n\tValues are yielded in the form (tag, text).\n\t\"\"\"\n\n\tskipping = 0 # How many closing parenthesis will we need to skip?\n\tstart = None # Starting position of current match.\n\tlast = 0\n\n\ti = 0\n\n\ttext = line.line\n\n\twhile i < len(text):\n\t\tif start is not None:\n\t\t\tif text[i] == '{':\n\t\t\t\tskipping += 1\n\n\t\t\telif text[i] == '}':\n\t\t\t\tif skipping:\n\t\t\t\t\tskipping -= 1\n\t\t\t\telse:\n\t\t\t\t\tyield line.clone(kind=mapping[text[start-2:start]], line=text[start:i])\n\t\t\t\t\tstart = None\n\t\t\t\t\tlast = i = i + 1\n\t\t\t\t\tcontinue\n\n\t\telif text[i:i+2] in mapping:\n\t\t\tif last is not None and last != i:\n\t\t\t\tyield line.clone(kind=mapping[None], line=text[last:i])\n\t\t\t\tlast = None\n\n\t\t\tstart = i = i + 2\n\t\t\tcontinue\n\n\t\ti += 1\n\n\tif last < len(text):\n\t\tyield line.clone(kind=mapping[None], line=text[last:])\n"
] | class Text(object):
"""Identify and process contiguous blocks of template text."""
UNBUFFERED = UNBUFFERED_FORMAT
BUFFERED = BUFFERED_FORMAT
priority = -25
def match(self, context, line):
"""Identify if a line to be processed can be processed by this transformer."""
return line.kind == 'text' # This is common enough to short-circuit.
@staticmethod
def wrap(scope, lines, format=BARE_FORMAT):
"""Wrap a stream of lines in armour.
Takes a stream of lines, for example, the following single line:
Line(1, "Lorem ipsum dolor.")
Or the following multiple lines:
Line(1, "Lorem ipsum")
Line(2, "dolor")
Line(3, "sit amet.")
Provides a generator of wrapped lines. For a single line, the following format is utilized:
{format.single.prefix}{line.stripped}{format.single.suffix}
In the above multi-line example, the following format would be utilized:
{format.multiple.prefix}{line[1].stripped}{format.intra.suffix}
{format.intra.prefix}{line[2].stripped}{format.intra.suffix}
{format.intra.prefix}{line[3].stripped}{format.multiple.suffix}
"""
for line in iterate(lines):
prefix = suffix = ''
if line.first and line.last:
prefix = format.single.prefix
suffix = format.single.suffix
else:
prefix = format.multiple.prefix if line.first else format.intra.prefix
suffix = format.multiple.suffix if line.last else format.intra.suffix
yield line.value.clone(line=prefix + line.value.stripped + suffix, scope=scope + (0 if line.first else format.indent))
@staticmethod
def gather(input):
"""Collect contiguous lines of text, preserving line numbers."""
try:
line = input.next()
except StopIteration:
return
lead = True
buffer = []
# Gather contiguous (uninterrupted) lines of template text.
while line.kind == 'text':
value = line.line.rstrip().rstrip('\\') + ('' if line.continued else '\n')
if lead and line.stripped:
yield Line(line.number, value)
lead = False
elif not lead:
if line.stripped:
for buf in buffer:
yield buf
buffer = []
yield Line(line.number, value)
else:
buffer.append(Line(line.number, value))
try:
line = input.next()
except StopIteration:
line = None
break
if line:
input.push(line) # Put the last line back, as it won't be a text line.
def process(self, context, lines):
"""Chop up individual lines into static and dynamic parts.
Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
chunk types.
The processor protocol here requires the method to accept values by yielding resulting lines while accepting
sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to
be given a chance to yield a final line and perform any clean-up.
"""
handler = None
for line in lines:
for chunk in chunk_(line):
if 'strip' in context.flag:
chunk.line = chunk.stripped
if not chunk.line: continue # Eliminate empty chunks, i.e. trailing text segments, ${}, etc.
if not handler or handler[0] != chunk.kind:
if handler:
try:
result = next(handler[1])
except StopIteration:
result = None
if result: yield result
handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context)
handler = (chunk.kind, handler)
try:
next(handler[1]) # We fast-forward to the first yield.
except StopIteration:
return
result = handler[1].send(chunk) # Send the handler the next contiguous chunk.
if result: yield result
if __debug__: # In development mode we skip the contiguous chunk compaction optimization.
handler = (None, handler[1])
# Clean up the final iteration.
if handler:
try:
result = next(handler[1])
except StopIteration:
return
if result: yield result
def process_text(self, kind, context):
"""Combine multiple lines of bare text and emit as a Python string literal."""
result = None
while True:
chunk = yield None
if chunk is None:
if result:
yield result.clone(line=repr(result.line))
return
if not result:
result = chunk
continue
result.line += chunk.line # Append contiguous lines together.
# TODO: Preserve line number range().
def process_generic(self, kind, context):
"""Transform otherwise unhandled kinds of chunks by calling an underscore prefixed function by that name."""
result = None
while True:
chunk = yield result
if chunk is None:
return
result = chunk.clone(line='_' + kind + '(' + chunk.line + ')')
def process_format(self, kind, context):
"""Handle transforming format string + arguments into Python code."""
result = None
while True:
chunk = yield result
if chunk is None:
return
# We need to split the expression defining the format string from the values to pass when formatting.
# We want to allow any Python expression, so we'll need to piggyback on Python's own parser in order
# to exploit the currently available syntax. Apologies, this is probably the scariest thing in here.
split = -1
line = chunk.line
try:
ast.parse(line)
except SyntaxError as e: # We expect this, and catch it. It'll have exploded after the first expr.
split = line.rfind(' ', 0, e.offset)
result = chunk.clone(line='_bless(' + line[:split].rstrip() + ').format(' + line[split:].lstrip() + ')')
def __call__(self, context):
# Make sure we have a buffer to write to, if we're operating in buffered mode.
for i in ensure_buffer(context):
yield i
dirty = False
lines = self.gather(context.input)
lines = self.process(context, lines)
lines = self.wrap(context.scope, lines, self.BUFFERED if 'buffer' in context.flag else self.UNBUFFERED)
# Armour the lines as appropriate and emit them as generated.
for line in lines:
dirty = True
yield line
if dirty and 'text' in context.flag and 'dirty' not in context.flag:
context.flag.add('dirty')
|
marrow/cinje | cinje/inline/text.py | Text.process_text | python | def process_text(self, kind, context):
result = None
while True:
chunk = yield None
if chunk is None:
if result:
yield result.clone(line=repr(result.line))
return
if not result:
result = chunk
continue
result.line += chunk.line | Combine multiple lines of bare text and emit as a Python string literal. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/inline/text.py#L164-L182 | null | class Text(object):
"""Identify and process contiguous blocks of template text."""
UNBUFFERED = UNBUFFERED_FORMAT
BUFFERED = BUFFERED_FORMAT
priority = -25
def match(self, context, line):
"""Identify if a line to be processed can be processed by this transformer."""
return line.kind == 'text' # This is common enough to short-circuit.
@staticmethod
def wrap(scope, lines, format=BARE_FORMAT):
"""Wrap a stream of lines in armour.
Takes a stream of lines, for example, the following single line:
Line(1, "Lorem ipsum dolor.")
Or the following multiple lines:
Line(1, "Lorem ipsum")
Line(2, "dolor")
Line(3, "sit amet.")
Provides a generator of wrapped lines. For a single line, the following format is utilized:
{format.single.prefix}{line.stripped}{format.single.suffix}
In the above multi-line example, the following format would be utilized:
{format.multiple.prefix}{line[1].stripped}{format.intra.suffix}
{format.intra.prefix}{line[2].stripped}{format.intra.suffix}
{format.intra.prefix}{line[3].stripped}{format.multiple.suffix}
"""
for line in iterate(lines):
prefix = suffix = ''
if line.first and line.last:
prefix = format.single.prefix
suffix = format.single.suffix
else:
prefix = format.multiple.prefix if line.first else format.intra.prefix
suffix = format.multiple.suffix if line.last else format.intra.suffix
yield line.value.clone(line=prefix + line.value.stripped + suffix, scope=scope + (0 if line.first else format.indent))
@staticmethod
def gather(input):
"""Collect contiguous lines of text, preserving line numbers."""
try:
line = input.next()
except StopIteration:
return
lead = True
buffer = []
# Gather contiguous (uninterrupted) lines of template text.
while line.kind == 'text':
value = line.line.rstrip().rstrip('\\') + ('' if line.continued else '\n')
if lead and line.stripped:
yield Line(line.number, value)
lead = False
elif not lead:
if line.stripped:
for buf in buffer:
yield buf
buffer = []
yield Line(line.number, value)
else:
buffer.append(Line(line.number, value))
try:
line = input.next()
except StopIteration:
line = None
break
if line:
input.push(line) # Put the last line back, as it won't be a text line.
def process(self, context, lines):
"""Chop up individual lines into static and dynamic parts.
Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
chunk types.
The processor protocol here requires the method to accept values by yielding resulting lines while accepting
sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to
be given a chance to yield a final line and perform any clean-up.
"""
handler = None
for line in lines:
for chunk in chunk_(line):
if 'strip' in context.flag:
chunk.line = chunk.stripped
if not chunk.line: continue # Eliminate empty chunks, i.e. trailing text segments, ${}, etc.
if not handler or handler[0] != chunk.kind:
if handler:
try:
result = next(handler[1])
except StopIteration:
result = None
if result: yield result
handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context)
handler = (chunk.kind, handler)
try:
next(handler[1]) # We fast-forward to the first yield.
except StopIteration:
return
result = handler[1].send(chunk) # Send the handler the next contiguous chunk.
if result: yield result
if __debug__: # In development mode we skip the contiguous chunk compaction optimization.
handler = (None, handler[1])
# Clean up the final iteration.
if handler:
try:
result = next(handler[1])
except StopIteration:
return
if result: yield result
def process_text(self, kind, context):
"""Combine multiple lines of bare text and emit as a Python string literal."""
result = None
while True:
chunk = yield None
if chunk is None:
if result:
yield result.clone(line=repr(result.line))
return
if not result:
result = chunk
continue
result.line += chunk.line # Append contiguous lines together.
# TODO: Preserve line number range().
def process_generic(self, kind, context):
"""Transform otherwise unhandled kinds of chunks by calling an underscore prefixed function by that name."""
result = None
while True:
chunk = yield result
if chunk is None:
return
result = chunk.clone(line='_' + kind + '(' + chunk.line + ')')
def process_format(self, kind, context):
"""Handle transforming format string + arguments into Python code."""
result = None
while True:
chunk = yield result
if chunk is None:
return
# We need to split the expression defining the format string from the values to pass when formatting.
# We want to allow any Python expression, so we'll need to piggyback on Python's own parser in order
# to exploit the currently available syntax. Apologies, this is probably the scariest thing in here.
split = -1
line = chunk.line
try:
ast.parse(line)
except SyntaxError as e: # We expect this, and catch it. It'll have exploded after the first expr.
split = line.rfind(' ', 0, e.offset)
result = chunk.clone(line='_bless(' + line[:split].rstrip() + ').format(' + line[split:].lstrip() + ')')
def __call__(self, context):
# Make sure we have a buffer to write to, if we're operating in buffered mode.
for i in ensure_buffer(context):
yield i
dirty = False
lines = self.gather(context.input)
lines = self.process(context, lines)
lines = self.wrap(context.scope, lines, self.BUFFERED if 'buffer' in context.flag else self.UNBUFFERED)
# Armour the lines as appropriate and emit them as generated.
for line in lines:
dirty = True
yield line
if dirty and 'text' in context.flag and 'dirty' not in context.flag:
context.flag.add('dirty')
|
marrow/cinje | cinje/inline/text.py | Text.process_generic | python | def process_generic(self, kind, context):
result = None
while True:
chunk = yield result
if chunk is None:
return
result = chunk.clone(line='_' + kind + '(' + chunk.line + ')') | Transform otherwise unhandled kinds of chunks by calling an underscore prefixed function by that name. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/inline/text.py#L185-L196 | null | class Text(object):
"""Identify and process contiguous blocks of template text."""
UNBUFFERED = UNBUFFERED_FORMAT
BUFFERED = BUFFERED_FORMAT
priority = -25
def match(self, context, line):
"""Identify if a line to be processed can be processed by this transformer."""
return line.kind == 'text' # This is common enough to short-circuit.
@staticmethod
def wrap(scope, lines, format=BARE_FORMAT):
"""Wrap a stream of lines in armour.
Takes a stream of lines, for example, the following single line:
Line(1, "Lorem ipsum dolor.")
Or the following multiple lines:
Line(1, "Lorem ipsum")
Line(2, "dolor")
Line(3, "sit amet.")
Provides a generator of wrapped lines. For a single line, the following format is utilized:
{format.single.prefix}{line.stripped}{format.single.suffix}
In the above multi-line example, the following format would be utilized:
{format.multiple.prefix}{line[1].stripped}{format.intra.suffix}
{format.intra.prefix}{line[2].stripped}{format.intra.suffix}
{format.intra.prefix}{line[3].stripped}{format.multiple.suffix}
"""
for line in iterate(lines):
prefix = suffix = ''
if line.first and line.last:
prefix = format.single.prefix
suffix = format.single.suffix
else:
prefix = format.multiple.prefix if line.first else format.intra.prefix
suffix = format.multiple.suffix if line.last else format.intra.suffix
yield line.value.clone(line=prefix + line.value.stripped + suffix, scope=scope + (0 if line.first else format.indent))
@staticmethod
def gather(input):
"""Collect contiguous lines of text, preserving line numbers."""
try:
line = input.next()
except StopIteration:
return
lead = True
buffer = []
# Gather contiguous (uninterrupted) lines of template text.
while line.kind == 'text':
value = line.line.rstrip().rstrip('\\') + ('' if line.continued else '\n')
if lead and line.stripped:
yield Line(line.number, value)
lead = False
elif not lead:
if line.stripped:
for buf in buffer:
yield buf
buffer = []
yield Line(line.number, value)
else:
buffer.append(Line(line.number, value))
try:
line = input.next()
except StopIteration:
line = None
break
if line:
input.push(line) # Put the last line back, as it won't be a text line.
def process(self, context, lines):
"""Chop up individual lines into static and dynamic parts.
Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
chunk types.
The processor protocol here requires the method to accept values by yielding resulting lines while accepting
sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to
be given a chance to yield a final line and perform any clean-up.
"""
handler = None
for line in lines:
for chunk in chunk_(line):
if 'strip' in context.flag:
chunk.line = chunk.stripped
if not chunk.line: continue # Eliminate empty chunks, i.e. trailing text segments, ${}, etc.
if not handler or handler[0] != chunk.kind:
if handler:
try:
result = next(handler[1])
except StopIteration:
result = None
if result: yield result
handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context)
handler = (chunk.kind, handler)
try:
next(handler[1]) # We fast-forward to the first yield.
except StopIteration:
return
result = handler[1].send(chunk) # Send the handler the next contiguous chunk.
if result: yield result
if __debug__: # In development mode we skip the contiguous chunk compaction optimization.
handler = (None, handler[1])
# Clean up the final iteration.
if handler:
try:
result = next(handler[1])
except StopIteration:
return
if result: yield result
def process_text(self, kind, context):
"""Combine multiple lines of bare text and emit as a Python string literal."""
result = None
while True:
chunk = yield None
if chunk is None:
if result:
yield result.clone(line=repr(result.line))
return
if not result:
result = chunk
continue
result.line += chunk.line # Append contiguous lines together.
# TODO: Preserve line number range().
def process_generic(self, kind, context):
"""Transform otherwise unhandled kinds of chunks by calling an underscore prefixed function by that name."""
result = None
while True:
chunk = yield result
if chunk is None:
return
result = chunk.clone(line='_' + kind + '(' + chunk.line + ')')
def process_format(self, kind, context):
"""Handle transforming format string + arguments into Python code."""
result = None
while True:
chunk = yield result
if chunk is None:
return
# We need to split the expression defining the format string from the values to pass when formatting.
# We want to allow any Python expression, so we'll need to piggyback on Python's own parser in order
# to exploit the currently available syntax. Apologies, this is probably the scariest thing in here.
split = -1
line = chunk.line
try:
ast.parse(line)
except SyntaxError as e: # We expect this, and catch it. It'll have exploded after the first expr.
split = line.rfind(' ', 0, e.offset)
result = chunk.clone(line='_bless(' + line[:split].rstrip() + ').format(' + line[split:].lstrip() + ')')
def __call__(self, context):
# Make sure we have a buffer to write to, if we're operating in buffered mode.
for i in ensure_buffer(context):
yield i
dirty = False
lines = self.gather(context.input)
lines = self.process(context, lines)
lines = self.wrap(context.scope, lines, self.BUFFERED if 'buffer' in context.flag else self.UNBUFFERED)
# Armour the lines as appropriate and emit them as generated.
for line in lines:
dirty = True
yield line
if dirty and 'text' in context.flag and 'dirty' not in context.flag:
context.flag.add('dirty')
|
marrow/cinje | cinje/inline/text.py | Text.process_format | python | def process_format(self, kind, context):
result = None
while True:
chunk = yield result
if chunk is None:
return
# We need to split the expression defining the format string from the values to pass when formatting.
# We want to allow any Python expression, so we'll need to piggyback on Python's own parser in order
# to exploit the currently available syntax. Apologies, this is probably the scariest thing in here.
split = -1
line = chunk.line
try:
ast.parse(line)
except SyntaxError as e: # We expect this, and catch it. It'll have exploded after the first expr.
split = line.rfind(' ', 0, e.offset)
result = chunk.clone(line='_bless(' + line[:split].rstrip() + ').format(' + line[split:].lstrip() + ')') | Handle transforming format string + arguments into Python code. | train | https://github.com/marrow/cinje/blob/413bdac7242020ce8379d272720c649a9196daa2/cinje/inline/text.py#L198-L220 | null | class Text(object):
"""Identify and process contiguous blocks of template text."""
UNBUFFERED = UNBUFFERED_FORMAT
BUFFERED = BUFFERED_FORMAT
priority = -25
def match(self, context, line):
"""Identify if a line to be processed can be processed by this transformer."""
return line.kind == 'text' # This is common enough to short-circuit.
@staticmethod
def wrap(scope, lines, format=BARE_FORMAT):
"""Wrap a stream of lines in armour.
Takes a stream of lines, for example, the following single line:
Line(1, "Lorem ipsum dolor.")
Or the following multiple lines:
Line(1, "Lorem ipsum")
Line(2, "dolor")
Line(3, "sit amet.")
Provides a generator of wrapped lines. For a single line, the following format is utilized:
{format.single.prefix}{line.stripped}{format.single.suffix}
In the above multi-line example, the following format would be utilized:
{format.multiple.prefix}{line[1].stripped}{format.intra.suffix}
{format.intra.prefix}{line[2].stripped}{format.intra.suffix}
{format.intra.prefix}{line[3].stripped}{format.multiple.suffix}
"""
for line in iterate(lines):
prefix = suffix = ''
if line.first and line.last:
prefix = format.single.prefix
suffix = format.single.suffix
else:
prefix = format.multiple.prefix if line.first else format.intra.prefix
suffix = format.multiple.suffix if line.last else format.intra.suffix
yield line.value.clone(line=prefix + line.value.stripped + suffix, scope=scope + (0 if line.first else format.indent))
@staticmethod
def gather(input):
"""Collect contiguous lines of text, preserving line numbers."""
try:
line = input.next()
except StopIteration:
return
lead = True
buffer = []
# Gather contiguous (uninterrupted) lines of template text.
while line.kind == 'text':
value = line.line.rstrip().rstrip('\\') + ('' if line.continued else '\n')
if lead and line.stripped:
yield Line(line.number, value)
lead = False
elif not lead:
if line.stripped:
for buf in buffer:
yield buf
buffer = []
yield Line(line.number, value)
else:
buffer.append(Line(line.number, value))
try:
line = input.next()
except StopIteration:
line = None
break
if line:
input.push(line) # Put the last line back, as it won't be a text line.
def process(self, context, lines):
"""Chop up individual lines into static and dynamic parts.
Applies light optimizations, such as empty chunk removal, and calls out to other methods to process different
chunk types.
The processor protocol here requires the method to accept values by yielding resulting lines while accepting
sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be sent None to
be given a chance to yield a final line and perform any clean-up.
"""
handler = None
for line in lines:
for chunk in chunk_(line):
if 'strip' in context.flag:
chunk.line = chunk.stripped
if not chunk.line: continue # Eliminate empty chunks, i.e. trailing text segments, ${}, etc.
if not handler or handler[0] != chunk.kind:
if handler:
try:
result = next(handler[1])
except StopIteration:
result = None
if result: yield result
handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context)
handler = (chunk.kind, handler)
try:
next(handler[1]) # We fast-forward to the first yield.
except StopIteration:
return
result = handler[1].send(chunk) # Send the handler the next contiguous chunk.
if result: yield result
if __debug__: # In development mode we skip the contiguous chunk compaction optimization.
handler = (None, handler[1])
# Clean up the final iteration.
if handler:
try:
result = next(handler[1])
except StopIteration:
return
if result: yield result
def process_text(self, kind, context):
"""Combine multiple lines of bare text and emit as a Python string literal."""
result = None
while True:
chunk = yield None
if chunk is None:
if result:
yield result.clone(line=repr(result.line))
return
if not result:
result = chunk
continue
result.line += chunk.line # Append contiguous lines together.
# TODO: Preserve line number range().
def process_generic(self, kind, context):
"""Transform otherwise unhandled kinds of chunks by calling an underscore prefixed function by that name."""
result = None
while True:
chunk = yield result
if chunk is None:
return
result = chunk.clone(line='_' + kind + '(' + chunk.line + ')')
def process_format(self, kind, context):
"""Handle transforming format string + arguments into Python code."""
result = None
while True:
chunk = yield result
if chunk is None:
return
# We need to split the expression defining the format string from the values to pass when formatting.
# We want to allow any Python expression, so we'll need to piggyback on Python's own parser in order
# to exploit the currently available syntax. Apologies, this is probably the scariest thing in here.
split = -1
line = chunk.line
try:
ast.parse(line)
except SyntaxError as e: # We expect this, and catch it. It'll have exploded after the first expr.
split = line.rfind(' ', 0, e.offset)
result = chunk.clone(line='_bless(' + line[:split].rstrip() + ').format(' + line[split:].lstrip() + ')')
def __call__(self, context):
# Make sure we have a buffer to write to, if we're operating in buffered mode.
for i in ensure_buffer(context):
yield i
dirty = False
lines = self.gather(context.input)
lines = self.process(context, lines)
lines = self.wrap(context.scope, lines, self.BUFFERED if 'buffer' in context.flag else self.UNBUFFERED)
# Armour the lines as appropriate and emit them as generated.
for line in lines:
dirty = True
yield line
if dirty and 'text' in context.flag and 'dirty' not in context.flag:
context.flag.add('dirty')
|
KyleJamesWalker/yamlsettings | yamlsettings/extensions/base.py | YamlSettingsExtension.conform_query | python | def conform_query(cls, query):
query = parse_qs(query, keep_blank_values=True)
# Load yaml of passed values
for key, vals in query.items():
# Multiple values of the same name could be passed use first
# Also params without strings will be treated as true values
query[key] = yaml.load(vals[0] or 'true', Loader=yaml.FullLoader)
# If expected, populate with defaults
for key, val in cls.default_query.items():
if key not in query:
query[key] = val
return query | Converts the query string from a target uri, uses
cls.default_query to populate default arguments.
:param query: Unparsed query string
:type query: urllib.parse.unsplit(uri).query
:returns: Dictionary of parsed values, everything in cls.default_query
will be set if not passed. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/extensions/base.py#L15-L38 | null | class YamlSettingsExtension:
"""Extension Interface"""
protocols = ()
# Dictionary of expected kwargs and flag for json loading values (bool/int)
default_query = {}
not_found_exception = IOError
@classmethod
@classmethod
def load_target(cls, scheme, path, fragment, username,
password, hostname, port, query,
load_method, **kwargs):
"""Override this method to use values from the parsed uri to initialize
the expected target.
"""
raise NotImplementedError("load_target must be overridden")
|
def load_target(cls, scheme, path, fragment, username,
                password, hostname, port, query,
                load_method, **kwargs):
    """Initialize the expected target from the parsed URI components.

    Subclasses must override this; the base implementation only raises.

    :raises NotImplementedError: always, until overridden
    """
    raise NotImplementedError("load_target must be overridden")
the expected target. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/extensions/base.py#L41-L48 | null | class YamlSettingsExtension:
"""Extension Interface"""
protocols = ()
# Dictionary of expected kwargs and flag for json loading values (bool/int)
default_query = {}
not_found_exception = IOError
@classmethod
def conform_query(cls, query):
"""Converts the query string from a target uri, uses
cls.default_query to populate default arguments.
:param query: Unparsed query string
:type query: urllib.parse.unsplit(uri).query
:returns: Dictionary of parsed values, everything in cls.default_query
will be set if not passed.
"""
query = parse_qs(query, keep_blank_values=True)
# Load yaml of passed values
for key, vals in query.items():
# Multiple values of the same name could be passed use first
# Also params without strings will be treated as true values
query[key] = yaml.load(vals[0] or 'true', Loader=yaml.FullLoader)
# If expected, populate with defaults
for key, val in cls.default_query.items():
if key not in query:
query[key] = val
return query
@classmethod
|
def _discover(self):
    """Locate and register every installed extension.

    Scans the ``yamlsettings10`` entry-point group; an entry point may
    expose either an extension instance or a callable factory for one.
    """
    for entry_point in pkg_resources.iter_entry_points('yamlsettings10'):
        extension = entry_point.load()
        if callable(extension):
            # Entry point exposed a factory/class; instantiate it.
            extension = extension()
        self.add(extension)
"def add(self, extension):\n \"\"\"Adds an extension to the registry\n\n :param extension: Extension object\n :type extension: yamlsettings.extensions.base.YamlSettingsExtension\n\n \"\"\"\n index = len(self.extensions)\n self.extensions[index] = extension\n for protocol in extension.protocols:\n self.registry[protocol] = index\n"
] | class ExtensionRegistry(object):
def __init__(self, extensions):
"""A registry that stores extensions to open and parse Target URIs
:param extensions: A list of extensions.
:type extensions: yamlsettings.extensions.base.YamlSettingsExtension
"""
self.registry = {}
self.extensions = {}
self.default_protocol = 'file'
for extension in extensions:
self.add(extension)
self._discover()
def get_extension(self, protocol):
"""Retrieve extension for the given protocol
:param protocol: name of the protocol
:type protocol: string
:raises NoProtocolError: no extension registered for protocol
"""
if protocol not in self.registry:
raise NoProtocolError("No protocol for %s" % protocol)
index = self.registry[protocol]
return self.extensions[index]
def add(self, extension):
"""Adds an extension to the registry
:param extension: Extension object
:type extension: yamlsettings.extensions.base.YamlSettingsExtension
"""
index = len(self.extensions)
self.extensions[index] = extension
for protocol in extension.protocols:
self.registry[protocol] = index
def _load_first(self, target_uris, load_method, **kwargs):
"""Load first yamldict target found in uri list.
:param target_uris: Uris to try and open
:param load_method: load callback
:type target_uri: list or string
:type load_method: callback
:returns: yamldict
"""
if isinstance(target_uris, string_types):
target_uris = [target_uris]
# TODO: Move the list logic into the extension, otherwise a
# load will always try all missing files first.
# TODO: How would multiple protocols work, should the registry hold
# persist copies?
for target_uri in target_uris:
target = urlsplit(target_uri, scheme=self.default_protocol)
extension = self.get_extension(target.scheme)
query = extension.conform_query(target.query)
try:
yaml_dict = extension.load_target(
target.scheme,
target.path,
target.fragment,
target.username,
target.password,
target.hostname,
target.port,
query,
load_method,
**kwargs
)
return yaml_dict
except extension.not_found_exception:
pass
raise IOError("unable to load: {0}".format(target_uris))
def load(self, target_uris, fields=None, **kwargs):
"""Load first yamldict target found in uri.
:param target_uris: Uris to try and open
:param fields: Fields to filter. Default: None
:type target_uri: list or string
:type fields: list
:returns: yamldict
"""
yaml_dict = self._load_first(
target_uris, yamlsettings.yamldict.load, **kwargs
)
# TODO: Move this into the extension, otherwise every load from
# a persistant location will refilter fields.
if fields:
yaml_dict.limit(fields)
return yaml_dict
def load_all(self, target_uris, **kwargs):
'''
Load *all* YAML settings from a list of file paths given.
- File paths in the list gets the priority by their orders
of the list.
'''
yaml_series = self._load_first(
target_uris, yamlsettings.yamldict.load_all, **kwargs
)
yaml_dicts = []
for yaml_dict in yaml_series:
yaml_dicts.append(yaml_dict)
# return YAMLDict objects
return yaml_dicts
|
def get_extension(self, protocol):
    """Look up the extension registered for *protocol*.

    :param protocol: name of the protocol
    :type protocol: string
    :returns: the registered extension object
    :raises NoProtocolError: no extension registered for protocol
    """
    if protocol in self.registry:
        return self.extensions[self.registry[protocol]]
    raise NoProtocolError("No protocol for %s" % protocol)
def __init__(self, extensions):
"""A registry that stores extensions to open and parse Target URIs
:param extensions: A list of extensions.
:type extensions: yamlsettings.extensions.base.YamlSettingsExtension
"""
self.registry = {}
self.extensions = {}
self.default_protocol = 'file'
for extension in extensions:
self.add(extension)
self._discover()
def _discover(self):
"""Find and install all extensions"""
for ep in pkg_resources.iter_entry_points('yamlsettings10'):
ext = ep.load()
if callable(ext):
ext = ext()
self.add(ext)
def add(self, extension):
"""Adds an extension to the registry
:param extension: Extension object
:type extension: yamlsettings.extensions.base.YamlSettingsExtension
"""
index = len(self.extensions)
self.extensions[index] = extension
for protocol in extension.protocols:
self.registry[protocol] = index
def _load_first(self, target_uris, load_method, **kwargs):
"""Load first yamldict target found in uri list.
:param target_uris: Uris to try and open
:param load_method: load callback
:type target_uri: list or string
:type load_method: callback
:returns: yamldict
"""
if isinstance(target_uris, string_types):
target_uris = [target_uris]
# TODO: Move the list logic into the extension, otherwise a
# load will always try all missing files first.
# TODO: How would multiple protocols work, should the registry hold
# persist copies?
for target_uri in target_uris:
target = urlsplit(target_uri, scheme=self.default_protocol)
extension = self.get_extension(target.scheme)
query = extension.conform_query(target.query)
try:
yaml_dict = extension.load_target(
target.scheme,
target.path,
target.fragment,
target.username,
target.password,
target.hostname,
target.port,
query,
load_method,
**kwargs
)
return yaml_dict
except extension.not_found_exception:
pass
raise IOError("unable to load: {0}".format(target_uris))
def load(self, target_uris, fields=None, **kwargs):
"""Load first yamldict target found in uri.
:param target_uris: Uris to try and open
:param fields: Fields to filter. Default: None
:type target_uri: list or string
:type fields: list
:returns: yamldict
"""
yaml_dict = self._load_first(
target_uris, yamlsettings.yamldict.load, **kwargs
)
# TODO: Move this into the extension, otherwise every load from
# a persistant location will refilter fields.
if fields:
yaml_dict.limit(fields)
return yaml_dict
def load_all(self, target_uris, **kwargs):
'''
Load *all* YAML settings from a list of file paths given.
- File paths in the list gets the priority by their orders
of the list.
'''
yaml_series = self._load_first(
target_uris, yamlsettings.yamldict.load_all, **kwargs
)
yaml_dicts = []
for yaml_dict in yaml_series:
yaml_dicts.append(yaml_dict)
# return YAMLDict objects
return yaml_dicts
|
def add(self, extension):
    """Register *extension* under every protocol it supports.

    The extension is stored at the next free slot; later registrations
    of the same protocol win.

    :param extension: Extension object
    :type extension: yamlsettings.extensions.base.YamlSettingsExtension
    """
    slot = len(self.extensions)
    self.extensions[slot] = extension
    for proto in extension.protocols:
        self.registry[proto] = slot
def __init__(self, extensions):
"""A registry that stores extensions to open and parse Target URIs
:param extensions: A list of extensions.
:type extensions: yamlsettings.extensions.base.YamlSettingsExtension
"""
self.registry = {}
self.extensions = {}
self.default_protocol = 'file'
for extension in extensions:
self.add(extension)
self._discover()
def _discover(self):
"""Find and install all extensions"""
for ep in pkg_resources.iter_entry_points('yamlsettings10'):
ext = ep.load()
if callable(ext):
ext = ext()
self.add(ext)
def get_extension(self, protocol):
"""Retrieve extension for the given protocol
:param protocol: name of the protocol
:type protocol: string
:raises NoProtocolError: no extension registered for protocol
"""
if protocol not in self.registry:
raise NoProtocolError("No protocol for %s" % protocol)
index = self.registry[protocol]
return self.extensions[index]
def _load_first(self, target_uris, load_method, **kwargs):
"""Load first yamldict target found in uri list.
:param target_uris: Uris to try and open
:param load_method: load callback
:type target_uri: list or string
:type load_method: callback
:returns: yamldict
"""
if isinstance(target_uris, string_types):
target_uris = [target_uris]
# TODO: Move the list logic into the extension, otherwise a
# load will always try all missing files first.
# TODO: How would multiple protocols work, should the registry hold
# persist copies?
for target_uri in target_uris:
target = urlsplit(target_uri, scheme=self.default_protocol)
extension = self.get_extension(target.scheme)
query = extension.conform_query(target.query)
try:
yaml_dict = extension.load_target(
target.scheme,
target.path,
target.fragment,
target.username,
target.password,
target.hostname,
target.port,
query,
load_method,
**kwargs
)
return yaml_dict
except extension.not_found_exception:
pass
raise IOError("unable to load: {0}".format(target_uris))
def load(self, target_uris, fields=None, **kwargs):
"""Load first yamldict target found in uri.
:param target_uris: Uris to try and open
:param fields: Fields to filter. Default: None
:type target_uri: list or string
:type fields: list
:returns: yamldict
"""
yaml_dict = self._load_first(
target_uris, yamlsettings.yamldict.load, **kwargs
)
# TODO: Move this into the extension, otherwise every load from
# a persistant location will refilter fields.
if fields:
yaml_dict.limit(fields)
return yaml_dict
def load_all(self, target_uris, **kwargs):
'''
Load *all* YAML settings from a list of file paths given.
- File paths in the list gets the priority by their orders
of the list.
'''
yaml_series = self._load_first(
target_uris, yamlsettings.yamldict.load_all, **kwargs
)
yaml_dicts = []
for yaml_dict in yaml_series:
yaml_dicts.append(yaml_dict)
# return YAMLDict objects
return yaml_dicts
|
def _load_first(self, target_uris, load_method, **kwargs):
    """Return the yamldict for the first URI in *target_uris* that loads.

    :param target_uris: URIs to try, in priority order
    :param load_method: load callback
    :type target_uris: list or string
    :type load_method: callback
    :returns: yamldict
    :raises IOError: when none of the URIs could be loaded
    """
    if isinstance(target_uris, string_types):
        target_uris = [target_uris]

    # TODO: Move the list logic into the extension, otherwise a
    # load will always try all missing files first.
    # TODO: How would multiple protocols work, should the registry hold
    # persist copies?
    for uri in target_uris:
        parts = urlsplit(uri, scheme=self.default_protocol)
        ext = self.get_extension(parts.scheme)
        conformed = ext.conform_query(parts.query)
        try:
            return ext.load_target(
                parts.scheme, parts.path, parts.fragment,
                parts.username, parts.password, parts.hostname,
                parts.port, conformed, load_method, **kwargs)
        except ext.not_found_exception:
            # Target missing for this extension; fall through to next URI.
            continue

    raise IOError("unable to load: {0}".format(target_uris))
"def get_extension(self, protocol):\n \"\"\"Retrieve extension for the given protocol\n\n :param protocol: name of the protocol\n :type protocol: string\n :raises NoProtocolError: no extension registered for protocol\n\n \"\"\"\n if protocol not in self.registry:\n raise NoProtocolError(\"No protocol for %s\" % protocol)\n index = self.registry[protocol]\n return self.extensions[index]\n"
] | class ExtensionRegistry(object):
def __init__(self, extensions):
"""A registry that stores extensions to open and parse Target URIs
:param extensions: A list of extensions.
:type extensions: yamlsettings.extensions.base.YamlSettingsExtension
"""
self.registry = {}
self.extensions = {}
self.default_protocol = 'file'
for extension in extensions:
self.add(extension)
self._discover()
def _discover(self):
"""Find and install all extensions"""
for ep in pkg_resources.iter_entry_points('yamlsettings10'):
ext = ep.load()
if callable(ext):
ext = ext()
self.add(ext)
def get_extension(self, protocol):
"""Retrieve extension for the given protocol
:param protocol: name of the protocol
:type protocol: string
:raises NoProtocolError: no extension registered for protocol
"""
if protocol not in self.registry:
raise NoProtocolError("No protocol for %s" % protocol)
index = self.registry[protocol]
return self.extensions[index]
def add(self, extension):
"""Adds an extension to the registry
:param extension: Extension object
:type extension: yamlsettings.extensions.base.YamlSettingsExtension
"""
index = len(self.extensions)
self.extensions[index] = extension
for protocol in extension.protocols:
self.registry[protocol] = index
def load(self, target_uris, fields=None, **kwargs):
"""Load first yamldict target found in uri.
:param target_uris: Uris to try and open
:param fields: Fields to filter. Default: None
:type target_uri: list or string
:type fields: list
:returns: yamldict
"""
yaml_dict = self._load_first(
target_uris, yamlsettings.yamldict.load, **kwargs
)
# TODO: Move this into the extension, otherwise every load from
# a persistant location will refilter fields.
if fields:
yaml_dict.limit(fields)
return yaml_dict
def load_all(self, target_uris, **kwargs):
'''
Load *all* YAML settings from a list of file paths given.
- File paths in the list gets the priority by their orders
of the list.
'''
yaml_series = self._load_first(
target_uris, yamlsettings.yamldict.load_all, **kwargs
)
yaml_dicts = []
for yaml_dict in yaml_series:
yaml_dicts.append(yaml_dict)
# return YAMLDict objects
return yaml_dicts
|
def load(self, target_uris, fields=None, **kwargs):
    """Load the first yamldict target found among *target_uris*.

    :param target_uris: URIs to try and open
    :param fields: keys to keep in the result; all others are dropped.
        Default: None (keep everything)
    :type target_uris: list or string
    :type fields: list
    :returns: yamldict
    """
    result = self._load_first(
        target_uris, yamlsettings.yamldict.load, **kwargs)
    # TODO: Move this into the extension, otherwise every load from
    # a persistent location will refilter fields.
    if fields:
        result.limit(fields)
    return result
"def _load_first(self, target_uris, load_method, **kwargs):\n \"\"\"Load first yamldict target found in uri list.\n\n :param target_uris: Uris to try and open\n :param load_method: load callback\n :type target_uri: list or string\n :type load_method: callback\n\n :returns: yamldict\n\n \"\"\"\n if isinstance(target_uris, string_types):\n target_uris = [target_uris]\n\n # TODO: Move the list logic into the extension, otherwise a\n # load will always try all missing files first.\n # TODO: How would multiple protocols work, should the registry hold\n # persist copies?\n for target_uri in target_uris:\n target = urlsplit(target_uri, scheme=self.default_protocol)\n\n extension = self.get_extension(target.scheme)\n query = extension.conform_query(target.query)\n try:\n yaml_dict = extension.load_target(\n target.scheme,\n target.path,\n target.fragment,\n target.username,\n target.password,\n target.hostname,\n target.port,\n query,\n load_method,\n **kwargs\n )\n return yaml_dict\n except extension.not_found_exception:\n pass\n\n raise IOError(\"unable to load: {0}\".format(target_uris))\n"
] | class ExtensionRegistry(object):
def __init__(self, extensions):
"""A registry that stores extensions to open and parse Target URIs
:param extensions: A list of extensions.
:type extensions: yamlsettings.extensions.base.YamlSettingsExtension
"""
self.registry = {}
self.extensions = {}
self.default_protocol = 'file'
for extension in extensions:
self.add(extension)
self._discover()
def _discover(self):
"""Find and install all extensions"""
for ep in pkg_resources.iter_entry_points('yamlsettings10'):
ext = ep.load()
if callable(ext):
ext = ext()
self.add(ext)
def get_extension(self, protocol):
"""Retrieve extension for the given protocol
:param protocol: name of the protocol
:type protocol: string
:raises NoProtocolError: no extension registered for protocol
"""
if protocol not in self.registry:
raise NoProtocolError("No protocol for %s" % protocol)
index = self.registry[protocol]
return self.extensions[index]
def add(self, extension):
"""Adds an extension to the registry
:param extension: Extension object
:type extension: yamlsettings.extensions.base.YamlSettingsExtension
"""
index = len(self.extensions)
self.extensions[index] = extension
for protocol in extension.protocols:
self.registry[protocol] = index
def _load_first(self, target_uris, load_method, **kwargs):
"""Load first yamldict target found in uri list.
:param target_uris: Uris to try and open
:param load_method: load callback
:type target_uri: list or string
:type load_method: callback
:returns: yamldict
"""
if isinstance(target_uris, string_types):
target_uris = [target_uris]
# TODO: Move the list logic into the extension, otherwise a
# load will always try all missing files first.
# TODO: How would multiple protocols work, should the registry hold
# persist copies?
for target_uri in target_uris:
target = urlsplit(target_uri, scheme=self.default_protocol)
extension = self.get_extension(target.scheme)
query = extension.conform_query(target.query)
try:
yaml_dict = extension.load_target(
target.scheme,
target.path,
target.fragment,
target.username,
target.password,
target.hostname,
target.port,
query,
load_method,
**kwargs
)
return yaml_dict
except extension.not_found_exception:
pass
raise IOError("unable to load: {0}".format(target_uris))
def load_all(self, target_uris, **kwargs):
'''
Load *all* YAML settings from a list of file paths given.
- File paths in the list gets the priority by their orders
of the list.
'''
yaml_series = self._load_first(
target_uris, yamlsettings.yamldict.load_all, **kwargs
)
yaml_dicts = []
for yaml_dict in yaml_series:
yaml_dicts.append(yaml_dict)
# return YAMLDict objects
return yaml_dicts
|
def load_all(self, target_uris, **kwargs):
    """Load *all* YAML documents from the first loadable target.

    File paths earlier in the list take priority over later ones.

    :param target_uris: URIs to try and open
    :type target_uris: list or string
    :returns: list of YAMLDict objects (one per document)
    """
    documents = self._load_first(
        target_uris, yamlsettings.yamldict.load_all, **kwargs)
    # Materialize the document generator so callers get a plain list.
    return list(documents)
"def _load_first(self, target_uris, load_method, **kwargs):\n \"\"\"Load first yamldict target found in uri list.\n\n :param target_uris: Uris to try and open\n :param load_method: load callback\n :type target_uri: list or string\n :type load_method: callback\n\n :returns: yamldict\n\n \"\"\"\n if isinstance(target_uris, string_types):\n target_uris = [target_uris]\n\n # TODO: Move the list logic into the extension, otherwise a\n # load will always try all missing files first.\n # TODO: How would multiple protocols work, should the registry hold\n # persist copies?\n for target_uri in target_uris:\n target = urlsplit(target_uri, scheme=self.default_protocol)\n\n extension = self.get_extension(target.scheme)\n query = extension.conform_query(target.query)\n try:\n yaml_dict = extension.load_target(\n target.scheme,\n target.path,\n target.fragment,\n target.username,\n target.password,\n target.hostname,\n target.port,\n query,\n load_method,\n **kwargs\n )\n return yaml_dict\n except extension.not_found_exception:\n pass\n\n raise IOError(\"unable to load: {0}\".format(target_uris))\n"
] | class ExtensionRegistry(object):
def __init__(self, extensions):
"""A registry that stores extensions to open and parse Target URIs
:param extensions: A list of extensions.
:type extensions: yamlsettings.extensions.base.YamlSettingsExtension
"""
self.registry = {}
self.extensions = {}
self.default_protocol = 'file'
for extension in extensions:
self.add(extension)
self._discover()
def _discover(self):
"""Find and install all extensions"""
for ep in pkg_resources.iter_entry_points('yamlsettings10'):
ext = ep.load()
if callable(ext):
ext = ext()
self.add(ext)
def get_extension(self, protocol):
"""Retrieve extension for the given protocol
:param protocol: name of the protocol
:type protocol: string
:raises NoProtocolError: no extension registered for protocol
"""
if protocol not in self.registry:
raise NoProtocolError("No protocol for %s" % protocol)
index = self.registry[protocol]
return self.extensions[index]
def add(self, extension):
"""Adds an extension to the registry
:param extension: Extension object
:type extension: yamlsettings.extensions.base.YamlSettingsExtension
"""
index = len(self.extensions)
self.extensions[index] = extension
for protocol in extension.protocols:
self.registry[protocol] = index
def _load_first(self, target_uris, load_method, **kwargs):
"""Load first yamldict target found in uri list.
:param target_uris: Uris to try and open
:param load_method: load callback
:type target_uri: list or string
:type load_method: callback
:returns: yamldict
"""
if isinstance(target_uris, string_types):
target_uris = [target_uris]
# TODO: Move the list logic into the extension, otherwise a
# load will always try all missing files first.
# TODO: How would multiple protocols work, should the registry hold
# persist copies?
for target_uri in target_uris:
target = urlsplit(target_uri, scheme=self.default_protocol)
extension = self.get_extension(target.scheme)
query = extension.conform_query(target.query)
try:
yaml_dict = extension.load_target(
target.scheme,
target.path,
target.fragment,
target.username,
target.password,
target.hostname,
target.port,
query,
load_method,
**kwargs
)
return yaml_dict
except extension.not_found_exception:
pass
raise IOError("unable to load: {0}".format(target_uris))
def load(self, target_uris, fields=None, **kwargs):
"""Load first yamldict target found in uri.
:param target_uris: Uris to try and open
:param fields: Fields to filter. Default: None
:type target_uri: list or string
:type fields: list
:returns: yamldict
"""
yaml_dict = self._load_first(
target_uris, yamlsettings.yamldict.load, **kwargs
)
# TODO: Move this into the extension, otherwise every load from
# a persistant location will refilter fields.
if fields:
yaml_dict.limit(fields)
return yaml_dict
|
def load_all(stream):
    """Parse every YAML document in *stream*, yielding a YAMLDict per doc.

    The loader is always disposed, even when the consumer abandons the
    generator early.
    """
    yd_loader = YAMLDictLoader(stream)
    try:
        while yd_loader.check_data():
            yield yd_loader.get_data()
    finally:
        yd_loader.dispose()
"""
# -*- coding: utf-8 -*-
import yaml
import yaml.constructor
import collections
class YAMLDict(collections.OrderedDict):
'''
Order-preserved, attribute-accessible dictionary object for YAML settings
Improved from:
https://github.com/mk-fg/layered-yaml-attrdict-config
'''
def __init__(self, *args, **kwargs):
super(YAMLDict, self).__init__(*args, **kwargs)
# Reset types of all sub-nodes through the hierarchy
self.update(self)
def __getattribute__(self, k):
try:
return super(YAMLDict, self).__getattribute__(k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError
def __setattr__(self, k, v):
if k.startswith('_OrderedDict__'):
return super(YAMLDict, self).__setattr__(k, v)
self[k] = v
def __str__(self):
return dump(self, stream=None, default_flow_style=False)
def __repr__(self):
return '{' + ', '.join(['{0}: {1}'.format(repr(k), repr(v))
for k, v in self.items()]) + '}'
def __dir__(self):
return self.keys()
def traverse(self, callback):
''' Traverse through all keys and values (in-order)
and replace keys and values with the return values
from the callback function.
'''
def _traverse_node(path, node, callback):
ret_val = callback(path, node)
if ret_val is not None:
# replace node with the return value
node = ret_val
else:
# traverse deep into the hierarchy
if isinstance(node, YAMLDict):
for k, v in node.items():
node[k] = _traverse_node(path + [k], v,
callback)
elif isinstance(node, list):
for i, v in enumerate(node):
node[i] = _traverse_node(path + ['[{0}]'.format(i)], v,
callback)
else:
pass
return node
_traverse_node([], self, callback)
def update(self, yaml_dict):
''' Update the content (i.e. keys and values) with yaml_dict.
'''
def _update_node(base_node, update_node):
if isinstance(update_node, YAMLDict) or \
isinstance(update_node, dict):
if not (isinstance(base_node, YAMLDict)):
# NOTE: A regular dictionary is replaced by a new
# YAMLDict object.
new_node = YAMLDict()
else:
new_node = base_node
for k, v in update_node.items():
new_node[k] = _update_node(new_node.get(k), v)
elif isinstance(update_node, list) or isinstance(
update_node, tuple
):
# NOTE: A list/tuple is replaced by a new list/tuple.
new_node = []
for v in update_node:
new_node.append(_update_node(None, v))
if isinstance(update_node, tuple):
new_node = tuple(new_node)
else:
new_node = update_node
return new_node
# Convert non-YAMLDict objects to a YAMLDict
if not (isinstance(yaml_dict, YAMLDict) or
isinstance(yaml_dict, dict)):
yaml_dict = YAMLDict(yaml_dict)
_update_node(self, yaml_dict)
def clone(self):
''' Creates and returns a new copy of self.
'''
clone = YAMLDict()
clone.update(self)
return clone
def rebase(self, yaml_dict):
''' Use yaml_dict as self's new base and update with existing
reverse of update.
'''
base = yaml_dict.clone()
base.update(self)
self.clear()
self.update(base)
def limit(self, keys):
''' Remove all keys other than the keys specified.
'''
if not isinstance(keys, list) and not isinstance(keys, tuple):
keys = [keys]
remove_keys = [k for k in self.keys() if k not in keys]
for k in remove_keys:
self.pop(k)
class YAMLDictLoader(yaml.FullLoader, yaml.constructor.UnsafeConstructor):
'''
Loader for YAMLDict object
Adopted from:
https://gist.github.com/844388
'''
def __init__(self, *args, **kwargs):
super(YAMLDictLoader, self).__init__(*args, **kwargs)
# override constructors for maps (i.e. dictionaries)
self.add_constructor(u'tag:yaml.org,2002:map',
type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap',
type(self).construct_yaml_map)
# Method override to create YAMLDict rather than dict
def construct_yaml_map(self, node):
data = YAMLDict()
yield data
value = self.construct_mapping(node)
# Call the original update() function here to maintain YAMLDict
super(YAMLDict, data).update(value)
# method override to create YAMLDict rather than dict
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
if not isinstance(node, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
None,
None,
'expected a mapping node, but found {0}'.format(node.id),
node.start_mark
)
mapping = YAMLDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError(
'while constructing a mapping',
node.start_mark,
'found unacceptable key ({0})'.format(exc),
key_node.start_mark
)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding YAMLDict object.
"""
loader = YAMLDictLoader(stream)
try:
return loader.get_single_data()
finally:
loader.dispose()
class YAMLDictRepresenter(yaml.representer.Representer):
def represent_YAMLDict(self, mapping):
value = []
node = yaml.MappingNode(u'tag:yaml.org,2002:map',
value, flow_style=None)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and
not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode)
and not node_value.style):
best_style = False
value.append((node_key, node_value))
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
YAMLDictRepresenter.add_representer(YAMLDict,
YAMLDictRepresenter.represent_YAMLDict)
class YAMLDictDumper(yaml.emitter.Emitter,
yaml.serializer.Serializer,
YAMLDictRepresenter,
yaml.resolver.Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, version=None, tags=None,
explicit_start=None, explicit_end=None, sort_keys=None):
yaml.emitter.Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode,
line_break=line_break)
yaml.serializer.Serializer.__init__(self,
encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags)
YAMLDictRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
yaml.resolver.Resolver.__init__(self)
def dump(data, stream=None, **kwargs):
"""
Serialize YAMLDict into a YAML stream.
If stream is None, return the produced string instead.
"""
return yaml.dump_all(
[data],
stream=stream,
Dumper=YAMLDictDumper,
**kwargs
)
def dump_all(data_list, stream=None, **kwargs):
"""
Serialize YAMLDict into a YAML stream.
If stream is None, return the produced string instead.
"""
return yaml.dump_all(
data_list,
stream=stream,
Dumper=YAMLDictDumper,
**kwargs
)
|
KyleJamesWalker/yamlsettings | yamlsettings/yamldict.py | dump | python | def dump(data, stream=None, **kwargs):
return yaml.dump_all(
[data],
stream=stream,
Dumper=YAMLDictDumper,
**kwargs
) | Serialize YAMLDict into a YAML stream.
If stream is None, return the produced string instead. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/yamldict.py#L263-L273 | null | """Order-preserved, attribute-accessible dictionary object for YAML files
"""
# -*- coding: utf-8 -*-
import yaml
import yaml.constructor
import collections
class YAMLDict(collections.OrderedDict):
'''
Order-preserved, attribute-accessible dictionary object for YAML settings
Improved from:
https://github.com/mk-fg/layered-yaml-attrdict-config
'''
def __init__(self, *args, **kwargs):
super(YAMLDict, self).__init__(*args, **kwargs)
# Reset types of all sub-nodes through the hierarchy
self.update(self)
def __getattribute__(self, k):
try:
return super(YAMLDict, self).__getattribute__(k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError
def __setattr__(self, k, v):
if k.startswith('_OrderedDict__'):
return super(YAMLDict, self).__setattr__(k, v)
self[k] = v
def __str__(self):
return dump(self, stream=None, default_flow_style=False)
def __repr__(self):
return '{' + ', '.join(['{0}: {1}'.format(repr(k), repr(v))
for k, v in self.items()]) + '}'
def __dir__(self):
return self.keys()
def traverse(self, callback):
''' Traverse through all keys and values (in-order)
and replace keys and values with the return values
from the callback function.
'''
def _traverse_node(path, node, callback):
ret_val = callback(path, node)
if ret_val is not None:
# replace node with the return value
node = ret_val
else:
# traverse deep into the hierarchy
if isinstance(node, YAMLDict):
for k, v in node.items():
node[k] = _traverse_node(path + [k], v,
callback)
elif isinstance(node, list):
for i, v in enumerate(node):
node[i] = _traverse_node(path + ['[{0}]'.format(i)], v,
callback)
else:
pass
return node
_traverse_node([], self, callback)
def update(self, yaml_dict):
''' Update the content (i.e. keys and values) with yaml_dict.
'''
def _update_node(base_node, update_node):
if isinstance(update_node, YAMLDict) or \
isinstance(update_node, dict):
if not (isinstance(base_node, YAMLDict)):
# NOTE: A regular dictionary is replaced by a new
# YAMLDict object.
new_node = YAMLDict()
else:
new_node = base_node
for k, v in update_node.items():
new_node[k] = _update_node(new_node.get(k), v)
elif isinstance(update_node, list) or isinstance(
update_node, tuple
):
# NOTE: A list/tuple is replaced by a new list/tuple.
new_node = []
for v in update_node:
new_node.append(_update_node(None, v))
if isinstance(update_node, tuple):
new_node = tuple(new_node)
else:
new_node = update_node
return new_node
# Convert non-YAMLDict objects to a YAMLDict
if not (isinstance(yaml_dict, YAMLDict) or
isinstance(yaml_dict, dict)):
yaml_dict = YAMLDict(yaml_dict)
_update_node(self, yaml_dict)
def clone(self):
''' Creates and returns a new copy of self.
'''
clone = YAMLDict()
clone.update(self)
return clone
def rebase(self, yaml_dict):
''' Use yaml_dict as self's new base and update with existing
reverse of update.
'''
base = yaml_dict.clone()
base.update(self)
self.clear()
self.update(base)
def limit(self, keys):
''' Remove all keys other than the keys specified.
'''
if not isinstance(keys, list) and not isinstance(keys, tuple):
keys = [keys]
remove_keys = [k for k in self.keys() if k not in keys]
for k in remove_keys:
self.pop(k)
class YAMLDictLoader(yaml.FullLoader, yaml.constructor.UnsafeConstructor):
'''
Loader for YAMLDict object
Adopted from:
https://gist.github.com/844388
'''
def __init__(self, *args, **kwargs):
super(YAMLDictLoader, self).__init__(*args, **kwargs)
# override constructors for maps (i.e. dictionaries)
self.add_constructor(u'tag:yaml.org,2002:map',
type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap',
type(self).construct_yaml_map)
# Method override to create YAMLDict rather than dict
def construct_yaml_map(self, node):
data = YAMLDict()
yield data
value = self.construct_mapping(node)
# Call the original update() function here to maintain YAMLDict
super(YAMLDict, data).update(value)
# method override to create YAMLDict rather than dict
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
if not isinstance(node, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
None,
None,
'expected a mapping node, but found {0}'.format(node.id),
node.start_mark
)
mapping = YAMLDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError(
'while constructing a mapping',
node.start_mark,
'found unacceptable key ({0})'.format(exc),
key_node.start_mark
)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding YAMLDict object.
"""
loader = YAMLDictLoader(stream)
try:
return loader.get_single_data()
finally:
loader.dispose()
def load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding YAMLDict objects.
"""
loader = YAMLDictLoader(stream)
try:
while loader.check_data():
yield loader.get_data()
finally:
loader.dispose()
class YAMLDictRepresenter(yaml.representer.Representer):
def represent_YAMLDict(self, mapping):
value = []
node = yaml.MappingNode(u'tag:yaml.org,2002:map',
value, flow_style=None)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and
not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode)
and not node_value.style):
best_style = False
value.append((node_key, node_value))
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
YAMLDictRepresenter.add_representer(YAMLDict,
YAMLDictRepresenter.represent_YAMLDict)
class YAMLDictDumper(yaml.emitter.Emitter,
yaml.serializer.Serializer,
YAMLDictRepresenter,
yaml.resolver.Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, version=None, tags=None,
explicit_start=None, explicit_end=None, sort_keys=None):
yaml.emitter.Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode,
line_break=line_break)
yaml.serializer.Serializer.__init__(self,
encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags)
YAMLDictRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
yaml.resolver.Resolver.__init__(self)
def dump_all(data_list, stream=None, **kwargs):
"""
Serialize YAMLDict into a YAML stream.
If stream is None, return the produced string instead.
"""
return yaml.dump_all(
data_list,
stream=stream,
Dumper=YAMLDictDumper,
**kwargs
)
|
KyleJamesWalker/yamlsettings | yamlsettings/yamldict.py | dump_all | python | def dump_all(data_list, stream=None, **kwargs):
return yaml.dump_all(
data_list,
stream=stream,
Dumper=YAMLDictDumper,
**kwargs
) | Serialize YAMLDict into a YAML stream.
If stream is None, return the produced string instead. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/yamldict.py#L276-L286 | null | """Order-preserved, attribute-accessible dictionary object for YAML files
"""
# -*- coding: utf-8 -*-
import yaml
import yaml.constructor
import collections
class YAMLDict(collections.OrderedDict):
'''
Order-preserved, attribute-accessible dictionary object for YAML settings
Improved from:
https://github.com/mk-fg/layered-yaml-attrdict-config
'''
def __init__(self, *args, **kwargs):
super(YAMLDict, self).__init__(*args, **kwargs)
# Reset types of all sub-nodes through the hierarchy
self.update(self)
def __getattribute__(self, k):
try:
return super(YAMLDict, self).__getattribute__(k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError
def __setattr__(self, k, v):
if k.startswith('_OrderedDict__'):
return super(YAMLDict, self).__setattr__(k, v)
self[k] = v
def __str__(self):
return dump(self, stream=None, default_flow_style=False)
def __repr__(self):
return '{' + ', '.join(['{0}: {1}'.format(repr(k), repr(v))
for k, v in self.items()]) + '}'
def __dir__(self):
return self.keys()
def traverse(self, callback):
''' Traverse through all keys and values (in-order)
and replace keys and values with the return values
from the callback function.
'''
def _traverse_node(path, node, callback):
ret_val = callback(path, node)
if ret_val is not None:
# replace node with the return value
node = ret_val
else:
# traverse deep into the hierarchy
if isinstance(node, YAMLDict):
for k, v in node.items():
node[k] = _traverse_node(path + [k], v,
callback)
elif isinstance(node, list):
for i, v in enumerate(node):
node[i] = _traverse_node(path + ['[{0}]'.format(i)], v,
callback)
else:
pass
return node
_traverse_node([], self, callback)
def update(self, yaml_dict):
''' Update the content (i.e. keys and values) with yaml_dict.
'''
def _update_node(base_node, update_node):
if isinstance(update_node, YAMLDict) or \
isinstance(update_node, dict):
if not (isinstance(base_node, YAMLDict)):
# NOTE: A regular dictionary is replaced by a new
# YAMLDict object.
new_node = YAMLDict()
else:
new_node = base_node
for k, v in update_node.items():
new_node[k] = _update_node(new_node.get(k), v)
elif isinstance(update_node, list) or isinstance(
update_node, tuple
):
# NOTE: A list/tuple is replaced by a new list/tuple.
new_node = []
for v in update_node:
new_node.append(_update_node(None, v))
if isinstance(update_node, tuple):
new_node = tuple(new_node)
else:
new_node = update_node
return new_node
# Convert non-YAMLDict objects to a YAMLDict
if not (isinstance(yaml_dict, YAMLDict) or
isinstance(yaml_dict, dict)):
yaml_dict = YAMLDict(yaml_dict)
_update_node(self, yaml_dict)
def clone(self):
''' Creates and returns a new copy of self.
'''
clone = YAMLDict()
clone.update(self)
return clone
def rebase(self, yaml_dict):
''' Use yaml_dict as self's new base and update with existing
reverse of update.
'''
base = yaml_dict.clone()
base.update(self)
self.clear()
self.update(base)
def limit(self, keys):
''' Remove all keys other than the keys specified.
'''
if not isinstance(keys, list) and not isinstance(keys, tuple):
keys = [keys]
remove_keys = [k for k in self.keys() if k not in keys]
for k in remove_keys:
self.pop(k)
class YAMLDictLoader(yaml.FullLoader, yaml.constructor.UnsafeConstructor):
'''
Loader for YAMLDict object
Adopted from:
https://gist.github.com/844388
'''
def __init__(self, *args, **kwargs):
super(YAMLDictLoader, self).__init__(*args, **kwargs)
# override constructors for maps (i.e. dictionaries)
self.add_constructor(u'tag:yaml.org,2002:map',
type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap',
type(self).construct_yaml_map)
# Method override to create YAMLDict rather than dict
def construct_yaml_map(self, node):
data = YAMLDict()
yield data
value = self.construct_mapping(node)
# Call the original update() function here to maintain YAMLDict
super(YAMLDict, data).update(value)
# method override to create YAMLDict rather than dict
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
if not isinstance(node, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
None,
None,
'expected a mapping node, but found {0}'.format(node.id),
node.start_mark
)
mapping = YAMLDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError(
'while constructing a mapping',
node.start_mark,
'found unacceptable key ({0})'.format(exc),
key_node.start_mark
)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding YAMLDict object.
"""
loader = YAMLDictLoader(stream)
try:
return loader.get_single_data()
finally:
loader.dispose()
def load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding YAMLDict objects.
"""
loader = YAMLDictLoader(stream)
try:
while loader.check_data():
yield loader.get_data()
finally:
loader.dispose()
class YAMLDictRepresenter(yaml.representer.Representer):
def represent_YAMLDict(self, mapping):
value = []
node = yaml.MappingNode(u'tag:yaml.org,2002:map',
value, flow_style=None)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and
not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode)
and not node_value.style):
best_style = False
value.append((node_key, node_value))
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
YAMLDictRepresenter.add_representer(YAMLDict,
YAMLDictRepresenter.represent_YAMLDict)
class YAMLDictDumper(yaml.emitter.Emitter,
yaml.serializer.Serializer,
YAMLDictRepresenter,
yaml.resolver.Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, version=None, tags=None,
explicit_start=None, explicit_end=None, sort_keys=None):
yaml.emitter.Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode,
line_break=line_break)
yaml.serializer.Serializer.__init__(self,
encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags)
YAMLDictRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
yaml.resolver.Resolver.__init__(self)
def dump(data, stream=None, **kwargs):
"""
Serialize YAMLDict into a YAML stream.
If stream is None, return the produced string instead.
"""
return yaml.dump_all(
[data],
stream=stream,
Dumper=YAMLDictDumper,
**kwargs
)
|
KyleJamesWalker/yamlsettings | yamlsettings/yamldict.py | YAMLDict.traverse | python | def traverse(self, callback):
''' Traverse through all keys and values (in-order)
and replace keys and values with the return values
from the callback function.
'''
def _traverse_node(path, node, callback):
ret_val = callback(path, node)
if ret_val is not None:
# replace node with the return value
node = ret_val
else:
# traverse deep into the hierarchy
if isinstance(node, YAMLDict):
for k, v in node.items():
node[k] = _traverse_node(path + [k], v,
callback)
elif isinstance(node, list):
for i, v in enumerate(node):
node[i] = _traverse_node(path + ['[{0}]'.format(i)], v,
callback)
else:
pass
return node
_traverse_node([], self, callback) | Traverse through all keys and values (in-order)
and replace keys and values with the return values
from the callback function. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/yamldict.py#L46-L69 | [
"def _traverse_node(path, node, callback):\n ret_val = callback(path, node)\n if ret_val is not None:\n # replace node with the return value\n node = ret_val\n else:\n # traverse deep into the hierarchy\n if isinstance(node, YAMLDict):\n for k, v in node.items():\n node[k] = _traverse_node(path + [k], v,\n callback)\n elif isinstance(node, list):\n for i, v in enumerate(node):\n node[i] = _traverse_node(path + ['[{0}]'.format(i)], v,\n callback)\n else:\n pass\n return node\n"
] | class YAMLDict(collections.OrderedDict):
'''
Order-preserved, attribute-accessible dictionary object for YAML settings
Improved from:
https://github.com/mk-fg/layered-yaml-attrdict-config
'''
def __init__(self, *args, **kwargs):
super(YAMLDict, self).__init__(*args, **kwargs)
# Reset types of all sub-nodes through the hierarchy
self.update(self)
def __getattribute__(self, k):
try:
return super(YAMLDict, self).__getattribute__(k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError
def __setattr__(self, k, v):
if k.startswith('_OrderedDict__'):
return super(YAMLDict, self).__setattr__(k, v)
self[k] = v
def __str__(self):
return dump(self, stream=None, default_flow_style=False)
def __repr__(self):
return '{' + ', '.join(['{0}: {1}'.format(repr(k), repr(v))
for k, v in self.items()]) + '}'
def __dir__(self):
return self.keys()
def update(self, yaml_dict):
''' Update the content (i.e. keys and values) with yaml_dict.
'''
def _update_node(base_node, update_node):
if isinstance(update_node, YAMLDict) or \
isinstance(update_node, dict):
if not (isinstance(base_node, YAMLDict)):
# NOTE: A regular dictionary is replaced by a new
# YAMLDict object.
new_node = YAMLDict()
else:
new_node = base_node
for k, v in update_node.items():
new_node[k] = _update_node(new_node.get(k), v)
elif isinstance(update_node, list) or isinstance(
update_node, tuple
):
# NOTE: A list/tuple is replaced by a new list/tuple.
new_node = []
for v in update_node:
new_node.append(_update_node(None, v))
if isinstance(update_node, tuple):
new_node = tuple(new_node)
else:
new_node = update_node
return new_node
# Convert non-YAMLDict objects to a YAMLDict
if not (isinstance(yaml_dict, YAMLDict) or
isinstance(yaml_dict, dict)):
yaml_dict = YAMLDict(yaml_dict)
_update_node(self, yaml_dict)
def clone(self):
''' Creates and returns a new copy of self.
'''
clone = YAMLDict()
clone.update(self)
return clone
def rebase(self, yaml_dict):
''' Use yaml_dict as self's new base and update with existing
reverse of update.
'''
base = yaml_dict.clone()
base.update(self)
self.clear()
self.update(base)
def limit(self, keys):
''' Remove all keys other than the keys specified.
'''
if not isinstance(keys, list) and not isinstance(keys, tuple):
keys = [keys]
remove_keys = [k for k in self.keys() if k not in keys]
for k in remove_keys:
self.pop(k)
|
KyleJamesWalker/yamlsettings | yamlsettings/yamldict.py | YAMLDict.update | python | def update(self, yaml_dict):
''' Update the content (i.e. keys and values) with yaml_dict.
'''
def _update_node(base_node, update_node):
if isinstance(update_node, YAMLDict) or \
isinstance(update_node, dict):
if not (isinstance(base_node, YAMLDict)):
# NOTE: A regular dictionary is replaced by a new
# YAMLDict object.
new_node = YAMLDict()
else:
new_node = base_node
for k, v in update_node.items():
new_node[k] = _update_node(new_node.get(k), v)
elif isinstance(update_node, list) or isinstance(
update_node, tuple
):
# NOTE: A list/tuple is replaced by a new list/tuple.
new_node = []
for v in update_node:
new_node.append(_update_node(None, v))
if isinstance(update_node, tuple):
new_node = tuple(new_node)
else:
new_node = update_node
return new_node
# Convert non-YAMLDict objects to a YAMLDict
if not (isinstance(yaml_dict, YAMLDict) or
isinstance(yaml_dict, dict)):
yaml_dict = YAMLDict(yaml_dict)
_update_node(self, yaml_dict) | Update the content (i.e. keys and values) with yaml_dict. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/yamldict.py#L71-L101 | [
"def _update_node(base_node, update_node):\n if isinstance(update_node, YAMLDict) or \\\n isinstance(update_node, dict):\n if not (isinstance(base_node, YAMLDict)):\n # NOTE: A regular dictionary is replaced by a new\n # YAMLDict object.\n new_node = YAMLDict()\n else:\n new_node = base_node\n for k, v in update_node.items():\n new_node[k] = _update_node(new_node.get(k), v)\n elif isinstance(update_node, list) or isinstance(\n update_node, tuple\n ):\n # NOTE: A list/tuple is replaced by a new list/tuple.\n new_node = []\n for v in update_node:\n new_node.append(_update_node(None, v))\n if isinstance(update_node, tuple):\n new_node = tuple(new_node)\n else:\n new_node = update_node\n return new_node\n"
] | class YAMLDict(collections.OrderedDict):
'''
Order-preserved, attribute-accessible dictionary object for YAML settings
Improved from:
https://github.com/mk-fg/layered-yaml-attrdict-config
'''
def __init__(self, *args, **kwargs):
super(YAMLDict, self).__init__(*args, **kwargs)
# Reset types of all sub-nodes through the hierarchy
self.update(self)
def __getattribute__(self, k):
try:
return super(YAMLDict, self).__getattribute__(k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError
def __setattr__(self, k, v):
if k.startswith('_OrderedDict__'):
return super(YAMLDict, self).__setattr__(k, v)
self[k] = v
def __str__(self):
return dump(self, stream=None, default_flow_style=False)
def __repr__(self):
return '{' + ', '.join(['{0}: {1}'.format(repr(k), repr(v))
for k, v in self.items()]) + '}'
def __dir__(self):
return self.keys()
def traverse(self, callback):
''' Traverse through all keys and values (in-order)
and replace keys and values with the return values
from the callback function.
'''
def _traverse_node(path, node, callback):
ret_val = callback(path, node)
if ret_val is not None:
# replace node with the return value
node = ret_val
else:
# traverse deep into the hierarchy
if isinstance(node, YAMLDict):
for k, v in node.items():
node[k] = _traverse_node(path + [k], v,
callback)
elif isinstance(node, list):
for i, v in enumerate(node):
node[i] = _traverse_node(path + ['[{0}]'.format(i)], v,
callback)
else:
pass
return node
_traverse_node([], self, callback)
def clone(self):
''' Creates and returns a new copy of self.
'''
clone = YAMLDict()
clone.update(self)
return clone
def rebase(self, yaml_dict):
''' Use yaml_dict as self's new base and update with existing
reverse of update.
'''
base = yaml_dict.clone()
base.update(self)
self.clear()
self.update(base)
def limit(self, keys):
''' Remove all keys other than the keys specified.
'''
if not isinstance(keys, list) and not isinstance(keys, tuple):
keys = [keys]
remove_keys = [k for k in self.keys() if k not in keys]
for k in remove_keys:
self.pop(k)
|
KyleJamesWalker/yamlsettings | yamlsettings/yamldict.py | YAMLDict.rebase | python | def rebase(self, yaml_dict):
''' Use yaml_dict as self's new base and update with existing
reverse of update.
'''
base = yaml_dict.clone()
base.update(self)
self.clear()
self.update(base) | Use yaml_dict as self's new base and update with existing
reverse of update. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/yamldict.py#L110-L117 | [
"def update(self, yaml_dict):\n ''' Update the content (i.e. keys and values) with yaml_dict.\n '''\n def _update_node(base_node, update_node):\n if isinstance(update_node, YAMLDict) or \\\n isinstance(update_node, dict):\n if not (isinstance(base_node, YAMLDict)):\n # NOTE: A regular dictionary is replaced by a new\n # YAMLDict object.\n new_node = YAMLDict()\n else:\n new_node = base_node\n for k, v in update_node.items():\n new_node[k] = _update_node(new_node.get(k), v)\n elif isinstance(update_node, list) or isinstance(\n update_node, tuple\n ):\n # NOTE: A list/tuple is replaced by a new list/tuple.\n new_node = []\n for v in update_node:\n new_node.append(_update_node(None, v))\n if isinstance(update_node, tuple):\n new_node = tuple(new_node)\n else:\n new_node = update_node\n return new_node\n # Convert non-YAMLDict objects to a YAMLDict\n if not (isinstance(yaml_dict, YAMLDict) or\n isinstance(yaml_dict, dict)):\n yaml_dict = YAMLDict(yaml_dict)\n _update_node(self, yaml_dict)\n"
] | class YAMLDict(collections.OrderedDict):
'''
Order-preserved, attribute-accessible dictionary object for YAML settings
Improved from:
https://github.com/mk-fg/layered-yaml-attrdict-config
'''
def __init__(self, *args, **kwargs):
super(YAMLDict, self).__init__(*args, **kwargs)
# Reset types of all sub-nodes through the hierarchy
self.update(self)
def __getattribute__(self, k):
try:
return super(YAMLDict, self).__getattribute__(k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError
def __setattr__(self, k, v):
if k.startswith('_OrderedDict__'):
return super(YAMLDict, self).__setattr__(k, v)
self[k] = v
def __str__(self):
return dump(self, stream=None, default_flow_style=False)
def __repr__(self):
return '{' + ', '.join(['{0}: {1}'.format(repr(k), repr(v))
for k, v in self.items()]) + '}'
def __dir__(self):
return self.keys()
def traverse(self, callback):
''' Traverse through all keys and values (in-order)
and replace keys and values with the return values
from the callback function.
'''
def _traverse_node(path, node, callback):
ret_val = callback(path, node)
if ret_val is not None:
# replace node with the return value
node = ret_val
else:
# traverse deep into the hierarchy
if isinstance(node, YAMLDict):
for k, v in node.items():
node[k] = _traverse_node(path + [k], v,
callback)
elif isinstance(node, list):
for i, v in enumerate(node):
node[i] = _traverse_node(path + ['[{0}]'.format(i)], v,
callback)
else:
pass
return node
_traverse_node([], self, callback)
def update(self, yaml_dict):
''' Update the content (i.e. keys and values) with yaml_dict.
'''
def _update_node(base_node, update_node):
if isinstance(update_node, YAMLDict) or \
isinstance(update_node, dict):
if not (isinstance(base_node, YAMLDict)):
# NOTE: A regular dictionary is replaced by a new
# YAMLDict object.
new_node = YAMLDict()
else:
new_node = base_node
for k, v in update_node.items():
new_node[k] = _update_node(new_node.get(k), v)
elif isinstance(update_node, list) or isinstance(
update_node, tuple
):
# NOTE: A list/tuple is replaced by a new list/tuple.
new_node = []
for v in update_node:
new_node.append(_update_node(None, v))
if isinstance(update_node, tuple):
new_node = tuple(new_node)
else:
new_node = update_node
return new_node
# Convert non-YAMLDict objects to a YAMLDict
if not (isinstance(yaml_dict, YAMLDict) or
isinstance(yaml_dict, dict)):
yaml_dict = YAMLDict(yaml_dict)
_update_node(self, yaml_dict)
def clone(self):
''' Creates and returns a new copy of self.
'''
clone = YAMLDict()
clone.update(self)
return clone
def limit(self, keys):
''' Remove all keys other than the keys specified.
'''
if not isinstance(keys, list) and not isinstance(keys, tuple):
keys = [keys]
remove_keys = [k for k in self.keys() if k not in keys]
for k in remove_keys:
self.pop(k)
|
KyleJamesWalker/yamlsettings | yamlsettings/yamldict.py | YAMLDict.limit | python | def limit(self, keys):
''' Remove all keys other than the keys specified.
'''
if not isinstance(keys, list) and not isinstance(keys, tuple):
keys = [keys]
remove_keys = [k for k in self.keys() if k not in keys]
for k in remove_keys:
self.pop(k) | Remove all keys other than the keys specified. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/yamldict.py#L119-L126 | null | class YAMLDict(collections.OrderedDict):
'''
Order-preserved, attribute-accessible dictionary object for YAML settings
Improved from:
https://github.com/mk-fg/layered-yaml-attrdict-config
'''
def __init__(self, *args, **kwargs):
super(YAMLDict, self).__init__(*args, **kwargs)
# Reset types of all sub-nodes through the hierarchy
self.update(self)
def __getattribute__(self, k):
try:
return super(YAMLDict, self).__getattribute__(k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError
def __setattr__(self, k, v):
if k.startswith('_OrderedDict__'):
return super(YAMLDict, self).__setattr__(k, v)
self[k] = v
def __str__(self):
return dump(self, stream=None, default_flow_style=False)
def __repr__(self):
return '{' + ', '.join(['{0}: {1}'.format(repr(k), repr(v))
for k, v in self.items()]) + '}'
def __dir__(self):
return self.keys()
def traverse(self, callback):
''' Traverse through all keys and values (in-order)
and replace keys and values with the return values
from the callback function.
'''
def _traverse_node(path, node, callback):
ret_val = callback(path, node)
if ret_val is not None:
# replace node with the return value
node = ret_val
else:
# traverse deep into the hierarchy
if isinstance(node, YAMLDict):
for k, v in node.items():
node[k] = _traverse_node(path + [k], v,
callback)
elif isinstance(node, list):
for i, v in enumerate(node):
node[i] = _traverse_node(path + ['[{0}]'.format(i)], v,
callback)
else:
pass
return node
_traverse_node([], self, callback)
def update(self, yaml_dict):
''' Update the content (i.e. keys and values) with yaml_dict.
'''
def _update_node(base_node, update_node):
if isinstance(update_node, YAMLDict) or \
isinstance(update_node, dict):
if not (isinstance(base_node, YAMLDict)):
# NOTE: A regular dictionary is replaced by a new
# YAMLDict object.
new_node = YAMLDict()
else:
new_node = base_node
for k, v in update_node.items():
new_node[k] = _update_node(new_node.get(k), v)
elif isinstance(update_node, list) or isinstance(
update_node, tuple
):
# NOTE: A list/tuple is replaced by a new list/tuple.
new_node = []
for v in update_node:
new_node.append(_update_node(None, v))
if isinstance(update_node, tuple):
new_node = tuple(new_node)
else:
new_node = update_node
return new_node
# Convert non-YAMLDict objects to a YAMLDict
if not (isinstance(yaml_dict, YAMLDict) or
isinstance(yaml_dict, dict)):
yaml_dict = YAMLDict(yaml_dict)
_update_node(self, yaml_dict)
def clone(self):
''' Creates and returns a new copy of self.
'''
clone = YAMLDict()
clone.update(self)
return clone
def rebase(self, yaml_dict):
''' Use yaml_dict as self's new base and update with existing
reverse of update.
'''
base = yaml_dict.clone()
base.update(self)
self.clear()
self.update(base)
|
KyleJamesWalker/yamlsettings | yamlsettings/helpers.py | save | python | def save(yaml_dict, filepath):
'''
Save YAML settings to the specified file path.
'''
yamldict.dump(yaml_dict, open(filepath, 'w'), default_flow_style=False) | Save YAML settings to the specified file path. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/helpers.py#L13-L17 | [
"def dump(data, stream=None, **kwargs):\n \"\"\"\n Serialize YAMLDict into a YAML stream.\n If stream is None, return the produced string instead.\n \"\"\"\n return yaml.dump_all(\n [data],\n stream=stream,\n Dumper=YAMLDictDumper,\n **kwargs\n )\n"
] | """Helper functions
"""
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from yamlsettings import yamldict
from yamlsettings.extensions import registry
def save_all(yaml_dicts, filepath):
'''
Save *all* YAML settings to the specified file path.
'''
yamldict.dump_all(yaml_dicts, open(filepath, 'w'),
default_flow_style=False)
def update_from_file(yaml_dict, filepaths):
'''
Override YAML settings with loaded values from filepaths.
- File paths in the list gets the priority by their orders of the list.
'''
# load YAML settings with only fields in yaml_dict
yaml_dict.update(registry.load(filepaths, list(yaml_dict)))
def update_from_env(yaml_dict, prefix=None):
'''
Override YAML settings with values from the environment variables.
- The letter '_' is delimit the hierarchy of the YAML settings such
that the value of 'config.databases.local' will be overridden
by CONFIG_DATABASES_LOCAL.
'''
prefix = prefix or ""
def _set_env_var(path, node):
env_path = "{0}{1}{2}".format(
prefix.upper(),
'_' if prefix else '',
'_'.join([str(key).upper() for key in path])
)
env_val = os.environ.get(env_path, None)
if env_val is not None:
# convert the value to a YAML-defined type
env_dict = yamldict.load('val: {0}'.format(env_val))
return env_dict.val
else:
return None
# traverse yaml_dict with the callback function
yaml_dict.traverse(_set_env_var)
class YamlSettings(object):
"""Deprecated: Old helper class to load settings in a opinionated way.
It's recommended to write or use an opinionated extension now.
"""
def __init__(self, default_settings, override_settings, override_envs=True,
default_section=None, cur_section=None,
param_callback=None, override_required=False,
envs_override_defaults_only=False,
single_section_load=False):
defaults = registry.load(default_settings)
if override_envs and envs_override_defaults_only:
if default_section:
prefix = default_section
section = defaults[default_section]
else:
prefix = ""
section = defaults
update_from_env(section, prefix)
self.cur_section = default_section
if default_section is None:
# No section support simply update with overrides
self.settings = defaults
try:
# WAS:
# self.settings.update_yaml(override_settings)
self.settings.update(registry.load(override_settings))
except IOError:
if override_required:
raise
if override_envs and not envs_override_defaults_only:
update_from_env(self.settings, "")
else:
# Load Overrides first and merge with defaults
try:
self.settings = registry.load(override_settings)
except IOError:
if override_required:
raise
# Note this will copy to itself right now, but
# will allows for simpler logic to get environment
# variables to work
self.settings = defaults
for cur_section in self.settings:
cur = self.settings[cur_section]
cur.rebase(defaults[self.cur_section])
if override_envs and not envs_override_defaults_only:
update_from_env(cur, default_section)
# Make sure default section is created
if default_section not in self.settings:
self.settings[default_section] = defaults[default_section]
if override_envs and not envs_override_defaults_only:
update_from_env(self.settings[default_section],
default_section)
def get_settings(self, section_name=None):
if section_name is None:
section_name = self.cur_section
if section_name is None:
return self.settings
else:
return self.settings[section_name]
|
KyleJamesWalker/yamlsettings | yamlsettings/helpers.py | save_all | python | def save_all(yaml_dicts, filepath):
'''
Save *all* YAML settings to the specified file path.
'''
yamldict.dump_all(yaml_dicts, open(filepath, 'w'),
default_flow_style=False) | Save *all* YAML settings to the specified file path. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/helpers.py#L20-L25 | [
"def dump_all(data_list, stream=None, **kwargs):\n \"\"\"\n Serialize YAMLDict into a YAML stream.\n If stream is None, return the produced string instead.\n \"\"\"\n return yaml.dump_all(\n data_list,\n stream=stream,\n Dumper=YAMLDictDumper,\n **kwargs\n )\n"
] | """Helper functions
"""
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from yamlsettings import yamldict
from yamlsettings.extensions import registry
def save(yaml_dict, filepath):
'''
Save YAML settings to the specified file path.
'''
yamldict.dump(yaml_dict, open(filepath, 'w'), default_flow_style=False)
def update_from_file(yaml_dict, filepaths):
'''
Override YAML settings with loaded values from filepaths.
- File paths in the list gets the priority by their orders of the list.
'''
# load YAML settings with only fields in yaml_dict
yaml_dict.update(registry.load(filepaths, list(yaml_dict)))
def update_from_env(yaml_dict, prefix=None):
'''
Override YAML settings with values from the environment variables.
- The letter '_' is delimit the hierarchy of the YAML settings such
that the value of 'config.databases.local' will be overridden
by CONFIG_DATABASES_LOCAL.
'''
prefix = prefix or ""
def _set_env_var(path, node):
env_path = "{0}{1}{2}".format(
prefix.upper(),
'_' if prefix else '',
'_'.join([str(key).upper() for key in path])
)
env_val = os.environ.get(env_path, None)
if env_val is not None:
# convert the value to a YAML-defined type
env_dict = yamldict.load('val: {0}'.format(env_val))
return env_dict.val
else:
return None
# traverse yaml_dict with the callback function
yaml_dict.traverse(_set_env_var)
class YamlSettings(object):
"""Deprecated: Old helper class to load settings in a opinionated way.
It's recommended to write or use an opinionated extension now.
"""
def __init__(self, default_settings, override_settings, override_envs=True,
default_section=None, cur_section=None,
param_callback=None, override_required=False,
envs_override_defaults_only=False,
single_section_load=False):
defaults = registry.load(default_settings)
if override_envs and envs_override_defaults_only:
if default_section:
prefix = default_section
section = defaults[default_section]
else:
prefix = ""
section = defaults
update_from_env(section, prefix)
self.cur_section = default_section
if default_section is None:
# No section support simply update with overrides
self.settings = defaults
try:
# WAS:
# self.settings.update_yaml(override_settings)
self.settings.update(registry.load(override_settings))
except IOError:
if override_required:
raise
if override_envs and not envs_override_defaults_only:
update_from_env(self.settings, "")
else:
# Load Overrides first and merge with defaults
try:
self.settings = registry.load(override_settings)
except IOError:
if override_required:
raise
# Note this will copy to itself right now, but
# will allows for simpler logic to get environment
# variables to work
self.settings = defaults
for cur_section in self.settings:
cur = self.settings[cur_section]
cur.rebase(defaults[self.cur_section])
if override_envs and not envs_override_defaults_only:
update_from_env(cur, default_section)
# Make sure default section is created
if default_section not in self.settings:
self.settings[default_section] = defaults[default_section]
if override_envs and not envs_override_defaults_only:
update_from_env(self.settings[default_section],
default_section)
def get_settings(self, section_name=None):
if section_name is None:
section_name = self.cur_section
if section_name is None:
return self.settings
else:
return self.settings[section_name]
|
KyleJamesWalker/yamlsettings | yamlsettings/helpers.py | update_from_file | python | def update_from_file(yaml_dict, filepaths):
'''
Override YAML settings with loaded values from filepaths.
- File paths in the list gets the priority by their orders of the list.
'''
# load YAML settings with only fields in yaml_dict
yaml_dict.update(registry.load(filepaths, list(yaml_dict))) | Override YAML settings with loaded values from filepaths.
- File paths in the list gets the priority by their orders of the list. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/helpers.py#L28-L35 | [
"def load(self, target_uris, fields=None, **kwargs):\n \"\"\"Load first yamldict target found in uri.\n\n :param target_uris: Uris to try and open\n :param fields: Fields to filter. Default: None\n :type target_uri: list or string\n :type fields: list\n\n :returns: yamldict\n\n \"\"\"\n yaml_dict = self._load_first(\n target_uris, yamlsettings.yamldict.load, **kwargs\n )\n # TODO: Move this into the extension, otherwise every load from\n # a persistant location will refilter fields.\n if fields:\n yaml_dict.limit(fields)\n\n return yaml_dict\n"
] | """Helper functions
"""
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from yamlsettings import yamldict
from yamlsettings.extensions import registry
def save(yaml_dict, filepath):
'''
Save YAML settings to the specified file path.
'''
yamldict.dump(yaml_dict, open(filepath, 'w'), default_flow_style=False)
def save_all(yaml_dicts, filepath):
'''
Save *all* YAML settings to the specified file path.
'''
yamldict.dump_all(yaml_dicts, open(filepath, 'w'),
default_flow_style=False)
def update_from_env(yaml_dict, prefix=None):
'''
Override YAML settings with values from the environment variables.
- The letter '_' is delimit the hierarchy of the YAML settings such
that the value of 'config.databases.local' will be overridden
by CONFIG_DATABASES_LOCAL.
'''
prefix = prefix or ""
def _set_env_var(path, node):
env_path = "{0}{1}{2}".format(
prefix.upper(),
'_' if prefix else '',
'_'.join([str(key).upper() for key in path])
)
env_val = os.environ.get(env_path, None)
if env_val is not None:
# convert the value to a YAML-defined type
env_dict = yamldict.load('val: {0}'.format(env_val))
return env_dict.val
else:
return None
# traverse yaml_dict with the callback function
yaml_dict.traverse(_set_env_var)
class YamlSettings(object):
"""Deprecated: Old helper class to load settings in a opinionated way.
It's recommended to write or use an opinionated extension now.
"""
def __init__(self, default_settings, override_settings, override_envs=True,
default_section=None, cur_section=None,
param_callback=None, override_required=False,
envs_override_defaults_only=False,
single_section_load=False):
defaults = registry.load(default_settings)
if override_envs and envs_override_defaults_only:
if default_section:
prefix = default_section
section = defaults[default_section]
else:
prefix = ""
section = defaults
update_from_env(section, prefix)
self.cur_section = default_section
if default_section is None:
# No section support simply update with overrides
self.settings = defaults
try:
# WAS:
# self.settings.update_yaml(override_settings)
self.settings.update(registry.load(override_settings))
except IOError:
if override_required:
raise
if override_envs and not envs_override_defaults_only:
update_from_env(self.settings, "")
else:
# Load Overrides first and merge with defaults
try:
self.settings = registry.load(override_settings)
except IOError:
if override_required:
raise
# Note this will copy to itself right now, but
# will allows for simpler logic to get environment
# variables to work
self.settings = defaults
for cur_section in self.settings:
cur = self.settings[cur_section]
cur.rebase(defaults[self.cur_section])
if override_envs and not envs_override_defaults_only:
update_from_env(cur, default_section)
# Make sure default section is created
if default_section not in self.settings:
self.settings[default_section] = defaults[default_section]
if override_envs and not envs_override_defaults_only:
update_from_env(self.settings[default_section],
default_section)
def get_settings(self, section_name=None):
if section_name is None:
section_name = self.cur_section
if section_name is None:
return self.settings
else:
return self.settings[section_name]
|
KyleJamesWalker/yamlsettings | yamlsettings/helpers.py | update_from_env | python | def update_from_env(yaml_dict, prefix=None):
'''
Override YAML settings with values from the environment variables.
- The letter '_' is delimit the hierarchy of the YAML settings such
that the value of 'config.databases.local' will be overridden
by CONFIG_DATABASES_LOCAL.
'''
prefix = prefix or ""
def _set_env_var(path, node):
env_path = "{0}{1}{2}".format(
prefix.upper(),
'_' if prefix else '',
'_'.join([str(key).upper() for key in path])
)
env_val = os.environ.get(env_path, None)
if env_val is not None:
# convert the value to a YAML-defined type
env_dict = yamldict.load('val: {0}'.format(env_val))
return env_dict.val
else:
return None
# traverse yaml_dict with the callback function
yaml_dict.traverse(_set_env_var) | Override YAML settings with values from the environment variables.
- The letter '_' is delimit the hierarchy of the YAML settings such
that the value of 'config.databases.local' will be overridden
by CONFIG_DATABASES_LOCAL. | train | https://github.com/KyleJamesWalker/yamlsettings/blob/ddd7df2ca995ddf191b24c4d35e9dd28186e4535/yamlsettings/helpers.py#L38-L63 | null | """Helper functions
"""
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from yamlsettings import yamldict
from yamlsettings.extensions import registry
def save(yaml_dict, filepath):
'''
Save YAML settings to the specified file path.
'''
yamldict.dump(yaml_dict, open(filepath, 'w'), default_flow_style=False)
def save_all(yaml_dicts, filepath):
'''
Save *all* YAML settings to the specified file path.
'''
yamldict.dump_all(yaml_dicts, open(filepath, 'w'),
default_flow_style=False)
def update_from_file(yaml_dict, filepaths):
'''
Override YAML settings with loaded values from filepaths.
- File paths in the list gets the priority by their orders of the list.
'''
# load YAML settings with only fields in yaml_dict
yaml_dict.update(registry.load(filepaths, list(yaml_dict)))
class YamlSettings(object):
"""Deprecated: Old helper class to load settings in a opinionated way.
It's recommended to write or use an opinionated extension now.
"""
def __init__(self, default_settings, override_settings, override_envs=True,
default_section=None, cur_section=None,
param_callback=None, override_required=False,
envs_override_defaults_only=False,
single_section_load=False):
defaults = registry.load(default_settings)
if override_envs and envs_override_defaults_only:
if default_section:
prefix = default_section
section = defaults[default_section]
else:
prefix = ""
section = defaults
update_from_env(section, prefix)
self.cur_section = default_section
if default_section is None:
# No section support simply update with overrides
self.settings = defaults
try:
# WAS:
# self.settings.update_yaml(override_settings)
self.settings.update(registry.load(override_settings))
except IOError:
if override_required:
raise
if override_envs and not envs_override_defaults_only:
update_from_env(self.settings, "")
else:
# Load Overrides first and merge with defaults
try:
self.settings = registry.load(override_settings)
except IOError:
if override_required:
raise
# Note this will copy to itself right now, but
# will allows for simpler logic to get environment
# variables to work
self.settings = defaults
for cur_section in self.settings:
cur = self.settings[cur_section]
cur.rebase(defaults[self.cur_section])
if override_envs and not envs_override_defaults_only:
update_from_env(cur, default_section)
# Make sure default section is created
if default_section not in self.settings:
self.settings[default_section] = defaults[default_section]
if override_envs and not envs_override_defaults_only:
update_from_env(self.settings[default_section],
default_section)
def get_settings(self, section_name=None):
if section_name is None:
section_name = self.cur_section
if section_name is None:
return self.settings
else:
return self.settings[section_name]
|
fmenabe/python-dokuwiki | dokuwiki.py | date | python | def date(date):
date = date.value
return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S')
if len(date) == 24
else datetime.strptime(date, '%Y%m%dT%H:%M:%S')) | DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime``
type and the format changes between DokuWiki versions ... This function
convert *date* to a `datetime` object. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L36-L44 | null | # -*- coding: utf-8 -*-
"""This python module aims to manage
`DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the
provided `XML-RPC API <https://www.dokuwiki.org/devel:xmlrpc>`_. It is
compatible with python2.7 and python3+.
Installation
------------
It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so you can use
the ``pip`` command to install it::
pip install dokuwiki
Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_
"""
import re
import sys
import base64
import weakref
from xml.parsers.expat import ExpatError
if sys.version_info[0] == 3:
from xmlrpc.client import ServerProxy, Binary, Fault, Transport
from urllib.parse import urlencode
else:
from xmlrpclib import ServerProxy, Binary, Fault, Transport
from urllib import urlencode
from datetime import datetime, timedelta
ERR = 'XML or text declaration not at start of entity: line 2, column 0'
_URL_RE = re.compile(r'(?P<proto>https?)://(?P<host>[^/]*)(?P<uri>/.*)?')
def utc2local(date):
"""DokuWiki returns date with a +0000 timezone. This function convert *date*
to the local time.
"""
date_offset = (datetime.now() - datetime.utcnow())
# Python < 2.7 don't have the 'total_seconds' method so calculate it by hand!
date_offset = (date_offset.microseconds +
(date_offset.seconds + date_offset.days * 24 * 3600) * 1e6) / 1e6
date_offset = int(round(date_offset / 60 / 60))
return date + timedelta(hours=date_offset)
class DokuWikiError(Exception):
"""Exception raised by this module when there is an error."""
pass
class CookiesTransport(Transport):
"""A Python3 xmlrpc.client.Transport subclass that retains cookies."""
def __init__(self):
Transport.__init__(self)
self._cookies = dict()
def send_headers(self, connection, headers):
if self._cookies:
cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
connection.putheader("Cookie", "; ".join(cookies))
Transport.send_headers(self, connection, headers)
def parse_response(self, response):
"""parse and store cookie"""
try:
for header in response.msg.get_all("Set-Cookie"):
cookie = header.split(";", 1)[0]
cookieKey, cookieValue = cookie.split("=", 1)
self._cookies[cookieKey] = cookieValue
finally:
return Transport.parse_response(self, response)
class CookiesTransport2(Transport):
"""A Python2 xmlrpclib.Transport subclass that retains cookies."""
def __init__(self):
Transport.__init__(self)
self._cookies = dict()
def send_request(self, connection, handler, request_body):
Transport.send_request(self, connection, handler, request_body)
# set cookie below handler
if self._cookies:
cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
connection.putheader("Cookie", "; ".join(cookies))
def parse_response(self, response):
"""parse and store cookie"""
try:
for header in response.getheader("set-cookie").split(", "):
# filter 'expire' information
if not header.startswith("D"):
continue
cookie = header.split(";", 1)[0]
cookieKey, cookieValue = cookie.split("=", 1)
self._cookies[cookieKey] = cookieValue
finally:
return Transport.parse_response(self, response)
class DokuWiki(object):
"""Initialize a connection to a DokuWiki wiki. *url*, *user* and
*password* are respectively the URL, the login and the password for
connecting to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client`
**ServerProxy** parameters.
The exception `DokuWikiError` is raised if the authentification
fails but others exceptions (like ``gaierror`` for invalid domain,
``ProtocolError`` for an invalid wiki, ...) are not catched.
.. code::
try:
wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False)
except (DokuWikiError, Exception) as err:
print('unable to connect: %s' % err)
.. note::
The URL format is: `PROTO://FQDN[/PATH]` (*https://www.example.com/dokuwiki*
for example).
"""
def __init__(self, url, user, password, cookieAuth=False, **kwargs):
"""Initialize the object by connecting to the XMLRPC server."""
# Initialize XMLRPC client.
try:
params = _URL_RE.search(url).groupdict()
if cookieAuth == False:
url = '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % (
params['proto'], user, password, params['host'], params['uri'] or '')
else:
url = '%s://%s%s/lib/exe/xmlrpc.php' % (
params['proto'], params['host'], params['uri'] or '')
except AttributeError:
raise DokuWikiError("invalid url '%s'" % url)
if cookieAuth == False:
self.proxy = ServerProxy(url, **kwargs)
else:
if sys.version_info[0] == 3:
self.proxy = ServerProxy(url, CookiesTransport(), **kwargs)
else:
self.proxy = ServerProxy(url, CookiesTransport2(), **kwargs)
# Force login to check the connection.
if not self.login(user, password):
raise DokuWikiError('invalid login or password!')
# Set "namespaces" for pages and medias functions.
self.pages = _Pages(weakref.ref(self)())
self.medias = _Medias(weakref.ref(self)())
def send(self, command, *args, **kwargs):
"""Generic method for executing an XML-RPC *command*. *args* and
*kwargs* are the arguments and parameters needed by the command.
"""
args = list(args)
if kwargs:
args.append(kwargs)
method = self.proxy
for elt in command.split('.'):
method = getattr(method, elt)
try:
return method(*args)
except Fault as err:
if err.faultCode == 121:
return {}
elif err.faultCode == 321:
return []
raise DokuWikiError(err)
except ExpatError as err:
if str(err) != ERR:
raise DokuWikiError(err)
@property
def version(self):
"""Property that returns the DokuWiki version of the remote Wiki."""
return self.send('dokuwiki.getVersion')
@property
def time(self):
"""Property that returns the current time at the remote wiki server as
Unix timestamp.
"""
return self.send('dokuwiki.getTime')
@property
def xmlrpc_version(self):
"""Property that returns the XML RPC interface version of the remote
Wiki. This is DokuWiki implementation specific and independent of the
supported standard API version returned by ``wiki.getRPCVersionSupported``.
"""
return self.send('dokuwiki.getXMLRPCAPIVersion')
@property
def xmlrpc_supported_version(self):
"""Property that returns *2* with the supported RPC API version."""
return self.send('wiki.getRPCVersionSupported')
@property
def title(self):
"""Property that returns the title of the wiki."""
return self.send('dokuwiki.getTitle')
def login(self, user, password):
"""Log to the wiki using *user* and *password* credentials. It returns
a boolean that indicates if the user succesfully authenticate."""
return self.send('dokuwiki.login', user, password)
def add_acl(self, scope, user, permission):
"""Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts
the page/namespace *scope* to *user* (use *@group* syntax for groups)
with *permission* level. It returns a boolean that indicate if the rule
was correctly added.
"""
return self.send('plugin.acl.addAcl', scope, user, permission)
def del_acl(self, scope, user):
"""Delete any ACL matching the given *scope* and *user* (or group if
*@group* syntax is used). It returns a boolean that indicate if the rule
was correctly removed.
"""
return self.send('plugin.acl.delAcl', scope, user)
class _Pages(object):
"""This object regroup methods for managing pages of a DokuWiki. This object
is accessible from the ``pages`` property of an `DokuWiki` instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.pages.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""List all pages of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *hash*: (bool) do an md5 sum of content
* *skipacl*: (bool) list everything regardless of ACL
"""
return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)
def changes(self, timestamp):
"""Returns a list of changes since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.pages.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentChanges', timestamp)
def search(self, string):
"""Performs a fulltext search on *string* and returns the first 15
results.
"""
return self._dokuwiki.send('dokuwiki.search', string)
def versions(self, page, offset=0):
"""Returns the available versions of *page*. *offset* can be used to
list earlier versions in the history.
"""
return self._dokuwiki.send('wiki.getPageVersions', page, offset)
def info(self, page, version=None):
"""Returns informations of *page*. Informations of the last version
is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageInfo', page))
def get(self, page, version=None):
"""Returns the content of *page*. The content of the last version is
returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPage', page))
def append(self, page, content, **options):
"""Appends *content* text to *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)
def html(self, page, version=None):
"""Returns HTML content of *page*. The HTML content of the last version
of the page is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageHTML', page))
def set(self, page, content, **options):
"""Set/replace the *content* of *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
try:
return self._dokuwiki.send('wiki.putPage', page, content, options)
except ExpatError as err:
# Sometime the first line of the XML response is blank which raise
# the 'ExpatError' exception although the change has been done. This
# allow to ignore the error.
if str(err) != ERR:
raise DokuWikiError(err)
def delete(self, page):
"""Delete *page* by setting an empty content."""
return self.set(page, '')
def lock(self, page):
"""Locks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[page], unlock=[])
if result['lockfail']:
raise DokuWikiError('unable to lock page')
def unlock(self, page):
"""Unlocks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[], unlock=[page])
if result['unlockfail']:
raise DokuWikiError('unable to unlock page')
def permission(self, page):
"""Returns the permission level of *page*."""
return self._dokuwiki.send('wiki.aclCheck', page)
def links(self, page):
"""Returns a list of all links contained in *page*."""
return self._dokuwiki.send('wiki.listLinks', page)
def backlinks(self, page):
"""Returns a list of all links referencing *page*."""
return self._dokuwiki.send('wiki.getBackLinks', page)
class _Medias(object):
"""This object regroup methods for managing medias of a DokuWiki. This
object is accessible from the ``medias`` property of an `DokuWiki`
instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.medias.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""Returns all medias of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *skipacl*: (bool) skip acl checking
* *pattern*: (str) check given pattern
* *hash*: (bool) add hashes to result list
"""
return self._dokuwiki.send('wiki.getAttachments', namespace, options)
def changes(self, timestamp):
"""Returns the list of medias changed since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.medias.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp)
def get(self, media, dirpath=None, filename=None, overwrite=False, b64decode=False):
"""Returns the binary data of *media* or save it to a file. If *dirpath*
is not set the binary data is returned, otherwise the data is saved
to a file. By default, the filename is the name of the media but it can
be changed with *filename* parameter. *overwrite* parameter allow to
overwrite the file if it already exists locally.
"""
import os
data = self._dokuwiki.send('wiki.getAttachment', media)
data = base64.b64decode(data) if b64decode else data.data
if dirpath is None:
return data
if filename is None:
filename = media.replace('/', ':').split(':')[-1]
if not os.path.exists(dirpath):
os.makedirs(dirpath)
filepath = os.path.join(dirpath, filename)
if os.path.exists(filepath) and not overwrite:
raise FileExistsError("[Errno 17] File exists: '%s'" % filepath)
with open(filepath, 'wb') as fhandler:
fhandler.write(data)
def info(self, media):
"""Returns informations of *media*."""
return self._dokuwiki.send('wiki.getAttachmentInfo', media)
def add(self, media, filepath, overwrite=True):
"""Set *media* from local file *filepath*. *overwrite* parameter specify
if the media must be overwrite if it exists remotely.
"""
with open(filepath, 'rb') as fhandler:
self._dokuwiki.send('wiki.putAttachment', media,
Binary(fhandler.read()), ow=overwrite)
def set(self, media, _bytes, overwrite=True, b64encode=False):
"""Set *media* from *_bytes*. *overwrite* parameter specify if the media
must be overwrite if it exists remotely.
"""
data = base64.b64encode(_bytes) if b64encode else Binary(_bytes)
self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite)
def delete(self, media):
"""Delete *media*."""
return self._dokuwiki.send('wiki.deleteAttachment', media)
class Dataentry(object):
"""Object that manage `data entries <https://www.dokuwiki.org/plugin:data>`_."""
@staticmethod
def get(content, keep_order=False):
"""Get dataentry from *content*. *keep_order* indicates whether to
return an ordered dictionnay."""
if keep_order:
from collections import OrderedDict
dataentry = OrderedDict()
else:
dataentry = {}
found = False
for line in content.split('\n'):
if line.strip().startswith('---- dataentry'):
found = True
continue
elif line == '----':
break
elif not found:
continue
line_split = line.split(':')
key = line_split[0].strip()
value = re.sub('#.*$', '', ':'.join(line_split[1:])).strip()
dataentry.setdefault(key, value)
if not found:
raise DokuWikiError('no dataentry found')
return dataentry
@staticmethod
def gen(name, data):
"""Generate dataentry *name* from *data*."""
return '---- dataentry %s ----\n%s\n----' % (name, '\n'.join(
'%s:%s' % (attr, value) for attr, value in data.items()))
@staticmethod
def ignore(content):
"""Remove dataentry from *content*."""
page_content = []
start = False
for line in content.split('\n'):
if line == '----' and not start:
start = True
continue
if start:
page_content.append(line)
return '\n'.join(page_content) if page_content else content
|
fmenabe/python-dokuwiki | dokuwiki.py | utc2local | python | def utc2local(date):
date_offset = (datetime.now() - datetime.utcnow())
# Python < 2.7 don't have the 'total_seconds' method so calculate it by hand!
date_offset = (date_offset.microseconds +
(date_offset.seconds + date_offset.days * 24 * 3600) * 1e6) / 1e6
date_offset = int(round(date_offset / 60 / 60))
return date + timedelta(hours=date_offset) | DokuWiki returns date with a +0000 timezone. This function convert *date*
to the local time. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L46-L55 | null | # -*- coding: utf-8 -*-
"""This python module aims to manage
`DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the
provided `XML-RPC API <https://www.dokuwiki.org/devel:xmlrpc>`_. It is
compatible with python2.7 and python3+.
Installation
------------
It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so you can use
the ``pip`` command to install it::
pip install dokuwiki
Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_
"""
import re
import sys
import base64
import weakref
from xml.parsers.expat import ExpatError
if sys.version_info[0] == 3:
from xmlrpc.client import ServerProxy, Binary, Fault, Transport
from urllib.parse import urlencode
else:
from xmlrpclib import ServerProxy, Binary, Fault, Transport
from urllib import urlencode
from datetime import datetime, timedelta
ERR = 'XML or text declaration not at start of entity: line 2, column 0'
_URL_RE = re.compile(r'(?P<proto>https?)://(?P<host>[^/]*)(?P<uri>/.*)?')
def date(date):
"""DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime``
type and the format changes between DokuWiki versions ... This function
convert *date* to a `datetime` object.
"""
date = date.value
return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S')
if len(date) == 24
else datetime.strptime(date, '%Y%m%dT%H:%M:%S'))
class DokuWikiError(Exception):
"""Exception raised by this module when there is an error."""
pass
class CookiesTransport(Transport):
"""A Python3 xmlrpc.client.Transport subclass that retains cookies."""
def __init__(self):
Transport.__init__(self)
self._cookies = dict()
def send_headers(self, connection, headers):
if self._cookies:
cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
connection.putheader("Cookie", "; ".join(cookies))
Transport.send_headers(self, connection, headers)
def parse_response(self, response):
"""parse and store cookie"""
try:
for header in response.msg.get_all("Set-Cookie"):
cookie = header.split(";", 1)[0]
cookieKey, cookieValue = cookie.split("=", 1)
self._cookies[cookieKey] = cookieValue
finally:
return Transport.parse_response(self, response)
class CookiesTransport2(Transport):
"""A Python2 xmlrpclib.Transport subclass that retains cookies."""
def __init__(self):
Transport.__init__(self)
self._cookies = dict()
def send_request(self, connection, handler, request_body):
Transport.send_request(self, connection, handler, request_body)
# set cookie below handler
if self._cookies:
cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
connection.putheader("Cookie", "; ".join(cookies))
def parse_response(self, response):
"""parse and store cookie"""
try:
for header in response.getheader("set-cookie").split(", "):
# filter 'expire' information
if not header.startswith("D"):
continue
cookie = header.split(";", 1)[0]
cookieKey, cookieValue = cookie.split("=", 1)
self._cookies[cookieKey] = cookieValue
finally:
return Transport.parse_response(self, response)
class DokuWiki(object):
"""Initialize a connection to a DokuWiki wiki. *url*, *user* and
*password* are respectively the URL, the login and the password for
connecting to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client`
**ServerProxy** parameters.
The exception `DokuWikiError` is raised if the authentification
fails but others exceptions (like ``gaierror`` for invalid domain,
``ProtocolError`` for an invalid wiki, ...) are not catched.
.. code::
try:
wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False)
except (DokuWikiError, Exception) as err:
print('unable to connect: %s' % err)
.. note::
The URL format is: `PROTO://FQDN[/PATH]` (*https://www.example.com/dokuwiki*
for example).
"""
def __init__(self, url, user, password, cookieAuth=False, **kwargs):
"""Initialize the object by connecting to the XMLRPC server."""
# Initialize XMLRPC client.
try:
params = _URL_RE.search(url).groupdict()
if cookieAuth == False:
url = '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % (
params['proto'], user, password, params['host'], params['uri'] or '')
else:
url = '%s://%s%s/lib/exe/xmlrpc.php' % (
params['proto'], params['host'], params['uri'] or '')
except AttributeError:
raise DokuWikiError("invalid url '%s'" % url)
if cookieAuth == False:
self.proxy = ServerProxy(url, **kwargs)
else:
if sys.version_info[0] == 3:
self.proxy = ServerProxy(url, CookiesTransport(), **kwargs)
else:
self.proxy = ServerProxy(url, CookiesTransport2(), **kwargs)
# Force login to check the connection.
if not self.login(user, password):
raise DokuWikiError('invalid login or password!')
# Set "namespaces" for pages and medias functions.
self.pages = _Pages(weakref.ref(self)())
self.medias = _Medias(weakref.ref(self)())
def send(self, command, *args, **kwargs):
"""Generic method for executing an XML-RPC *command*. *args* and
*kwargs* are the arguments and parameters needed by the command.
"""
args = list(args)
if kwargs:
args.append(kwargs)
method = self.proxy
for elt in command.split('.'):
method = getattr(method, elt)
try:
return method(*args)
except Fault as err:
if err.faultCode == 121:
return {}
elif err.faultCode == 321:
return []
raise DokuWikiError(err)
except ExpatError as err:
if str(err) != ERR:
raise DokuWikiError(err)
@property
def version(self):
"""Property that returns the DokuWiki version of the remote Wiki."""
return self.send('dokuwiki.getVersion')
@property
def time(self):
"""Property that returns the current time at the remote wiki server as
Unix timestamp.
"""
return self.send('dokuwiki.getTime')
@property
def xmlrpc_version(self):
"""Property that returns the XML RPC interface version of the remote
Wiki. This is DokuWiki implementation specific and independent of the
supported standard API version returned by ``wiki.getRPCVersionSupported``.
"""
return self.send('dokuwiki.getXMLRPCAPIVersion')
@property
def xmlrpc_supported_version(self):
"""Property that returns *2* with the supported RPC API version."""
return self.send('wiki.getRPCVersionSupported')
@property
def title(self):
"""Property that returns the title of the wiki."""
return self.send('dokuwiki.getTitle')
def login(self, user, password):
"""Log to the wiki using *user* and *password* credentials. It returns
a boolean that indicates if the user succesfully authenticate."""
return self.send('dokuwiki.login', user, password)
def add_acl(self, scope, user, permission):
"""Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts
the page/namespace *scope* to *user* (use *@group* syntax for groups)
with *permission* level. It returns a boolean that indicate if the rule
was correctly added.
"""
return self.send('plugin.acl.addAcl', scope, user, permission)
def del_acl(self, scope, user):
"""Delete any ACL matching the given *scope* and *user* (or group if
*@group* syntax is used). It returns a boolean that indicate if the rule
was correctly removed.
"""
return self.send('plugin.acl.delAcl', scope, user)
class _Pages(object):
"""This object regroup methods for managing pages of a DokuWiki. This object
is accessible from the ``pages`` property of an `DokuWiki` instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.pages.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""List all pages of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *hash*: (bool) do an md5 sum of content
* *skipacl*: (bool) list everything regardless of ACL
"""
return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)
def changes(self, timestamp):
"""Returns a list of changes since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.pages.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentChanges', timestamp)
def search(self, string):
"""Performs a fulltext search on *string* and returns the first 15
results.
"""
return self._dokuwiki.send('dokuwiki.search', string)
def versions(self, page, offset=0):
"""Returns the available versions of *page*. *offset* can be used to
list earlier versions in the history.
"""
return self._dokuwiki.send('wiki.getPageVersions', page, offset)
def info(self, page, version=None):
"""Returns informations of *page*. Informations of the last version
is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageInfo', page))
def get(self, page, version=None):
"""Returns the content of *page*. The content of the last version is
returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPage', page))
def append(self, page, content, **options):
"""Appends *content* text to *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)
def html(self, page, version=None):
"""Returns HTML content of *page*. The HTML content of the last version
of the page is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageHTML', page))
def set(self, page, content, **options):
"""Set/replace the *content* of *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
try:
return self._dokuwiki.send('wiki.putPage', page, content, options)
except ExpatError as err:
# Sometime the first line of the XML response is blank which raise
# the 'ExpatError' exception although the change has been done. This
# allow to ignore the error.
if str(err) != ERR:
raise DokuWikiError(err)
def delete(self, page):
"""Delete *page* by setting an empty content."""
return self.set(page, '')
def lock(self, page):
"""Locks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[page], unlock=[])
if result['lockfail']:
raise DokuWikiError('unable to lock page')
def unlock(self, page):
"""Unlocks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[], unlock=[page])
if result['unlockfail']:
raise DokuWikiError('unable to unlock page')
def permission(self, page):
"""Returns the permission level of *page*."""
return self._dokuwiki.send('wiki.aclCheck', page)
def links(self, page):
"""Returns a list of all links contained in *page*."""
return self._dokuwiki.send('wiki.listLinks', page)
def backlinks(self, page):
"""Returns a list of all links referencing *page*."""
return self._dokuwiki.send('wiki.getBackLinks', page)
class _Medias(object):
"""This object regroup methods for managing medias of a DokuWiki. This
object is accessible from the ``medias`` property of an `DokuWiki`
instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.medias.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""Returns all medias of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *skipacl*: (bool) skip acl checking
* *pattern*: (str) check given pattern
* *hash*: (bool) add hashes to result list
"""
return self._dokuwiki.send('wiki.getAttachments', namespace, options)
def changes(self, timestamp):
"""Returns the list of medias changed since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.medias.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp)
def get(self, media, dirpath=None, filename=None, overwrite=False, b64decode=False):
"""Returns the binary data of *media* or save it to a file. If *dirpath*
is not set the binary data is returned, otherwise the data is saved
to a file. By default, the filename is the name of the media but it can
be changed with *filename* parameter. *overwrite* parameter allow to
overwrite the file if it already exists locally.
"""
import os
data = self._dokuwiki.send('wiki.getAttachment', media)
data = base64.b64decode(data) if b64decode else data.data
if dirpath is None:
return data
if filename is None:
filename = media.replace('/', ':').split(':')[-1]
if not os.path.exists(dirpath):
os.makedirs(dirpath)
filepath = os.path.join(dirpath, filename)
if os.path.exists(filepath) and not overwrite:
raise FileExistsError("[Errno 17] File exists: '%s'" % filepath)
with open(filepath, 'wb') as fhandler:
fhandler.write(data)
def info(self, media):
"""Returns informations of *media*."""
return self._dokuwiki.send('wiki.getAttachmentInfo', media)
def add(self, media, filepath, overwrite=True):
"""Set *media* from local file *filepath*. *overwrite* parameter specify
if the media must be overwrite if it exists remotely.
"""
with open(filepath, 'rb') as fhandler:
self._dokuwiki.send('wiki.putAttachment', media,
Binary(fhandler.read()), ow=overwrite)
def set(self, media, _bytes, overwrite=True, b64encode=False):
"""Set *media* from *_bytes*. *overwrite* parameter specify if the media
must be overwrite if it exists remotely.
"""
data = base64.b64encode(_bytes) if b64encode else Binary(_bytes)
self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite)
def delete(self, media):
"""Delete *media*."""
return self._dokuwiki.send('wiki.deleteAttachment', media)
class Dataentry(object):
"""Object that manage `data entries <https://www.dokuwiki.org/plugin:data>`_."""
@staticmethod
def get(content, keep_order=False):
"""Get dataentry from *content*. *keep_order* indicates whether to
return an ordered dictionnay."""
if keep_order:
from collections import OrderedDict
dataentry = OrderedDict()
else:
dataentry = {}
found = False
for line in content.split('\n'):
if line.strip().startswith('---- dataentry'):
found = True
continue
elif line == '----':
break
elif not found:
continue
line_split = line.split(':')
key = line_split[0].strip()
value = re.sub('#.*$', '', ':'.join(line_split[1:])).strip()
dataentry.setdefault(key, value)
if not found:
raise DokuWikiError('no dataentry found')
return dataentry
@staticmethod
def gen(name, data):
"""Generate dataentry *name* from *data*."""
return '---- dataentry %s ----\n%s\n----' % (name, '\n'.join(
'%s:%s' % (attr, value) for attr, value in data.items()))
@staticmethod
def ignore(content):
"""Remove dataentry from *content*."""
page_content = []
start = False
for line in content.split('\n'):
if line == '----' and not start:
start = True
continue
if start:
page_content.append(line)
return '\n'.join(page_content) if page_content else content
|
fmenabe/python-dokuwiki | dokuwiki.py | CookiesTransport.parse_response | python | def parse_response(self, response):
try:
for header in response.msg.get_all("Set-Cookie"):
cookie = header.split(";", 1)[0]
cookieKey, cookieValue = cookie.split("=", 1)
self._cookies[cookieKey] = cookieValue
finally:
return Transport.parse_response(self, response) | parse and store cookie | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L74-L82 | null | class CookiesTransport(Transport):
"""A Python3 xmlrpc.client.Transport subclass that retains cookies."""
def __init__(self):
Transport.__init__(self)
self._cookies = dict()
def send_headers(self, connection, headers):
if self._cookies:
cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
connection.putheader("Cookie", "; ".join(cookies))
Transport.send_headers(self, connection, headers)
|
fmenabe/python-dokuwiki | dokuwiki.py | CookiesTransport2.parse_response | python | def parse_response(self, response):
try:
for header in response.getheader("set-cookie").split(", "):
# filter 'expire' information
if not header.startswith("D"):
continue
cookie = header.split(";", 1)[0]
cookieKey, cookieValue = cookie.split("=", 1)
self._cookies[cookieKey] = cookieValue
finally:
return Transport.parse_response(self, response) | parse and store cookie | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L97-L108 | null | class CookiesTransport2(Transport):
"""A Python2 xmlrpclib.Transport subclass that retains cookies."""
def __init__(self):
Transport.__init__(self)
self._cookies = dict()
def send_request(self, connection, handler, request_body):
Transport.send_request(self, connection, handler, request_body)
# set cookie below handler
if self._cookies:
cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
connection.putheader("Cookie", "; ".join(cookies))
|
fmenabe/python-dokuwiki | dokuwiki.py | DokuWiki.send | python | def send(self, command, *args, **kwargs):
args = list(args)
if kwargs:
args.append(kwargs)
method = self.proxy
for elt in command.split('.'):
method = getattr(method, elt)
try:
return method(*args)
except Fault as err:
if err.faultCode == 121:
return {}
elif err.faultCode == 321:
return []
raise DokuWikiError(err)
except ExpatError as err:
if str(err) != ERR:
raise DokuWikiError(err) | Generic method for executing an XML-RPC *command*. *args* and
*kwargs* are the arguments and parameters needed by the command. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L163-L185 | null | class DokuWiki(object):
"""Initialize a connection to a DokuWiki wiki. *url*, *user* and
*password* are respectively the URL, the login and the password for
connecting to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client`
**ServerProxy** parameters.
The exception `DokuWikiError` is raised if the authentification
fails but others exceptions (like ``gaierror`` for invalid domain,
``ProtocolError`` for an invalid wiki, ...) are not catched.
.. code::
try:
wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False)
except (DokuWikiError, Exception) as err:
print('unable to connect: %s' % err)
.. note::
The URL format is: `PROTO://FQDN[/PATH]` (*https://www.example.com/dokuwiki*
for example).
"""
def __init__(self, url, user, password, cookieAuth=False, **kwargs):
"""Initialize the object by connecting to the XMLRPC server."""
# Initialize XMLRPC client.
try:
params = _URL_RE.search(url).groupdict()
if cookieAuth == False:
url = '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % (
params['proto'], user, password, params['host'], params['uri'] or '')
else:
url = '%s://%s%s/lib/exe/xmlrpc.php' % (
params['proto'], params['host'], params['uri'] or '')
except AttributeError:
raise DokuWikiError("invalid url '%s'" % url)
if cookieAuth == False:
self.proxy = ServerProxy(url, **kwargs)
else:
if sys.version_info[0] == 3:
self.proxy = ServerProxy(url, CookiesTransport(), **kwargs)
else:
self.proxy = ServerProxy(url, CookiesTransport2(), **kwargs)
# Force login to check the connection.
if not self.login(user, password):
raise DokuWikiError('invalid login or password!')
# Set "namespaces" for pages and medias functions.
self.pages = _Pages(weakref.ref(self)())
self.medias = _Medias(weakref.ref(self)())
@property
def version(self):
"""Property that returns the DokuWiki version of the remote Wiki."""
return self.send('dokuwiki.getVersion')
@property
def time(self):
"""Property that returns the current time at the remote wiki server as
Unix timestamp.
"""
return self.send('dokuwiki.getTime')
@property
def xmlrpc_version(self):
"""Property that returns the XML RPC interface version of the remote
Wiki. This is DokuWiki implementation specific and independent of the
supported standard API version returned by ``wiki.getRPCVersionSupported``.
"""
return self.send('dokuwiki.getXMLRPCAPIVersion')
@property
def xmlrpc_supported_version(self):
"""Property that returns *2* with the supported RPC API version."""
return self.send('wiki.getRPCVersionSupported')
@property
def title(self):
"""Property that returns the title of the wiki."""
return self.send('dokuwiki.getTitle')
def login(self, user, password):
"""Log to the wiki using *user* and *password* credentials. It returns
a boolean that indicates if the user succesfully authenticate."""
return self.send('dokuwiki.login', user, password)
def add_acl(self, scope, user, permission):
"""Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts
the page/namespace *scope* to *user* (use *@group* syntax for groups)
with *permission* level. It returns a boolean that indicate if the rule
was correctly added.
"""
return self.send('plugin.acl.addAcl', scope, user, permission)
def del_acl(self, scope, user):
"""Delete any ACL matching the given *scope* and *user* (or group if
*@group* syntax is used). It returns a boolean that indicate if the rule
was correctly removed.
"""
return self.send('plugin.acl.delAcl', scope, user)
|
fmenabe/python-dokuwiki | dokuwiki.py | DokuWiki.add_acl | python | def add_acl(self, scope, user, permission):
return self.send('plugin.acl.addAcl', scope, user, permission) | Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts
the page/namespace *scope* to *user* (use *@group* syntax for groups)
with *permission* level. It returns a boolean that indicate if the rule
was correctly added. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L221-L227 | [
"def send(self, command, *args, **kwargs):\n \"\"\"Generic method for executing an XML-RPC *command*. *args* and\n *kwargs* are the arguments and parameters needed by the command.\n \"\"\"\n args = list(args)\n if kwargs:\n args.append(kwargs)\n\n method = self.proxy\n for elt in command.split('.'):\n method = getattr(method, elt)\n\n try:\n return method(*args)\n except Fault as err:\n if err.faultCode == 121:\n return {}\n elif err.faultCode == 321:\n return []\n raise DokuWikiError(err)\n except ExpatError as err:\n if str(err) != ERR:\n raise DokuWikiError(err)\n"
] | class DokuWiki(object):
"""Initialize a connection to a DokuWiki wiki. *url*, *user* and
*password* are respectively the URL, the login and the password for
connecting to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client`
**ServerProxy** parameters.
The exception `DokuWikiError` is raised if the authentification
fails but others exceptions (like ``gaierror`` for invalid domain,
``ProtocolError`` for an invalid wiki, ...) are not catched.
.. code::
try:
wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False)
except (DokuWikiError, Exception) as err:
print('unable to connect: %s' % err)
.. note::
The URL format is: `PROTO://FQDN[/PATH]` (*https://www.example.com/dokuwiki*
for example).
"""
def __init__(self, url, user, password, cookieAuth=False, **kwargs):
"""Initialize the object by connecting to the XMLRPC server."""
# Initialize XMLRPC client.
try:
params = _URL_RE.search(url).groupdict()
if cookieAuth == False:
url = '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % (
params['proto'], user, password, params['host'], params['uri'] or '')
else:
url = '%s://%s%s/lib/exe/xmlrpc.php' % (
params['proto'], params['host'], params['uri'] or '')
except AttributeError:
raise DokuWikiError("invalid url '%s'" % url)
if cookieAuth == False:
self.proxy = ServerProxy(url, **kwargs)
else:
if sys.version_info[0] == 3:
self.proxy = ServerProxy(url, CookiesTransport(), **kwargs)
else:
self.proxy = ServerProxy(url, CookiesTransport2(), **kwargs)
# Force login to check the connection.
if not self.login(user, password):
raise DokuWikiError('invalid login or password!')
# Set "namespaces" for pages and medias functions.
self.pages = _Pages(weakref.ref(self)())
self.medias = _Medias(weakref.ref(self)())
def send(self, command, *args, **kwargs):
"""Generic method for executing an XML-RPC *command*. *args* and
*kwargs* are the arguments and parameters needed by the command.
"""
args = list(args)
if kwargs:
args.append(kwargs)
method = self.proxy
for elt in command.split('.'):
method = getattr(method, elt)
try:
return method(*args)
except Fault as err:
if err.faultCode == 121:
return {}
elif err.faultCode == 321:
return []
raise DokuWikiError(err)
except ExpatError as err:
if str(err) != ERR:
raise DokuWikiError(err)
@property
def version(self):
"""Property that returns the DokuWiki version of the remote Wiki."""
return self.send('dokuwiki.getVersion')
@property
def time(self):
"""Property that returns the current time at the remote wiki server as
Unix timestamp.
"""
return self.send('dokuwiki.getTime')
@property
def xmlrpc_version(self):
"""Property that returns the XML RPC interface version of the remote
Wiki. This is DokuWiki implementation specific and independent of the
supported standard API version returned by ``wiki.getRPCVersionSupported``.
"""
return self.send('dokuwiki.getXMLRPCAPIVersion')
@property
def xmlrpc_supported_version(self):
"""Property that returns *2* with the supported RPC API version."""
return self.send('wiki.getRPCVersionSupported')
@property
def title(self):
"""Property that returns the title of the wiki."""
return self.send('dokuwiki.getTitle')
def login(self, user, password):
"""Log to the wiki using *user* and *password* credentials. It returns
a boolean that indicates if the user succesfully authenticate."""
return self.send('dokuwiki.login', user, password)
def del_acl(self, scope, user):
"""Delete any ACL matching the given *scope* and *user* (or group if
*@group* syntax is used). It returns a boolean that indicate if the rule
was correctly removed.
"""
return self.send('plugin.acl.delAcl', scope, user)
|
fmenabe/python-dokuwiki | dokuwiki.py | _Pages.info | python | def info(self, page, version=None):
return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageInfo', page)) | Returns informations of *page*. Informations of the last version
is returned if *version* is not set. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L281-L287 | null | class _Pages(object):
"""This object regroup methods for managing pages of a DokuWiki. This object
is accessible from the ``pages`` property of an `DokuWiki` instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.pages.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""List all pages of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *hash*: (bool) do an md5 sum of content
* *skipacl*: (bool) list everything regardless of ACL
"""
return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)
def changes(self, timestamp):
"""Returns a list of changes since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.pages.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentChanges', timestamp)
def search(self, string):
"""Performs a fulltext search on *string* and returns the first 15
results.
"""
return self._dokuwiki.send('dokuwiki.search', string)
def versions(self, page, offset=0):
"""Returns the available versions of *page*. *offset* can be used to
list earlier versions in the history.
"""
return self._dokuwiki.send('wiki.getPageVersions', page, offset)
def get(self, page, version=None):
"""Returns the content of *page*. The content of the last version is
returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPage', page))
def append(self, page, content, **options):
"""Appends *content* text to *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)
def html(self, page, version=None):
"""Returns HTML content of *page*. The HTML content of the last version
of the page is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageHTML', page))
def set(self, page, content, **options):
"""Set/replace the *content* of *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
try:
return self._dokuwiki.send('wiki.putPage', page, content, options)
except ExpatError as err:
# Sometime the first line of the XML response is blank which raise
# the 'ExpatError' exception although the change has been done. This
# allow to ignore the error.
if str(err) != ERR:
raise DokuWikiError(err)
def delete(self, page):
"""Delete *page* by setting an empty content."""
return self.set(page, '')
def lock(self, page):
"""Locks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[page], unlock=[])
if result['lockfail']:
raise DokuWikiError('unable to lock page')
def unlock(self, page):
"""Unlocks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[], unlock=[page])
if result['unlockfail']:
raise DokuWikiError('unable to unlock page')
def permission(self, page):
"""Returns the permission level of *page*."""
return self._dokuwiki.send('wiki.aclCheck', page)
def links(self, page):
"""Returns a list of all links contained in *page*."""
return self._dokuwiki.send('wiki.listLinks', page)
def backlinks(self, page):
"""Returns a list of all links referencing *page*."""
return self._dokuwiki.send('wiki.getBackLinks', page)
|
fmenabe/python-dokuwiki | dokuwiki.py | _Pages.get | python | def get(self, page, version=None):
return (self._dokuwiki.send('wiki.getPageVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPage', page)) | Returns the content of *page*. The content of the last version is
returned if *version* is not set. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L289-L295 | null | class _Pages(object):
"""This object regroup methods for managing pages of a DokuWiki. This object
is accessible from the ``pages`` property of an `DokuWiki` instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.pages.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""List all pages of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *hash*: (bool) do an md5 sum of content
* *skipacl*: (bool) list everything regardless of ACL
"""
return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)
def changes(self, timestamp):
"""Returns a list of changes since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.pages.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentChanges', timestamp)
def search(self, string):
"""Performs a fulltext search on *string* and returns the first 15
results.
"""
return self._dokuwiki.send('dokuwiki.search', string)
def versions(self, page, offset=0):
"""Returns the available versions of *page*. *offset* can be used to
list earlier versions in the history.
"""
return self._dokuwiki.send('wiki.getPageVersions', page, offset)
def info(self, page, version=None):
"""Returns informations of *page*. Informations of the last version
is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageInfo', page))
def append(self, page, content, **options):
"""Appends *content* text to *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)
def html(self, page, version=None):
"""Returns HTML content of *page*. The HTML content of the last version
of the page is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageHTML', page))
def set(self, page, content, **options):
"""Set/replace the *content* of *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
try:
return self._dokuwiki.send('wiki.putPage', page, content, options)
except ExpatError as err:
# Sometime the first line of the XML response is blank which raise
# the 'ExpatError' exception although the change has been done. This
# allow to ignore the error.
if str(err) != ERR:
raise DokuWikiError(err)
def delete(self, page):
"""Delete *page* by setting an empty content."""
return self.set(page, '')
def lock(self, page):
"""Locks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[page], unlock=[])
if result['lockfail']:
raise DokuWikiError('unable to lock page')
def unlock(self, page):
"""Unlocks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[], unlock=[page])
if result['unlockfail']:
raise DokuWikiError('unable to unlock page')
def permission(self, page):
"""Returns the permission level of *page*."""
return self._dokuwiki.send('wiki.aclCheck', page)
def links(self, page):
"""Returns a list of all links contained in *page*."""
return self._dokuwiki.send('wiki.listLinks', page)
def backlinks(self, page):
    """Return every page that links to *page*."""
    return self._dokuwiki.send('wiki.getBackLinks', page)
|
fmenabe/python-dokuwiki | dokuwiki.py | _Pages.append | python | def append(self, page, content, **options):
return self._dokuwiki.send('dokuwiki.appendPage', page, content, options) | Appends *content* text to *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L298-L306 | null | class _Pages(object):
"""This object regroup methods for managing pages of a DokuWiki. This object
is accessible from the ``pages`` property of an `DokuWiki` instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.pages.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""List all pages of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *hash*: (bool) do an md5 sum of content
* *skipacl*: (bool) list everything regardless of ACL
"""
return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)
def changes(self, timestamp):
"""Returns a list of changes since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.pages.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentChanges', timestamp)
def search(self, string):
"""Performs a fulltext search on *string* and returns the first 15
results.
"""
return self._dokuwiki.send('dokuwiki.search', string)
def versions(self, page, offset=0):
"""Returns the available versions of *page*. *offset* can be used to
list earlier versions in the history.
"""
return self._dokuwiki.send('wiki.getPageVersions', page, offset)
def info(self, page, version=None):
"""Returns informations of *page*. Informations of the last version
is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageInfo', page))
def get(self, page, version=None):
"""Returns the content of *page*. The content of the last version is
returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPage', page))
def html(self, page, version=None):
"""Returns HTML content of *page*. The HTML content of the last version
of the page is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageHTML', page))
def set(self, page, content, **options):
"""Set/replace the *content* of *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
try:
return self._dokuwiki.send('wiki.putPage', page, content, options)
except ExpatError as err:
# Sometime the first line of the XML response is blank which raise
# the 'ExpatError' exception although the change has been done. This
# allow to ignore the error.
if str(err) != ERR:
raise DokuWikiError(err)
def delete(self, page):
"""Delete *page* by setting an empty content."""
return self.set(page, '')
def lock(self, page):
"""Locks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[page], unlock=[])
if result['lockfail']:
raise DokuWikiError('unable to lock page')
def unlock(self, page):
"""Unlocks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[], unlock=[page])
if result['unlockfail']:
raise DokuWikiError('unable to unlock page')
def permission(self, page):
"""Returns the permission level of *page*."""
return self._dokuwiki.send('wiki.aclCheck', page)
def links(self, page):
"""Returns a list of all links contained in *page*."""
return self._dokuwiki.send('wiki.listLinks', page)
def backlinks(self, page):
"""Returns a list of all links referencing *page*."""
return self._dokuwiki.send('wiki.getBackLinks', page)
|
fmenabe/python-dokuwiki | dokuwiki.py | _Pages.html | python | def html(self, page, version=None):
return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageHTML', page)) | Returns HTML content of *page*. The HTML content of the last version
of the page is returned if *version* is not set. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L308-L314 | null | class _Pages(object):
"""This object regroup methods for managing pages of a DokuWiki. This object
is accessible from the ``pages`` property of an `DokuWiki` instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.pages.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""List all pages of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *hash*: (bool) do an md5 sum of content
* *skipacl*: (bool) list everything regardless of ACL
"""
return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)
def changes(self, timestamp):
"""Returns a list of changes since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.pages.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentChanges', timestamp)
def search(self, string):
"""Performs a fulltext search on *string* and returns the first 15
results.
"""
return self._dokuwiki.send('dokuwiki.search', string)
def versions(self, page, offset=0):
"""Returns the available versions of *page*. *offset* can be used to
list earlier versions in the history.
"""
return self._dokuwiki.send('wiki.getPageVersions', page, offset)
def info(self, page, version=None):
"""Returns informations of *page*. Informations of the last version
is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageInfo', page))
def get(self, page, version=None):
"""Returns the content of *page*. The content of the last version is
returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPage', page))
def append(self, page, content, **options):
"""Appends *content* text to *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)
def set(self, page, content, **options):
"""Set/replace the *content* of *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
try:
return self._dokuwiki.send('wiki.putPage', page, content, options)
except ExpatError as err:
# Sometime the first line of the XML response is blank which raise
# the 'ExpatError' exception although the change has been done. This
# allow to ignore the error.
if str(err) != ERR:
raise DokuWikiError(err)
def delete(self, page):
"""Delete *page* by setting an empty content."""
return self.set(page, '')
def lock(self, page):
"""Locks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[page], unlock=[])
if result['lockfail']:
raise DokuWikiError('unable to lock page')
def unlock(self, page):
"""Unlocks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[], unlock=[page])
if result['unlockfail']:
raise DokuWikiError('unable to unlock page')
def permission(self, page):
"""Returns the permission level of *page*."""
return self._dokuwiki.send('wiki.aclCheck', page)
def links(self, page):
"""Returns a list of all links contained in *page*."""
return self._dokuwiki.send('wiki.listLinks', page)
def backlinks(self, page):
"""Returns a list of all links referencing *page*."""
return self._dokuwiki.send('wiki.getBackLinks', page)
|
fmenabe/python-dokuwiki | dokuwiki.py | _Pages.set | python | def set(self, page, content, **options):
try:
return self._dokuwiki.send('wiki.putPage', page, content, options)
except ExpatError as err:
# Sometime the first line of the XML response is blank which raise
# the 'ExpatError' exception although the change has been done. This
# allow to ignore the error.
if str(err) != ERR:
raise DokuWikiError(err) | Set/replace the *content* of *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L316-L331 | null | class _Pages(object):
"""This object regroup methods for managing pages of a DokuWiki. This object
is accessible from the ``pages`` property of an `DokuWiki` instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.pages.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""List all pages of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *hash*: (bool) do an md5 sum of content
* *skipacl*: (bool) list everything regardless of ACL
"""
return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)
def changes(self, timestamp):
"""Returns a list of changes since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.pages.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentChanges', timestamp)
def search(self, string):
"""Performs a fulltext search on *string* and returns the first 15
results.
"""
return self._dokuwiki.send('dokuwiki.search', string)
def versions(self, page, offset=0):
"""Returns the available versions of *page*. *offset* can be used to
list earlier versions in the history.
"""
return self._dokuwiki.send('wiki.getPageVersions', page, offset)
def info(self, page, version=None):
"""Returns informations of *page*. Informations of the last version
is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageInfo', page))
def get(self, page, version=None):
"""Returns the content of *page*. The content of the last version is
returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPage', page))
def append(self, page, content, **options):
"""Appends *content* text to *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)
def html(self, page, version=None):
"""Returns HTML content of *page*. The HTML content of the last version
of the page is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageHTML', page))
def delete(self, page):
"""Delete *page* by setting an empty content."""
return self.set(page, '')
def lock(self, page):
"""Locks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[page], unlock=[])
if result['lockfail']:
raise DokuWikiError('unable to lock page')
def unlock(self, page):
"""Unlocks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[], unlock=[page])
if result['unlockfail']:
raise DokuWikiError('unable to unlock page')
def permission(self, page):
"""Returns the permission level of *page*."""
return self._dokuwiki.send('wiki.aclCheck', page)
def links(self, page):
"""Returns a list of all links contained in *page*."""
return self._dokuwiki.send('wiki.listLinks', page)
def backlinks(self, page):
"""Returns a list of all links referencing *page*."""
return self._dokuwiki.send('wiki.getBackLinks', page)
|
fmenabe/python-dokuwiki | dokuwiki.py | _Pages.lock | python | def lock(self, page):
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[page], unlock=[])
if result['lockfail']:
raise DokuWikiError('unable to lock page') | Locks *page*. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L337-L342 | null | class _Pages(object):
"""This object regroup methods for managing pages of a DokuWiki. This object
is accessible from the ``pages`` property of an `DokuWiki` instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.pages.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""List all pages of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *hash*: (bool) do an md5 sum of content
* *skipacl*: (bool) list everything regardless of ACL
"""
return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)
def changes(self, timestamp):
"""Returns a list of changes since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.pages.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentChanges', timestamp)
def search(self, string):
"""Performs a fulltext search on *string* and returns the first 15
results.
"""
return self._dokuwiki.send('dokuwiki.search', string)
def versions(self, page, offset=0):
"""Returns the available versions of *page*. *offset* can be used to
list earlier versions in the history.
"""
return self._dokuwiki.send('wiki.getPageVersions', page, offset)
def info(self, page, version=None):
"""Returns informations of *page*. Informations of the last version
is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageInfo', page))
def get(self, page, version=None):
"""Returns the content of *page*. The content of the last version is
returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPage', page))
def append(self, page, content, **options):
"""Appends *content* text to *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)
def html(self, page, version=None):
"""Returns HTML content of *page*. The HTML content of the last version
of the page is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageHTML', page))
def set(self, page, content, **options):
"""Set/replace the *content* of *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
try:
return self._dokuwiki.send('wiki.putPage', page, content, options)
except ExpatError as err:
# Sometime the first line of the XML response is blank which raise
# the 'ExpatError' exception although the change has been done. This
# allow to ignore the error.
if str(err) != ERR:
raise DokuWikiError(err)
def delete(self, page):
"""Delete *page* by setting an empty content."""
return self.set(page, '')
def unlock(self, page):
"""Unlocks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[], unlock=[page])
if result['unlockfail']:
raise DokuWikiError('unable to unlock page')
def permission(self, page):
"""Returns the permission level of *page*."""
return self._dokuwiki.send('wiki.aclCheck', page)
def links(self, page):
"""Returns a list of all links contained in *page*."""
return self._dokuwiki.send('wiki.listLinks', page)
def backlinks(self, page):
"""Returns a list of all links referencing *page*."""
return self._dokuwiki.send('wiki.getBackLinks', page)
|
def get(self, media, dirpath=None, filename=None, overwrite=False, b64decode=False):
    """Fetch the binary data of *media*, optionally saving it to a file.

    Without *dirpath* the raw data is returned.  With *dirpath*, the data
    is written under that directory (created when missing), using
    *filename* or, by default, the last component of the media id.  An
    existing local file is only replaced when *overwrite* is true.
    """
    import os
    payload = self._dokuwiki.send('wiki.getAttachment', media)
    payload = base64.b64decode(payload) if b64decode else payload.data
    if dirpath is None:
        return payload
    if filename is None:
        filename = media.replace('/', ':').split(':')[-1]
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
    filepath = os.path.join(dirpath, filename)
    if os.path.exists(filepath) and not overwrite:
        raise FileExistsError("[Errno 17] File exists: '%s'" % filepath)
    with open(filepath, 'wb') as handle:
        handle.write(payload)
is not set the binary data is returned, otherwise the data is saved
to a file. By default, the filename is the name of the media but it can
be changed with *filename* parameter. *overwrite* parameter allow to
overwrite the file if it already exists locally. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L397-L419 | null | class _Medias(object):
"""This object regroup methods for managing medias of a DokuWiki. This
object is accessible from the ``medias`` property of an `DokuWiki`
instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.medias.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""Returns all medias of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *skipacl*: (bool) skip acl checking
* *pattern*: (str) check given pattern
* *hash*: (bool) add hashes to result list
"""
return self._dokuwiki.send('wiki.getAttachments', namespace, options)
def changes(self, timestamp):
"""Returns the list of medias changed since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.medias.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp)
def info(self, media):
"""Returns informations of *media*."""
return self._dokuwiki.send('wiki.getAttachmentInfo', media)
def add(self, media, filepath, overwrite=True):
"""Set *media* from local file *filepath*. *overwrite* parameter specify
if the media must be overwrite if it exists remotely.
"""
with open(filepath, 'rb') as fhandler:
self._dokuwiki.send('wiki.putAttachment', media,
Binary(fhandler.read()), ow=overwrite)
def set(self, media, _bytes, overwrite=True, b64encode=False):
"""Set *media* from *_bytes*. *overwrite* parameter specify if the media
must be overwrite if it exists remotely.
"""
data = base64.b64encode(_bytes) if b64encode else Binary(_bytes)
self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite)
def delete(self, media):
"""Delete *media*."""
return self._dokuwiki.send('wiki.deleteAttachment', media)
|
def add(self, media, filepath, overwrite=True):
    """Upload the local file *filepath* as *media*.

    *overwrite* controls whether an existing remote media is replaced.
    """
    with open(filepath, 'rb') as handle:
        payload = Binary(handle.read())
    self._dokuwiki.send('wiki.putAttachment', media, payload, ow=overwrite)
if the media must be overwrite if it exists remotely. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L425-L431 | null | class _Medias(object):
"""This object regroup methods for managing medias of a DokuWiki. This
object is accessible from the ``medias`` property of an `DokuWiki`
instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.medias.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""Returns all medias of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *skipacl*: (bool) skip acl checking
* *pattern*: (str) check given pattern
* *hash*: (bool) add hashes to result list
"""
return self._dokuwiki.send('wiki.getAttachments', namespace, options)
def changes(self, timestamp):
"""Returns the list of medias changed since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.medias.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp)
def get(self, media, dirpath=None, filename=None, overwrite=False, b64decode=False):
"""Returns the binary data of *media* or save it to a file. If *dirpath*
is not set the binary data is returned, otherwise the data is saved
to a file. By default, the filename is the name of the media but it can
be changed with *filename* parameter. *overwrite* parameter allow to
overwrite the file if it already exists locally.
"""
import os
data = self._dokuwiki.send('wiki.getAttachment', media)
data = base64.b64decode(data) if b64decode else data.data
if dirpath is None:
return data
if filename is None:
filename = media.replace('/', ':').split(':')[-1]
if not os.path.exists(dirpath):
os.makedirs(dirpath)
filepath = os.path.join(dirpath, filename)
if os.path.exists(filepath) and not overwrite:
raise FileExistsError("[Errno 17] File exists: '%s'" % filepath)
with open(filepath, 'wb') as fhandler:
fhandler.write(data)
def info(self, media):
"""Returns informations of *media*."""
return self._dokuwiki.send('wiki.getAttachmentInfo', media)
def set(self, media, _bytes, overwrite=True, b64encode=False):
"""Set *media* from *_bytes*. *overwrite* parameter specify if the media
must be overwrite if it exists remotely.
"""
data = base64.b64encode(_bytes) if b64encode else Binary(_bytes)
self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite)
def delete(self, media):
"""Delete *media*."""
return self._dokuwiki.send('wiki.deleteAttachment', media)
|
def set(self, media, _bytes, overwrite=True, b64encode=False):
    """Upload *media* from the raw *_bytes*.

    *overwrite* controls whether an existing remote media is replaced;
    *b64encode* sends the payload base64-encoded instead of as a
    binary wrapper.
    """
    payload = base64.b64encode(_bytes) if b64encode else Binary(_bytes)
    self._dokuwiki.send('wiki.putAttachment', media, payload, ow=overwrite)
must be overwrite if it exists remotely. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L433-L438 | null | class _Medias(object):
"""This object regroup methods for managing medias of a DokuWiki. This
object is accessible from the ``medias`` property of an `DokuWiki`
instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.medias.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""Returns all medias of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *skipacl*: (bool) skip acl checking
* *pattern*: (str) check given pattern
* *hash*: (bool) add hashes to result list
"""
return self._dokuwiki.send('wiki.getAttachments', namespace, options)
def changes(self, timestamp):
"""Returns the list of medias changed since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.medias.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp)
def get(self, media, dirpath=None, filename=None, overwrite=False, b64decode=False):
"""Returns the binary data of *media* or save it to a file. If *dirpath*
is not set the binary data is returned, otherwise the data is saved
to a file. By default, the filename is the name of the media but it can
be changed with *filename* parameter. *overwrite* parameter allow to
overwrite the file if it already exists locally.
"""
import os
data = self._dokuwiki.send('wiki.getAttachment', media)
data = base64.b64decode(data) if b64decode else data.data
if dirpath is None:
return data
if filename is None:
filename = media.replace('/', ':').split(':')[-1]
if not os.path.exists(dirpath):
os.makedirs(dirpath)
filepath = os.path.join(dirpath, filename)
if os.path.exists(filepath) and not overwrite:
raise FileExistsError("[Errno 17] File exists: '%s'" % filepath)
with open(filepath, 'wb') as fhandler:
fhandler.write(data)
def info(self, media):
"""Returns informations of *media*."""
return self._dokuwiki.send('wiki.getAttachmentInfo', media)
def add(self, media, filepath, overwrite=True):
"""Set *media* from local file *filepath*. *overwrite* parameter specify
if the media must be overwrite if it exists remotely.
"""
with open(filepath, 'rb') as fhandler:
self._dokuwiki.send('wiki.putAttachment', media,
Binary(fhandler.read()), ow=overwrite)
def delete(self, media):
"""Delete *media*."""
return self._dokuwiki.send('wiki.deleteAttachment', media)
|
def get(content, keep_order=False):
    """Extract the dataentry mapping embedded in *content*.

    *keep_order* indicates whether to return an ordered dictionary.
    Raises `DokuWikiError` when *content* holds no dataentry block.
    """
    if keep_order:
        from collections import OrderedDict
        entries = OrderedDict()
    else:
        entries = {}
    inside = False
    for line in content.split('\n'):
        if line.strip().startswith('---- dataentry'):
            inside = True
            continue
        # NOTE: a bare '----' stops parsing even before the header is
        # seen, matching the original guard order.
        if line == '----':
            break
        if not inside:
            continue
        key, _, raw = line.partition(':')
        # Strip trailing '#...' comments from the value; first key wins.
        entries.setdefault(key.strip(), re.sub('#.*$', '', raw).strip())
    if not inside:
        raise DokuWikiError('no dataentry found')
    return entries
return an ordered dictionary. | train | https://github.com/fmenabe/python-dokuwiki/blob/7b5b13b764912b36f49a03a445c88f0934260eb1/dokuwiki.py#L449-L475 | null | class Dataentry(object):
"""Object that manage `data entries <https://www.dokuwiki.org/plugin:data>`_."""
@staticmethod
@staticmethod
def gen(name, data):
"""Generate dataentry *name* from *data*."""
return '---- dataentry %s ----\n%s\n----' % (name, '\n'.join(
'%s:%s' % (attr, value) for attr, value in data.items()))
@staticmethod
def ignore(content):
"""Remove dataentry from *content*."""
page_content = []
start = False
for line in content.split('\n'):
if line == '----' and not start:
start = True
continue
if start:
page_content.append(line)
return '\n'.join(page_content) if page_content else content
|
def gen(name, data):
    """Build a dataentry block called *name* from the mapping *data*."""
    entry_lines = ['%s:%s' % (attr, value) for attr, value in data.items()]
    return '---- dataentry %s ----\n%s\n----' % (name, '\n'.join(entry_lines))
"""Object that manage `data entries <https://www.dokuwiki.org/plugin:data>`_."""
@staticmethod
def get(content, keep_order=False):
"""Get dataentry from *content*. *keep_order* indicates whether to
return an ordered dictionary."""
if keep_order:
from collections import OrderedDict
dataentry = OrderedDict()
else:
dataentry = {}
found = False
for line in content.split('\n'):
if line.strip().startswith('---- dataentry'):
found = True
continue
elif line == '----':
break
elif not found:
continue
line_split = line.split(':')
key = line_split[0].strip()
value = re.sub('#.*$', '', ':'.join(line_split[1:])).strip()
dataentry.setdefault(key, value)
if not found:
raise DokuWikiError('no dataentry found')
return dataentry
@staticmethod
@staticmethod
def ignore(content):
"""Remove dataentry from *content*."""
page_content = []
start = False
for line in content.split('\n'):
if line == '----' and not start:
start = True
continue
if start:
page_content.append(line)
return '\n'.join(page_content) if page_content else content
|
def ignore(content):
    """Strip the leading dataentry section from *content*.

    Everything up to and including the first bare '----' line is
    dropped; if no such line exists, *content* is returned unchanged.
    """
    kept = []
    seen_separator = False
    for line in content.split('\n'):
        if not seen_separator:
            if line == '----':
                seen_separator = True
            continue
        kept.append(line)
    return '\n'.join(kept) if kept else content
"""Object that manage `data entries <https://www.dokuwiki.org/plugin:data>`_."""
@staticmethod
def get(content, keep_order=False):
"""Get dataentry from *content*. *keep_order* indicates whether to
return an ordered dictionary."""
if keep_order:
from collections import OrderedDict
dataentry = OrderedDict()
else:
dataentry = {}
found = False
for line in content.split('\n'):
if line.strip().startswith('---- dataentry'):
found = True
continue
elif line == '----':
break
elif not found:
continue
line_split = line.split(':')
key = line_split[0].strip()
value = re.sub('#.*$', '', ':'.join(line_split[1:])).strip()
dataentry.setdefault(key, value)
if not found:
raise DokuWikiError('no dataentry found')
return dataentry
@staticmethod
def gen(name, data):
"""Generate dataentry *name* from *data*."""
return '---- dataentry %s ----\n%s\n----' % (name, '\n'.join(
'%s:%s' % (attr, value) for attr, value in data.items()))
@staticmethod
|
fmenabe/python-dokuwiki | doc/source/conf.py | linkcode_resolve | python | def linkcode_resolve(domain, info):
# Sphinx "linkcode" hook: resolve a documented object to a GitHub URL
# pointing at its source lines.  *info* provides 'module' and 'fullname'
# (dotted path inside the module); *domain* is unused here.
module_name = info['module']
fullname = info['fullname']
# Last dotted component; used below to grep the parent's source when the
# object itself has no retrievable source (e.g. plain attributes).
attribute_name = fullname.split('.')[-1]
base_url = 'https://github.com/fmenabe/python-dokuwiki/blob/'
# Dev releases link to 'master'; tagged releases link to the version tag.
if release.endswith('-dev'):
base_url += 'master/'
else:
base_url += version + '/'
# NOTE(review): assumes the module is a plain .py file mirroring its
# dotted name — confirm if the project grows subpackages.
filename = module_name.replace('.', '/') + '.py'
module = sys.modules.get(module_name)
# Get the actual object
try:
actual_object = module
for obj in fullname.split('.'):
parent = actual_object
actual_object = getattr(actual_object, obj)
except AttributeError:
return None
# Fix property methods by using their getter method
if isinstance(actual_object, property):
actual_object = actual_object.fget
# Try to get the linenumber of the object
try:
source, start_line = inspect.getsourcelines(actual_object)
except TypeError:
# If it can not be found, try to find it anyway in the parents its
# source code
parent_source, parent_start_line = inspect.getsourcelines(parent)
for i, line in enumerate(parent_source):
if line.strip().startswith(attribute_name):
start_line = parent_start_line + i
end_line = start_line
break
else:
# Attribute not found in the parent's source: emit no link.
return None
else:
# Source retrieved directly: the anchor spans the whole definition.
end_line = start_line + len(source) - 1
line_anchor = '#L%d-L%d' % (start_line, end_line)
return base_url + filename + line_anchor
# -*- coding: utf-8 -*-
#
# dokuwiki documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 6 14:16:16 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
from collections import OrderedDict
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
]
intersphinx_mapping = OrderedDict((
('python3', ('https://docs.python.org/3', None)),
('python2', ('https://docs.python.org/2', None)),
))
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'dokuwiki'
copyright = u'2016, François Ménabé'
author = u'François Ménabé'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1.0'
# The full version, including alpha/beta/rc tags.
release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'dokuwikidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'dokuwiki.tex', 'dokuwiki Documentation',
'François Ménabé', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dokuwiki', 'dokuwiki Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'dokuwiki', 'dokuwiki Documentation',
author, 'dokuwiki', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
deep-compute/deeputil | deeputil/timer.py | FunctionTimer | python | def FunctionTimer(on_done=None):
'''
To check execution time of a function
borrowed from https://medium.com/pythonhive/python-decorator-to-measure-the-execution-time-of-methods-fa04cb6bb36d
>>> def logger(details, args, kwargs): #some function that uses the time output
... print(details)
...
>>> @FunctionTimer(on_done= logger)
... def foo(t=10):
... print('foo executing...')
... time.sleep(t)
...
>>> @FunctionTimer(on_done= logger)
... def bar(t, n):
... for i in range(n):
... print('bar executing...')
... time.sleep(1)
... foo(t)
...
>>> bar(3,2)
bar executing...
bar executing...
foo executing...
('foo', 3)
('bar', 5)
'''
def decfn(fn):
def timed(*args, **kwargs):
ts = time.time()
result = fn(*args, **kwargs)
te = time.time()
if on_done:
on_done((fn.__name__,int(te - ts)), args, kwargs)
else:
print(('%r %d sec(s)' % (fn.__name__, (te - ts))))
return result
return timed
return decfn | To check execution time of a function
borrowed from https://medium.com/pythonhive/python-decorator-to-measure-the-execution-time-of-methods-fa04cb6bb36d
>>> def logger(details, args, kwargs): #some function that uses the time output
... print(details)
...
>>> @FunctionTimer(on_done= logger)
... def foo(t=10):
... print('foo executing...')
... time.sleep(t)
...
>>> @FunctionTimer(on_done= logger)
... def bar(t, n):
... for i in range(n):
... print('bar executing...')
... time.sleep(1)
... foo(t)
...
>>> bar(3,2)
bar executing...
bar executing...
foo executing...
('foo', 3)
('bar', 5) | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/timer.py#L5-L47 | null | import time
# Exception intended to signal early termination of a timed function.
# NOTE(review): nothing in this module catches it -- presumably consumers
# of FunctionTimer raise/handle it themselves; verify against callers.
class FunctionTimerTerminate(Exception): pass
# Expose the exception on the decorator itself so callers can write
# ``raise FunctionTimer.terminate`` without importing the class directly.
FunctionTimer.terminate = FunctionTimerTerminate
class BlockTimer:
    '''
    Context manager that measures the wall-clock time of a code block
    (also exposed as ``Timer.block``).
    After the ``with`` block exits, ``start``, ``end`` and ``interval``
    (elapsed seconds) are available on the instance.
    borrowed from:
    http://preshing.com/20110924/timing-your-code-using-pythons-with-statement/
    >>> with BlockTimer() as t:
    ...     time.sleep(1)
    ...
    >>> int(t.interval)
    1
    '''
    def __enter__(self):
        # Record the wall-clock time at block entry.
        self.start = time.time()
        return self
    def __exit__(self, *exc_info):
        # Record exit time and the elapsed duration; exceptions are not
        # suppressed (implicit None return).
        finished = time.time()
        self.end = finished
        self.interval = finished - self.start
class Timer(object):
    """Facade bundling the two timing helpers of this module.

    ``Timer.decorator`` wraps a function (see ``FunctionTimer``) and
    ``Timer.block`` times a ``with`` block (see ``BlockTimer``).
    """
    decorator = staticmethod(FunctionTimer)
    block = BlockTimer
|
deep-compute/deeputil | deeputil/priority_dict.py | PriorityDict.smallest | python | def smallest(self):
heap = self._heap
v, k = heap[0]
while k not in self or self[k] != v:
heappop(heap)
v, k = heap[0]
return k | Return the item with the lowest priority.
Raises IndexError if the object is empty. | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/priority_dict.py#L76-L88 | null | class PriorityDict(dict):
"""Dictionary that can be used as a priority queue.
Keys of the dictionary are items to be put into the queue, and values
are their respective priorities. All dictionary methods work as expected.
The advantage over a standard heapq-based priority queue is
that priorities of items can be efficiently updated (amortized O(1))
using code as 'thedict[item] = new_priority.'
The 'smallest' method can be used to return the object with lowest
priority, and 'pop_smallest' also removes it.
The 'sorted_iter' method provides a destructive sorted iterator.
>>> x = PriorityDict({'id1': 22, 'id2': 13, 'id3': 29, 'id4': 25, 'id5': 19})
>>> x.smallest()
'id2'
>>> x.pop_smallest()
'id2'
>>> from pprint import pprint
>>> pprint(x)
{'id1': 22, 'id3': 29, 'id4': 25, 'id5': 19}
>>> x = PriorityDict({})
>>> x.smallest()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "build/bdist.linux-x86_64/egg/rsslurp/priority_dict.py", line 83, in smallest
v, k = heap[0]
IndexError: list index out of range
>>> x.pop_smallest()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "build/bdist.linux-x86_64/egg/rsslurp/priority_dict.py", line 96, in pop_smallest
v, k = heappop(heap)
IndexError: index out of range
"""
    def __init__(self, *args, **kwargs):
        """Accept the same arguments as ``dict`` and build the initial heap."""
        super(PriorityDict, self).__init__(*args, **kwargs)
        self._rebuild_heap()
def _rebuild_heap(self):
self._heap = [(v, k) for k, v in list(self.items())]
heapify(self._heap)
    def pop_smallest(self):
        """Return the item with the lowest priority and remove it.

        Stale heap entries (left behind by ``__setitem__`` updates or
        deletions) are discarded lazily until the heap top matches the
        dict. Raises IndexError if the object is empty.
        """
        heap = self._heap
        v, k = heappop(heap)
        # Skip entries whose key was deleted or whose priority was
        # superseded by a later __setitem__ call.
        while k not in self or self[k] != v:
            v, k = heappop(heap)
        del self[k]
        return k
    def __setitem__(self, key, val):
        """Set the priority of *key* to *val* in amortized O(1)."""
        # We are not going to remove the previous value from the heap,
        # since this would have a cost O(n); the stale entry is skipped
        # lazily by smallest()/pop_smallest().
        super(PriorityDict, self).__setitem__(key, val)
        if len(self._heap) < 2 * len(self):
            heappush(self._heap, (val, key))
        else:
            # When the heap grows larger than 2 * len(self), we rebuild it
            # from scratch to avoid wasting too much memory.
            self._rebuild_heap()
def setdefault(self, key, val):
if key not in self:
self[key] = val
return val
return self[key]
    def update(self, *args, **kwargs):
        """Update the dict (same semantics as ``dict.update``), then rebuild the heap."""
        # Reimplementing dict.update is tricky -- see e.g.
        # http://mail.python.org/pipermail/python-ideas/2007-May/000744.html
        # We just rebuild the heap from scratch after passing to super.
        super(PriorityDict, self).update(*args, **kwargs)
        self._rebuild_heap()
    def sorted_iter(self):
        """Sorted iterator of the priority dictionary items.

        Beware: this will destroy elements as they are returned
        (each yielded key is removed from the dictionary).
        """
        while self:
            yield self.pop_smallest()
|
deep-compute/deeputil | deeputil/priority_dict.py | PriorityDict.pop_smallest | python | def pop_smallest(self):
heap = self._heap
v, k = heappop(heap)
while k not in self or self[k] != v:
v, k = heappop(heap)
del self[k]
return k | Return the item with the lowest priority and remove it.
Raises IndexError if the object is empty. | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/priority_dict.py#L90-L101 | null | class PriorityDict(dict):
"""Dictionary that can be used as a priority queue.
Keys of the dictionary are items to be put into the queue, and values
are their respective priorities. All dictionary methods work as expected.
The advantage over a standard heapq-based priority queue is
that priorities of items can be efficiently updated (amortized O(1))
using code as 'thedict[item] = new_priority.'
The 'smallest' method can be used to return the object with lowest
priority, and 'pop_smallest' also removes it.
The 'sorted_iter' method provides a destructive sorted iterator.
>>> x = PriorityDict({'id1': 22, 'id2': 13, 'id3': 29, 'id4': 25, 'id5': 19})
>>> x.smallest()
'id2'
>>> x.pop_smallest()
'id2'
>>> from pprint import pprint
>>> pprint(x)
{'id1': 22, 'id3': 29, 'id4': 25, 'id5': 19}
>>> x = PriorityDict({})
>>> x.smallest()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "build/bdist.linux-x86_64/egg/rsslurp/priority_dict.py", line 83, in smallest
v, k = heap[0]
IndexError: list index out of range
>>> x.pop_smallest()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "build/bdist.linux-x86_64/egg/rsslurp/priority_dict.py", line 96, in pop_smallest
v, k = heappop(heap)
IndexError: index out of range
"""
    def __init__(self, *args, **kwargs):
        """Take the same arguments as ``dict`` and build the companion heap."""
        super(PriorityDict, self).__init__(*args, **kwargs)
        self._rebuild_heap()
    def _rebuild_heap(self):
        """Rebuild ``self._heap`` as (priority, key) pairs from the dict contents."""
        self._heap = [(v, k) for k, v in list(self.items())]
        heapify(self._heap)
def smallest(self):
"""Return the item with the lowest priority.
Raises IndexError if the object is empty.
"""
heap = self._heap
v, k = heap[0]
while k not in self or self[k] != v:
heappop(heap)
v, k = heap[0]
return k
    def __setitem__(self, key, val):
        """Assign priority *val* to *key* with amortized O(1) cost."""
        # We are not going to remove the previous value from the heap,
        # since this would have a cost O(n). Stale entries are skipped
        # lazily when the heap top is inspected.
        super(PriorityDict, self).__setitem__(key, val)
        if len(self._heap) < 2 * len(self):
            heappush(self._heap, (val, key))
        else:
            # When the heap grows larger than 2 * len(self), we rebuild it
            # from scratch to avoid wasting too much memory.
            self._rebuild_heap()
    def setdefault(self, key, val):
        """If *key* is absent insert it with priority *val*; return its priority."""
        if key not in self:
            self[key] = val
            return val
        return self[key]
    def update(self, *args, **kwargs):
        """Bulk-update like ``dict.update`` and rebuild the heap afterwards."""
        # Reimplementing dict.update is tricky -- see e.g.
        # http://mail.python.org/pipermail/python-ideas/2007-May/000744.html
        # We just rebuild the heap from scratch after passing to super.
        super(PriorityDict, self).update(*args, **kwargs)
        self._rebuild_heap()
    def sorted_iter(self):
        """Destructive sorted iterator over the priority dictionary items.

        Beware: each yielded key is popped from the dictionary.
        """
        while self:
            yield self.pop_smallest()
|
deep-compute/deeputil | deeputil/streamcounter.py | StreamCounter.add | python | def add(self, item, count=1):
'''
When we receive stream of data, we add them in the chunk
which has limit on the no. of items that it will store.
>>> s = StreamCounter(5,5)
>>> data_stream = ['a','b','c','d']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
4
>>> s.n_chunk_items_seen
4
>>> s.n_chunks
0
>>> from pprint import pprint
>>> pprint(s.chunked_counts.get(s.n_chunks, {}))
{'a': 1, 'b': 1, 'c': 1, 'd': 1}
>>> s.counts_total
4
>>> data_stream = ['a','b','c','d','e','f','g','e']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
12
>>> s.n_chunk_items_seen
2
>>> s.n_chunks
2
>>> s.chunked_counts.get(s.n_chunks, {})
{'g': 1, 'e': 1}
'''
self.n_items_seen += count
self.n_chunk_items_seen += count
# get current chunk
chunk_id = self.n_chunks
chunk = self.chunked_counts.get(chunk_id, {})
self.chunked_counts[chunk_id] = chunk
# update count in the current chunk counter dict
if item in chunk:
chunk[item] += count
else:
self.n_counts += 1
chunk[item] = count
# is the current chunk done?
if self.n_chunk_items_seen >= self.chunk_size:
self.n_chunks += 1
self.n_chunk_items_seen = 0
# In case we reached max capacity in count entries,
# drop oldest chunks until we come back within limit
while self.n_counts >= self.max_counts:
self._drop_oldest_chunk() | When we receive stream of data, we add them in the chunk
which has limit on the no. of items that it will store.
>>> s = StreamCounter(5,5)
>>> data_stream = ['a','b','c','d']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
4
>>> s.n_chunk_items_seen
4
>>> s.n_chunks
0
>>> from pprint import pprint
>>> pprint(s.chunked_counts.get(s.n_chunks, {}))
{'a': 1, 'b': 1, 'c': 1, 'd': 1}
>>> s.counts_total
4
>>> data_stream = ['a','b','c','d','e','f','g','e']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
12
>>> s.n_chunk_items_seen
2
>>> s.n_chunks
2
>>> s.chunked_counts.get(s.n_chunks, {})
{'g': 1, 'e': 1} | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/streamcounter.py#L47-L105 | [
"def _drop_oldest_chunk(self):\n '''\n To handle the case when the items comming in the chunk\n is more than the maximum capacity of the chunk. Our intent\n behind is to remove the oldest chunk. So that the items come\n flowing in.\n >>> s = StreamCounter(5,5)\n >>> data_stream = ['a','b','c','d']\n >>> for item in data_stream:\n ... s.add(item)\n >>> min(s.chunked_counts.keys())\n 0\n >>> s.chunked_counts\n {0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}}\n >>> data_stream = ['a','b','c','d','a','e','f']\n >>> for item in data_stream:\n ... s.add(item)\n >>> min(s.chunked_counts.keys())\n 2\n >>> s.chunked_counts\n {2: {'f': 1}}\n '''\n chunk_id = min(self.chunked_counts.keys())\n chunk = self.chunked_counts.pop(chunk_id)\n\n self.n_counts -= len(chunk)\n for k, v in list(chunk.items()):\n self.counts[k] -= v\n self.counts_total -= v\n"
] | class StreamCounter(object):
'''
A class whose responsibility is to get the count of items
in data comming as a stream.
'''
#TODO Doctests and examples
# When we receive a stream of data, we fix the max size of chunk
# Think of chunk as a container, which can only fit a fixed no. of items
# This will help us to keep control over RAM usage
DEFAULT_CHUNK_SIZE = 1000000
# When we have a container, we also want to count the occurences of items
# Max count will be maximum occurence of an item
DEFAULT_MAX_COUNTS = 1000000
    def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE,
                 max_counts=DEFAULT_MAX_COUNTS):
        '''
        :param chunk_size: max number of item occurrences counted into
            one chunk before a new chunk is started.
        :param max_counts: cap on the number of distinct per-chunk count
            entries kept; oldest chunks are evicted to stay below it.
        '''
        self.chunk_size = chunk_size
        self.max_counts = max_counts
        # Counts of items stored on a per chunk basis
        # Dict of dictionaries. Outer dict has chunk id as key
        # Inner dict has items as keys and values are counts within
        # that chunk
        self.chunked_counts = {}
        # Overall counts (keys are items and values are counts)
        self.counts = Counter()
        # Total chunks seen so far
        self.n_chunks = 0
        # Total items seen so far
        self.n_items_seen = 0
        # Total items seen in current chunk
        self.n_chunk_items_seen = 0
        # Total count entries (distinct item/chunk pairs currently kept)
        self.n_counts = 0
        # Running total of all counts currently retained
        self.counts_total = 0
    def _drop_oldest_chunk(self):
        '''
        Evict the oldest chunk when the number of count entries reaches
        the maximum capacity, so that new items can keep flowing in.
        The evicted chunk's contribution is rolled out of the aggregate
        counters as well.
        >>> s = StreamCounter(5,5)
        >>> data_stream = ['a','b','c','d']
        >>> for item in data_stream:
        ...     s.add(item)
        >>> min(s.chunked_counts.keys())
        0
        >>> s.chunked_counts
        {0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}}
        >>> data_stream = ['a','b','c','d','a','e','f']
        >>> for item in data_stream:
        ...     s.add(item)
        >>> min(s.chunked_counts.keys())
        2
        >>> s.chunked_counts
        {2: {'f': 1}}
        '''
        chunk_id = min(self.chunked_counts.keys())
        chunk = self.chunked_counts.pop(chunk_id)
        # Every distinct item in the evicted chunk frees one count entry.
        self.n_counts -= len(chunk)
        # Subtract the chunk's contribution from the aggregate counters.
        for k, v in list(chunk.items()):
            self.counts[k] -= v
            self.counts_total -= v
def get(self, item, default=0, normalized=False):
'''
When we have the stream of data pushed in the chunk
we can retrive count of an item using this method.
>>> stream_counter_obj = StreamCounter(5,5)
>>> data_stream = ['a','b','c']
>>> for item in data_stream:
... stream_counter_obj.add(item)
>>> stream_counter_obj.get('a')
1
>>> stream_counter_obj.get('b')
1
>>> stream_counter_obj.get('c')
1
>>> stream_counter_obj.get('d')
0
>>> data_stream.extend(['d','e','f'])
>>> for item in data_stream:
... stream_counter_obj.add(item)
>>> stream_counter_obj.get('a')
0
>>> stream_counter_obj.get('b')
0
>>> stream_counter_obj.get('c')
1
>>> stream_counter_obj.get('d')
1
>>> stream_counter_obj.get('e')
1
>>> stream_counter_obj.get('f')
1
'''
c = self.counts.get(item, default)
if not normalized:
return c
return c / float(self.counts_total)
    def __getitem__(self, k):
        """Dict-style shorthand: ``sc[k]`` is equivalent to ``sc.get(k)``."""
        return self.get(k)
|
deep-compute/deeputil | deeputil/streamcounter.py | StreamCounter._drop_oldest_chunk | python | def _drop_oldest_chunk(self):
'''
To handle the case when the items comming in the chunk
is more than the maximum capacity of the chunk. Our intent
behind is to remove the oldest chunk. So that the items come
flowing in.
>>> s = StreamCounter(5,5)
>>> data_stream = ['a','b','c','d']
>>> for item in data_stream:
... s.add(item)
>>> min(s.chunked_counts.keys())
0
>>> s.chunked_counts
{0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}}
>>> data_stream = ['a','b','c','d','a','e','f']
>>> for item in data_stream:
... s.add(item)
>>> min(s.chunked_counts.keys())
2
>>> s.chunked_counts
{2: {'f': 1}}
'''
chunk_id = min(self.chunked_counts.keys())
chunk = self.chunked_counts.pop(chunk_id)
self.n_counts -= len(chunk)
for k, v in list(chunk.items()):
self.counts[k] -= v
self.counts_total -= v | To handle the case when the items coming in the chunk
is more than the maximum capacity of the chunk. Our intent
behind is to remove the oldest chunk. So that the items come
flowing in.
>>> s = StreamCounter(5,5)
>>> data_stream = ['a','b','c','d']
>>> for item in data_stream:
... s.add(item)
>>> min(s.chunked_counts.keys())
0
>>> s.chunked_counts
{0: {'a': 1, 'b': 1, 'c': 1, 'd': 1}}
>>> data_stream = ['a','b','c','d','a','e','f']
>>> for item in data_stream:
... s.add(item)
>>> min(s.chunked_counts.keys())
2
>>> s.chunked_counts
{2: {'f': 1}} | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/streamcounter.py#L107-L135 | null | class StreamCounter(object):
'''
A class whose responsibility is to get the count of items
in data comming as a stream.
'''
#TODO Doctests and examples
# When we receive a stream of data, we fix the max size of chunk
# Think of chunk as a container, which can only fit a fixed no. of items
# This will help us to keep control over RAM usage
DEFAULT_CHUNK_SIZE = 1000000
# When we have a container, we also want to count the occurences of items
# Max count will be maximum occurence of an item
DEFAULT_MAX_COUNTS = 1000000
    def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE,
                 max_counts=DEFAULT_MAX_COUNTS):
        '''
        :param chunk_size: number of item occurrences per chunk before
            a new chunk is started.
        :param max_counts: upper bound on distinct count entries kept;
            oldest chunks are dropped to respect it.
        '''
        self.chunk_size = chunk_size
        self.max_counts = max_counts
        # Counts of items stored on a per chunk basis
        # Dict of dictionaries. Outer dict has chunk id as key
        # Inner dict has items as keys and values are counts within
        # that chunk
        self.chunked_counts = {}
        # Overall counts (keys are items and values are counts)
        self.counts = Counter()
        # Total chunks seen so far
        self.n_chunks = 0
        # Total items seen so far
        self.n_items_seen = 0
        # Total items seen in current chunk
        self.n_chunk_items_seen = 0
        # Total count entries (distinct item/chunk pairs currently kept)
        self.n_counts = 0
        # Running total of all counts currently retained
        self.counts_total = 0
def add(self, item, count=1):
'''
When we receive stream of data, we add them in the chunk
which has limit on the no. of items that it will store.
>>> s = StreamCounter(5,5)
>>> data_stream = ['a','b','c','d']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
4
>>> s.n_chunk_items_seen
4
>>> s.n_chunks
0
>>> from pprint import pprint
>>> pprint(s.chunked_counts.get(s.n_chunks, {}))
{'a': 1, 'b': 1, 'c': 1, 'd': 1}
>>> s.counts_total
4
>>> data_stream = ['a','b','c','d','e','f','g','e']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
12
>>> s.n_chunk_items_seen
2
>>> s.n_chunks
2
>>> s.chunked_counts.get(s.n_chunks, {})
{'g': 1, 'e': 1}
'''
self.n_items_seen += count
self.n_chunk_items_seen += count
# get current chunk
chunk_id = self.n_chunks
chunk = self.chunked_counts.get(chunk_id, {})
self.chunked_counts[chunk_id] = chunk
# update count in the current chunk counter dict
if item in chunk:
chunk[item] += count
else:
self.n_counts += 1
chunk[item] = count
# is the current chunk done?
if self.n_chunk_items_seen >= self.chunk_size:
self.n_chunks += 1
self.n_chunk_items_seen = 0
# In case we reached max capacity in count entries,
# drop oldest chunks until we come back within limit
while self.n_counts >= self.max_counts:
self._drop_oldest_chunk()
    def get(self, item, default=0, normalized=False):
        '''
        Once a stream of data has been pushed through the chunks,
        we can retrieve the count of an item using this method
        (absolute, or as a fraction of ``counts_total`` when
        *normalized* is true). Assumes add() keeps ``self.counts``
        up to date.
        >>> stream_counter_obj = StreamCounter(5,5)
        >>> data_stream = ['a','b','c']
        >>> for item in data_stream:
        ...     stream_counter_obj.add(item)
        >>> stream_counter_obj.get('a')
        1
        >>> stream_counter_obj.get('b')
        1
        >>> stream_counter_obj.get('c')
        1
        >>> stream_counter_obj.get('d')
        0
        >>> data_stream.extend(['d','e','f'])
        >>> for item in data_stream:
        ...     stream_counter_obj.add(item)
        >>> stream_counter_obj.get('a')
        0
        >>> stream_counter_obj.get('b')
        0
        >>> stream_counter_obj.get('c')
        1
        >>> stream_counter_obj.get('d')
        1
        >>> stream_counter_obj.get('e')
        1
        >>> stream_counter_obj.get('f')
        1
        '''
        c = self.counts.get(item, default)
        if not normalized:
            return c
        # Fraction of all retained counts.
        return c / float(self.counts_total)
    def __getitem__(self, k):
        """Dict-style shorthand: ``sc[k]`` == ``sc.get(k)``."""
        return self.get(k)
|
deep-compute/deeputil | deeputil/streamcounter.py | StreamCounter.get | python | def get(self, item, default=0, normalized=False):
'''
When we have the stream of data pushed in the chunk
we can retrive count of an item using this method.
>>> stream_counter_obj = StreamCounter(5,5)
>>> data_stream = ['a','b','c']
>>> for item in data_stream:
... stream_counter_obj.add(item)
>>> stream_counter_obj.get('a')
1
>>> stream_counter_obj.get('b')
1
>>> stream_counter_obj.get('c')
1
>>> stream_counter_obj.get('d')
0
>>> data_stream.extend(['d','e','f'])
>>> for item in data_stream:
... stream_counter_obj.add(item)
>>> stream_counter_obj.get('a')
0
>>> stream_counter_obj.get('b')
0
>>> stream_counter_obj.get('c')
1
>>> stream_counter_obj.get('d')
1
>>> stream_counter_obj.get('e')
1
>>> stream_counter_obj.get('f')
1
'''
c = self.counts.get(item, default)
if not normalized:
return c
return c / float(self.counts_total) | When we have the stream of data pushed in the chunk
we can retrive count of an item using this method.
>>> stream_counter_obj = StreamCounter(5,5)
>>> data_stream = ['a','b','c']
>>> for item in data_stream:
... stream_counter_obj.add(item)
>>> stream_counter_obj.get('a')
1
>>> stream_counter_obj.get('b')
1
>>> stream_counter_obj.get('c')
1
>>> stream_counter_obj.get('d')
0
>>> data_stream.extend(['d','e','f'])
>>> for item in data_stream:
... stream_counter_obj.add(item)
>>> stream_counter_obj.get('a')
0
>>> stream_counter_obj.get('b')
0
>>> stream_counter_obj.get('c')
1
>>> stream_counter_obj.get('d')
1
>>> stream_counter_obj.get('e')
1
>>> stream_counter_obj.get('f')
1 | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/streamcounter.py#L137-L173 | null | class StreamCounter(object):
'''
A class whose responsibility is to get the count of items
in data comming as a stream.
'''
#TODO Doctests and examples
# When we receive a stream of data, we fix the max size of chunk
# Think of chunk as a container, which can only fit a fixed no. of items
# This will help us to keep control over RAM usage
DEFAULT_CHUNK_SIZE = 1000000
# When we have a container, we also want to count the occurences of items
# Max count will be maximum occurence of an item
DEFAULT_MAX_COUNTS = 1000000
    def __init__(self, chunk_size=DEFAULT_CHUNK_SIZE,
                 max_counts=DEFAULT_MAX_COUNTS):
        '''
        :param chunk_size: item occurrences per chunk before a new
            chunk is started.
        :param max_counts: bound on distinct count entries kept;
            oldest chunks are evicted to respect it.
        '''
        self.chunk_size = chunk_size
        self.max_counts = max_counts
        # Counts of items stored on a per chunk basis
        # Dict of dictionaries. Outer dict has chunk id as key
        # Inner dict has items as keys and values are counts within
        # that chunk
        self.chunked_counts = {}
        # Overall counts (keys are items and values are counts)
        self.counts = Counter()
        # Total chunks seen so far
        self.n_chunks = 0
        # Total items seen so far
        self.n_items_seen = 0
        # Total items seen in current chunk
        self.n_chunk_items_seen = 0
        # Total count entries (distinct item/chunk pairs currently kept)
        self.n_counts = 0
        # Running total of all counts currently retained
        self.counts_total = 0
def add(self, item, count=1):
'''
When we receive stream of data, we add them in the chunk
which has limit on the no. of items that it will store.
>>> s = StreamCounter(5,5)
>>> data_stream = ['a','b','c','d']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
4
>>> s.n_chunk_items_seen
4
>>> s.n_chunks
0
>>> from pprint import pprint
>>> pprint(s.chunked_counts.get(s.n_chunks, {}))
{'a': 1, 'b': 1, 'c': 1, 'd': 1}
>>> s.counts_total
4
>>> data_stream = ['a','b','c','d','e','f','g','e']
>>> for item in data_stream:
... s.add(item)
>>> s.chunk_size
5
>>> s.n_items_seen
12
>>> s.n_chunk_items_seen
2
>>> s.n_chunks
2
>>> s.chunked_counts.get(s.n_chunks, {})
{'g': 1, 'e': 1}
'''
self.n_items_seen += count
self.n_chunk_items_seen += count
# get current chunk
chunk_id = self.n_chunks
chunk = self.chunked_counts.get(chunk_id, {})
self.chunked_counts[chunk_id] = chunk
# update count in the current chunk counter dict
if item in chunk:
chunk[item] += count
else:
self.n_counts += 1
chunk[item] = count
# is the current chunk done?
if self.n_chunk_items_seen >= self.chunk_size:
self.n_chunks += 1
self.n_chunk_items_seen = 0
# In case we reached max capacity in count entries,
# drop oldest chunks until we come back within limit
while self.n_counts >= self.max_counts:
self._drop_oldest_chunk()
def _drop_oldest_chunk(self):
    """Evict the chunk with the smallest id and roll its item counts
    back out of the aggregate counters. Called when the total number of
    count entries exceeds ``max_counts`` so newer data can keep flowing in.
    """
    oldest_id = min(self.chunked_counts)
    dropped = self.chunked_counts.pop(oldest_id)
    self.n_counts -= len(dropped)
    for item, item_count in list(dropped.items()):
        self.counts[item] -= item_count
        self.counts_total -= item_count
def __getitem__(self, k):
    # Dict-style access delegates to get(); note that get() is defined
    # elsewhere in this class (outside this chunk), so missing items
    # presumably return its default rather than raising KeyError -- confirm.
    return self.get(k)
|
deep-compute/deeputil | deeputil/misc.py | generate_random_string | python | def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8') | Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L16-L31 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def get_timestamp(dt=None):
    """Return the UTC epoch timestamp (seconds) of *dt*.

    When *dt* is None, the current UTC time is used.

    >>> t = datetime.datetime(2015, 5, 21)
    >>> get_timestamp(t)
    1432166400
    """
    when = datetime.datetime.utcnow() if dt is None else dt
    return calendar.timegm(when.utctimetuple())
def get_datetime(epoch):
    """Return the naive UTC datetime for an epoch timestamp.

    >>> get_datetime(1432188772)
    datetime.datetime(2015, 5, 21, 6, 12, 52)
    """
    year, month, day, hour, minute, second = time.gmtime(epoch)[:6]
    return datetime.datetime(year, month, day, hour, minute, second)
def convert_ts(tt):
    """Convert a ``time.struct_time`` into a non-negative epoch timestamp.

    Pre-epoch dates are clamped to 0 (see
    https://github.com/prashanthellina/rsslurp/issues/680); anything that is
    not a valid time tuple yields None.

    >>> convert_ts(time.strptime("23.10.2012", "%d.%m.%Y"))
    1350950400
    >>> convert_ts(time.strptime("1.1.1513", "%d.%m.%Y"))
    0
    >>> convert_ts(12)
    """
    try:
        stamp = calendar.timegm(tt)
    except TypeError:
        # Not a time tuple at all.
        return None
    # Clamp negative (pre-1970) results to zero.
    return stamp if stamp >= 0 else 0
# NOTE: Python 3 has no separate `unicode` type -- `str` IS unicode, so
# xcode() below only needs to distinguish str (encode) from bytes (pass through).
def xcode(text, encoding='utf8', mode='ignore'):
    """Encode *text* to bytes when it is a str; return anything else untouched.

    >>> xcode(b'hello')
    b'hello'
    >>> xcode('hello')
    b'hello'
    """
    if not isinstance(text, str):
        return text
    return text.encode(encoding, mode)
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
    """Split a URL of the form http://<host>[:<port>] into (host, port).

    *default_port* is returned when the URL carries no explicit port.

    >>> parse_location('http://localhost/', 6379)
    ('localhost', 6379)
    >>> parse_location('http://localhost:8888', 6379)
    ('localhost', 8888)
    """
    netloc = urlparse(loc).netloc
    host, sep, port = netloc.partition(':')
    return (host, int(port)) if sep else (host, default_port)
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
    """repoze.lru ``ExpiringLRUCache`` variant with pure time-based expiry:
    reads never refresh an entry's LRU standing, so entries live only until
    their expiry time.

    >>> c = ExpiringCache(10, default_timeout=1)
    >>> c.put('a', 100)
    >>> c.get('a')
    100
    >>> time.sleep(1)
    >>> c.get('a')
    """
    def get(self, key, default=None):
        """Return the value for *key*; *default* if missing or expired."""
        # lookups/hits/misses mirror the base class's statistics counters.
        self.lookups += 1
        try:
            # Base class stores: key -> (clock position, value, expiry time)
            # -- NOTE(review): relies on repoze.lru internals; confirm on upgrade.
            pos, val, expires = self.data[key]
        except KeyError:
            self.misses += 1
            return default
        if expires > time.time():
            # cache entry still valid
            self.hits += 1
            # Deliberately NOT updating clock_refs here: skipping the CLOCK
            # reference bit disables LRU retention, leaving expiry-only
            # semantics.
            # self.clock_refs[pos] = True
            return val
        else:
            # cache entry has expired. Make sure the space in the cache can
            # be recycled soon.
            self.misses += 1
            self.clock_refs[pos] = False
            return default
def serialize_dict_keys(d, prefix=""):
    """Return every key path of a nested dictionary, dot-joined.

    >>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
    ['a', 'a.b', 'a.b.b', 'a.b.c']
    """
    paths = []
    for key, value in d.items():
        path = f"{prefix}{key}"
        paths.append(path)
        # Recurse into nested dicts, extending the dotted prefix.
        if isinstance(value, dict):
            paths.extend(serialize_dict_keys(value, prefix=f"{path}."))
    return paths
import collections
class MarkValue(str):
    """String subclass marking values whose key had a single leading underscore."""
    pass

def flatten_dict(d,
                 parent_key='', sep='.',
                 ignore_under_prefixed=True,
                 mark_value=True):
    """Flatten a nested dictionary into dot-joined keys.

    Keys starting with '__' are dropped; values of keys starting with a
    single '_' are wrapped in MarkValue(repr(value)).

    >>> from pprint import pprint
    >>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
    >>> pprint(flatten_dict(d))
    {'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
    """
    # Bug fix: `collections.MutableMapping` was deprecated in Python 3.3
    # and removed in 3.10 -- the ABC lives in collections.abc.
    from collections.abc import MutableMapping
    items = {}
    for k in d:
        if ignore_under_prefixed and k.startswith('__'):
            continue
        v = d[k]
        if mark_value and k.startswith('_') and not k.startswith('__'):
            v = MarkValue(repr(v))
        new_key = sep.join((parent_key, k)) if parent_key else k
        if isinstance(v, MutableMapping):
            items.update(flatten_dict(v, new_key, sep=sep,
                                      ignore_under_prefixed=True,
                                      mark_value=True))
        else:
            items[new_key] = v
    return items
def deepgetattr(obj, attr, default=AttributeError):
    """Follow a dotted attribute chain and return the final value.

    >>> ns = type("NS", (), {})
    >>> planet = ns(); planet.name = 'Earth'
    >>> system = ns(); system.planet = planet
    >>> deepgetattr(system, 'planet.name')
    'Earth'

    When any link is missing, *default* is returned unless it is the
    sentinel ``AttributeError``, in which case the error propagates.
    """
    target = obj
    try:
        for part in attr.split('.'):
            target = getattr(target, part)
    except AttributeError:
        if default is AttributeError:
            raise
        return default
    return target
class AttrDict(dict):
    """Dictionary with attribute-style access: ``d.a`` maps to ``d['a']``.

    from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/

    >>> d = AttrDict()
    >>> d.a = 1
    >>> d['a']
    1
    >>> d
    AttrDict({'a': 1})

    Notes:
    - Missing attributes raise KeyError (not AttributeError) because
      __getattr__ aliases __getitem__ below.
    - Nested plain dicts are wrapped in AttrDict on every read access.
    """
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
    def __getstate__(self):
        # Pickle support: capture instance __dict__ as (key, value) pairs.
        return list(self.__dict__.items())
    def __setstate__(self, items):
        for key, val in items:
            self.__dict__[key] = val
    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))
    def __setitem__(self, key, value):
        return super(AttrDict, self).__setitem__(key, value)
    def __getitem__(self, name):
        item = super(AttrDict, self).__getitem__(name)
        # Wrap nested dicts so chained attribute access keeps working;
        # note this returns a fresh wrapper each access, not the stored dict.
        return AttrDict(item) if isinstance(item, dict) else item
    def __delitem__(self, name):
        return super(AttrDict, self).__delitem__(name)
    # Attribute access is routed to item access wholesale.
    __getattr__ = __getitem__
    __setattr__ = __setitem__
    def copy(self):
        # Shallow copy preserving the AttrDict type.
        ch = AttrDict(self)
        return ch
class IterAsFile(object):
    """Wrap an iterator of strings in a minimal file-like ``read()`` API.

    from: http://stackoverflow.com/a/12593795/332313

    >>> f = IterAsFile(iter(['aaa', 'bbb', 'ccc']))
    >>> f.read(6)
    'aaabbb'
    >>> f.read(4)
    'ccc'
    >>> f.read(2)
    """
    def __init__(self, it):
        self.it = it
        # Buffered text not yet handed out; None once the iterator is spent.
        self.next_chunk = ''

    def _grow_chunk(self):
        # Pull one more piece from the iterator into the buffer.
        self.next_chunk += next(self.it)

    def read(self, n):
        """Return up to *n* characters, or None after exhaustion."""
        if self.next_chunk is None:
            return None
        try:
            while len(self.next_chunk) < n:
                self._grow_chunk()
        except StopIteration:
            # Iterator spent: hand back whatever is buffered, then None.
            leftover = self.next_chunk
            self.next_chunk = None
            return leftover
        out, self.next_chunk = self.next_chunk[:n], self.next_chunk[n:]
        return out
class LineReader(object):
    """Re-chunk an iterator of string fragments into complete lines.

    Each yielded string ends with *linesep* except possibly the last one,
    which is the unterminated remainder of the stream.
    """
    def __init__(self, it, linesep='\n'):
        # Fragments of the current, not-yet-terminated line.
        self.parts = []
        self.it = it
        self.linesep = linesep

    def __iter__(self):
        for chunk in self.it:
            loc = end_loc = 0
            while loc <= len(chunk):
                end_loc = chunk.find(self.linesep, loc)
                if end_loc == -1:
                    # No more separators: stash the remainder for later.
                    self.parts.append(chunk[loc:])
                    break
                else:
                    yield ''.join(self.parts) + chunk[loc:end_loc + 1]
                    self.parts = []
                    loc = end_loc + 1
        # Bug fix: previously `if self.parts:` yielded a spurious '' when the
        # final chunk ended with the separator (parts == [''] is truthy).
        tail = ''.join(self.parts)
        if tail:
            yield tail
from .priority_dict import PriorityDict
class ExpiringCounter(object):
    """Counter whose increments expire *duration* seconds after they are made.

    >>> c = ExpiringCounter(duration=1)
    >>> c.put('name')
    >>> c.get('name')
    1
    >>> time.sleep(2)
    >>> c.get('name')
    0
    """
    DEFAULT_DURATION = 60 #seconds
    def __init__(self, duration=DEFAULT_DURATION):
        self.duration = duration
        # NOTE(review): latest_ts is never read after init -- looks vestigial.
        self.latest_ts = int(time.time())
        # item -> live count; PriorityDict comes from the sibling
        # priority_dict module (project-local).
        self.counts = PriorityDict()
        # Total live increments across all items.
        self.count = 0
        # Second-resolution buckets: timestamp -> {item: increments}.
        self.history = {}
    def put(self, key):
        """Record one occurrence of *key*."""
        # Expire stale buckets before counting the new observation.
        self.update()
        ts = int(time.time())
        hcounts = self.history.get(ts, {})
        hcounts[key] = hcounts.get(key, 0) + 1
        self.history[ts] = hcounts
        self.counts[key] = self.counts.get(key, 0) + 1
        self.count += 1
    def get(self, key):
        """Return the number of non-expired occurrences of *key* (0 if none)."""
        self.update()
        return self.counts.get(key, 0)
    def update(self):
        """Drop every bucket older than the expiry window and subtract its
        contributions from the live counters."""
        ts = int(time.time() - self.duration)
        ts_keys = [x for x in self.history if x < ts]
        for ts_key in ts_keys:
            hcounts = self.history.pop(ts_key)
            for key, count in list(hcounts.items()):
                kcount = self.counts[key]
                kcount -= count
                # Remove fully-expired items instead of keeping zeros around.
                if kcount <= 0: del self.counts[key]
                else: self.counts[key] = kcount
                self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
    """Try to set this process's open-file-descriptor limit to *n*.

    Sets both the soft and hard RLIMIT_NOFILE values. Returns True on
    success, False when the OS rejects the value (e.g. raising the hard
    limit as an unprivileged process).
    """
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
    except ValueError:
        return False
    return True
class Dummy(object):
    """No-op stand-in object that logs all interactions.

    Every attribute access and every call succeeds and returns another
    Dummy, so arbitrary chains like ``d.foo.bar()`` are no-ops. With
    ``__quiet__=False`` each interaction is printed as an
    ``(instance, event, details)`` tuple -- useful as a drop-in for
    loggers/clients in tests.

    >>> d = Dummy(1, a=5)
    >>> d.foo() # doctest: +ELLIPSIS
    <deeputil.misc.Dummy object at ...>
    >>> d.foo.bar() # doctest: +ELLIPSIS
    <deeputil.misc.Dummy object at ...>
    """
    def _log(self, event, data):
        # Print the interaction unless running quietly (the default).
        if not self._quiet: print((self, event, data))
    def __init__(self, *args, **kwargs):
        # __prefix__ records the attribute path that produced this Dummy.
        self._prefix = kwargs.pop('__prefix__', [])
        # __quiet__=False turns interaction printing on.
        self._quiet = kwargs.pop('__quiet__', True)
        self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
    def __getattr__(self, attr):
        # Introspection helpers (functools, inspect) probe __wrapped__;
        # answering it with a Dummy would confuse them, so behave like a
        # normal object and raise.
        if attr == '__wrapped__': raise AttributeError
        self._log('__getattr__', dict(attr=attr))
        return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)
    def __call__(self, *args, **kwargs):
        self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
        return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
    """Cache the results of a single-argument function.

    From: https://goo.gl/aXt4Qy

    The argument must be hashable and the cache grows without bound
    (one entry per distinct argument).
    """
    class _MemoDict(dict):
        # Expose the wrapped function's metadata on the accessor.
        @wraps(f)
        def __getitem__(self, *args):
            return super(_MemoDict, self).__getitem__(*args)

        def __missing__(self, key):
            # First call with this key: compute, store and return.
            value = self[key] = f(key)
            return value

    return _MemoDict().__getitem__
@memoize
def load_object(imp_path):
    """Load and return the object at dotted import path *imp_path*.

    For dynamic imports in a program; results are memoized.

    >>> isdir = load_object('os.path.isdir')
    >>> isdir('/tmp')
    True

    NOTE(review): only the first dotted component is imported as a module;
    the rest is resolved with attrgetter. That works for 'os.path.isdir'
    because importing `os` exposes `os.path`, but presumably fails for deep
    submodules the top package does not import itself -- confirm before
    relying on it for arbitrary paths. A dot-free path raises ValueError.
    """
    module_name, obj_name = imp_path.split('.', 1)
    module = __import__(module_name)
    obj = attrgetter(obj_name)(module)
    return obj
def grouper(n, iterable):
    """Yield tuples of up to *n* consecutive items from *iterable*.

    The final tuple may be shorter when len(iterable) is not a multiple
    of *n*. With n == 0 nothing is yielded.
    """
    it = iter(iterable)
    batch = tuple(itertools.islice(it, n))
    while batch:
        yield batch
        batch = tuple(itertools.islice(it, n))
|
deep-compute/deeputil | deeputil/misc.py | get_timestamp | python | def get_timestamp(dt=None):
'''
Return current timestamp if @dt is None
else return timestamp of @dt.
>>> t = datetime.datetime(2015, 0o5, 21)
>>> get_timestamp(t)
1432166400
'''
if dt is None: dt = datetime.datetime.utcnow()
t = dt.utctimetuple()
return calendar.timegm(t) | Return current timestamp if @dt is None
else return timestamp of @dt.
>>> t = datetime.datetime(2015, 0o5, 21)
>>> get_timestamp(t)
1432166400 | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L33-L46 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8')
def get_datetime(epoch):
'''
get datetime from an epoch timestamp
>>> get_datetime(1432188772)
datetime.datetime(2015, 5, 21, 6, 12, 52)
'''
t = time.gmtime(epoch)
dt = datetime.datetime(*t[:6])
return dt
def convert_ts(tt):
'''
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
'''
try:
ts = calendar.timegm(tt)
'''
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
'''
if ts < 0: ts = 0
except TypeError:
ts = None
return ts
#FIXME No unicode in python 3
def xcode(text, encoding='utf8', mode='ignore'):
'''
Converts unicode encoding to str
>>> xcode(b'hello')
b'hello'
>>> xcode('hello')
b'hello'
'''
return text.encode(encoding, mode) if isinstance(text, str) else text
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
'''
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
'''
parsed = urlparse(loc)
if ':' in parsed.netloc:
ip, port = parsed.netloc.split(':')
port = int(port)
else:
ip, port = parsed.netloc, default_port
return ip, port
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
'''
Return value for key. If not in cache or expired, return default
>>> c = ExpiringCache(10, default_timeout=1)
>>> c.put('a', 100)
>>> c.get('a')
100
>>> time.sleep(1)
>>> c.get('a')
'''
def get(self, key, default=None):
self.lookups += 1
try:
pos, val, expires = self.data[key]
except KeyError:
self.misses += 1
return default
if expires > time.time():
# cache entry still valid
self.hits += 1
# Not updating clock_refs to disable
# LRU logic as we just want expiry without LRU
# self.clock_refs[pos] = True
return val
else:
# cache entry has expired. Make sure the space in the cache can
# be recycled soon.
self.misses += 1
self.clock_refs[pos] = False
return default
def serialize_dict_keys(d, prefix=""):
"""returns all the keys in nested a dictionary.
>>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
['a', 'a.b', 'a.b.b', 'a.b.c']
"""
keys = []
for k, v in d.items():
fqk = '{}{}'.format(prefix, k)
keys.append(fqk)
if isinstance(v, dict):
keys.extend(serialize_dict_keys(v, prefix="{}.".format(fqk)))
return keys
import collections
class MarkValue(str):
    """String subclass marking values whose key had a single leading underscore."""
    pass

def flatten_dict(d,
                 parent_key='', sep='.',
                 ignore_under_prefixed=True,
                 mark_value=True):
    """Flatten a nested dictionary into dot-joined keys.

    Keys starting with '__' are dropped; values of keys starting with a
    single '_' are wrapped in MarkValue(repr(value)).

    >>> from pprint import pprint
    >>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
    >>> pprint(flatten_dict(d))
    {'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
    """
    # Bug fix: `collections.MutableMapping` was deprecated in Python 3.3
    # and removed in 3.10 -- the ABC lives in collections.abc.
    from collections.abc import MutableMapping
    items = {}
    for k in d:
        if ignore_under_prefixed and k.startswith('__'):
            continue
        v = d[k]
        if mark_value and k.startswith('_') and not k.startswith('__'):
            v = MarkValue(repr(v))
        new_key = sep.join((parent_key, k)) if parent_key else k
        if isinstance(v, MutableMapping):
            items.update(flatten_dict(v, new_key, sep=sep,
                                      ignore_under_prefixed=True,
                                      mark_value=True))
        else:
            items[new_key] = v
    return items
def deepgetattr(obj, attr, default=AttributeError):
"""
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
"""
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise
class AttrDict(dict):
'''
A dictionary with attribute-style access. It maps attribute access to
the real dictionary.
from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
In a plain old dict, we can store values against keys like this
>>> d = {}
>>> d['a'] = 10
>>> d['a']
10
However, sometimes it is convenient to interact with a dict as
though it is an object. eg: d.a = 10 and access as d.a, this does
not work in a dict
>>> d = {}
>>> d.a = 10
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'dict' object has no attribute 'a'
This is where you can use an `AttrDict`
>>> d = AttrDict()
>>> d.a = 1
>>> d.a
1
>>> d
AttrDict({'a': 1})
>>> d['b'] = 2
>>> d['b']
2
>>> d.b
2
>>> del d['a']
>>> d
AttrDict({'b': 2})
>>> dd = d.copy()
>>> dd
AttrDict({'b': 2})
'''
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
def __getstate__(self):
return list(self.__dict__.items())
def __setstate__(self, items):
for key, val in items:
self.__dict__[key] = val
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))
def __setitem__(self, key, value):
return super(AttrDict, self).__setitem__(key, value)
def __getitem__(self, name):
item = super(AttrDict, self).__getitem__(name)
return AttrDict(item) if isinstance(item, dict) else item
def __delitem__(self, name):
return super(AttrDict, self).__delitem__(name)
__getattr__ = __getitem__
__setattr__ = __setitem__
def copy(self):
ch = AttrDict(self)
return ch
class IterAsFile(object):
'''
Wraps an iterator in a file-like API,
i.e. if you have a generator producing a list of strings,
this could make it look like a file.
from: http://stackoverflow.com/a/12593795/332313
>>> def str_fn():
... for c in 'a', 'b', 'c':
... yield c * 3
...
>>> IAF = IterAsFile(str_fn())
>>> IAF.read(6)
'aaabbb'
>>> IAF.read(4)
'ccc'
>>> IAF.read(2)
'''
def __init__(self, it):
self.it = it
self.next_chunk = ''
def _grow_chunk(self):
self.next_chunk = self.next_chunk + next(self.it)
def read(self, n):
if self.next_chunk == None:
return None
try:
while len(self.next_chunk) < n:
self._grow_chunk()
rv = self.next_chunk[:n]
self.next_chunk = self.next_chunk[n:]
return rv
except StopIteration:
rv = self.next_chunk
self.next_chunk = None
return rv
class LineReader(object):
    """Re-chunk an iterator of string fragments into complete lines.

    Each yielded string ends with *linesep* except possibly the last one,
    which is the unterminated remainder of the stream.
    """
    def __init__(self, it, linesep='\n'):
        # Fragments of the current, not-yet-terminated line.
        self.parts = []
        self.it = it
        self.linesep = linesep

    def __iter__(self):
        for chunk in self.it:
            loc = end_loc = 0
            while loc <= len(chunk):
                end_loc = chunk.find(self.linesep, loc)
                if end_loc == -1:
                    # No more separators: stash the remainder for later.
                    self.parts.append(chunk[loc:])
                    break
                else:
                    yield ''.join(self.parts) + chunk[loc:end_loc + 1]
                    self.parts = []
                    loc = end_loc + 1
        # Bug fix: previously `if self.parts:` yielded a spurious '' when the
        # final chunk ended with the separator (parts == [''] is truthy).
        tail = ''.join(self.parts)
        if tail:
            yield tail
from .priority_dict import PriorityDict
class ExpiringCounter(object):
'''
>>> c = ExpiringCounter(duration=1)
>>> c.put('name')
>>> c.get('name')
1
>>> time.sleep(2)
>>> c.get('name')
0
>>> c.put('name')
>>> c.put('name')
>>> c.get('name')
2
'''
DEFAULT_DURATION = 60 #seconds
def __init__(self, duration=DEFAULT_DURATION):
self.duration = duration
self.latest_ts = int(time.time())
self.counts = PriorityDict()
self.count = 0
self.history = {}
def put(self, key):
self.update()
ts = int(time.time())
hcounts = self.history.get(ts, {})
hcounts[key] = hcounts.get(key, 0) + 1
self.history[ts] = hcounts
self.counts[key] = self.counts.get(key, 0) + 1
self.count += 1
def get(self, key):
self.update()
return self.counts.get(key, 0)
def update(self):
ts = int(time.time() - self.duration)
ts_keys = [x for x in self.history if x < ts]
for ts_key in ts_keys:
hcounts = self.history.pop(ts_key)
for key, count in list(hcounts.items()):
kcount = self.counts[key]
kcount -= count
if kcount <= 0: del self.counts[key]
else: self.counts[key] = kcount
self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
'''
Set the limit on number of file descriptors
that this process can open.
'''
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
return True
except ValueError:
return False
class Dummy(object):
'''
Abstraction that creates a dummy object
that does no-operations on method invocations
but logs all interactions
Let us create a dummy object and perform some
random operations on it
>>> d = Dummy(1, a=5)
>>> d.foo() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
Now do the same as above but ask Dummy to print the activity
>>> d = Dummy(1, a=5, __quiet__=False) # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (1,), 'kwargs': {'a': 5}, 'prefix': []})
>>> d.foo() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
<deeputil.misc.Dummy object at ...>
>>> d.bar # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['bar']})
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
<deeputil.misc.Dummy object at ...>
'''
def _log(self, event, data):
if not self._quiet: print((self, event, data))
def __init__(self, *args, **kwargs):
self._prefix = kwargs.pop('__prefix__', [])
self._quiet = kwargs.pop('__quiet__', True)
self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
def __getattr__(self, attr):
if attr == '__wrapped__': raise AttributeError
self._log('__getattr__', dict(attr=attr))
return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)
def __call__(self, *args, **kwargs):
self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
'''
Caches result of a function
From: https://goo.gl/aXt4Qy
>>> import time
>>> @memoize
... def test(msg):
... # Processing for result that takes time
... time.sleep(1)
... return msg
>>>
>>> for i in range(5):
... start = time.time()
... test('calling memoized function')
... time_taken = time.time() - start
... # For first time it takes usual time
... if i == 0 and time_taken >= 1: print('ok')
... # Faster from the 2nd time
... elif i != 0 and time_taken <= 1: print('ok')
... else: print('NOT ok!')
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'''
class memodict(dict):
@wraps(f)
def __getitem__(self, *args):
return super(memodict, self).__getitem__(*args)
def __missing__(self, key):
self[key] = ret = f(key)
return ret
return memodict().__getitem__
@memoize
def load_object(imp_path):
'''
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
'''
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj
def grouper(n, iterable):
'''
Iterate over an iterator by chunks
'''
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
|
deep-compute/deeputil | deeputil/misc.py | get_datetime | python | def get_datetime(epoch):
'''
get datetime from an epoch timestamp
>>> get_datetime(1432188772)
datetime.datetime(2015, 5, 21, 6, 12, 52)
'''
t = time.gmtime(epoch)
dt = datetime.datetime(*t[:6])
return dt | get datetime from an epoch timestamp
>>> get_datetime(1432188772)
datetime.datetime(2015, 5, 21, 6, 12, 52) | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L48-L59 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8')
def get_timestamp(dt=None):
'''
Return current timestamp if @dt is None
else return timestamp of @dt.
>>> t = datetime.datetime(2015, 0o5, 21)
>>> get_timestamp(t)
1432166400
'''
if dt is None: dt = datetime.datetime.utcnow()
t = dt.utctimetuple()
return calendar.timegm(t)
def convert_ts(tt):
'''
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
'''
try:
ts = calendar.timegm(tt)
'''
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
'''
if ts < 0: ts = 0
except TypeError:
ts = None
return ts
#FIXME No unicode in python 3
def xcode(text, encoding='utf8', mode='ignore'):
'''
Converts unicode encoding to str
>>> xcode(b'hello')
b'hello'
>>> xcode('hello')
b'hello'
'''
return text.encode(encoding, mode) if isinstance(text, str) else text
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
'''
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
'''
parsed = urlparse(loc)
if ':' in parsed.netloc:
ip, port = parsed.netloc.split(':')
port = int(port)
else:
ip, port = parsed.netloc, default_port
return ip, port
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
'''
Return value for key. If not in cache or expired, return default
>>> c = ExpiringCache(10, default_timeout=1)
>>> c.put('a', 100)
>>> c.get('a')
100
>>> time.sleep(1)
>>> c.get('a')
'''
def get(self, key, default=None):
self.lookups += 1
try:
pos, val, expires = self.data[key]
except KeyError:
self.misses += 1
return default
if expires > time.time():
# cache entry still valid
self.hits += 1
# Not updating clock_refs to disable
# LRU logic as we just want expiry without LRU
# self.clock_refs[pos] = True
return val
else:
# cache entry has expired. Make sure the space in the cache can
# be recycled soon.
self.misses += 1
self.clock_refs[pos] = False
return default
def serialize_dict_keys(d, prefix=""):
"""returns all the keys in nested a dictionary.
>>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
['a', 'a.b', 'a.b.b', 'a.b.c']
"""
keys = []
for k, v in d.items():
fqk = '{}{}'.format(prefix, k)
keys.append(fqk)
if isinstance(v, dict):
keys.extend(serialize_dict_keys(v, prefix="{}.".format(fqk)))
return keys
import collections
class MarkValue(str):
pass
def flatten_dict(d,
parent_key='', sep='.',
ignore_under_prefixed=True,
mark_value=True):
'''
Flattens a nested dictionary
>>> from pprint import pprint
>>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
>>> fd = flatten_dict(d)
>>> pprint(fd)
{'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
'''
items = {}
for k in d:
if ignore_under_prefixed and k.startswith('__'): continue
v = d[k]
if mark_value and k.startswith('_') and not k.startswith('__'):
v = MarkValue(repr(v))
new_key = sep.join((parent_key, k)) if parent_key else k
if isinstance(v, collections.MutableMapping):
items.update(flatten_dict(v, new_key, sep=sep,
ignore_under_prefixed=True,
mark_value=True)
)
else:
items[new_key] = v
return items
def deepgetattr(obj, attr, default=AttributeError):
"""
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
"""
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise
class AttrDict(dict):
'''
A dictionary with attribute-style access. It maps attribute access to
the real dictionary.
from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
In a plain old dict, we can store values against keys like this
>>> d = {}
>>> d['a'] = 10
>>> d['a']
10
However, sometimes it is convenient to interact with a dict as
though it is an object. eg: d.a = 10 and access as d.a, this does
not work in a dict
>>> d = {}
>>> d.a = 10
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'dict' object has no attribute 'a'
This is where you can use an `AttrDict`
>>> d = AttrDict()
>>> d.a = 1
>>> d.a
1
>>> d
AttrDict({'a': 1})
>>> d['b'] = 2
>>> d['b']
2
>>> d.b
2
>>> del d['a']
>>> d
AttrDict({'b': 2})
>>> dd = d.copy()
>>> dd
AttrDict({'b': 2})
'''
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
def __getstate__(self):
return list(self.__dict__.items())
def __setstate__(self, items):
for key, val in items:
self.__dict__[key] = val
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))
def __setitem__(self, key, value):
return super(AttrDict, self).__setitem__(key, value)
def __getitem__(self, name):
item = super(AttrDict, self).__getitem__(name)
return AttrDict(item) if isinstance(item, dict) else item
def __delitem__(self, name):
return super(AttrDict, self).__delitem__(name)
__getattr__ = __getitem__
__setattr__ = __setitem__
def copy(self):
ch = AttrDict(self)
return ch
class IterAsFile(object):
'''
Wraps an iterator in a file-like API,
i.e. if you have a generator producing a list of strings,
this could make it look like a file.
from: http://stackoverflow.com/a/12593795/332313
>>> def str_fn():
... for c in 'a', 'b', 'c':
... yield c * 3
...
>>> IAF = IterAsFile(str_fn())
>>> IAF.read(6)
'aaabbb'
>>> IAF.read(4)
'ccc'
>>> IAF.read(2)
'''
def __init__(self, it):
self.it = it
self.next_chunk = ''
def _grow_chunk(self):
self.next_chunk = self.next_chunk + next(self.it)
def read(self, n):
if self.next_chunk == None:
return None
try:
while len(self.next_chunk) < n:
self._grow_chunk()
rv = self.next_chunk[:n]
self.next_chunk = self.next_chunk[n:]
return rv
except StopIteration:
rv = self.next_chunk
self.next_chunk = None
return rv
class LineReader(object):
def __init__(self, it, linesep='\n'):
self.parts = []
self.it = it
self.linesep = linesep
def __iter__(self):
for chunk in self.it:
loc = end_loc = 0
while loc <= len(chunk):
end_loc = chunk.find(self.linesep, loc)
if end_loc == -1:
self.parts.append(chunk[loc:])
break
else:
yield ''.join(self.parts) + chunk[loc:end_loc+1]
self.parts = []
loc = end_loc + 1
if self.parts:
yield ''.join(self.parts)
from .priority_dict import PriorityDict
class ExpiringCounter(object):
'''
>>> c = ExpiringCounter(duration=1)
>>> c.put('name')
>>> c.get('name')
1
>>> time.sleep(2)
>>> c.get('name')
0
>>> c.put('name')
>>> c.put('name')
>>> c.get('name')
2
'''
DEFAULT_DURATION = 60 #seconds
def __init__(self, duration=DEFAULT_DURATION):
self.duration = duration
self.latest_ts = int(time.time())
self.counts = PriorityDict()
self.count = 0
self.history = {}
def put(self, key):
self.update()
ts = int(time.time())
hcounts = self.history.get(ts, {})
hcounts[key] = hcounts.get(key, 0) + 1
self.history[ts] = hcounts
self.counts[key] = self.counts.get(key, 0) + 1
self.count += 1
def get(self, key):
self.update()
return self.counts.get(key, 0)
def update(self):
ts = int(time.time() - self.duration)
ts_keys = [x for x in self.history if x < ts]
for ts_key in ts_keys:
hcounts = self.history.pop(ts_key)
for key, count in list(hcounts.items()):
kcount = self.counts[key]
kcount -= count
if kcount <= 0: del self.counts[key]
else: self.counts[key] = kcount
self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
'''
Set the limit on number of file descriptors
that this process can open.
'''
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
return True
except ValueError:
return False
class Dummy(object):
'''
Abstraction that creates a dummy object
that does no-operations on method invocations
but logs all interactions
Let us create a dummy object and perform some
random operations on it
>>> d = Dummy(1, a=5)
>>> d.foo() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
Now do the same as above but ask Dummy to print the activity
>>> d = Dummy(1, a=5, __quiet__=False) # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (1,), 'kwargs': {'a': 5}, 'prefix': []})
>>> d.foo() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
<deeputil.misc.Dummy object at ...>
>>> d.bar # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['bar']})
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
<deeputil.misc.Dummy object at ...>
'''
def _log(self, event, data):
if not self._quiet: print((self, event, data))
def __init__(self, *args, **kwargs):
self._prefix = kwargs.pop('__prefix__', [])
self._quiet = kwargs.pop('__quiet__', True)
self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
def __getattr__(self, attr):
if attr == '__wrapped__': raise AttributeError
self._log('__getattr__', dict(attr=attr))
return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)
def __call__(self, *args, **kwargs):
self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
'''
Caches result of a function
From: https://goo.gl/aXt4Qy
>>> import time
>>> @memoize
... def test(msg):
... # Processing for result that takes time
... time.sleep(1)
... return msg
>>>
>>> for i in range(5):
... start = time.time()
... test('calling memoized function')
... time_taken = time.time() - start
... # For first time it takes usual time
... if i == 0 and time_taken >= 1: print('ok')
... # Faster from the 2nd time
... elif i != 0 and time_taken <= 1: print('ok')
... else: print('NOT ok!')
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'''
class memodict(dict):
@wraps(f)
def __getitem__(self, *args):
return super(memodict, self).__getitem__(*args)
def __missing__(self, key):
self[key] = ret = f(key)
return ret
return memodict().__getitem__
@memoize
def load_object(imp_path):
'''
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
'''
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj
def grouper(n, iterable):
'''
Iterate over an iterator by chunks
'''
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
|
deep-compute/deeputil | deeputil/misc.py | convert_ts | python | def convert_ts(tt):
'''
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
'''
try:
ts = calendar.timegm(tt)
'''
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
'''
if ts < 0: ts = 0
except TypeError:
ts = None
return ts | tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt) | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L61-L90 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8')
def get_timestamp(dt=None):
'''
Return current timestamp if @dt is None
else return timestamp of @dt.
>>> t = datetime.datetime(2015, 0o5, 21)
>>> get_timestamp(t)
1432166400
'''
if dt is None: dt = datetime.datetime.utcnow()
t = dt.utctimetuple()
return calendar.timegm(t)
def get_datetime(epoch):
'''
get datetime from an epoch timestamp
>>> get_datetime(1432188772)
datetime.datetime(2015, 5, 21, 6, 12, 52)
'''
t = time.gmtime(epoch)
dt = datetime.datetime(*t[:6])
return dt
#FIXME No unicode in python 3
def xcode(text, encoding='utf8', mode='ignore'):
'''
Converts unicode encoding to str
>>> xcode(b'hello')
b'hello'
>>> xcode('hello')
b'hello'
'''
return text.encode(encoding, mode) if isinstance(text, str) else text
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
'''
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
'''
parsed = urlparse(loc)
if ':' in parsed.netloc:
ip, port = parsed.netloc.split(':')
port = int(port)
else:
ip, port = parsed.netloc, default_port
return ip, port
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
'''
Return value for key. If not in cache or expired, return default
>>> c = ExpiringCache(10, default_timeout=1)
>>> c.put('a', 100)
>>> c.get('a')
100
>>> time.sleep(1)
>>> c.get('a')
'''
def get(self, key, default=None):
self.lookups += 1
try:
pos, val, expires = self.data[key]
except KeyError:
self.misses += 1
return default
if expires > time.time():
# cache entry still valid
self.hits += 1
# Not updating clock_refs to disable
# LRU logic as we just want expiry without LRU
# self.clock_refs[pos] = True
return val
else:
# cache entry has expired. Make sure the space in the cache can
# be recycled soon.
self.misses += 1
self.clock_refs[pos] = False
return default
def serialize_dict_keys(d, prefix=""):
"""returns all the keys in nested a dictionary.
>>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
['a', 'a.b', 'a.b.b', 'a.b.c']
"""
keys = []
for k, v in d.items():
fqk = '{}{}'.format(prefix, k)
keys.append(fqk)
if isinstance(v, dict):
keys.extend(serialize_dict_keys(v, prefix="{}.".format(fqk)))
return keys
import collections
class MarkValue(str):
pass
def flatten_dict(d,
parent_key='', sep='.',
ignore_under_prefixed=True,
mark_value=True):
'''
Flattens a nested dictionary
>>> from pprint import pprint
>>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
>>> fd = flatten_dict(d)
>>> pprint(fd)
{'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
'''
items = {}
for k in d:
if ignore_under_prefixed and k.startswith('__'): continue
v = d[k]
if mark_value and k.startswith('_') and not k.startswith('__'):
v = MarkValue(repr(v))
new_key = sep.join((parent_key, k)) if parent_key else k
if isinstance(v, collections.MutableMapping):
items.update(flatten_dict(v, new_key, sep=sep,
ignore_under_prefixed=True,
mark_value=True)
)
else:
items[new_key] = v
return items
def deepgetattr(obj, attr, default=AttributeError):
"""
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
"""
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise
class AttrDict(dict):
'''
A dictionary with attribute-style access. It maps attribute access to
the real dictionary.
from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
In a plain old dict, we can store values against keys like this
>>> d = {}
>>> d['a'] = 10
>>> d['a']
10
However, sometimes it is convenient to interact with a dict as
though it is an object. eg: d.a = 10 and access as d.a, this does
not work in a dict
>>> d = {}
>>> d.a = 10
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'dict' object has no attribute 'a'
This is where you can use an `AttrDict`
>>> d = AttrDict()
>>> d.a = 1
>>> d.a
1
>>> d
AttrDict({'a': 1})
>>> d['b'] = 2
>>> d['b']
2
>>> d.b
2
>>> del d['a']
>>> d
AttrDict({'b': 2})
>>> dd = d.copy()
>>> dd
AttrDict({'b': 2})
'''
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
def __getstate__(self):
return list(self.__dict__.items())
def __setstate__(self, items):
for key, val in items:
self.__dict__[key] = val
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))
def __setitem__(self, key, value):
return super(AttrDict, self).__setitem__(key, value)
def __getitem__(self, name):
item = super(AttrDict, self).__getitem__(name)
return AttrDict(item) if isinstance(item, dict) else item
def __delitem__(self, name):
return super(AttrDict, self).__delitem__(name)
__getattr__ = __getitem__
__setattr__ = __setitem__
def copy(self):
ch = AttrDict(self)
return ch
class IterAsFile(object):
'''
Wraps an iterator in a file-like API,
i.e. if you have a generator producing a list of strings,
this could make it look like a file.
from: http://stackoverflow.com/a/12593795/332313
>>> def str_fn():
... for c in 'a', 'b', 'c':
... yield c * 3
...
>>> IAF = IterAsFile(str_fn())
>>> IAF.read(6)
'aaabbb'
>>> IAF.read(4)
'ccc'
>>> IAF.read(2)
'''
def __init__(self, it):
self.it = it
self.next_chunk = ''
def _grow_chunk(self):
self.next_chunk = self.next_chunk + next(self.it)
def read(self, n):
if self.next_chunk == None:
return None
try:
while len(self.next_chunk) < n:
self._grow_chunk()
rv = self.next_chunk[:n]
self.next_chunk = self.next_chunk[n:]
return rv
except StopIteration:
rv = self.next_chunk
self.next_chunk = None
return rv
class LineReader(object):
def __init__(self, it, linesep='\n'):
self.parts = []
self.it = it
self.linesep = linesep
def __iter__(self):
for chunk in self.it:
loc = end_loc = 0
while loc <= len(chunk):
end_loc = chunk.find(self.linesep, loc)
if end_loc == -1:
self.parts.append(chunk[loc:])
break
else:
yield ''.join(self.parts) + chunk[loc:end_loc+1]
self.parts = []
loc = end_loc + 1
if self.parts:
yield ''.join(self.parts)
from .priority_dict import PriorityDict
class ExpiringCounter(object):
'''
>>> c = ExpiringCounter(duration=1)
>>> c.put('name')
>>> c.get('name')
1
>>> time.sleep(2)
>>> c.get('name')
0
>>> c.put('name')
>>> c.put('name')
>>> c.get('name')
2
'''
DEFAULT_DURATION = 60 #seconds
def __init__(self, duration=DEFAULT_DURATION):
self.duration = duration
self.latest_ts = int(time.time())
self.counts = PriorityDict()
self.count = 0
self.history = {}
def put(self, key):
self.update()
ts = int(time.time())
hcounts = self.history.get(ts, {})
hcounts[key] = hcounts.get(key, 0) + 1
self.history[ts] = hcounts
self.counts[key] = self.counts.get(key, 0) + 1
self.count += 1
def get(self, key):
self.update()
return self.counts.get(key, 0)
def update(self):
ts = int(time.time() - self.duration)
ts_keys = [x for x in self.history if x < ts]
for ts_key in ts_keys:
hcounts = self.history.pop(ts_key)
for key, count in list(hcounts.items()):
kcount = self.counts[key]
kcount -= count
if kcount <= 0: del self.counts[key]
else: self.counts[key] = kcount
self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
'''
Set the limit on number of file descriptors
that this process can open.
'''
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
return True
except ValueError:
return False
class Dummy(object):
'''
Abstraction that creates a dummy object
that does no-operations on method invocations
but logs all interactions
Let us create a dummy object and perform some
random operations on it
>>> d = Dummy(1, a=5)
>>> d.foo() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
Now do the same as above but ask Dummy to print the activity
>>> d = Dummy(1, a=5, __quiet__=False) # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (1,), 'kwargs': {'a': 5}, 'prefix': []})
>>> d.foo() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
<deeputil.misc.Dummy object at ...>
>>> d.bar # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['bar']})
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
<deeputil.misc.Dummy object at ...>
'''
def _log(self, event, data):
if not self._quiet: print((self, event, data))
def __init__(self, *args, **kwargs):
self._prefix = kwargs.pop('__prefix__', [])
self._quiet = kwargs.pop('__quiet__', True)
self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
def __getattr__(self, attr):
if attr == '__wrapped__': raise AttributeError
self._log('__getattr__', dict(attr=attr))
return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)
def __call__(self, *args, **kwargs):
self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
'''
Caches result of a function
From: https://goo.gl/aXt4Qy
>>> import time
>>> @memoize
... def test(msg):
... # Processing for result that takes time
... time.sleep(1)
... return msg
>>>
>>> for i in range(5):
... start = time.time()
... test('calling memoized function')
... time_taken = time.time() - start
... # For first time it takes usual time
... if i == 0 and time_taken >= 1: print('ok')
... # Faster from the 2nd time
... elif i != 0 and time_taken <= 1: print('ok')
... else: print('NOT ok!')
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'''
class memodict(dict):
@wraps(f)
def __getitem__(self, *args):
return super(memodict, self).__getitem__(*args)
def __missing__(self, key):
self[key] = ret = f(key)
return ret
return memodict().__getitem__
@memoize
def load_object(imp_path):
'''
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
'''
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj
def grouper(n, iterable):
'''
Iterate over an iterator by chunks
'''
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
|
deep-compute/deeputil | deeputil/misc.py | xcode | python | def xcode(text, encoding='utf8', mode='ignore'):
'''
Converts unicode encoding to str
>>> xcode(b'hello')
b'hello'
>>> xcode('hello')
b'hello'
'''
return text.encode(encoding, mode) if isinstance(text, str) else text | Converts unicode encoding to str
>>> xcode(b'hello')
b'hello'
>>> xcode('hello')
b'hello' | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L93-L102 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8')
def get_timestamp(dt=None):
'''
Return current timestamp if @dt is None
else return timestamp of @dt.
>>> t = datetime.datetime(2015, 0o5, 21)
>>> get_timestamp(t)
1432166400
'''
if dt is None: dt = datetime.datetime.utcnow()
t = dt.utctimetuple()
return calendar.timegm(t)
def get_datetime(epoch):
'''
get datetime from an epoch timestamp
>>> get_datetime(1432188772)
datetime.datetime(2015, 5, 21, 6, 12, 52)
'''
t = time.gmtime(epoch)
dt = datetime.datetime(*t[:6])
return dt
def convert_ts(tt):
'''
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
'''
try:
ts = calendar.timegm(tt)
'''
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
'''
if ts < 0: ts = 0
except TypeError:
ts = None
return ts
#FIXME No unicode in python 3
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
'''
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
'''
parsed = urlparse(loc)
if ':' in parsed.netloc:
ip, port = parsed.netloc.split(':')
port = int(port)
else:
ip, port = parsed.netloc, default_port
return ip, port
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
'''
Return value for key. If not in cache or expired, return default
>>> c = ExpiringCache(10, default_timeout=1)
>>> c.put('a', 100)
>>> c.get('a')
100
>>> time.sleep(1)
>>> c.get('a')
'''
def get(self, key, default=None):
self.lookups += 1
try:
pos, val, expires = self.data[key]
except KeyError:
self.misses += 1
return default
if expires > time.time():
# cache entry still valid
self.hits += 1
# Not updating clock_refs to disable
# LRU logic as we just want expiry without LRU
# self.clock_refs[pos] = True
return val
else:
# cache entry has expired. Make sure the space in the cache can
# be recycled soon.
self.misses += 1
self.clock_refs[pos] = False
return default
def serialize_dict_keys(d, prefix=""):
"""returns all the keys in nested a dictionary.
>>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
['a', 'a.b', 'a.b.b', 'a.b.c']
"""
keys = []
for k, v in d.items():
fqk = '{}{}'.format(prefix, k)
keys.append(fqk)
if isinstance(v, dict):
keys.extend(serialize_dict_keys(v, prefix="{}.".format(fqk)))
return keys
import collections
class MarkValue(str):
pass
def flatten_dict(d,
parent_key='', sep='.',
ignore_under_prefixed=True,
mark_value=True):
'''
Flattens a nested dictionary
>>> from pprint import pprint
>>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
>>> fd = flatten_dict(d)
>>> pprint(fd)
{'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
'''
items = {}
for k in d:
if ignore_under_prefixed and k.startswith('__'): continue
v = d[k]
if mark_value and k.startswith('_') and not k.startswith('__'):
v = MarkValue(repr(v))
new_key = sep.join((parent_key, k)) if parent_key else k
if isinstance(v, collections.MutableMapping):
items.update(flatten_dict(v, new_key, sep=sep,
ignore_under_prefixed=True,
mark_value=True)
)
else:
items[new_key] = v
return items
def deepgetattr(obj, attr, default=AttributeError):
"""
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
"""
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise
class AttrDict(dict):
    '''
    A dictionary with attribute-style access. It maps attribute access to
    the real dictionary.
    from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
    In a plain old dict, we can store values against keys like this
    >>> d = {}
    >>> d['a'] = 10
    >>> d['a']
    10
    However, sometimes it is convenient to interact with a dict as
    though it is an object. eg: d.a = 10 and access as d.a, this does
    not work in a dict
    >>> d = {}
    >>> d.a = 10
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    AttributeError: 'dict' object has no attribute 'a'
    This is where you can use an `AttrDict`
    >>> d = AttrDict()
    >>> d.a = 1
    >>> d.a
    1
    >>> d
    AttrDict({'a': 1})
    >>> d['b'] = 2
    >>> d['b']
    2
    >>> d.b
    2
    >>> del d['a']
    >>> d
    AttrDict({'b': 2})
    >>> dd = d.copy()
    >>> dd
    AttrDict({'b': 2})
    '''
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
    def __getstate__(self):
        # Pickle support: state is the instance __dict__ (normally empty,
        # since all data lives in the underlying dict itself).
        return list(self.__dict__.items())
    def __setstate__(self, items):
        for key, val in items:
            self.__dict__[key] = val
    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))
    def __setitem__(self, key, value):
        return super(AttrDict, self).__setitem__(key, value)
    def __getitem__(self, name):
        # Nested plain dicts are wrapped on the way out so chained
        # attribute access (d.a.b.c) keeps working at every level.
        item = super(AttrDict, self).__getitem__(name)
        return AttrDict(item) if isinstance(item, dict) else item
    def __delitem__(self, name):
        return super(AttrDict, self).__delitem__(name)
    # Attribute access is aliased straight onto item access.  NOTE: as a
    # consequence, a missing attribute raises KeyError (dict semantics),
    # not AttributeError.
    __getattr__ = __getitem__
    __setattr__ = __setitem__
    def copy(self):
        # Shallow copy that preserves the AttrDict type (dict.copy would
        # return a plain dict).
        ch = AttrDict(self)
        return ch
class IterAsFile(object):
    '''
    Wraps an iterator in a file-like API,
    i.e. if you have a generator producing a list of strings,
    this could make it look like a file.
    from: http://stackoverflow.com/a/12593795/332313
    >>> def str_fn():
    ...     for c in 'a', 'b', 'c':
    ...         yield c * 3
    ...
    >>> IAF = IterAsFile(str_fn())
    >>> IAF.read(6)
    'aaabbb'
    >>> IAF.read(4)
    'ccc'
    >>> IAF.read(2)
    '''
    def __init__(self, it):
        # @it: iterator yielding strings; buffered into next_chunk.
        self.it = it
        self.next_chunk = ''
    def _grow_chunk(self):
        # Pull one more item from the iterator into the buffer.
        self.next_chunk = self.next_chunk + next(self.it)
    def read(self, n):
        """Return up to @n characters; None once the iterator is exhausted
        (NOTE: unlike a real file, which would return '')."""
        # fix: identity comparison for the None sentinel ('== None' was
        # non-idiomatic and breaks on objects with odd __eq__).
        if self.next_chunk is None:
            return None
        try:
            while len(self.next_chunk) < n:
                self._grow_chunk()
            rv = self.next_chunk[:n]
            self.next_chunk = self.next_chunk[n:]
            return rv
        except StopIteration:
            # Iterator exhausted: flush what's buffered, then go to the
            # None state so subsequent reads return None.
            rv = self.next_chunk
            self.next_chunk = None
            return rv
class LineReader(object):
    """Re-chunk an iterator of strings into separator-terminated lines.

    Partial lines are carried across chunk boundaries; a final
    unterminated line is yielded at the end of the stream.
    """
    def __init__(self, it, linesep='\n'):
        self.parts = []      # pending pieces of the current (unfinished) line
        self.it = it
        self.linesep = linesep
    def __iter__(self):
        sep_len = len(self.linesep)
        for chunk in self.it:
            loc = 0
            while loc <= len(chunk):
                end_loc = chunk.find(self.linesep, loc)
                if end_loc == -1:
                    # No separator left in this chunk: stash the remainder.
                    self.parts.append(chunk[loc:])
                    break
                # fix: advance by the full separator length (was end_loc+1,
                # which split multi-char separators such as '\r\n').
                yield ''.join(self.parts) + chunk[loc:end_loc + sep_len]
                self.parts = []
                loc = end_loc + sep_len
        # fix: only yield a trailing line if it is non-empty (previously a
        # chunk ending exactly at a separator left parts == [''] and a
        # spurious '' was yielded at end of stream).
        tail = ''.join(self.parts)
        if tail:
            yield tail
from .priority_dict import PriorityDict
class ExpiringCounter(object):
    '''
    A per-key counter whose increments expire after @duration seconds.
    >>> c = ExpiringCounter(duration=1)
    >>> c.put('name')
    >>> c.get('name')
    1
    >>> time.sleep(2)
    >>> c.get('name')
    0
    >>> c.put('name')
    >>> c.put('name')
    >>> c.get('name')
    2
    '''
    DEFAULT_DURATION = 60 #seconds
    def __init__(self, duration=DEFAULT_DURATION):
        self.duration = duration
        # NOTE(review): latest_ts is written here but never read within
        # this class -- appears to be vestigial.
        self.latest_ts = int(time.time())
        # live per-key totals (PriorityDict from this package)
        self.counts = PriorityDict()
        # total of all live increments across keys
        self.count = 0
        # second-bucketed increments: {epoch_second: {key: n}} -- used to
        # know how much to subtract once a bucket ages out.
        self.history = {}
    def put(self, key):
        """Record one occurrence of @key at the current wall-clock second."""
        self.update()
        ts = int(time.time())
        hcounts = self.history.get(ts, {})
        hcounts[key] = hcounts.get(key, 0) + 1
        self.history[ts] = hcounts
        self.counts[key] = self.counts.get(key, 0) + 1
        self.count += 1
    def get(self, key):
        """Return the number of non-expired occurrences of @key (0 if none)."""
        self.update()
        return self.counts.get(key, 0)
    def update(self):
        """Expire all history buckets older than @duration seconds and
        subtract their contributions from the live counts."""
        ts = int(time.time() - self.duration)
        ts_keys = [x for x in self.history if x < ts]
        for ts_key in ts_keys:
            hcounts = self.history.pop(ts_key)
            for key, count in list(hcounts.items()):
                kcount = self.counts[key]
                kcount -= count
                # drop keys whose live total hits zero so counts stays small
                if kcount <= 0: del self.counts[key]
                else: self.counts[key] = kcount
                self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
    '''
    Set both the soft and hard limit on the number of file
    descriptors this process can open.

    Returns True on success, False if the kernel rejects the
    request (e.g. @n exceeds the hard limit for an unprivileged
    process).
    '''
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
        return True
    except (ValueError, OSError):
        # ValueError: n out of range / above the hard limit;
        # OSError: underlying setrlimit(2) failure.  Either way the
        # limit was not changed, so report failure instead of raising
        # (previously only ValueError was caught).
        return False
class Dummy(object):
    '''
    Abstraction that creates a dummy object
    that does no-operations on method invocations
    but logs all interactions
    Let us create a dummy object and perform some
    random operations on it
    >>> d = Dummy(1, a=5)
    >>> d.foo() # doctest: +ELLIPSIS
    <deeputil.misc.Dummy object at ...>
    >>> d.bar() # doctest: +ELLIPSIS
    <deeputil.misc.Dummy object at ...>
    >>> d.foo.bar() # doctest: +ELLIPSIS
    <deeputil.misc.Dummy object at ...>
    Now do the same as above but ask Dummy to print the activity
    >>> d = Dummy(1, a=5, __quiet__=False) # doctest: +ELLIPSIS
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (1,), 'kwargs': {'a': 5}, 'prefix': []})
    >>> d.foo() # doctest: +ELLIPSIS
    (<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
    (<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
    <deeputil.misc.Dummy object at ...>
    >>> d.bar # doctest: +ELLIPSIS
    (<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['bar']})
    <deeputil.misc.Dummy object at ...>
    >>> d.foo.bar() # doctest: +ELLIPSIS
    (<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
    (<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
    (<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
    <deeputil.misc.Dummy object at ...>
    '''
    def _log(self, event, data):
        # Print the interaction unless running in quiet (default) mode.
        if not self._quiet: print((self, event, data))
    def __init__(self, *args, **kwargs):
        # Control kwargs are popped so they never reach the logged kwargs.
        # _prefix accumulates the attribute path that led to this object.
        self._prefix = kwargs.pop('__prefix__', [])
        self._quiet = kwargs.pop('__quiet__', True)
        self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
    def __getattr__(self, attr):
        # presumably guards against introspection helpers (which probe
        # __wrapped__) recursing into the dummy -- TODO confirm.
        if attr == '__wrapped__': raise AttributeError
        self._log('__getattr__', dict(attr=attr))
        # Every attribute access returns a fresh Dummy with the attribute
        # name appended to the path, so arbitrary chains d.a.b.c work.
        return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)
    def __call__(self, *args, **kwargs):
        self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
        # Calls are no-ops that return another chainable Dummy.
        return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
    '''
    Caches result of a function
    From: https://goo.gl/aXt4Qy
    >>> import time
    >>> @memoize
    ... def test(msg):
    ...     # Processing for result that takes time
    ...     time.sleep(1)
    ...     return msg
    >>>
    >>> for i in range(5):
    ...     start = time.time()
    ...     test('calling memoized function')
    ...     time_taken = time.time() - start
    ...     # For first time it takes usual time
    ...     if i == 0 and time_taken >= 1: print('ok')
    ...     # Faster from the 2nd time
    ...     elif i != 0 and time_taken <= 1: print('ok')
    ...     else: print('NOT ok!')
    'calling memoized function'
    ok
    'calling memoized function'
    ok
    'calling memoized function'
    ok
    'calling memoized function'
    ok
    'calling memoized function'
    ok
    '''
    # NOTE: the cache is a dict keyed directly on the argument (see
    # f(key) in __missing__), so only functions taking exactly one
    # hashable argument are supported; the cache is unbounded.
    class memodict(dict):
        @wraps(f)
        def __getitem__(self, *args):
            return super(memodict, self).__getitem__(*args)
        def __missing__(self, key):
            # First call with this key: compute, store, return.
            self[key] = ret = f(key)
            return ret
    # The bound __getitem__ of a fresh memodict *is* the memoized callable.
    return memodict().__getitem__
# memoized: repeated loads of the same import path are cached
@memoize
def load_object(imp_path):
    '''
    Given a python import path, load the object
    For dynamic imports in a program
    >>> isdir = load_object('os.path.isdir')
    >>> isdir('/tmp')
    True
    >>> num = load_object('numbers.Number')
    >>> isinstance('x', num)
    False
    >>> isinstance(777, num)
    True
    '''
    # Split only on the first dot: the head is the top-level module, the
    # rest is treated as an attribute path on it.  A path without a dot
    # raises ValueError here.
    module_name, obj_name = imp_path.split('.', 1)
    module = __import__(module_name)
    # NOTE(review): relies on the whole attribute chain being reachable
    # from the top-level module (works for 'os.path.isdir'); packages
    # whose submodules are not auto-imported as parent attributes may
    # fail -- confirm before using with deep package paths.
    obj = attrgetter(obj_name)(module)
    return obj
def grouper(n, iterable):
    '''
    Iterate over @iterable in tuples of up to @n items;
    the final tuple may be shorter.
    '''
    it = iter(iterable)
    # islice consumes up to n items per call; the two-arg iter() stops
    # as soon as a chunk comes back empty.
    return iter(lambda: tuple(itertools.islice(it, n)), ())
|
deep-compute/deeputil | deeputil/misc.py | parse_location | python | def parse_location(loc, default_port):
'''
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
'''
parsed = urlparse(loc)
if ':' in parsed.netloc:
ip, port = parsed.netloc.split(':')
port = int(port)
else:
ip, port = parsed.netloc, default_port
return ip, port | loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888) | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L111-L132 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
    '''
    Returns a random (hex) string of a specified length.
    >>> len(generate_random_string(length=25))
    25
    Test randomness. Try N times and observe no duplicaton
    >>> N = 100
    >>> len(set(generate_random_string(10) for i in range(N))) == N
    True
    '''
    # One extra byte so odd lengths still have enough hex digits.
    nbytes = length // 2 + 1
    raw = binascii.hexlify(os.urandom(nbytes))
    return raw[:length].decode('utf-8')
def get_timestamp(dt=None):
    '''
    Return the current UTC epoch timestamp if @dt is None,
    else the epoch timestamp of @dt.
    >>> t = datetime.datetime(2015, 0o5, 21)
    >>> get_timestamp(t)
    1432166400
    '''
    when = datetime.datetime.utcnow() if dt is None else dt
    return calendar.timegm(when.utctimetuple())
def get_datetime(epoch):
    '''
    Get a UTC datetime from an epoch timestamp (second precision).
    >>> get_datetime(1432188772)
    datetime.datetime(2015, 5, 21, 6, 12, 52)
    '''
    year, month, day, hour, minute, second = time.gmtime(epoch)[:6]
    return datetime.datetime(year, month, day, hour, minute, second)
def convert_ts(tt):
    '''
    Convert a time.struct_time @tt to an epoch timestamp;
    None if @tt is not a valid time tuple.
    >>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
    >>> convert_ts(tt)
    1350950400
    >>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
    >>> convert_ts(tt)
    0
    >>> tt = 12
    >>> convert_ts(tt)
    '''
    try:
        ts = calendar.timegm(tt)
    except TypeError:
        # not a time tuple at all
        return None
    # Pre-epoch dates produce negative timestamps; clamp them to 0
    # (see https://github.com/prashanthellina/rsslurp/issues/680).
    return ts if ts >= 0 else 0
#FIXME No unicode in python 3
def xcode(text, encoding='utf8', mode='ignore'):
    '''
    Encode a str to bytes; any other value passes through unchanged.
    >>> xcode(b'hello')
    b'hello'
    >>> xcode('hello')
    b'hello'
    '''
    if isinstance(text, str):
        return text.encode(encoding, mode)
    return text
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
    '''
    Return value for key. If not in cache or expired, return default
    >>> c = ExpiringCache(10, default_timeout=1)
    >>> c.put('a', 100)
    >>> c.get('a')
    100
    >>> time.sleep(1)
    >>> c.get('a')
    '''
    def get(self, key, default=None):
        # self.lookups/hits/misses/data/clock_refs are inherited from
        # repoze.lru's ExpiringLRUCache; data maps key -> (pos, val, expires).
        self.lookups += 1
        try:
            pos, val, expires = self.data[key]
        except KeyError:
            self.misses += 1
            return default
        if expires > time.time():
            # cache entry still valid
            self.hits += 1
            # Not updating clock_refs to disable
            # LRU logic as we just want expiry without LRU
            # self.clock_refs[pos] = True
            return val
        else:
            # cache entry has expired. Make sure the space in the cache can
            # be recycled soon.
            self.misses += 1
            self.clock_refs[pos] = False
            return default
def serialize_dict_keys(d, prefix=""):
    """returns all the keys in a nested dictionary, dot-qualified.
    >>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
    ['a', 'a.b', 'a.b.b', 'a.b.c']
    """
    collected = []
    for key, val in d.items():
        qualified = '{}{}'.format(prefix, key)
        collected.append(qualified)
        if isinstance(val, dict):
            # recurse with the qualified key (plus '.') as the new prefix
            collected.extend(serialize_dict_keys(val, prefix='{}.'.format(qualified)))
    return collected
import collections
from collections.abc import MutableMapping


class MarkValue(str):
    """str subclass marking values whose key had a single-'_' prefix."""
    pass


def flatten_dict(d,
        parent_key='', sep='.',
        ignore_under_prefixed=True,
        mark_value=True):
    '''
    Flattens a nested dictionary into a single-level dict with
    @sep-joined keys.  Keys starting with '__' are dropped when
    @ignore_under_prefixed; values of keys starting with a single '_'
    are wrapped as MarkValue(repr(v)) when @mark_value.
    >>> from pprint import pprint
    >>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
    >>> fd = flatten_dict(d)
    >>> pprint(fd)
    {'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
    '''
    items = {}
    for k in d:
        if ignore_under_prefixed and k.startswith('__'): continue
        v = d[k]
        if mark_value and k.startswith('_') and not k.startswith('__'):
            v = MarkValue(repr(v))
        new_key = sep.join((parent_key, k)) if parent_key else k
        # fix: collections.MutableMapping was removed from the collections
        # top level in Python 3.10 -- use collections.abc instead.
        if isinstance(v, MutableMapping):
            # fix: propagate the caller's flags to the recursive call
            # (previously hard-coded to True, True, so passing False at
            # the top level was silently ignored for nested dicts).
            items.update(flatten_dict(v, new_key, sep=sep,
                                      ignore_under_prefixed=ignore_under_prefixed,
                                      mark_value=mark_value)
                         )
        else:
            items[new_key] = v
    return items
def deepgetattr(obj, attr, default=AttributeError):
"""
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
"""
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise
class AttrDict(dict):
'''
A dictionary with attribute-style access. It maps attribute access to
the real dictionary.
from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
In a plain old dict, we can store values against keys like this
>>> d = {}
>>> d['a'] = 10
>>> d['a']
10
However, sometimes it is convenient to interact with a dict as
though it is an object. eg: d.a = 10 and access as d.a, this does
not work in a dict
>>> d = {}
>>> d.a = 10
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'dict' object has no attribute 'a'
This is where you can use an `AttrDict`
>>> d = AttrDict()
>>> d.a = 1
>>> d.a
1
>>> d
AttrDict({'a': 1})
>>> d['b'] = 2
>>> d['b']
2
>>> d.b
2
>>> del d['a']
>>> d
AttrDict({'b': 2})
>>> dd = d.copy()
>>> dd
AttrDict({'b': 2})
'''
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
def __getstate__(self):
return list(self.__dict__.items())
def __setstate__(self, items):
for key, val in items:
self.__dict__[key] = val
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))
def __setitem__(self, key, value):
return super(AttrDict, self).__setitem__(key, value)
def __getitem__(self, name):
item = super(AttrDict, self).__getitem__(name)
return AttrDict(item) if isinstance(item, dict) else item
def __delitem__(self, name):
return super(AttrDict, self).__delitem__(name)
__getattr__ = __getitem__
__setattr__ = __setitem__
def copy(self):
ch = AttrDict(self)
return ch
class IterAsFile(object):
'''
Wraps an iterator in a file-like API,
i.e. if you have a generator producing a list of strings,
this could make it look like a file.
from: http://stackoverflow.com/a/12593795/332313
>>> def str_fn():
... for c in 'a', 'b', 'c':
... yield c * 3
...
>>> IAF = IterAsFile(str_fn())
>>> IAF.read(6)
'aaabbb'
>>> IAF.read(4)
'ccc'
>>> IAF.read(2)
'''
def __init__(self, it):
self.it = it
self.next_chunk = ''
def _grow_chunk(self):
self.next_chunk = self.next_chunk + next(self.it)
def read(self, n):
if self.next_chunk == None:
return None
try:
while len(self.next_chunk) < n:
self._grow_chunk()
rv = self.next_chunk[:n]
self.next_chunk = self.next_chunk[n:]
return rv
except StopIteration:
rv = self.next_chunk
self.next_chunk = None
return rv
class LineReader(object):
def __init__(self, it, linesep='\n'):
self.parts = []
self.it = it
self.linesep = linesep
def __iter__(self):
for chunk in self.it:
loc = end_loc = 0
while loc <= len(chunk):
end_loc = chunk.find(self.linesep, loc)
if end_loc == -1:
self.parts.append(chunk[loc:])
break
else:
yield ''.join(self.parts) + chunk[loc:end_loc+1]
self.parts = []
loc = end_loc + 1
if self.parts:
yield ''.join(self.parts)
from .priority_dict import PriorityDict
class ExpiringCounter(object):
'''
>>> c = ExpiringCounter(duration=1)
>>> c.put('name')
>>> c.get('name')
1
>>> time.sleep(2)
>>> c.get('name')
0
>>> c.put('name')
>>> c.put('name')
>>> c.get('name')
2
'''
DEFAULT_DURATION = 60 #seconds
def __init__(self, duration=DEFAULT_DURATION):
self.duration = duration
self.latest_ts = int(time.time())
self.counts = PriorityDict()
self.count = 0
self.history = {}
def put(self, key):
self.update()
ts = int(time.time())
hcounts = self.history.get(ts, {})
hcounts[key] = hcounts.get(key, 0) + 1
self.history[ts] = hcounts
self.counts[key] = self.counts.get(key, 0) + 1
self.count += 1
def get(self, key):
self.update()
return self.counts.get(key, 0)
def update(self):
ts = int(time.time() - self.duration)
ts_keys = [x for x in self.history if x < ts]
for ts_key in ts_keys:
hcounts = self.history.pop(ts_key)
for key, count in list(hcounts.items()):
kcount = self.counts[key]
kcount -= count
if kcount <= 0: del self.counts[key]
else: self.counts[key] = kcount
self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
'''
Set the limit on number of file descriptors
that this process can open.
'''
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
return True
except ValueError:
return False
class Dummy(object):
'''
Abstraction that creates a dummy object
that does no-operations on method invocations
but logs all interactions
Let us create a dummy object and perform some
random operations on it
>>> d = Dummy(1, a=5)
>>> d.foo() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
Now do the same as above but ask Dummy to print the activity
>>> d = Dummy(1, a=5, __quiet__=False) # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (1,), 'kwargs': {'a': 5}, 'prefix': []})
>>> d.foo() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
<deeputil.misc.Dummy object at ...>
>>> d.bar # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['bar']})
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
<deeputil.misc.Dummy object at ...>
'''
def _log(self, event, data):
if not self._quiet: print((self, event, data))
def __init__(self, *args, **kwargs):
self._prefix = kwargs.pop('__prefix__', [])
self._quiet = kwargs.pop('__quiet__', True)
self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
def __getattr__(self, attr):
if attr == '__wrapped__': raise AttributeError
self._log('__getattr__', dict(attr=attr))
return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)
def __call__(self, *args, **kwargs):
self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
'''
Caches result of a function
From: https://goo.gl/aXt4Qy
>>> import time
>>> @memoize
... def test(msg):
... # Processing for result that takes time
... time.sleep(1)
... return msg
>>>
>>> for i in range(5):
... start = time.time()
... test('calling memoized function')
... time_taken = time.time() - start
... # For first time it takes usual time
... if i == 0 and time_taken >= 1: print('ok')
... # Faster from the 2nd time
... elif i != 0 and time_taken <= 1: print('ok')
... else: print('NOT ok!')
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'''
class memodict(dict):
@wraps(f)
def __getitem__(self, *args):
return super(memodict, self).__getitem__(*args)
def __missing__(self, key):
self[key] = ret = f(key)
return ret
return memodict().__getitem__
@memoize
def load_object(imp_path):
'''
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
'''
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj
def grouper(n, iterable):
'''
Iterate over an iterator by chunks
'''
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
|
deep-compute/deeputil | deeputil/misc.py | serialize_dict_keys | python | def serialize_dict_keys(d, prefix=""):
keys = []
for k, v in d.items():
fqk = '{}{}'.format(prefix, k)
keys.append(fqk)
if isinstance(v, dict):
keys.extend(serialize_dict_keys(v, prefix="{}.".format(fqk)))
return keys | returns all the keys in nested a dictionary.
>>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
['a', 'a.b', 'a.b.b', 'a.b.c'] | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L172-L184 | [
"def serialize_dict_keys(d, prefix=\"\"):\n \"\"\"returns all the keys in nested a dictionary.\n >>> sorted(serialize_dict_keys({\"a\": {\"b\": {\"c\": 1, \"b\": 2} } }))\n ['a', 'a.b', 'a.b.b', 'a.b.c']\n \"\"\"\n keys = []\n for k, v in d.items():\n fqk = '{}{}'.format(prefix, k)\n keys.append(fqk)\n if isinstance(v, dict):\n keys.extend(serialize_dict_keys(v, prefix=\"{}.\".format(fqk)))\n\n return keys\n"
] | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8')
def get_timestamp(dt=None):
'''
Return current timestamp if @dt is None
else return timestamp of @dt.
>>> t = datetime.datetime(2015, 0o5, 21)
>>> get_timestamp(t)
1432166400
'''
if dt is None: dt = datetime.datetime.utcnow()
t = dt.utctimetuple()
return calendar.timegm(t)
def get_datetime(epoch):
'''
get datetime from an epoch timestamp
>>> get_datetime(1432188772)
datetime.datetime(2015, 5, 21, 6, 12, 52)
'''
t = time.gmtime(epoch)
dt = datetime.datetime(*t[:6])
return dt
def convert_ts(tt):
'''
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
'''
try:
ts = calendar.timegm(tt)
'''
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
'''
if ts < 0: ts = 0
except TypeError:
ts = None
return ts
#FIXME No unicode in python 3
def xcode(text, encoding='utf8', mode='ignore'):
'''
Converts unicode encoding to str
>>> xcode(b'hello')
b'hello'
>>> xcode('hello')
b'hello'
'''
return text.encode(encoding, mode) if isinstance(text, str) else text
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
    '''
    loc can be of the format http://<ip/domain>[:<port>]
    eg:
    http://localhost:8888
    http://localhost/
    return ip (str), port (int)
    >>> parse_location('http://localhost/', 6379)
    ('localhost', 6379)
    >>> parse_location('http://localhost:8888', 6379)
    ('localhost', 8888)
    '''
    netloc = urlparse(loc).netloc
    host, sep, port = netloc.partition(':')
    # partition's middle element is '' when no ':' is present
    if sep:
        return host, int(port)
    return host, default_port
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
'''
Return value for key. If not in cache or expired, return default
>>> c = ExpiringCache(10, default_timeout=1)
>>> c.put('a', 100)
>>> c.get('a')
100
>>> time.sleep(1)
>>> c.get('a')
'''
def get(self, key, default=None):
self.lookups += 1
try:
pos, val, expires = self.data[key]
except KeyError:
self.misses += 1
return default
if expires > time.time():
# cache entry still valid
self.hits += 1
# Not updating clock_refs to disable
# LRU logic as we just want expiry without LRU
# self.clock_refs[pos] = True
return val
else:
# cache entry has expired. Make sure the space in the cache can
# be recycled soon.
self.misses += 1
self.clock_refs[pos] = False
return default
import collections
class MarkValue(str):
pass
def flatten_dict(d,
parent_key='', sep='.',
ignore_under_prefixed=True,
mark_value=True):
'''
Flattens a nested dictionary
>>> from pprint import pprint
>>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
>>> fd = flatten_dict(d)
>>> pprint(fd)
{'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
'''
items = {}
for k in d:
if ignore_under_prefixed and k.startswith('__'): continue
v = d[k]
if mark_value and k.startswith('_') and not k.startswith('__'):
v = MarkValue(repr(v))
new_key = sep.join((parent_key, k)) if parent_key else k
if isinstance(v, collections.MutableMapping):
items.update(flatten_dict(v, new_key, sep=sep,
ignore_under_prefixed=True,
mark_value=True)
)
else:
items[new_key] = v
return items
def deepgetattr(obj, attr, default=AttributeError):
"""
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
"""
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise
class AttrDict(dict):
'''
A dictionary with attribute-style access. It maps attribute access to
the real dictionary.
from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
In a plain old dict, we can store values against keys like this
>>> d = {}
>>> d['a'] = 10
>>> d['a']
10
However, sometimes it is convenient to interact with a dict as
though it is an object. eg: d.a = 10 and access as d.a, this does
not work in a dict
>>> d = {}
>>> d.a = 10
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'dict' object has no attribute 'a'
This is where you can use an `AttrDict`
>>> d = AttrDict()
>>> d.a = 1
>>> d.a
1
>>> d
AttrDict({'a': 1})
>>> d['b'] = 2
>>> d['b']
2
>>> d.b
2
>>> del d['a']
>>> d
AttrDict({'b': 2})
>>> dd = d.copy()
>>> dd
AttrDict({'b': 2})
'''
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
def __getstate__(self):
return list(self.__dict__.items())
def __setstate__(self, items):
for key, val in items:
self.__dict__[key] = val
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))
def __setitem__(self, key, value):
return super(AttrDict, self).__setitem__(key, value)
def __getitem__(self, name):
item = super(AttrDict, self).__getitem__(name)
return AttrDict(item) if isinstance(item, dict) else item
def __delitem__(self, name):
return super(AttrDict, self).__delitem__(name)
__getattr__ = __getitem__
__setattr__ = __setitem__
def copy(self):
ch = AttrDict(self)
return ch
class IterAsFile(object):
'''
Wraps an iterator in a file-like API,
i.e. if you have a generator producing a list of strings,
this could make it look like a file.
from: http://stackoverflow.com/a/12593795/332313
>>> def str_fn():
... for c in 'a', 'b', 'c':
... yield c * 3
...
>>> IAF = IterAsFile(str_fn())
>>> IAF.read(6)
'aaabbb'
>>> IAF.read(4)
'ccc'
>>> IAF.read(2)
'''
def __init__(self, it):
self.it = it
self.next_chunk = ''
def _grow_chunk(self):
self.next_chunk = self.next_chunk + next(self.it)
def read(self, n):
if self.next_chunk == None:
return None
try:
while len(self.next_chunk) < n:
self._grow_chunk()
rv = self.next_chunk[:n]
self.next_chunk = self.next_chunk[n:]
return rv
except StopIteration:
rv = self.next_chunk
self.next_chunk = None
return rv
class LineReader(object):
def __init__(self, it, linesep='\n'):
self.parts = []
self.it = it
self.linesep = linesep
def __iter__(self):
for chunk in self.it:
loc = end_loc = 0
while loc <= len(chunk):
end_loc = chunk.find(self.linesep, loc)
if end_loc == -1:
self.parts.append(chunk[loc:])
break
else:
yield ''.join(self.parts) + chunk[loc:end_loc+1]
self.parts = []
loc = end_loc + 1
if self.parts:
yield ''.join(self.parts)
from .priority_dict import PriorityDict
class ExpiringCounter(object):
'''
>>> c = ExpiringCounter(duration=1)
>>> c.put('name')
>>> c.get('name')
1
>>> time.sleep(2)
>>> c.get('name')
0
>>> c.put('name')
>>> c.put('name')
>>> c.get('name')
2
'''
DEFAULT_DURATION = 60 #seconds
def __init__(self, duration=DEFAULT_DURATION):
self.duration = duration
self.latest_ts = int(time.time())
self.counts = PriorityDict()
self.count = 0
self.history = {}
def put(self, key):
self.update()
ts = int(time.time())
hcounts = self.history.get(ts, {})
hcounts[key] = hcounts.get(key, 0) + 1
self.history[ts] = hcounts
self.counts[key] = self.counts.get(key, 0) + 1
self.count += 1
def get(self, key):
self.update()
return self.counts.get(key, 0)
def update(self):
ts = int(time.time() - self.duration)
ts_keys = [x for x in self.history if x < ts]
for ts_key in ts_keys:
hcounts = self.history.pop(ts_key)
for key, count in list(hcounts.items()):
kcount = self.counts[key]
kcount -= count
if kcount <= 0: del self.counts[key]
else: self.counts[key] = kcount
self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
'''
Set the limit on number of file descriptors
that this process can open.
'''
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
return True
except ValueError:
return False
class Dummy(object):
    """No-op stand-in object that absorbs any attribute access or call.

    Every interaction returns a fresh Dummy, so arbitrarily deep chains
    like ``d.foo.bar()`` work.  Constructor kwargs ``__prefix__`` (the
    attribute path so far) and ``__quiet__`` (default True) are consumed
    by Dummy itself; with ``__quiet__=False`` every interaction is
    printed as an ``(instance, event, data)`` tuple.
    """

    def __init__(self, *args, **kwargs):
        # Pop our control kwargs so they never leak into the logged data.
        self._prefix = kwargs.pop('__prefix__', [])
        self._quiet = kwargs.pop('__quiet__', True)
        self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))

    def _log(self, event, data):
        # Announce the interaction unless running in quiet mode.
        if not self._quiet:
            print((self, event, data))

    def __getattr__(self, attr):
        # Let introspection helpers (e.g. inspect.unwrap) terminate cleanly
        # instead of receiving an endless chain of Dummies.
        if attr == '__wrapped__':
            raise AttributeError
        self._log('__getattr__', dict(attr=attr))
        return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)

    def __call__(self, *args, **kwargs):
        self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
        return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
    """Cache results of single-argument function *f*.

    Implemented as a dict subclass whose ``__missing__`` computes and
    stores ``f(key)``, so a cache hit is a plain dict lookup.  The
    argument must be hashable and the cache grows without bound.
    From: https://goo.gl/aXt4Qy
    """
    class _Cache(dict):
        @wraps(f)  # expose f's name/docstring on the returned callable
        def __getitem__(self, *args):
            return super(_Cache, self).__getitem__(*args)

        def __missing__(self, key):
            # First call for this key: compute, remember, return.
            value = f(key)
            self[key] = value
            return value

    return _Cache().__getitem__
@memoize
def load_object(imp_path):
    """Import and return the object at dotted path *imp_path*.

    For dynamic imports in a program.  Results are cached by @memoize,
    so repeated lookups of the same path are cheap.

    >>> isdir = load_object('os.path.isdir')
    >>> isdir('/tmp')
    True
    >>> num = load_object('numbers.Number')
    >>> isinstance('x', num)
    False
    >>> isinstance(777, num)
    True
    """
    # Split off the top-level package; the remainder is resolved
    # attribute-by-attribute.  NOTE(review): sub-modules resolve only if
    # importing the top-level package exposes them (as with os.path).
    module_name, obj_name = imp_path.split('.', 1)
    module = __import__(module_name)
    obj = attrgetter(obj_name)(module)
    return obj
def grouper(n, iterable):
    """Yield successive tuples of at most *n* items from *iterable*.

    The final tuple may be shorter when the input length is not a
    multiple of *n*.
    """
    iterator = iter(iterable)
    batch = tuple(itertools.islice(iterator, n))
    while batch:
        yield batch
        batch = tuple(itertools.islice(iterator, n))
|
deep-compute/deeputil | deeputil/misc.py | flatten_dict | python | def flatten_dict(d,
parent_key='', sep='.',
ignore_under_prefixed=True,
mark_value=True):
'''
Flattens a nested dictionary
>>> from pprint import pprint
>>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
>>> fd = flatten_dict(d)
>>> pprint(fd)
{'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
'''
items = {}
for k in d:
if ignore_under_prefixed and k.startswith('__'): continue
v = d[k]
if mark_value and k.startswith('_') and not k.startswith('__'):
v = MarkValue(repr(v))
new_key = sep.join((parent_key, k)) if parent_key else k
if isinstance(v, collections.MutableMapping):
items.update(flatten_dict(v, new_key, sep=sep,
ignore_under_prefixed=True,
mark_value=True)
)
else:
items[new_key] = v
return items | Flattens a nested dictionary
>>> from pprint import pprint
>>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
>>> fd = flatten_dict(d)
>>> pprint(fd)
{'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1} | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L192-L221 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
    """Return a random hex string of exactly *length* characters.

    >>> len(generate_random_string(length=25))
    25
    """
    # Each urandom byte hexlifies to two characters; ask for one extra
    # byte so odd lengths are covered, then trim to size.
    nbytes = int(length / 2 + 1)
    hex_chars = binascii.hexlify(os.urandom(nbytes))
    return hex_chars[:length].decode('utf-8')
def get_timestamp(dt=None):
    """Return the UTC epoch timestamp (int seconds) for *dt*.

    When *dt* is None, the current UTC time is used.  Naive datetimes
    are interpreted as UTC, matching the previous utcnow()-based
    behavior.

    >>> t = datetime.datetime(2015, 0o5, 21)
    >>> get_timestamp(t)
    1432166400
    """
    if dt is None:
        # datetime.utcnow() is deprecated since Python 3.12; an aware
        # "now" in UTC produces the identical utctimetuple().
        dt = datetime.datetime.now(datetime.timezone.utc)
    t = dt.utctimetuple()
    return calendar.timegm(t)
def get_datetime(epoch):
    """Convert an epoch timestamp into a naive (UTC) datetime.

    >>> get_datetime(1432188772)
    datetime.datetime(2015, 5, 21, 6, 12, 52)
    """
    year, month, day, hour, minute, second = time.gmtime(epoch)[:6]
    return datetime.datetime(year, month, day, hour, minute, second)
def convert_ts(tt):
    """Convert a ``time.struct_time`` to a non-negative epoch timestamp.

    Returns None when *tt* is not a valid time tuple.

    >>> convert_ts(time.strptime("23.10.2012", "%d.%m.%Y"))
    1350950400
    >>> convert_ts(time.strptime("1.1.1513", "%d.%m.%Y"))
    0
    >>> convert_ts(12)
    """
    try:
        ts = calendar.timegm(tt)
    except TypeError:
        # Not a struct_time / 9-tuple.
        return None
    # Pre-epoch dates produce negative timestamps (see
    # github.com/prashanthellina/rsslurp/issues/680); clamp them to 0.
    return max(ts, 0)
#FIXME No unicode in python 3
def xcode(text, encoding='utf8', mode='ignore'):
    """Encode *text* to bytes when it is a str; pass anything else through.

    >>> xcode('hello')
    b'hello'
    >>> xcode(b'hello')
    b'hello'
    """
    if isinstance(text, str):
        return text.encode(encoding, mode)
    return text
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
    """Split a URL like ``http://<host>[:<port>]`` into ``(host, port)``.

    *default_port* is used when the URL carries no explicit port; the
    returned port is always an int.

    >>> parse_location('http://localhost/', 6379)
    ('localhost', 6379)
    >>> parse_location('http://localhost:8888', 6379)
    ('localhost', 8888)
    """
    netloc = urlparse(loc).netloc
    if ':' not in netloc:
        return netloc, default_port
    host, port = netloc.split(':')
    return host, int(port)
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
    """repoze.lru ExpiringLRUCache variant: expiry only, *without* LRU.

    Entries still time out, but lookups do not refresh the clock
    reference, so eviction is purely age-based.

    >>> c = ExpiringCache(10, default_timeout=1)
    >>> c.put('a', 100)
    >>> c.get('a')
    100
    >>> time.sleep(1)
    >>> c.get('a')
    """

    def get(self, key, default=None):
        """Return the value for *key*, or *default* if missing/expired."""
        self.lookups += 1
        try:
            pos, val, expires = self.data[key]
        except KeyError:
            self.misses += 1
            return default
        if expires > time.time():
            # cache entry still valid
            self.hits += 1
            # Not updating clock_refs to disable
            # LRU logic as we just want expiry without LRU
            # self.clock_refs[pos] = True
            return val
        else:
            # cache entry has expired. Make sure the space in the cache can
            # be recycled soon.
            self.misses += 1
            self.clock_refs[pos] = False
            return default
def serialize_dict_keys(d, prefix=""):
    """Return every key path in nested dictionary *d*, dot-joined.

    >>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
    ['a', 'a.b', 'a.b.b', 'a.b.c']
    """
    paths = []
    for key, value in d.items():
        full_key = '{}{}'.format(prefix, key)
        paths.append(full_key)
        if isinstance(value, dict):
            # Recurse with the accumulated path as the new prefix.
            paths.extend(serialize_dict_keys(value, prefix="{}.".format(full_key)))
    return paths
import collections
class MarkValue(str):
    # Marker subclass of str used by flatten_dict to tag values whose
    # keys were single-underscore-prefixed ("marked") in the input.
    pass
def deepgetattr(obj, attr, default=AttributeError):
    """Follow a dotted *attr* chain on *obj* and return the final value.

    If any link in the chain is missing, *default* is returned when one
    was supplied; otherwise the AttributeError propagates.  (The
    sentinel for "no default" is the AttributeError class itself.)

    from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
    """
    target = obj
    try:
        for name in attr.split('.'):
            target = getattr(target, name)
        return target
    except AttributeError:
        if default is AttributeError:
            raise
        return default
class AttrDict(dict):
    """Dictionary whose items can also be read and written as attributes.

    >>> d = AttrDict()
    >>> d.a = 1
    >>> d.a
    1
    >>> d['a']
    1
    >>> AttrDict({'x': {'y': 2}}).x.y
    2

    Nested plain dicts come back wrapped in AttrDict so chained
    attribute access keeps working.
    from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
    """

    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)

    def __getstate__(self):
        # Pickle support: expose instance-dict items (normally empty).
        return list(self.__dict__.items())

    def __setstate__(self, items):
        for key, value in items:
            self.__dict__[key] = value

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))

    def __setitem__(self, key, value):
        return super(AttrDict, self).__setitem__(key, value)

    def __getitem__(self, name):
        value = super(AttrDict, self).__getitem__(name)
        # Wrap nested plain dicts for chained attribute access.
        if isinstance(value, dict):
            return AttrDict(value)
        return value

    def __delitem__(self, name):
        return super(AttrDict, self).__delitem__(name)

    # Attribute access is routed straight to item access.
    __getattr__ = __getitem__
    __setattr__ = __setitem__

    def copy(self):
        return AttrDict(self)
class IterAsFile(object):
    """Wrap an iterator of strings in a minimal file-like ``read()`` API.

    from: http://stackoverflow.com/a/12593795/332313

    >>> def str_fn():
    ...     for c in 'a', 'b', 'c':
    ...         yield c * 3
    ...
    >>> IAF = IterAsFile(str_fn())
    >>> IAF.read(6)
    'aaabbb'
    >>> IAF.read(4)
    'ccc'
    >>> IAF.read(2)
    """

    def __init__(self, it):
        self.it = it
        self.next_chunk = ''  # buffered, not-yet-returned text; None == EOF

    def _grow_chunk(self):
        # Raises StopIteration when the underlying iterator is done.
        self.next_chunk = self.next_chunk + next(self.it)

    def read(self, n):
        """Return up to *n* characters, or None once the data is exhausted."""
        # Identity check: the original `== None` would invoke __eq__ on
        # the buffered string and is non-idiomatic (PEP 8).
        if self.next_chunk is None:
            return None
        try:
            while len(self.next_chunk) < n:
                self._grow_chunk()
            rv = self.next_chunk[:n]
            self.next_chunk = self.next_chunk[n:]
            return rv
        except StopIteration:
            # Hand back the final short chunk; subsequent reads get None.
            rv = self.next_chunk
            self.next_chunk = None
            return rv
return rv
class LineReader(object):
def __init__(self, it, linesep='\n'):
self.parts = []
self.it = it
self.linesep = linesep
def __iter__(self):
for chunk in self.it:
loc = end_loc = 0
while loc <= len(chunk):
end_loc = chunk.find(self.linesep, loc)
if end_loc == -1:
self.parts.append(chunk[loc:])
break
else:
yield ''.join(self.parts) + chunk[loc:end_loc+1]
self.parts = []
loc = end_loc + 1
if self.parts:
yield ''.join(self.parts)
from .priority_dict import PriorityDict
class ExpiringCounter(object):
    """Per-key event counter whose entries expire after *duration* seconds.

    >>> c = ExpiringCounter(duration=1)
    >>> c.put('name')
    >>> c.get('name')
    1
    >>> time.sleep(2)
    >>> c.get('name')
    0
    >>> c.put('name')
    >>> c.put('name')
    >>> c.get('name')
    2
    """

    DEFAULT_DURATION = 60  # seconds

    def __init__(self, duration=DEFAULT_DURATION):
        self.duration = duration
        self.latest_ts = int(time.time())  # NOTE(review): never read afterwards
        self.counts = PriorityDict()  # key -> live (unexpired) count
        self.count = 0                # total live events across all keys
        self.history = {}             # unix second -> {key: count} buckets

    def put(self, key):
        """Record one occurrence of *key* now."""
        self.update()
        ts = int(time.time())
        hcounts = self.history.get(ts, {})
        hcounts[key] = hcounts.get(key, 0) + 1
        self.history[ts] = hcounts
        self.counts[key] = self.counts.get(key, 0) + 1
        self.count += 1

    def get(self, key):
        """Return the number of unexpired occurrences of *key*."""
        self.update()
        return self.counts.get(key, 0)

    def update(self):
        """Expire history buckets that fell out of the duration window."""
        ts = int(time.time() - self.duration)
        ts_keys = [x for x in self.history if x < ts]
        for ts_key in ts_keys:
            hcounts = self.history.pop(ts_key)
            for key, count in list(hcounts.items()):
                kcount = self.counts[key]
                kcount -= count
                if kcount <= 0:
                    del self.counts[key]
                else:
                    self.counts[key] = kcount
                self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
    '''
    Set the limit on number of file descriptors
    that this process can open.

    Returns True on success; False when the OS rejects the value
    (setrlimit raises ValueError for disallowed limits).
    '''
    try:
        # Set both the soft and the hard limit to n.
        resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
        return True
    except ValueError:
        return False
class Dummy(object):
    """No-op object: any attribute access or call succeeds and returns a
    fresh Dummy, so chains like ``d.foo.bar()`` work.

    Constructor kwargs ``__prefix__`` (attribute path so far) and
    ``__quiet__`` (suppress logging, default True) are consumed by Dummy
    itself; all other arguments are only logged.  With
    ``__quiet__=False`` every interaction is printed as an
    ``(instance, event, data)`` tuple.
    """

    def _log(self, event, data):
        # Print the interaction unless in quiet mode.
        if not self._quiet: print((self, event, data))

    def __init__(self, *args, **kwargs):
        # Pop control kwargs so they never appear in the logged kwargs.
        self._prefix = kwargs.pop('__prefix__', [])
        self._quiet = kwargs.pop('__quiet__', True)
        self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))

    def __getattr__(self, attr):
        # Let inspect.unwrap & friends terminate instead of looping forever.
        if attr == '__wrapped__': raise AttributeError
        self._log('__getattr__', dict(attr=attr))
        return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)

    def __call__(self, *args, **kwargs):
        self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
        return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
    '''
    Caches result of a function.

    Implemented as a dict subclass whose __missing__ computes and stores
    f(key), so a cache hit is a plain dict lookup.  Supports
    single-argument functions with hashable arguments; the cache grows
    without bound.
    From: https://goo.gl/aXt4Qy
    '''
    class memodict(dict):
        @wraps(f)  # preserve f's name/docstring on the returned callable
        def __getitem__(self, *args):
            return super(memodict, self).__getitem__(*args)

        def __missing__(self, key):
            # First call for this key: compute, remember, return.
            self[key] = ret = f(key)
            return ret

    return memodict().__getitem__
@memoize
def load_object(imp_path):
    '''
    Given a python import path, load the object.
    For dynamic imports in a program; results are cached by @memoize.

    >>> isdir = load_object('os.path.isdir')
    >>> isdir('/tmp')
    True
    >>> num = load_object('numbers.Number')
    >>> isinstance('x', num)
    False
    >>> isinstance(777, num)
    True
    '''
    # Split off the top-level package; the rest resolves attribute-by-
    # attribute.  NOTE(review): sub-modules resolve only when importing
    # the top-level package exposes them (as with os.path).
    module_name, obj_name = imp_path.split('.', 1)
    module = __import__(module_name)
    obj = attrgetter(obj_name)(module)
    return obj
def grouper(n, iterable):
    '''
    Iterate over an iterator by chunks
    (tuples of at most n items; the last chunk may be shorter).
    '''
    it = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(it, n))
        if not chunk:
            # Underlying iterator exhausted.
            return
        yield chunk
|
deep-compute/deeputil | deeputil/misc.py | deepgetattr | python | def deepgetattr(obj, attr, default=AttributeError):
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise | Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'> | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L223-L259 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8')
def get_timestamp(dt=None):
'''
Return current timestamp if @dt is None
else return timestamp of @dt.
>>> t = datetime.datetime(2015, 0o5, 21)
>>> get_timestamp(t)
1432166400
'''
if dt is None: dt = datetime.datetime.utcnow()
t = dt.utctimetuple()
return calendar.timegm(t)
def get_datetime(epoch):
'''
get datetime from an epoch timestamp
>>> get_datetime(1432188772)
datetime.datetime(2015, 5, 21, 6, 12, 52)
'''
t = time.gmtime(epoch)
dt = datetime.datetime(*t[:6])
return dt
def convert_ts(tt):
'''
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
'''
try:
ts = calendar.timegm(tt)
'''
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
'''
if ts < 0: ts = 0
except TypeError:
ts = None
return ts
#FIXME No unicode in python 3
def xcode(text, encoding='utf8', mode='ignore'):
'''
Converts unicode encoding to str
>>> xcode(b'hello')
b'hello'
>>> xcode('hello')
b'hello'
'''
return text.encode(encoding, mode) if isinstance(text, str) else text
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
'''
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
'''
parsed = urlparse(loc)
if ':' in parsed.netloc:
ip, port = parsed.netloc.split(':')
port = int(port)
else:
ip, port = parsed.netloc, default_port
return ip, port
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
'''
Return value for key. If not in cache or expired, return default
>>> c = ExpiringCache(10, default_timeout=1)
>>> c.put('a', 100)
>>> c.get('a')
100
>>> time.sleep(1)
>>> c.get('a')
'''
def get(self, key, default=None):
self.lookups += 1
try:
pos, val, expires = self.data[key]
except KeyError:
self.misses += 1
return default
if expires > time.time():
# cache entry still valid
self.hits += 1
# Not updating clock_refs to disable
# LRU logic as we just want expiry without LRU
# self.clock_refs[pos] = True
return val
else:
# cache entry has expired. Make sure the space in the cache can
# be recycled soon.
self.misses += 1
self.clock_refs[pos] = False
return default
def serialize_dict_keys(d, prefix=""):
"""returns all the keys in nested a dictionary.
>>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
['a', 'a.b', 'a.b.b', 'a.b.c']
"""
keys = []
for k, v in d.items():
fqk = '{}{}'.format(prefix, k)
keys.append(fqk)
if isinstance(v, dict):
keys.extend(serialize_dict_keys(v, prefix="{}.".format(fqk)))
return keys
import collections
class MarkValue(str):
pass
def flatten_dict(d,
                 parent_key='', sep='.',
                 ignore_under_prefixed=True,
                 mark_value=True):
    '''
    Flattens a nested dictionary into single-level, *sep*-joined keys.

    Keys starting with '__' are dropped when *ignore_under_prefixed* is
    true; values under single-'_'-prefixed keys are wrapped in
    MarkValue(repr(value)) when *mark_value* is true.

    >>> from pprint import pprint
    >>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
    >>> fd = flatten_dict(d)
    >>> pprint(fd)
    {'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
    '''
    # collections.MutableMapping was removed in Python 3.10; the abc
    # location has existed since Python 3.3.
    from collections.abc import MutableMapping

    items = {}
    for k in d:
        if ignore_under_prefixed and k.startswith('__'):
            continue
        v = d[k]
        if mark_value and k.startswith('_') and not k.startswith('__'):
            v = MarkValue(repr(v))
        new_key = sep.join((parent_key, k)) if parent_key else k
        if isinstance(v, MutableMapping):
            # Recurse, propagating the caller's flags (the original
            # hard-coded True/True here, ignoring the arguments).
            items.update(flatten_dict(v, new_key, sep=sep,
                                      ignore_under_prefixed=ignore_under_prefixed,
                                      mark_value=mark_value))
        else:
            items[new_key] = v
    return items
class AttrDict(dict):
'''
A dictionary with attribute-style access. It maps attribute access to
the real dictionary.
from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
In a plain old dict, we can store values against keys like this
>>> d = {}
>>> d['a'] = 10
>>> d['a']
10
However, sometimes it is convenient to interact with a dict as
though it is an object. eg: d.a = 10 and access as d.a, this does
not work in a dict
>>> d = {}
>>> d.a = 10
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'dict' object has no attribute 'a'
This is where you can use an `AttrDict`
>>> d = AttrDict()
>>> d.a = 1
>>> d.a
1
>>> d
AttrDict({'a': 1})
>>> d['b'] = 2
>>> d['b']
2
>>> d.b
2
>>> del d['a']
>>> d
AttrDict({'b': 2})
>>> dd = d.copy()
>>> dd
AttrDict({'b': 2})
'''
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
def __getstate__(self):
return list(self.__dict__.items())
def __setstate__(self, items):
for key, val in items:
self.__dict__[key] = val
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))
def __setitem__(self, key, value):
return super(AttrDict, self).__setitem__(key, value)
def __getitem__(self, name):
item = super(AttrDict, self).__getitem__(name)
return AttrDict(item) if isinstance(item, dict) else item
def __delitem__(self, name):
return super(AttrDict, self).__delitem__(name)
__getattr__ = __getitem__
__setattr__ = __setitem__
def copy(self):
ch = AttrDict(self)
return ch
class IterAsFile(object):
'''
Wraps an iterator in a file-like API,
i.e. if you have a generator producing a list of strings,
this could make it look like a file.
from: http://stackoverflow.com/a/12593795/332313
>>> def str_fn():
... for c in 'a', 'b', 'c':
... yield c * 3
...
>>> IAF = IterAsFile(str_fn())
>>> IAF.read(6)
'aaabbb'
>>> IAF.read(4)
'ccc'
>>> IAF.read(2)
'''
def __init__(self, it):
self.it = it
self.next_chunk = ''
def _grow_chunk(self):
self.next_chunk = self.next_chunk + next(self.it)
def read(self, n):
if self.next_chunk == None:
return None
try:
while len(self.next_chunk) < n:
self._grow_chunk()
rv = self.next_chunk[:n]
self.next_chunk = self.next_chunk[n:]
return rv
except StopIteration:
rv = self.next_chunk
self.next_chunk = None
return rv
class LineReader(object):
def __init__(self, it, linesep='\n'):
self.parts = []
self.it = it
self.linesep = linesep
def __iter__(self):
for chunk in self.it:
loc = end_loc = 0
while loc <= len(chunk):
end_loc = chunk.find(self.linesep, loc)
if end_loc == -1:
self.parts.append(chunk[loc:])
break
else:
yield ''.join(self.parts) + chunk[loc:end_loc+1]
self.parts = []
loc = end_loc + 1
if self.parts:
yield ''.join(self.parts)
from .priority_dict import PriorityDict
class ExpiringCounter(object):
'''
>>> c = ExpiringCounter(duration=1)
>>> c.put('name')
>>> c.get('name')
1
>>> time.sleep(2)
>>> c.get('name')
0
>>> c.put('name')
>>> c.put('name')
>>> c.get('name')
2
'''
DEFAULT_DURATION = 60 #seconds
def __init__(self, duration=DEFAULT_DURATION):
self.duration = duration
self.latest_ts = int(time.time())
self.counts = PriorityDict()
self.count = 0
self.history = {}
def put(self, key):
self.update()
ts = int(time.time())
hcounts = self.history.get(ts, {})
hcounts[key] = hcounts.get(key, 0) + 1
self.history[ts] = hcounts
self.counts[key] = self.counts.get(key, 0) + 1
self.count += 1
def get(self, key):
self.update()
return self.counts.get(key, 0)
def update(self):
ts = int(time.time() - self.duration)
ts_keys = [x for x in self.history if x < ts]
for ts_key in ts_keys:
hcounts = self.history.pop(ts_key)
for key, count in list(hcounts.items()):
kcount = self.counts[key]
kcount -= count
if kcount <= 0: del self.counts[key]
else: self.counts[key] = kcount
self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
'''
Set the limit on number of file descriptors
that this process can open.
'''
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
return True
except ValueError:
return False
class Dummy(object):
'''
Abstraction that creates a dummy object
that does no-operations on method invocations
but logs all interactions
Let us create a dummy object and perform some
random operations on it
>>> d = Dummy(1, a=5)
>>> d.foo() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
Now do the same as above but ask Dummy to print the activity
>>> d = Dummy(1, a=5, __quiet__=False) # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (1,), 'kwargs': {'a': 5}, 'prefix': []})
>>> d.foo() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
<deeputil.misc.Dummy object at ...>
>>> d.bar # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['bar']})
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
<deeputil.misc.Dummy object at ...>
'''
def _log(self, event, data):
if not self._quiet: print((self, event, data))
def __init__(self, *args, **kwargs):
self._prefix = kwargs.pop('__prefix__', [])
self._quiet = kwargs.pop('__quiet__', True)
self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
def __getattr__(self, attr):
if attr == '__wrapped__': raise AttributeError
self._log('__getattr__', dict(attr=attr))
return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)
def __call__(self, *args, **kwargs):
self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
'''
Caches result of a function
From: https://goo.gl/aXt4Qy
>>> import time
>>> @memoize
... def test(msg):
... # Processing for result that takes time
... time.sleep(1)
... return msg
>>>
>>> for i in range(5):
... start = time.time()
... test('calling memoized function')
... time_taken = time.time() - start
... # For first time it takes usual time
... if i == 0 and time_taken >= 1: print('ok')
... # Faster from the 2nd time
... elif i != 0 and time_taken <= 1: print('ok')
... else: print('NOT ok!')
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'''
class memodict(dict):
@wraps(f)
def __getitem__(self, *args):
return super(memodict, self).__getitem__(*args)
def __missing__(self, key):
self[key] = ret = f(key)
return ret
return memodict().__getitem__
@memoize
def load_object(imp_path):
'''
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
'''
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj
def grouper(n, iterable):
'''
Iterate over an iterator by chunks
'''
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
|
deep-compute/deeputil | deeputil/misc.py | set_file_limits | python | def set_file_limits(n):
'''
Set the limit on number of file descriptors
that this process can open.
'''
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
return True
except ValueError:
return False | Set the limit on number of file descriptors
that this process can open. | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L473-L484 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8')
def get_timestamp(dt=None):
'''
Return current timestamp if @dt is None
else return timestamp of @dt.
>>> t = datetime.datetime(2015, 0o5, 21)
>>> get_timestamp(t)
1432166400
'''
if dt is None: dt = datetime.datetime.utcnow()
t = dt.utctimetuple()
return calendar.timegm(t)
def get_datetime(epoch):
'''
get datetime from an epoch timestamp
>>> get_datetime(1432188772)
datetime.datetime(2015, 5, 21, 6, 12, 52)
'''
t = time.gmtime(epoch)
dt = datetime.datetime(*t[:6])
return dt
def convert_ts(tt):
'''
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
'''
try:
ts = calendar.timegm(tt)
'''
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
'''
if ts < 0: ts = 0
except TypeError:
ts = None
return ts
#FIXME No unicode in python 3
def xcode(text, encoding='utf8', mode='ignore'):
'''
Converts unicode encoding to str
>>> xcode(b'hello')
b'hello'
>>> xcode('hello')
b'hello'
'''
return text.encode(encoding, mode) if isinstance(text, str) else text
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
'''
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
'''
parsed = urlparse(loc)
if ':' in parsed.netloc:
ip, port = parsed.netloc.split(':')
port = int(port)
else:
ip, port = parsed.netloc, default_port
return ip, port
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
'''
Return value for key. If not in cache or expired, return default
>>> c = ExpiringCache(10, default_timeout=1)
>>> c.put('a', 100)
>>> c.get('a')
100
>>> time.sleep(1)
>>> c.get('a')
'''
def get(self, key, default=None):
self.lookups += 1
try:
pos, val, expires = self.data[key]
except KeyError:
self.misses += 1
return default
if expires > time.time():
# cache entry still valid
self.hits += 1
# Not updating clock_refs to disable
# LRU logic as we just want expiry without LRU
# self.clock_refs[pos] = True
return val
else:
# cache entry has expired. Make sure the space in the cache can
# be recycled soon.
self.misses += 1
self.clock_refs[pos] = False
return default
def serialize_dict_keys(d, prefix=""):
"""returns all the keys in nested a dictionary.
>>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
['a', 'a.b', 'a.b.b', 'a.b.c']
"""
keys = []
for k, v in d.items():
fqk = '{}{}'.format(prefix, k)
keys.append(fqk)
if isinstance(v, dict):
keys.extend(serialize_dict_keys(v, prefix="{}.".format(fqk)))
return keys
import collections
class MarkValue(str):
pass
def flatten_dict(d,
parent_key='', sep='.',
ignore_under_prefixed=True,
mark_value=True):
'''
Flattens a nested dictionary
>>> from pprint import pprint
>>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
>>> fd = flatten_dict(d)
>>> pprint(fd)
{'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
'''
items = {}
for k in d:
if ignore_under_prefixed and k.startswith('__'): continue
v = d[k]
if mark_value and k.startswith('_') and not k.startswith('__'):
v = MarkValue(repr(v))
new_key = sep.join((parent_key, k)) if parent_key else k
if isinstance(v, collections.MutableMapping):
items.update(flatten_dict(v, new_key, sep=sep,
ignore_under_prefixed=True,
mark_value=True)
)
else:
items[new_key] = v
return items
def deepgetattr(obj, attr, default=AttributeError):
"""
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
"""
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise
class AttrDict(dict):
'''
A dictionary with attribute-style access. It maps attribute access to
the real dictionary.
from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
In a plain old dict, we can store values against keys like this
>>> d = {}
>>> d['a'] = 10
>>> d['a']
10
However, sometimes it is convenient to interact with a dict as
though it is an object. eg: d.a = 10 and access as d.a, this does
not work in a dict
>>> d = {}
>>> d.a = 10
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'dict' object has no attribute 'a'
This is where you can use an `AttrDict`
>>> d = AttrDict()
>>> d.a = 1
>>> d.a
1
>>> d
AttrDict({'a': 1})
>>> d['b'] = 2
>>> d['b']
2
>>> d.b
2
>>> del d['a']
>>> d
AttrDict({'b': 2})
>>> dd = d.copy()
>>> dd
AttrDict({'b': 2})
'''
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
def __getstate__(self):
return list(self.__dict__.items())
def __setstate__(self, items):
for key, val in items:
self.__dict__[key] = val
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))
def __setitem__(self, key, value):
return super(AttrDict, self).__setitem__(key, value)
def __getitem__(self, name):
item = super(AttrDict, self).__getitem__(name)
return AttrDict(item) if isinstance(item, dict) else item
def __delitem__(self, name):
return super(AttrDict, self).__delitem__(name)
__getattr__ = __getitem__
__setattr__ = __setitem__
def copy(self):
ch = AttrDict(self)
return ch
class IterAsFile(object):
'''
Wraps an iterator in a file-like API,
i.e. if you have a generator producing a list of strings,
this could make it look like a file.
from: http://stackoverflow.com/a/12593795/332313
>>> def str_fn():
... for c in 'a', 'b', 'c':
... yield c * 3
...
>>> IAF = IterAsFile(str_fn())
>>> IAF.read(6)
'aaabbb'
>>> IAF.read(4)
'ccc'
>>> IAF.read(2)
'''
def __init__(self, it):
self.it = it
self.next_chunk = ''
def _grow_chunk(self):
self.next_chunk = self.next_chunk + next(self.it)
def read(self, n):
if self.next_chunk == None:
return None
try:
while len(self.next_chunk) < n:
self._grow_chunk()
rv = self.next_chunk[:n]
self.next_chunk = self.next_chunk[n:]
return rv
except StopIteration:
rv = self.next_chunk
self.next_chunk = None
return rv
class LineReader(object):
def __init__(self, it, linesep='\n'):
self.parts = []
self.it = it
self.linesep = linesep
def __iter__(self):
for chunk in self.it:
loc = end_loc = 0
while loc <= len(chunk):
end_loc = chunk.find(self.linesep, loc)
if end_loc == -1:
self.parts.append(chunk[loc:])
break
else:
yield ''.join(self.parts) + chunk[loc:end_loc+1]
self.parts = []
loc = end_loc + 1
if self.parts:
yield ''.join(self.parts)
from .priority_dict import PriorityDict
class ExpiringCounter(object):
'''
>>> c = ExpiringCounter(duration=1)
>>> c.put('name')
>>> c.get('name')
1
>>> time.sleep(2)
>>> c.get('name')
0
>>> c.put('name')
>>> c.put('name')
>>> c.get('name')
2
'''
DEFAULT_DURATION = 60 #seconds
def __init__(self, duration=DEFAULT_DURATION):
self.duration = duration
self.latest_ts = int(time.time())
self.counts = PriorityDict()
self.count = 0
self.history = {}
def put(self, key):
self.update()
ts = int(time.time())
hcounts = self.history.get(ts, {})
hcounts[key] = hcounts.get(key, 0) + 1
self.history[ts] = hcounts
self.counts[key] = self.counts.get(key, 0) + 1
self.count += 1
def get(self, key):
self.update()
return self.counts.get(key, 0)
def update(self):
ts = int(time.time() - self.duration)
ts_keys = [x for x in self.history if x < ts]
for ts_key in ts_keys:
hcounts = self.history.pop(ts_key)
for key, count in list(hcounts.items()):
kcount = self.counts[key]
kcount -= count
if kcount <= 0: del self.counts[key]
else: self.counts[key] = kcount
self.count -= count
#TODO Examples and Readme.md
import resource
class Dummy(object):
'''
Abstraction that creates a dummy object
that does no-operations on method invocations
but logs all interactions
Let us create a dummy object and perform some
random operations on it
>>> d = Dummy(1, a=5)
>>> d.foo() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
Now do the same as above but ask Dummy to print the activity
>>> d = Dummy(1, a=5, __quiet__=False) # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (1,), 'kwargs': {'a': 5}, 'prefix': []})
>>> d.foo() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
<deeputil.misc.Dummy object at ...>
>>> d.bar # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['bar']})
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
<deeputil.misc.Dummy object at ...>
'''
def _log(self, event, data):
if not self._quiet: print((self, event, data))
def __init__(self, *args, **kwargs):
self._prefix = kwargs.pop('__prefix__', [])
self._quiet = kwargs.pop('__quiet__', True)
self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
def __getattr__(self, attr):
if attr == '__wrapped__': raise AttributeError
self._log('__getattr__', dict(attr=attr))
return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)
def __call__(self, *args, **kwargs):
self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
'''
Caches result of a function
From: https://goo.gl/aXt4Qy
>>> import time
>>> @memoize
... def test(msg):
... # Processing for result that takes time
... time.sleep(1)
... return msg
>>>
>>> for i in range(5):
... start = time.time()
... test('calling memoized function')
... time_taken = time.time() - start
... # For first time it takes usual time
... if i == 0 and time_taken >= 1: print('ok')
... # Faster from the 2nd time
... elif i != 0 and time_taken <= 1: print('ok')
... else: print('NOT ok!')
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'''
class memodict(dict):
@wraps(f)
def __getitem__(self, *args):
return super(memodict, self).__getitem__(*args)
def __missing__(self, key):
self[key] = ret = f(key)
return ret
return memodict().__getitem__
@memoize
def load_object(imp_path):
'''
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
'''
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj
def grouper(n, iterable):
'''
Iterate over an iterator by chunks
'''
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
|
deep-compute/deeputil | deeputil/misc.py | memoize | python | def memoize(f):
'''
Caches result of a function
From: https://goo.gl/aXt4Qy
>>> import time
>>> @memoize
... def test(msg):
... # Processing for result that takes time
... time.sleep(1)
... return msg
>>>
>>> for i in range(5):
... start = time.time()
... test('calling memoized function')
... time_taken = time.time() - start
... # For first time it takes usual time
... if i == 0 and time_taken >= 1: print('ok')
... # Faster from the 2nd time
... elif i != 0 and time_taken <= 1: print('ok')
... else: print('NOT ok!')
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'''
class memodict(dict):
@wraps(f)
def __getitem__(self, *args):
return super(memodict, self).__getitem__(*args)
def __missing__(self, key):
self[key] = ret = f(key)
return ret
return memodict().__getitem__ | Caches result of a function
From: https://goo.gl/aXt4Qy
>>> import time
>>> @memoize
... def test(msg):
... # Processing for result that takes time
... time.sleep(1)
... return msg
>>>
>>> for i in range(5):
... start = time.time()
... test('calling memoized function')
... time_taken = time.time() - start
... # For first time it takes usual time
... if i == 0 and time_taken >= 1: print('ok')
... # Faster from the 2nd time
... elif i != 0 and time_taken <= 1: print('ok')
... else: print('NOT ok!')
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L546-L588 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8')
def get_timestamp(dt=None):
'''
Return current timestamp if @dt is None
else return timestamp of @dt.
>>> t = datetime.datetime(2015, 0o5, 21)
>>> get_timestamp(t)
1432166400
'''
if dt is None: dt = datetime.datetime.utcnow()
t = dt.utctimetuple()
return calendar.timegm(t)
def get_datetime(epoch):
'''
get datetime from an epoch timestamp
>>> get_datetime(1432188772)
datetime.datetime(2015, 5, 21, 6, 12, 52)
'''
t = time.gmtime(epoch)
dt = datetime.datetime(*t[:6])
return dt
def convert_ts(tt):
'''
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
'''
try:
ts = calendar.timegm(tt)
'''
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
'''
if ts < 0: ts = 0
except TypeError:
ts = None
return ts
#FIXME No unicode in python 3
def xcode(text, encoding='utf8', mode='ignore'):
'''
Converts unicode encoding to str
>>> xcode(b'hello')
b'hello'
>>> xcode('hello')
b'hello'
'''
return text.encode(encoding, mode) if isinstance(text, str) else text
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
'''
loc can be of the format http://<ip/domain>[:<port>]
eg:
http://localhost:8888
http://localhost/
return ip (str), port (int)
>>> parse_location('http://localhost/', 6379)
('localhost', 6379)
>>> parse_location('http://localhost:8888', 6379)
('localhost', 8888)
'''
parsed = urlparse(loc)
if ':' in parsed.netloc:
ip, port = parsed.netloc.split(':')
port = int(port)
else:
ip, port = parsed.netloc, default_port
return ip, port
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
'''
Return value for key. If not in cache or expired, return default
>>> c = ExpiringCache(10, default_timeout=1)
>>> c.put('a', 100)
>>> c.get('a')
100
>>> time.sleep(1)
>>> c.get('a')
'''
def get(self, key, default=None):
self.lookups += 1
try:
pos, val, expires = self.data[key]
except KeyError:
self.misses += 1
return default
if expires > time.time():
# cache entry still valid
self.hits += 1
# Not updating clock_refs to disable
# LRU logic as we just want expiry without LRU
# self.clock_refs[pos] = True
return val
else:
# cache entry has expired. Make sure the space in the cache can
# be recycled soon.
self.misses += 1
self.clock_refs[pos] = False
return default
def serialize_dict_keys(d, prefix=""):
"""returns all the keys in nested a dictionary.
>>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
['a', 'a.b', 'a.b.b', 'a.b.c']
"""
keys = []
for k, v in d.items():
fqk = '{}{}'.format(prefix, k)
keys.append(fqk)
if isinstance(v, dict):
keys.extend(serialize_dict_keys(v, prefix="{}.".format(fqk)))
return keys
import collections
class MarkValue(str):
pass
def flatten_dict(d,
parent_key='', sep='.',
ignore_under_prefixed=True,
mark_value=True):
'''
Flattens a nested dictionary
>>> from pprint import pprint
>>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
>>> fd = flatten_dict(d)
>>> pprint(fd)
{'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
'''
items = {}
for k in d:
if ignore_under_prefixed and k.startswith('__'): continue
v = d[k]
if mark_value and k.startswith('_') and not k.startswith('__'):
v = MarkValue(repr(v))
new_key = sep.join((parent_key, k)) if parent_key else k
if isinstance(v, collections.MutableMapping):
items.update(flatten_dict(v, new_key, sep=sep,
ignore_under_prefixed=True,
mark_value=True)
)
else:
items[new_key] = v
return items
def deepgetattr(obj, attr, default=AttributeError):
"""
Recurses through an attribute chain to get the ultimate value (obj/data/member/value)
from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
>>> class Universe(object):
... def __init__(self, galaxy):
... self.galaxy = galaxy
...
>>> class Galaxy(object):
... def __init__(self, solarsystem):
... self.solarsystem = solarsystem
...
>>> class SolarSystem(object):
... def __init__(self, planet):
... self.planet = planet
...
>>> class Planet(object):
... def __init__(self, name):
... self.name = name
...
>>> universe = Universe(Galaxy(SolarSystem(Planet('Earth'))))
>>> deepgetattr(universe, 'galaxy.solarsystem.planet.name')
'Earth'
>>> deepgetattr(universe, 'solarsystem.planet.name', default=TypeError)
<class 'TypeError'>
"""
try:
return reduce(getattr, attr.split('.'), obj)
except AttributeError:
if default is not AttributeError:
return default
raise
class AttrDict(dict):
'''
A dictionary with attribute-style access. It maps attribute access to
the real dictionary.
from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
In a plain old dict, we can store values against keys like this
>>> d = {}
>>> d['a'] = 10
>>> d['a']
10
However, sometimes it is convenient to interact with a dict as
though it is an object. eg: d.a = 10 and access as d.a, this does
not work in a dict
>>> d = {}
>>> d.a = 10
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'dict' object has no attribute 'a'
This is where you can use an `AttrDict`
>>> d = AttrDict()
>>> d.a = 1
>>> d.a
1
>>> d
AttrDict({'a': 1})
>>> d['b'] = 2
>>> d['b']
2
>>> d.b
2
>>> del d['a']
>>> d
AttrDict({'b': 2})
>>> dd = d.copy()
>>> dd
AttrDict({'b': 2})
'''
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
def __getstate__(self):
return list(self.__dict__.items())
def __setstate__(self, items):
for key, val in items:
self.__dict__[key] = val
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))
def __setitem__(self, key, value):
return super(AttrDict, self).__setitem__(key, value)
def __getitem__(self, name):
item = super(AttrDict, self).__getitem__(name)
return AttrDict(item) if isinstance(item, dict) else item
def __delitem__(self, name):
return super(AttrDict, self).__delitem__(name)
__getattr__ = __getitem__
__setattr__ = __setitem__
def copy(self):
ch = AttrDict(self)
return ch
class IterAsFile(object):
'''
Wraps an iterator in a file-like API,
i.e. if you have a generator producing a list of strings,
this could make it look like a file.
from: http://stackoverflow.com/a/12593795/332313
>>> def str_fn():
... for c in 'a', 'b', 'c':
... yield c * 3
...
>>> IAF = IterAsFile(str_fn())
>>> IAF.read(6)
'aaabbb'
>>> IAF.read(4)
'ccc'
>>> IAF.read(2)
'''
def __init__(self, it):
self.it = it
self.next_chunk = ''
def _grow_chunk(self):
self.next_chunk = self.next_chunk + next(self.it)
def read(self, n):
if self.next_chunk == None:
return None
try:
while len(self.next_chunk) < n:
self._grow_chunk()
rv = self.next_chunk[:n]
self.next_chunk = self.next_chunk[n:]
return rv
except StopIteration:
rv = self.next_chunk
self.next_chunk = None
return rv
class LineReader(object):
def __init__(self, it, linesep='\n'):
self.parts = []
self.it = it
self.linesep = linesep
def __iter__(self):
for chunk in self.it:
loc = end_loc = 0
while loc <= len(chunk):
end_loc = chunk.find(self.linesep, loc)
if end_loc == -1:
self.parts.append(chunk[loc:])
break
else:
yield ''.join(self.parts) + chunk[loc:end_loc+1]
self.parts = []
loc = end_loc + 1
if self.parts:
yield ''.join(self.parts)
from .priority_dict import PriorityDict
class ExpiringCounter(object):
'''
>>> c = ExpiringCounter(duration=1)
>>> c.put('name')
>>> c.get('name')
1
>>> time.sleep(2)
>>> c.get('name')
0
>>> c.put('name')
>>> c.put('name')
>>> c.get('name')
2
'''
DEFAULT_DURATION = 60 #seconds
def __init__(self, duration=DEFAULT_DURATION):
self.duration = duration
self.latest_ts = int(time.time())
self.counts = PriorityDict()
self.count = 0
self.history = {}
def put(self, key):
self.update()
ts = int(time.time())
hcounts = self.history.get(ts, {})
hcounts[key] = hcounts.get(key, 0) + 1
self.history[ts] = hcounts
self.counts[key] = self.counts.get(key, 0) + 1
self.count += 1
def get(self, key):
self.update()
return self.counts.get(key, 0)
def update(self):
ts = int(time.time() - self.duration)
ts_keys = [x for x in self.history if x < ts]
for ts_key in ts_keys:
hcounts = self.history.pop(ts_key)
for key, count in list(hcounts.items()):
kcount = self.counts[key]
kcount -= count
if kcount <= 0: del self.counts[key]
else: self.counts[key] = kcount
self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
'''
Set the limit on number of file descriptors
that this process can open.
'''
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (n, n))
return True
except ValueError:
return False
class Dummy(object):
'''
Abstraction that creates a dummy object
that does no-operations on method invocations
but logs all interactions
Let us create a dummy object and perform some
random operations on it
>>> d = Dummy(1, a=5)
>>> d.foo() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
<deeputil.misc.Dummy object at ...>
Now do the same as above but ask Dummy to print the activity
>>> d = Dummy(1, a=5, __quiet__=False) # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (1,), 'kwargs': {'a': 5}, 'prefix': []})
>>> d.foo() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
<deeputil.misc.Dummy object at ...>
>>> d.bar # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['bar']})
<deeputil.misc.Dummy object at ...>
>>> d.foo.bar() # doctest: +ELLIPSIS
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
(<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
(<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
<deeputil.misc.Dummy object at ...>
'''
def _log(self, event, data):
if not self._quiet: print((self, event, data))
def __init__(self, *args, **kwargs):
self._prefix = kwargs.pop('__prefix__', [])
self._quiet = kwargs.pop('__quiet__', True)
self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
def __getattr__(self, attr):
if attr == '__wrapped__': raise AttributeError
self._log('__getattr__', dict(attr=attr))
return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)
def __call__(self, *args, **kwargs):
self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
'''
Caches result of a function
From: https://goo.gl/aXt4Qy
>>> import time
>>> @memoize
... def test(msg):
... # Processing for result that takes time
... time.sleep(1)
... return msg
>>>
>>> for i in range(5):
... start = time.time()
... test('calling memoized function')
... time_taken = time.time() - start
... # For first time it takes usual time
... if i == 0 and time_taken >= 1: print('ok')
... # Faster from the 2nd time
... elif i != 0 and time_taken <= 1: print('ok')
... else: print('NOT ok!')
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'calling memoized function'
ok
'''
class memodict(dict):
@wraps(f)
def __getitem__(self, *args):
return super(memodict, self).__getitem__(*args)
def __missing__(self, key):
self[key] = ret = f(key)
return ret
return memodict().__getitem__
@memoize
def load_object(imp_path):
'''
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
'''
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj
def grouper(n, iterable):
'''
Iterate over an iterator by chunks
'''
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
|
deep-compute/deeputil | deeputil/misc.py | load_object | python | def load_object(imp_path):
'''
Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True
'''
module_name, obj_name = imp_path.split('.', 1)
module = __import__(module_name)
obj = attrgetter(obj_name)(module)
return obj | Given a python import path, load the object
For dynamic imports in a program
>>> isdir = load_object('os.path.isdir')
>>> isdir('/tmp')
True
>>> num = load_object('numbers.Number')
>>> isinstance('x', num)
False
>>> isinstance(777, num)
True | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L592-L611 | null | import time
import datetime
import calendar
import os
import inspect
import random
import string
import itertools
from six import iteritems as items
import sys
from operator import attrgetter
import binascii
from functools import reduce, wraps
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplicaton
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8')
def get_timestamp(dt=None):
'''
Return current timestamp if @dt is None
else return timestamp of @dt.
>>> t = datetime.datetime(2015, 0o5, 21)
>>> get_timestamp(t)
1432166400
'''
if dt is None: dt = datetime.datetime.utcnow()
t = dt.utctimetuple()
return calendar.timegm(t)
def get_datetime(epoch):
'''
get datetime from an epoch timestamp
>>> get_datetime(1432188772)
datetime.datetime(2015, 5, 21, 6, 12, 52)
'''
t = time.gmtime(epoch)
dt = datetime.datetime(*t[:6])
return dt
def convert_ts(tt):
'''
tt: time.struct_time(tm_year=2012, tm_mon=10, tm_mday=23, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=1, tm_yday=297, tm_isdst=-1)
>>> tt = time.strptime("23.10.2012", "%d.%m.%Y")
>>> convert_ts(tt)
1350950400
tt: time.struct_time(tm_year=1513, tm_mon=1, tm_mday=1, tm_hour=0,
tm_min=0, tm_sec=0, tm_wday=2, tm_yday=1, tm_isdst=0)
>>> tt = time.strptime("1.1.1513", "%d.%m.%Y")
>>> convert_ts(tt)
0
>>> tt = 12
>>> convert_ts(tt)
'''
try:
ts = calendar.timegm(tt)
'''
As from the github issue https://github.com/prashanthellina/rsslurp/issues/680,
there are some cases where we might get timestamp in negative values, so consider
0 if the converted timestamp is negative value.
'''
if ts < 0: ts = 0
except TypeError:
ts = None
return ts
#FIXME No unicode in python 3
def xcode(text, encoding='utf8', mode='ignore'):
'''
Converts unicode encoding to str
>>> xcode(b'hello')
b'hello'
>>> xcode('hello')
b'hello'
'''
return text.encode(encoding, mode) if isinstance(text, str) else text
# For python 2 & 3 compatiblity
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_location(loc, default_port):
    '''
    Split a location of the form http://<ip/domain>[:<port>] into
    (host, port), falling back to @default_port when no port is given.

    eg:
        http://localhost:8888
        http://localhost/

    >>> parse_location('http://localhost/', 6379)
    ('localhost', 6379)
    >>> parse_location('http://localhost:8888', 6379)
    ('localhost', 8888)
    '''
    netloc = urlparse(loc).netloc
    if ':' not in netloc:
        return netloc, default_port
    host, port = netloc.split(':')
    return host, int(port)
from repoze.lru import ExpiringLRUCache
class ExpiringCache(ExpiringLRUCache):
    '''
    An expiry-only cache on top of repoze.lru's ExpiringLRUCache: hits do
    not refresh an entry's LRU position, so entries live strictly by age.

    Return value for key. If not in cache or expired, return default
    >>> c = ExpiringCache(10, default_timeout=1)
    >>> c.put('a', 100)
    >>> c.get('a')
    100
    >>> time.sleep(1)
    >>> c.get('a')
    '''
    def get(self, key, default=None):
        # Look up @key; return @default when absent or expired.
        # lookups/hits/misses are the base class's stats counters.
        self.lookups += 1
        try:
            # base-class storage: key -> (clock position, value, expiry time)
            pos, val, expires = self.data[key]
        except KeyError:
            self.misses += 1
            return default
        if expires > time.time():
            # cache entry still valid
            self.hits += 1
            # Not updating clock_refs to disable
            # LRU logic as we just want expiry without LRU
            # self.clock_refs[pos] = True
            return val
        else:
            # cache entry has expired. Make sure the space in the cache can
            # be recycled soon.
            self.misses += 1
            self.clock_refs[pos] = False
            return default
def serialize_dict_keys(d, prefix=""):
    """Return every key path in a nested dictionary, dot-separated.

    >>> sorted(serialize_dict_keys({"a": {"b": {"c": 1, "b": 2} } }))
    ['a', 'a.b', 'a.b.b', 'a.b.c']
    """
    paths = []
    for key, value in d.items():
        path = '{}{}'.format(prefix, key)
        paths.append(path)
        if isinstance(value, dict):
            # recurse with the current path as the new prefix
            paths.extend(serialize_dict_keys(value, prefix='{}.'.format(path)))
    return paths
import collections
# Marker subclass of str: flatten_dict wraps the repr() of values stored
# under single-underscore keys in this type, so callers can distinguish
# "marked" values from ordinary strings via isinstance().
class MarkValue(str):
    pass
def flatten_dict(d,
                 parent_key='', sep='.',
                 ignore_under_prefixed=True,
                 mark_value=True):
    '''
    Flatten a nested dictionary into a single-level dict whose keys are
    the @sep-joined paths of the original keys.

    Keys starting with '__' are dropped when @ignore_under_prefixed is
    True; values under single-underscore keys are repr()'d and wrapped
    in MarkValue when @mark_value is True.

    >>> from pprint import pprint
    >>> d = {"a": {"b": {"c": 1, "b": 2, "__d": 'ignore', "_e": "mark"} } }
    >>> fd = flatten_dict(d)
    >>> pprint(fd)
    {'a.b._e': "'mark'", 'a.b.b': 2, 'a.b.c': 1}
    '''
    # Fix: collections.MutableMapping was an alias removed in Python 3.10;
    # collections.abc is its supported home. Local import keeps this fix
    # self-contained (available since Python 3.3).
    from collections.abc import MutableMapping

    items = {}
    for k in d:
        if ignore_under_prefixed and k.startswith('__'): continue
        v = d[k]
        if mark_value and k.startswith('_') and not k.startswith('__'):
            v = MarkValue(repr(v))
        new_key = sep.join((parent_key, k)) if parent_key else k
        if isinstance(v, MutableMapping):
            items.update(flatten_dict(v, new_key, sep=sep,
                                      ignore_under_prefixed=True,
                                      mark_value=True)
                         )
        else:
            items[new_key] = v
    return items
def deepgetattr(obj, attr, default=AttributeError):
    """
    Follow a dotted attribute chain to its final value
    (obj/data/member/value).

    If any link in the chain is missing, raise AttributeError unless a
    @default other than the AttributeError class itself was supplied,
    in which case return it.

    from: http://pingfive.typepad.com/blog/2010/04/deep-getattr-python-function.html
    """
    target = obj
    try:
        for name in attr.split('.'):
            target = getattr(target, name)
    except AttributeError:
        if default is AttributeError:
            raise
        return default
    return target
class AttrDict(dict):
    '''
    A dictionary with attribute-style access. It maps attribute access to
    the real dictionary.
    from: http://code.activestate.com/recipes/473786-dictionary-with-attribute-style-access/
    In a plain old dict, we can store values against keys like this
    >>> d = {}
    >>> d['a'] = 10
    >>> d['a']
    10
    However, sometimes it is convenient to interact with a dict as
    though it is an object. eg: d.a = 10 and access as d.a, this does
    not work in a dict
    >>> d = {}
    >>> d.a = 10
    Traceback (most recent call last):
    File "<stdin>", line 1, in <module>
    AttributeError: 'dict' object has no attribute 'a'
    This is where you can use an `AttrDict`
    >>> d = AttrDict()
    >>> d.a = 1
    >>> d.a
    1
    >>> d
    AttrDict({'a': 1})
    >>> d['b'] = 2
    >>> d['b']
    2
    >>> d.b
    2
    >>> del d['a']
    >>> d
    AttrDict({'b': 2})
    >>> dd = d.copy()
    >>> dd
    AttrDict({'b': 2})
    '''
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)

    def __getstate__(self):
        # Pickle support: capture instance __dict__ entries.
        # NOTE(review): __setattr__ below routes attribute writes into the
        # mapping itself, so __dict__ is normally empty here — confirm that
        # pickling round-trips the dict contents as intended.
        return list(self.__dict__.items())

    def __setstate__(self, items):
        # Pickle support: restore captured entries into the instance __dict__.
        for key, val in items:
            self.__dict__[key] = val

    def __repr__(self):
        # e.g. AttrDict({'a': 1})
        return '{}({})'.format(self.__class__.__name__, dict.__repr__(self))

    def __setitem__(self, key, value):
        return super(AttrDict, self).__setitem__(key, value)

    def __getitem__(self, name):
        # Wrap nested plain dicts on access so chained attribute-style
        # lookups (d.a.b) keep working on the nested levels too.
        item = super(AttrDict, self).__getitem__(name)
        return AttrDict(item) if isinstance(item, dict) else item

    def __delitem__(self, name):
        return super(AttrDict, self).__delitem__(name)

    # Attribute access/assignment is aliased straight to item
    # access/assignment.
    # NOTE(review): missing-attribute access therefore raises KeyError,
    # not AttributeError, which can surprise hasattr()/getattr() callers.
    __getattr__ = __getitem__
    __setattr__ = __setitem__

    def copy(self):
        # Shallow copy that preserves the AttrDict type
        # (plain dict.copy() would return a dict).
        ch = AttrDict(self)
        return ch
class IterAsFile(object):
    '''
    Wraps an iterator in a file-like API,
    i.e. if you have a generator producing a list of strings,
    this could make it look like a file.
    from: http://stackoverflow.com/a/12593795/332313

    >>> def str_fn():
    ...     for c in 'a', 'b', 'c':
    ...         yield c * 3
    ...
    >>> IAF = IterAsFile(str_fn())
    >>> IAF.read(6)
    'aaabbb'
    >>> IAF.read(4)
    'ccc'
    >>> IAF.read(2)
    '''
    def __init__(self, it):
        self.it = it
        # Data buffered but not yet returned; becomes None once the
        # iterator is exhausted and the buffer has been drained.
        self.next_chunk = ''

    def _grow_chunk(self):
        # Pull one more item from the underlying iterator into the buffer.
        self.next_chunk = self.next_chunk + next(self.it)

    def read(self, n):
        """Return up to @n characters; None once fully exhausted."""
        # Fix: identity comparison ('is None') instead of '== None'.
        if self.next_chunk is None:
            return None
        try:
            while len(self.next_chunk) < n:
                self._grow_chunk()
            rv = self.next_chunk[:n]
            self.next_chunk = self.next_chunk[n:]
            return rv
        except StopIteration:
            # Iterator ran dry: hand back whatever is left (may be
            # shorter than @n) and mark the stream as exhausted.
            rv = self.next_chunk
            self.next_chunk = None
            return rv
class LineReader(object):
    '''
    Re-chunk an iterator of arbitrary string chunks into complete lines
    (separator included). A trailing fragment without a final separator
    is yielded as the last item.
    '''
    def __init__(self, it, linesep='\n'):
        # Accumulated pieces of the line currently in progress.
        self.parts = []
        self.it = it
        self.linesep = linesep

    def __iter__(self):
        for chunk in self.it:
            loc = end_loc = 0
            while loc <= len(chunk):
                end_loc = chunk.find(self.linesep, loc)
                if end_loc == -1:
                    # No separator in the rest of this chunk; stash the
                    # fragment and wait for the next chunk.
                    self.parts.append(chunk[loc:])
                    break
                else:
                    yield ''.join(self.parts) + chunk[loc:end_loc + 1]
                    self.parts = []
                    loc = end_loc + 1
        # Fix: only yield a trailing line when there is actual content.
        # The original tested `if self.parts:` which was truthy for
        # parts == [''] (input ending exactly on a separator) and
        # yielded a spurious empty string.
        tail = ''.join(self.parts)
        if tail:
            yield tail
from .priority_dict import PriorityDict
class ExpiringCounter(object):
    '''
    A per-key counter whose increments expire after a sliding window of
    @duration seconds.

    >>> c = ExpiringCounter(duration=1)
    >>> c.put('name')
    >>> c.get('name')
    1
    >>> time.sleep(2)
    >>> c.get('name')
    0
    >>> c.put('name')
    >>> c.put('name')
    >>> c.get('name')
    2
    '''
    # Window length in seconds after which an increment stops counting.
    DEFAULT_DURATION = 60 #seconds

    def __init__(self, duration=DEFAULT_DURATION):
        self.duration = duration
        # NOTE(review): latest_ts appears to be set but never read — confirm.
        self.latest_ts = int(time.time())
        # key -> current (non-expired) count
        self.counts = PriorityDict()
        # total of all non-expired increments across keys
        self.count = 0
        # epoch second -> {key: increments recorded in that second};
        # used by update() to age counts out of the window.
        self.history = {}

    def put(self, key):
        # Record one increment for @key at the current second.
        self.update()
        ts = int(time.time())
        hcounts = self.history.get(ts, {})
        hcounts[key] = hcounts.get(key, 0) + 1
        self.history[ts] = hcounts
        self.counts[key] = self.counts.get(key, 0) + 1
        self.count += 1

    def get(self, key):
        # Current count for @key within the window (0 if expired/unknown).
        self.update()
        return self.counts.get(key, 0)

    def update(self):
        # Expire every history bucket older than the window and subtract
        # its increments from the running counts.
        ts = int(time.time() - self.duration)
        ts_keys = [x for x in self.history if x < ts]
        for ts_key in ts_keys:
            hcounts = self.history.pop(ts_key)
            for key, count in list(hcounts.items()):
                kcount = self.counts[key]
                kcount -= count
                if kcount <= 0: del self.counts[key]
                else: self.counts[key] = kcount
                self.count -= count
#TODO Examples and Readme.md
import resource
def set_file_limits(n):
    '''
    Set the limit on the number of file descriptors this process may
    open (both soft and hard to @n). Returns True on success, False
    when the OS rejects the new limit.
    '''
    limits = (n, n)
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, limits)
    except ValueError:
        return False
    return True
class Dummy(object):
    '''
    Abstraction that creates a dummy object
    that does no-operations on method invocations
    but logs all interactions

    Let us create a dummy object and perform some
    random operations on it
    >>> d = Dummy(1, a=5)
    >>> d.foo() # doctest: +ELLIPSIS
    <deeputil.misc.Dummy object at ...>
    >>> d.bar() # doctest: +ELLIPSIS
    <deeputil.misc.Dummy object at ...>
    >>> d.foo.bar() # doctest: +ELLIPSIS
    <deeputil.misc.Dummy object at ...>

    Now do the same as above but ask Dummy to print the activity
    >>> d = Dummy(1, a=5, __quiet__=False) # doctest: +ELLIPSIS
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (1,), 'kwargs': {'a': 5}, 'prefix': []})
    >>> d.foo() # doctest: +ELLIPSIS
    (<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
    (<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
    <deeputil.misc.Dummy object at ...>
    >>> d.bar # doctest: +ELLIPSIS
    (<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['bar']})
    <deeputil.misc.Dummy object at ...>
    >>> d.foo.bar() # doctest: +ELLIPSIS
    (<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'foo'})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo']})
    (<deeputil.misc.Dummy object at ...>, '__getattr__', {'attr': 'bar'})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
    (<deeputil.misc.Dummy object at ...>, '__call__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
    (<deeputil.misc.Dummy object at ...>, '__init__', {'args': (), 'kwargs': {}, 'prefix': ['foo', 'bar']})
    <deeputil.misc.Dummy object at ...>
    '''
    def _log(self, event, data):
        # Print the interaction record unless quiet mode is on.
        # The printed tuple format is pinned by the doctests above.
        if not self._quiet: print((self, event, data))

    def __init__(self, *args, **kwargs):
        # __prefix__ tracks the attribute path that produced this Dummy;
        # __quiet__ suppresses the interaction log (default True).
        self._prefix = kwargs.pop('__prefix__', [])
        self._quiet = kwargs.pop('__quiet__', True)
        self._log('__init__', dict(args=args, kwargs=kwargs, prefix=self._prefix))

    def __getattr__(self, attr):
        # Raise for '__wrapped__' — presumably so introspection helpers
        # (inspect/functools unwrapping) don't treat this object as a
        # wrapper and recurse; TODO confirm.
        if attr == '__wrapped__': raise AttributeError
        self._log('__getattr__', dict(attr=attr))
        return Dummy(__prefix__=self._prefix + [attr], __quiet__=self._quiet)

    def __call__(self, *args, **kwargs):
        # Calling a Dummy is a no-op that yields a fresh Dummy with the
        # same attribute path, so chained calls keep working.
        self._log('__call__', dict(args=args, kwargs=kwargs, prefix=self._prefix))
        return Dummy(__prefix__=self._prefix, __quiet__=self._quiet)
def memoize(f):
    '''
    Cache the results of @f keyed on its positional arguments.
    From: https://goo.gl/aXt4Qy (generalized)

    Fix: the previous dict-subclass implementation
    (``memodict().__getitem__``) only supported single-argument
    functions — any function taking two or more arguments raised
    TypeError on call. This version works for any arity; arguments
    must be hashable.

    >>> calls = []
    >>> @memoize
    ... def double(x):
    ...     calls.append(x)
    ...     return x * 2
    >>> double(2), double(2), calls
    (4, 4, [2])
    '''
    cache = {}

    @wraps(f)
    def wrapper(*args):
        try:
            return cache[args]
        except KeyError:
            # First time these args are seen: compute and remember.
            cache[args] = ret = f(*args)
            return ret
    return wrapper
def grouper(n, iterable):
    '''
    Iterate over @iterable in tuples ("chunks") of size @n; the final
    chunk may be shorter.

    Fix: the ``@memoize`` decorator previously applied here was a bug —
    memoize's cache only handled single-argument functions (grouper
    takes two, so every call raised TypeError), and caching a generator
    would hand repeat callers the same exhausted iterator anyway.
    '''
    it = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(it, n))
        if not chunk:
            return
        yield chunk
|
deep-compute/deeputil | deeputil/keep_running.py | keeprunning | python | def keeprunning(wait_secs=0, exit_on_success=False,
on_success=None, on_error=None, on_done=None):
'''
Example 1: dosomething needs to run until completion condition
without needing to have a loop in its code. Also, when error
happens, we should NOT terminate execution
>>> from deeputil import AttrDict
>>> @keeprunning(wait_secs=1)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
AttrDict({'i': 7})
Done
Example 2: In case you want to log exceptions while
dosomething keeps running, or perform any other action
when an exceptions arise
>>> def some_error(__exc__):
... print (__exc__)
...
>>> @keeprunning(on_error=some_error)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
division by zero
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
division by zero
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
division by zero
AttrDict({'i': 7})
Done
Example 3: Full set of arguments that can be passed in @keeprunning()
with class implementations
>>> # Class that has some class variables
... class Demo(object):
... SUCCESS_MSG = 'Yay!!'
... DONE_MSG = 'STOPPED AT NOTHING!'
... ERROR_MSG = 'Error'
...
... # Functions to be called by @keeprunning
... def success(self):
... print((self.SUCCESS_MSG))
...
... def failure(self, __exc__):
... print((self.ERROR_MSG, __exc__))
...
... def task_done(self):
... print((self.DONE_MSG))
...
... #Actual use of keeprunning with all arguments passed
... @keeprunning(wait_secs=1, exit_on_success=False,
... on_success=success, on_error=failure, on_done=task_done)
... def dosomething(self, state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... # create an error condition
... 1 / 0
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> demo = Demo()
>>> state = AttrDict(i=0)
>>> demo.dosomething(state)
AttrDict({'i': 1})
Yay!!
AttrDict({'i': 2})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 3})
Yay!!
AttrDict({'i': 4})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 5})
Yay!!
AttrDict({'i': 6})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 7})
Done
STOPPED AT NOTHING!
'''
def decfn(fn):
def _call_callback(cb, fargs):
if not cb: return
# get the getargspec fn in inspect module (python 2/3 support)
G = getattr(inspect, 'getfullargspec', getattr(inspect, 'getargspec'))
cb_args = G(cb).args
cb_args = dict([(a, fargs.get(a, None)) for a in cb_args])
cb(**cb_args)
def _fn(*args, **kwargs):
fargs = inspect.getcallargs(fn, *args, **kwargs)
fargs.update(dict(__fn__=fn, __exc__=None))
while 1:
try:
fn(*args, **kwargs)
if exit_on_success: break
except (SystemExit, KeyboardInterrupt):
raise
except KeepRunningTerminate:
break
except Exception as exc:
fargs.update(dict(__exc__=exc))
_call_callback(on_error, fargs)
fargs.update(dict(__exc__=None))
if wait_secs: time.sleep(wait_secs)
continue
_call_callback(on_success, fargs)
_call_callback(on_done, fargs)
return _fn
return decfn | Example 1: dosomething needs to run until completion condition
without needing to have a loop in its code. Also, when error
happens, we should NOT terminate execution
>>> from deeputil import AttrDict
>>> @keeprunning(wait_secs=1)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
AttrDict({'i': 7})
Done
Example 2: In case you want to log exceptions while
dosomething keeps running, or perform any other action
when an exceptions arise
>>> def some_error(__exc__):
... print (__exc__)
...
>>> @keeprunning(on_error=some_error)
... def dosomething(state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... 1 / 0 # create an error condition
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> state = AttrDict(i=0)
>>> dosomething(state)
AttrDict({'i': 1})
AttrDict({'i': 2})
Error happened
division by zero
AttrDict({'i': 3})
AttrDict({'i': 4})
Error happened
division by zero
AttrDict({'i': 5})
AttrDict({'i': 6})
Error happened
division by zero
AttrDict({'i': 7})
Done
Example 3: Full set of arguments that can be passed in @keeprunning()
with class implementations
>>> # Class that has some class variables
... class Demo(object):
... SUCCESS_MSG = 'Yay!!'
... DONE_MSG = 'STOPPED AT NOTHING!'
... ERROR_MSG = 'Error'
...
... # Functions to be called by @keeprunning
... def success(self):
... print((self.SUCCESS_MSG))
...
... def failure(self, __exc__):
... print((self.ERROR_MSG, __exc__))
...
... def task_done(self):
... print((self.DONE_MSG))
...
... #Actual use of keeprunning with all arguments passed
... @keeprunning(wait_secs=1, exit_on_success=False,
... on_success=success, on_error=failure, on_done=task_done)
... def dosomething(self, state):
... state.i += 1
... print (state)
... if state.i % 2 == 0:
... print("Error happened")
... # create an error condition
... 1 / 0
... if state.i >= 7:
... print ("Done")
... raise keeprunning.terminate
...
>>> demo = Demo()
>>> state = AttrDict(i=0)
>>> demo.dosomething(state)
AttrDict({'i': 1})
Yay!!
AttrDict({'i': 2})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 3})
Yay!!
AttrDict({'i': 4})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 5})
Yay!!
AttrDict({'i': 6})
Error happened
('Error', ZeroDivisionError('division by zero'))
AttrDict({'i': 7})
Done
STOPPED AT NOTHING! | train | https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/keep_running.py#L10-L171 | null | '''Keeps running a function running even on error
'''
import time
import inspect
class KeepRunningTerminate(Exception):
pass
keeprunning.terminate = KeepRunningTerminate
|
OCHA-DAP/hdx-python-country | setup.py | script_dir | python | def script_dir(pyobject, follow_symlinks=True):
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
path = abspath(sys.executable)
else:
path = inspect.getabsfile(pyobject)
if follow_symlinks:
path = realpath(path)
return dirname(path) | Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/setup.py#L11-L27 | null | # -*- coding: utf-8 -*-
import inspect
import sys
from codecs import open
from os.path import join, abspath, realpath, dirname
from setuptools import setup, find_packages
# Sadly we cannot use the location here because of the typing module which isn't in Python < 3.5
def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
"""Get current script's directory and then append a filename
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended
"""
return join(script_dir(pyobject, follow_symlinks), filename)
def get_version():
version_file = open(script_dir_plus_file(join('src', 'hdx', 'location', 'version.txt'), get_version),
encoding='utf-8')
return version_file.read().strip()
def get_readme():
readme_file = open(script_dir_plus_file('README.rst', get_readme), encoding='utf-8')
return readme_file.read()
requirements = ['hdx-python-utilities>=1.6.9',
'libhxl==4.5.1;python_version<"3"',
'libhxl>=4.13.2:python_version>="3"']
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
]
setup(
name='hdx-python-country',
description='HDX Python country mapping utilities',
license='MIT',
url='https://github.com/OCHA-DAP/hdx-python-country',
version=get_version(),
author='Michael Rans',
author_email='rans@email.com',
keywords=['HDX', 'location', 'country code', 'country', 'iso 3166', 'iso2', 'iso3', 'region'],
long_description=get_readme(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
include_package_data=True,
setup_requires=['pytest-runner'],
tests_require=['pytest'],
zip_safe=True,
classifiers=classifiers,
install_requires=requirements,
)
|
OCHA-DAP/hdx-python-country | setup.py | script_dir_plus_file | python | def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
return join(script_dir(pyobject, follow_symlinks), filename) | Get current script's directory and then append a filename
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/setup.py#L30-L41 | [
"def script_dir(pyobject, follow_symlinks=True):\n \"\"\"Get current script's directory\n\n Args:\n pyobject (Any): Any Python object in the script\n follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.\n\n Returns:\n str: Current script's directory\n \"\"\"\n if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze\n path = abspath(sys.executable)\n else:\n path = inspect.getabsfile(pyobject)\n if follow_symlinks:\n path = realpath(path)\n return dirname(path)\n"
] | # -*- coding: utf-8 -*-
import inspect
import sys
from codecs import open
from os.path import join, abspath, realpath, dirname
from setuptools import setup, find_packages
# Sadly we cannot use the location here because of the typing module which isn't in Python < 3.5
def script_dir(pyobject, follow_symlinks=True):
"""Get current script's directory
Args:
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory
"""
if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze
path = abspath(sys.executable)
else:
path = inspect.getabsfile(pyobject)
if follow_symlinks:
path = realpath(path)
return dirname(path)
def get_version():
version_file = open(script_dir_plus_file(join('src', 'hdx', 'location', 'version.txt'), get_version),
encoding='utf-8')
return version_file.read().strip()
def get_readme():
readme_file = open(script_dir_plus_file('README.rst', get_readme), encoding='utf-8')
return readme_file.read()
requirements = ['hdx-python-utilities>=1.6.9',
'libhxl==4.5.1;python_version<"3"',
'libhxl>=4.13.2:python_version>="3"']
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
]
setup(
name='hdx-python-country',
description='HDX Python country mapping utilities',
license='MIT',
url='https://github.com/OCHA-DAP/hdx-python-country',
version=get_version(),
author='Michael Rans',
author_email='rans@email.com',
keywords=['HDX', 'location', 'country code', 'country', 'iso 3166', 'iso2', 'iso3', 'region'],
long_description=get_readme(),
packages=find_packages(where='src'),
package_dir={'': 'src'},
include_package_data=True,
setup_requires=['pytest-runner'],
tests_require=['pytest'],
zip_safe=True,
classifiers=classifiers,
install_requires=requirements,
)
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country._add_countriesdata | python | def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid | Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L45-L104 | null | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
@classmethod
def set_countriesdata(cls, countries):
# type: (str) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None
"""
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
# type: (bool) -> List[Dict[Dict]]
"""
Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries
"""
if cls._countriesdata is None:
countries = None
if use_live:
try:
countries = hxl.data(cls._ochaurl)
except IOError:
logger.exception('Download from OCHA feed failed! Falling back to stored file.')
if countries is None:
countries = hxl.data(
script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
Country), allow_local=True)
cls.set_countriesdata(countries)
return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
"""
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Look up the preferred country name by ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country name
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name or None if not found
    """
    countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None if countryinfo is None else countryinfo.get('#country+name+preferred')
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Translate an ISO3 code into its ISO2 code

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code or None if not found
    """
    # The iso2iso3 table maps in both directions (keys are 2 or 3 chars)
    result = cls.countriesdata(use_live=use_live)['iso2iso3'].get(iso3.upper())
    if result is None and exception is not None:
        raise exception
    return result
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Translate an ISO2 code into its ISO3 code

    Args:
        iso2 (str): ISO2 code for which to get ISO3 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code or None if not found
    """
    # The iso2iso3 table maps in both directions (keys are 2 or 3 chars)
    result = cls.countriesdata(use_live=use_live)['iso2iso3'].get(iso2.upper())
    if result is None and exception is not None:
        raise exception
    return result
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Look up country information by ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country information
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information or None if not found
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    return None if iso3 is None else cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Look up the preferred country name by ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country name
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name or None if not found
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    # use_live is not forwarded here: the data is already cached by the call above
    return cls.get_country_name_from_iso3(iso3, exception=exception)
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
    """Get M49 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get M49 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[int]: M49 code or None if not found
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Uppercase the key: the m49iso3 mapping stores ISO3 keys uppercased and
    # every sibling lookup (eg. get_iso2_from_iso3) already accepts lowercase input.
    m49 = countriesdata['m49iso3'].get(iso3.upper())
    if m49 is not None:
        return m49
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Translate an M49 numeric code into its ISO3 code

    Args:
        m49 (int): M49 numeric code for which to get ISO3 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code or None if not found
    """
    result = cls.countriesdata(use_live=use_live)['m49iso3'].get(m49)
    if result is None and exception is not None:
        raise exception
    return result
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Look up country information by M49 numeric code

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information or None if not found
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    # use_live is not forwarded here: the data is already cached by the call above
    return cls.get_country_info_from_iso3(iso3, exception=exception)
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Look up the preferred country name by M49 numeric code

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name or None if not found
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    # use_live is not forwarded here: the data is already cached by the call above
    return cls.get_country_name_from_iso3(iso3, exception=exception)
@classmethod
def expand_countryname_abbrevs(cls, country):
    # type: (str) -> List[str]
    """Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)

    Args:
        country (str): Country with abbreviation(s) to expand

    Returns:
        List[str]: Uppercase country names with abbreviation(s) expanded in various ways
    """
    def replace_ensure_space(word, replace, replacement):
        # The replacement carries a trailing space so a following word stays separated.
        # Collapse the double space this creates mid-string ('  ' -> ' ') and strip the
        # trailing one when the abbreviation was at the end. (The original chained a
        # no-op .replace(' ', ' ') here, leaving double spaces in expanded names.)
        return word.replace(replace, '%s ' % replacement).replace('  ', ' ').strip()
    countryupper = country.upper()
    # Single-expansion abbreviations are applied unconditionally, in place
    for abbreviation in cls.abbreviations:
        countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
    candidates = [countryupper]
    # Ambiguous abbreviations fan out into one candidate per possible expansion
    for abbreviation in cls.multiple_abbreviations:
        if abbreviation in countryupper:
            for expanded in cls.multiple_abbreviations[abbreviation]:
                candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
    return candidates
@classmethod
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # Drop any qualifier after a comma or colon (eg. 'CONGO, DEM. REP.')
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Remove parenthesised text; raw string fixes the invalid '\(' escape sequence
    regex = re.compile(r'\(.+?\)')
    countryupper = regex.sub('', countryupper)
    # Build the list of descriptive words to strip, starting from the static
    # simplifications and adding every known abbreviation expansion
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        # Keep only the first remaining word as the simplified name
        countryupper = countryupper_words[0]
    if countryupper:
        words.remove(countryupper)
    return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for a country. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    candidate = country.upper()
    # 2 or 3 letter inputs may already be ISO codes
    if len(candidate) == 3:
        if candidate in countriesdata['countries']:
            return candidate
    elif len(candidate) == 2:
        iso3 = countriesdata['iso2iso3'].get(candidate)
        if iso3 is not None:
            return iso3
    # Otherwise treat the input as a country name, first verbatim, then with
    # abbreviations expanded
    names2iso3 = countriesdata['countrynames2iso3']
    iso3 = names2iso3.get(candidate)
    if iso3 is None:
        for expanded in cls.expand_countryname_abbrevs(candidate):
            iso3 = names2iso3.get(expanded)
            if iso3 is not None:
                break
    if iso3 is not None:
        return iso3
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
    """Get ISO3 code for a country. A tuple is returned with the first value being the ISO3 code
    and the second showing if the match is exact or not.

    Tries an exact lookup first, then a word-overlap scoring pass over all known
    country names, then the per-country regex aliases.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    iso3 = cls.get_iso3_country_code(country,
                                     use_live=use_live)  # don't put exception param here as we don't want it to throw
    if iso3 is not None:
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # Remove every word containing word_or_part as a substring.
        # NOTE(review): mutates wordlist while iterating it, so consecutive
        # matching words can be skipped — confirm this is intended.
        for word in wordlist:
            if word_or_part in word:
                wordlist.remove(word)
    # fuzzy matching: score each known country name against each abbreviation-expanded
    # candidate of the input; keep the set of ISO3s achieving the best score
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    # +32: the simplified core name is contained in this country name
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                for word in removed_words:
                    if word in countryname:
                        # +4 for each descriptive word of the input also present here
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        # penalise missing words; heavily if they distinguish countries
                        # (eg. NORTH/SOUTH), lightly otherwise
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                for word in words:
                    # penalise leftover words of the candidate country name the same way
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)
    # Only accept an unambiguous winner with a score beyond the core-name bonus threshold
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False
    # regex lookup: fall back to the per-country alias patterns from the feed
    for iso3, regex in countriesdata['aliases'].items():
        index = re.search(regex, country.upper())
        if index is not None:
            return iso3, False
    if exception is not None:
        raise exception
    return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
    # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
    """Get countries (ISO3 codes) in a region

    Args:
        region (Union[int,str]): Three digit UNStats M49 region code or region name
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.

    Returns:
        List(str): Sorted list of ISO3 country names
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    if isinstance(region, int):
        # Numeric codes are used as-is; name resolution happens via the lookup table
        code = region
    else:
        code = countriesdata['regionnames2codes'].get(region.upper())
    if code is not None:
        return countriesdata['regioncodes2countries'][code]
    if exception is not None:
        raise exception
    return []
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.set_countriesdata | python | def set_countriesdata(cls, countries):
# type: (str) -> None
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries') | Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L107-L141 | [
"def _add_countriesdata(cls, iso3, country):\n # type: (str, hxl.Row) -> None\n \"\"\"\n Set up countries data from data in form provided by UNStats and World Bank\n\n Args:\n iso3 (str): ISO3 code for country\n country (hxl.Row): Country information\n\n Returns:\n None\n \"\"\"\n countryname = country.get('#country+name+preferred')\n cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3\n iso2 = country.get('#country+code+v_iso2')\n if iso2:\n cls._countriesdata['iso2iso3'][iso2] = iso3\n # different types so keys won't clash\n cls._countriesdata['iso2iso3'][iso3] = iso2\n m49 = country.get('#country+code+num+v_m49')\n if m49:\n m49 = int(m49)\n cls._countriesdata['m49iso3'][m49] = iso3\n # different types so keys won't clash\n cls._countriesdata['m49iso3'][iso3] = m49\n cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)\n regionname = country.get('#region+main+name+preferred')\n sub_regionname = country.get('#region+sub+name+preferred')\n intermediate_regionname = country.get('#region+intermediate+name+preferred')\n regionid = country.get('#region+main+code')\n if regionid:\n regionid = int(regionid)\n sub_regionid = country.get('#region+sub+code')\n if sub_regionid:\n sub_regionid = int(sub_regionid)\n intermediate_regionid = country.get('#region+intermediate+code')\n if intermediate_regionid:\n intermediate_regionid = int(intermediate_regionid)\n\n # region, subregion and intermediate region codes do not clash so only need one dict\n def add_country_to_set(colname, idval, iso3):\n value = cls._countriesdata[colname].get(idval)\n if value is None:\n value = set()\n cls._countriesdata['regioncodes2countries'][idval] = value\n value.add(iso3)\n\n if regionname:\n add_country_to_set('regioncodes2countries', regionid, iso3)\n cls._countriesdata['regioncodes2names'][regionid] = regionname\n cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid\n if sub_regionname:\n 
add_country_to_set('regioncodes2countries', sub_regionid, iso3)\n cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname\n cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid\n if intermediate_regionname:\n add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)\n cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname\n cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \\\n intermediate_regionid\n",
"def sort_list(colname):\n for idval in cls._countriesdata[colname]:\n cls._countriesdata[colname][idval] = \\\n sorted(list(cls._countriesdata[colname][idval]))\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
    # type: (str, hxl.Row) -> None
    """
    Populate the class lookup tables with one country from a feed row.

    Fills countrynames2iso3, the bidirectional iso2iso3 and m49iso3 maps, the
    regex aliases and the region lookup tables.

    Args:
        iso3 (str): ISO3 code for country (already uppercased by the caller)
        country (hxl.Row): Country information

    Returns:
        None
    """
    countryname = country.get('#country+name+preferred')
    cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
    iso2 = country.get('#country+code+v_iso2')
    if iso2:
        cls._countriesdata['iso2iso3'][iso2] = iso3
        # different types so keys won't clash
        cls._countriesdata['iso2iso3'][iso3] = iso2
    m49 = country.get('#country+code+num+v_m49')
    if m49:
        m49 = int(m49)
        cls._countriesdata['m49iso3'][m49] = iso3
        # different types so keys won't clash
        cls._countriesdata['m49iso3'][iso3] = m49
    # Case-insensitive alias pattern for fuzzy matching
    cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
    regionname = country.get('#region+main+name+preferred')
    sub_regionname = country.get('#region+sub+name+preferred')
    intermediate_regionname = country.get('#region+intermediate+name+preferred')
    regionid = country.get('#region+main+code')
    if regionid:
        regionid = int(regionid)
    sub_regionid = country.get('#region+sub+code')
    if sub_regionid:
        sub_regionid = int(sub_regionid)
    intermediate_regionid = country.get('#region+intermediate+code')
    if intermediate_regionid:
        intermediate_regionid = int(intermediate_regionid)

    # region, subregion and intermediate region codes do not clash so only need one dict
    def add_country_to_set(colname, idval, iso3):
        value = cls._countriesdata[colname].get(idval)
        if value is None:
            value = set()
            # NOTE(review): writes to 'regioncodes2countries' regardless of colname;
            # harmless while every caller passes that colname, but a trap if reused.
            cls._countriesdata['regioncodes2countries'][idval] = value
        value.add(iso3)

    if regionname:
        add_country_to_set('regioncodes2countries', regionid, iso3)
        cls._countriesdata['regioncodes2names'][regionid] = regionname
        cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
    if sub_regionname:
        add_country_to_set('regioncodes2countries', sub_regionid, iso3)
        cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
        cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
    if intermediate_regionname:
        add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
        cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
        cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
            intermediate_regionid
@classmethod
def countriesdata(cls, use_live=True):
    # type: (bool) -> List[Dict[Dict]]
    """
    Read countries data from OCHA countries feed (falling back to file).
    The result is cached on the class. Note: the duplicated @classmethod
    decorator has been removed — applying it twice is an error.

    Args:
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.

    Returns:
        List[Dict[Dict]]: Countries dictionaries
    """
    if cls._countriesdata is None:
        countries = None
        if use_live:
            try:
                countries = hxl.data(cls._ochaurl)
            except IOError:
                logger.exception('Download from OCHA feed failed! Falling back to stored file.')
        if countries is None:
            # Use the CSV shipped alongside this module as the offline fallback
            countries = hxl.data(
                script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
                                     Country), allow_local=True)
        cls.set_countriesdata(countries)
    return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
    # type: (str) -> None
    """
    Set the OCHA url from which to retrieve countries data

    Args:
        url (str): OCHA url from which to retrieve countries data. Defaults to internal value.

    Returns:
        None
    """
    cls._ochaurl = cls._ochaurl_int if url is None else url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Look up country information by ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country information
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information or None if not found
    """
    info = cls.countriesdata(use_live=use_live)['countries'].get(iso3.upper())
    if info is None and exception is not None:
        raise exception
    return info
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Look up the preferred country name by ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country name
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name or None if not found
    """
    countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None if countryinfo is None else countryinfo.get('#country+name+preferred')
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Translate an ISO3 code into its ISO2 code

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code or None if not found
    """
    # The iso2iso3 table maps in both directions (keys are 2 or 3 chars)
    result = cls.countriesdata(use_live=use_live)['iso2iso3'].get(iso3.upper())
    if result is None and exception is not None:
        raise exception
    return result
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Translate an ISO2 code into its ISO3 code

    Args:
        iso2 (str): ISO2 code for which to get ISO3 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code or None if not found
    """
    # The iso2iso3 table maps in both directions (keys are 2 or 3 chars)
    result = cls.countriesdata(use_live=use_live)['iso2iso3'].get(iso2.upper())
    if result is None and exception is not None:
        raise exception
    return result
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Look up country information by ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country information
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information or None if not found
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    return None if iso3 is None else cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Look up the preferred country name by ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country name
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name or None if not found
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    # use_live is not forwarded here: the data is already cached by the call above
    return cls.get_country_name_from_iso3(iso3, exception=exception)
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
    """Get M49 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get M49 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[int]: M49 code or None if not found
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Uppercase the key: the m49iso3 mapping stores ISO3 keys uppercased and
    # every sibling lookup (eg. get_iso2_from_iso3) already accepts lowercase input.
    m49 = countriesdata['m49iso3'].get(iso3.upper())
    if m49 is not None:
        return m49
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Translate an M49 numeric code into its ISO3 code

    Args:
        m49 (int): M49 numeric code for which to get ISO3 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code or None if not found
    """
    result = cls.countriesdata(use_live=use_live)['m49iso3'].get(m49)
    if result is None and exception is not None:
        raise exception
    return result
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Look up country information by M49 numeric code

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information or None if not found
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    # use_live is not forwarded here: the data is already cached by the call above
    return cls.get_country_info_from_iso3(iso3, exception=exception)
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Look up the preferred country name by M49 numeric code

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name or None if not found
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    # use_live is not forwarded here: the data is already cached by the call above
    return cls.get_country_name_from_iso3(iso3, exception=exception)
@classmethod
def expand_countryname_abbrevs(cls, country):
    # type: (str) -> List[str]
    """Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)

    Args:
        country (str): Country with abbreviation(s) to expand

    Returns:
        List[str]: Uppercase country names with abbreviation(s) expanded in various ways
    """
    def replace_ensure_space(word, replace, replacement):
        # The replacement carries a trailing space so a following word stays separated.
        # Collapse the double space this creates mid-string ('  ' -> ' ') and strip the
        # trailing one when the abbreviation was at the end. (The original chained a
        # no-op .replace(' ', ' ') here, leaving double spaces in expanded names.)
        return word.replace(replace, '%s ' % replacement).replace('  ', ' ').strip()
    countryupper = country.upper()
    # Single-expansion abbreviations are applied unconditionally, in place
    for abbreviation in cls.abbreviations:
        countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
    candidates = [countryupper]
    # Ambiguous abbreviations fan out into one candidate per possible expansion
    for abbreviation in cls.multiple_abbreviations:
        if abbreviation in countryupper:
            for expanded in cls.multiple_abbreviations[abbreviation]:
                candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
    return candidates
@classmethod
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # Drop any qualifier after a comma or colon (eg. 'CONGO, DEM. REP.')
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Remove parenthesised text; raw string fixes the invalid '\(' escape sequence
    regex = re.compile(r'\(.+?\)')
    countryupper = regex.sub('', countryupper)
    # Build the list of descriptive words to strip, starting from the static
    # simplifications and adding every known abbreviation expansion
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        # Keep only the first remaining word as the simplified name
        countryupper = countryupper_words[0]
    if countryupper:
        words.remove(countryupper)
    return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for a country. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    candidate = country.upper()
    # 2 or 3 letter inputs may already be ISO codes
    if len(candidate) == 3:
        if candidate in countriesdata['countries']:
            return candidate
    elif len(candidate) == 2:
        iso3 = countriesdata['iso2iso3'].get(candidate)
        if iso3 is not None:
            return iso3
    # Otherwise treat the input as a country name, first verbatim, then with
    # abbreviations expanded
    names2iso3 = countriesdata['countrynames2iso3']
    iso3 = names2iso3.get(candidate)
    if iso3 is None:
        for expanded in cls.expand_countryname_abbrevs(candidate):
            iso3 = names2iso3.get(expanded)
            if iso3 is not None:
                break
    if iso3 is not None:
        return iso3
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
"""Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = cls.get_iso3_country_code(country,
use_live=use_live) # don't put exception param here as we don't want it to throw
if iso3 is not None:
return iso3, True
def remove_matching_from_list(wordlist, word_or_part):
for word in wordlist:
if word_or_part in word:
wordlist.remove(word)
# fuzzy matching
expanded_country_candidates = cls.expand_countryname_abbrevs(country)
match_strength = 0
matches = set()
for countryname in sorted(countriesdata['countrynames2iso3']):
for candidate in expanded_country_candidates:
simplified_country, removed_words = cls.simplify_countryname(candidate)
if simplified_country in countryname:
words = get_words_in_sentence(countryname)
new_match_strength = 0
if simplified_country:
remove_matching_from_list(words, simplified_country)
new_match_strength += 32
for word in removed_words:
if word in countryname:
remove_matching_from_list(words, word)
new_match_strength += 4
else:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
for word in words:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
iso3 = countriesdata['countrynames2iso3'][countryname]
if new_match_strength > match_strength:
match_strength = new_match_strength
matches = set()
if new_match_strength == match_strength:
matches.add(iso3)
if len(matches) == 1 and match_strength > 16:
return matches.pop(), False
# regex lookup
for iso3, regex in countriesdata['aliases'].items():
index = re.search(regex, country.upper())
if index is not None:
return iso3, False
if exception is not None:
raise exception
return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
# type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
"""Get countries (ISO3 codes) in region
Args:
region (Union[int,str]): Three digit UNStats M49 region code or region name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.
Returns:
List(str): Sorted list of ISO3 country names
"""
countriesdata = cls.countriesdata(use_live=use_live)
if isinstance(region, int):
regioncode = region
else:
regionupper = region.upper()
regioncode = countriesdata['regionnames2codes'].get(regionupper)
if regioncode is not None:
return countriesdata['regioncodes2countries'][regioncode]
if exception is not None:
raise exception
return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.countriesdata | python | def countriesdata(cls, use_live=True):
# type: (bool) -> List[Dict[Dict]]
if cls._countriesdata is None:
countries = None
if use_live:
try:
countries = hxl.data(cls._ochaurl)
except IOError:
logger.exception('Download from OCHA feed failed! Falling back to stored file.')
if countries is None:
countries = hxl.data(
script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
Country), allow_local=True)
cls.set_countriesdata(countries)
return cls._countriesdata | Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L144-L167 | [
"def set_countriesdata(cls, countries):\n # type: (str) -> None\n \"\"\"\n Set up countries data from data in form provided by UNStats and World Bank\n\n Args:\n countries (str): Countries data in HTML format provided by UNStats\n\n Returns:\n None\n \"\"\"\n cls._countriesdata = dict()\n cls._countriesdata['countries'] = dict()\n cls._countriesdata['iso2iso3'] = dict()\n cls._countriesdata['m49iso3'] = dict()\n cls._countriesdata['countrynames2iso3'] = dict()\n cls._countriesdata['regioncodes2countries'] = dict()\n cls._countriesdata['regioncodes2names'] = dict()\n cls._countriesdata['regionnames2codes'] = dict()\n cls._countriesdata['aliases'] = dict()\n\n for country in countries:\n iso3 = country.get('#country+code+v_iso3')\n if not iso3:\n continue\n iso3 = iso3.upper()\n cls._add_countriesdata(iso3, country)\n cls._countriesdata['countries'][iso3] = country.dictionary\n\n def sort_list(colname):\n for idval in cls._countriesdata[colname]:\n cls._countriesdata[colname][idval] = \\\n sorted(list(cls._countriesdata[colname][idval]))\n\n sort_list('regioncodes2countries')\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
"""
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
# type: (str) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None
"""
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries')
@classmethod
@classmethod
def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
"""
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from ISO2 code
Args:
iso2 (str): ISO2 code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['iso2iso3'].get(iso2.upper())
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
return None
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
"""Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
m49 = countriesdata['m49iso3'].get(iso3)
if m49 is not None:
return m49
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from M49 code
Args:
m49 (int): M49 numeric code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['m49iso3'].get(m49)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates
@classmethod
def simplify_countryname(cls, country):
# type: (str) -> (str, List[str])
"""Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
"""
countryupper = country.upper()
words = get_words_in_sentence(countryupper)
index = countryupper.find(',')
if index != -1:
countryupper = countryupper[:index]
index = countryupper.find(':')
if index != -1:
countryupper = countryupper[:index]
regex = re.compile('\(.+?\)')
countryupper = regex.sub('', countryupper)
remove = copy.deepcopy(cls.simplifications)
for simplification1, simplification2 in cls.abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
remove.append(simplification2)
for simplification1, simplifications in cls.multiple_abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
for simplification2 in simplifications:
remove.append(simplification2)
remove = '|'.join(remove)
regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
countryupper = regex.sub('', countryupper)
countryupper = countryupper.strip()
countryupper_words = get_words_in_sentence(countryupper)
if len(countryupper_words) > 1:
countryupper = countryupper_words[0]
if countryupper:
words.remove(countryupper)
return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
"""
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata['countries']:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata['iso2iso3'].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata['countrynames2iso3'].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata['countrynames2iso3'].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
"""Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = cls.get_iso3_country_code(country,
use_live=use_live) # don't put exception param here as we don't want it to throw
if iso3 is not None:
return iso3, True
def remove_matching_from_list(wordlist, word_or_part):
for word in wordlist:
if word_or_part in word:
wordlist.remove(word)
# fuzzy matching
expanded_country_candidates = cls.expand_countryname_abbrevs(country)
match_strength = 0
matches = set()
for countryname in sorted(countriesdata['countrynames2iso3']):
for candidate in expanded_country_candidates:
simplified_country, removed_words = cls.simplify_countryname(candidate)
if simplified_country in countryname:
words = get_words_in_sentence(countryname)
new_match_strength = 0
if simplified_country:
remove_matching_from_list(words, simplified_country)
new_match_strength += 32
for word in removed_words:
if word in countryname:
remove_matching_from_list(words, word)
new_match_strength += 4
else:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
for word in words:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
iso3 = countriesdata['countrynames2iso3'][countryname]
if new_match_strength > match_strength:
match_strength = new_match_strength
matches = set()
if new_match_strength == match_strength:
matches.add(iso3)
if len(matches) == 1 and match_strength > 16:
return matches.pop(), False
# regex lookup
for iso3, regex in countriesdata['aliases'].items():
index = re.search(regex, country.upper())
if index is not None:
return iso3, False
if exception is not None:
raise exception
return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
# type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
"""Get countries (ISO3 codes) in region
Args:
region (Union[int,str]): Three digit UNStats M49 region code or region name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.
Returns:
List(str): Sorted list of ISO3 country names
"""
countriesdata = cls.countriesdata(use_live=use_live)
if isinstance(region, int):
regioncode = region
else:
regionupper = region.upper()
regioncode = countriesdata['regionnames2codes'].get(regionupper)
if regioncode is not None:
return countriesdata['regioncodes2countries'][regioncode]
if exception is not None:
raise exception
return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.set_ocha_url | python | def set_ocha_url(cls, url=None):
# type: (str) -> None
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url | Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L170-L183 | null | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
"""
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
    # type: (str) -> None
    """
    Set up countries data from data in form provided by UNStats and World Bank

    Args:
        countries (str): Countries data in HTML format provided by UNStats

    Returns:
        None
    """
    # Rebuild every lookup table from scratch.
    cls._countriesdata = {key: dict() for key in (
        'countries', 'iso2iso3', 'm49iso3', 'countrynames2iso3',
        'regioncodes2countries', 'regioncodes2names', 'regionnames2codes',
        'aliases')}
    for row in countries:
        code = row.get('#country+code+v_iso3')
        if not code:
            # Rows without an ISO3 code carry no usable country entry.
            continue
        code = code.upper()
        cls._add_countriesdata(code, row)
        cls._countriesdata['countries'][code] = row.dictionary
    # Convert each region's set of ISO3 codes into a sorted list.
    region_map = cls._countriesdata['regioncodes2countries']
    for region_code, iso3_codes in region_map.items():
        region_map[region_code] = sorted(iso3_codes)
@classmethod
def countriesdata(cls, use_live=True):
    # type: (bool) -> List[Dict[Dict]]
    """
    Read countries data from OCHA countries feed (falling back to file)

    Args:
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.

    Returns:
        List[Dict[Dict]]: Countries dictionaries
    """
    # Cached after the first successful load.
    if cls._countriesdata is not None:
        return cls._countriesdata
    countries = None
    if use_live:
        try:
            countries = hxl.data(cls._ochaurl)
        except IOError:
            logger.exception('Download from OCHA feed failed! Falling back to stored file.')
    if countries is None:
        # Use the static copy shipped inside the package.
        countries = hxl.data(
            script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
                                 Country), allow_local=True)
    cls.set_countriesdata(countries)
    return cls._countriesdata
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: country information
    """
    # NOTE(review): the decorator was duplicated (@classmethod applied twice),
    # which wraps a classmethod object in another and breaks invocation.
    # A single @classmethod is kept.
    countriesdata = cls.countriesdata(use_live=use_live)
    # Keys are stored uppercased, so normalize the lookup.
    country = countriesdata['countries'].get(iso3.upper())
    if country is not None:
        return country
    if exception is not None:
        raise exception
    return None
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    info = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    if info is None:
        return None
    return info.get('#country+name+preferred')
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO2 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code
    """
    # iso2iso3 maps both directions; ISO3 keys are uppercase.
    mapping = cls.countriesdata(use_live=use_live)['iso2iso3']
    iso2 = mapping.get(iso3.upper())
    if iso2 is None and exception is not None:
        raise exception
    return iso2
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    # iso2iso3 maps both directions; ISO2 keys are uppercase.
    mapping = cls.countriesdata(use_live=use_live)['iso2iso3']
    iso3 = mapping.get(iso2.upper())
    if iso3 is None and exception is not None:
        raise exception
    return iso3
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country name from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Fix: propagate use_live so the second lookup honours the caller's
        # choice (previously it silently fell back to the default).
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
    """Get M49 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get M49 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[int]: M49 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Fix: ISO3 keys are stored uppercased (see set_countriesdata), so
    # normalize here like every other ISO3 lookup does.
    m49 = countriesdata['m49iso3'].get(iso3.upper())
    if m49 is not None:
        return m49
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from M49 code

    Args:
        m49 (int): M49 numeric code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    # m49iso3 maps both directions; int keys give ISO3 strings.
    mapping = cls.countriesdata(use_live=use_live)['m49iso3']
    iso3 = mapping.get(m49)
    if iso3 is None and exception is not None:
        raise exception
    return iso3
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country name from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Fix: propagate use_live so the second lookup honours the caller's
        # choice (previously it silently fell back to the default).
        return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Fix: propagate use_live so the second lookup honours the caller's
        # choice (previously it silently fell back to the default).
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def expand_countryname_abbrevs(cls, country):
    # type: (str) -> List[str]
    """Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)

    Args:
        country (str): Country with abbreviation(s)to expand

    Returns:
        List[str]: Uppercase country name with abbreviation(s) expanded in various ways
    """
    def substitute(text, abbrev, expansion):
        return text.replace(abbrev, '%s ' % expansion).replace(' ', ' ').strip()

    name = country.upper()
    # Single-expansion abbreviations are applied unconditionally.
    for abbrev, expansion in cls.abbreviations.items():
        name = substitute(name, abbrev, expansion)
    candidates = [name]
    # Ambiguous abbreviations fan out into one candidate per expansion.
    for abbrev, expansions in cls.multiple_abbreviations.items():
        if abbrev not in name:
            continue
        candidates.extend(substitute(name, abbrev, expansion)
                          for expansion in expansions)
    return candidates
@classmethod
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # Strip qualifiers after a comma or colon (eg. 'KOREA, REPUBLIC OF').
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Drop parenthesised qualifiers. Fix: raw string avoids the invalid
    # '\(' escape-sequence warning raised by modern Python versions.
    regex = re.compile(r'\(.+?\)')
    countryupper = regex.sub('', countryupper)
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    # Remove all collected filler words in a single whole-word pass.
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        # Keep only the leading word as the simplified name.
        countryupper = countryupper_words[0]
    if countryupper:
        words.remove(countryupper)
    return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for cls. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    lookup = country.upper()
    # Short inputs may already be ISO3 or ISO2 codes.
    if len(lookup) == 3:
        if lookup in countriesdata['countries']:
            return lookup
    elif len(lookup) == 2:
        iso3 = countriesdata['iso2iso3'].get(lookup)
        if iso3 is not None:
            return iso3
    # Otherwise try an exact name match, then each abbreviation expansion.
    iso3 = countriesdata['countrynames2iso3'].get(lookup)
    if iso3 is not None:
        return iso3
    for expanded in cls.expand_countryname_abbrevs(lookup):
        iso3 = countriesdata['countrynames2iso3'].get(expanded)
        if iso3 is not None:
            return iso3
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
    """Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
    showing if the match is exact or not.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    iso3 = cls.get_iso3_country_code(country,
                                     use_live=use_live)  # don't put exception param here as we don't want it to throw
    if iso3 is not None:
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # Fix: iterate over a snapshot — removing from the list being
        # iterated skips the element after each removal, so consecutive
        # matching words were previously left in place and miscounted.
        for word in list(wordlist):
            if word_or_part in word:
                wordlist.remove(word)

    # fuzzy matching: score every known country name against every
    # abbreviation-expanded candidate of the input.
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                # Credit descriptive words shared with the candidate;
                # penalise mismatches, heavily so for major differentiators
                # (eg. NORTH vs SOUTH must not be confused).
                for word in removed_words:
                    if word in countryname:
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                # Penalise words in the known name left unaccounted for.
                for word in words:
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)
    # Accept only an unambiguous, sufficiently strong fuzzy match.
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False
    # regex lookup against per-country alias patterns
    for iso3, regex in countriesdata['aliases'].items():
        index = re.search(regex, country.upper())
        if index is not None:
            return iso3, False
    if exception is not None:
        raise exception
    return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
    # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
    """Get countries (ISO3 codes) in region

    Args:
        region (Union[int,str]): Three digit UNStats M49 region code or region name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.

    Returns:
        List(str): Sorted list of ISO3 country names
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Accept either a numeric M49 code or a (case-insensitive) region name.
    regioncode = region if isinstance(region, int) \
        else countriesdata['regionnames2codes'].get(region.upper())
    if regioncode is not None:
        return countriesdata['regioncodes2countries'][regioncode]
    if exception is not None:
        raise exception
    return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_country_info_from_iso3 | python | def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None | Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L186-L205 | [
"def countriesdata(cls, use_live=True):\n # type: (bool) -> List[Dict[Dict]]\n \"\"\"\n Read countries data from OCHA countries feed (falling back to file)\n\n Args:\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n\n Returns:\n List[Dict[Dict]]: Countries dictionaries\n \"\"\"\n if cls._countriesdata is None:\n countries = None\n if use_live:\n try:\n countries = hxl.data(cls._ochaurl)\n except IOError:\n logger.exception('Download from OCHA feed failed! Falling back to stored file.')\n if countries is None:\n countries = hxl.data(\n script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',\n Country), allow_local=True)\n cls.set_countriesdata(countries)\n return cls._countriesdata\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
"""
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
# type: (str) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None
"""
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
# type: (bool) -> List[Dict[Dict]]
"""
Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries
"""
if cls._countriesdata is None:
countries = None
if use_live:
try:
countries = hxl.data(cls._ochaurl)
except IOError:
logger.exception('Download from OCHA feed failed! Falling back to stored file.')
if countries is None:
countries = hxl.data(
script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
Country), allow_local=True)
cls.set_countriesdata(countries)
return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
@classmethod
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from ISO2 code
Args:
iso2 (str): ISO2 code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['iso2iso3'].get(iso2.upper())
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
return None
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
"""Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
m49 = countriesdata['m49iso3'].get(iso3)
if m49 is not None:
return m49
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from M49 code
Args:
m49 (int): M49 numeric code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['m49iso3'].get(m49)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates
@classmethod
def simplify_countryname(cls, country):
# type: (str) -> (str, List[str])
"""Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
"""
countryupper = country.upper()
words = get_words_in_sentence(countryupper)
index = countryupper.find(',')
if index != -1:
countryupper = countryupper[:index]
index = countryupper.find(':')
if index != -1:
countryupper = countryupper[:index]
regex = re.compile('\(.+?\)')
countryupper = regex.sub('', countryupper)
remove = copy.deepcopy(cls.simplifications)
for simplification1, simplification2 in cls.abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
remove.append(simplification2)
for simplification1, simplifications in cls.multiple_abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
for simplification2 in simplifications:
remove.append(simplification2)
remove = '|'.join(remove)
regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
countryupper = regex.sub('', countryupper)
countryupper = countryupper.strip()
countryupper_words = get_words_in_sentence(countryupper)
if len(countryupper_words) > 1:
countryupper = countryupper_words[0]
if countryupper:
words.remove(countryupper)
return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for a country. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    candidate = country.upper()
    if len(candidate) == 3:
        # May already be an ISO3 code
        if candidate in countriesdata['countries']:
            return candidate
    elif len(candidate) == 2:
        # May be an ISO2 code
        iso3 = countriesdata['iso2iso3'].get(candidate)
        if iso3 is not None:
            return iso3
    # Exact name match, then exact match on each abbreviation expansion
    iso3 = countriesdata['countrynames2iso3'].get(candidate)
    if iso3 is None:
        for expansion in cls.expand_countryname_abbrevs(candidate):
            iso3 = countriesdata['countrynames2iso3'].get(expansion)
            if iso3 is not None:
                break
    if iso3 is not None:
        return iso3
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
    """Get ISO3 code for a country. A tuple is returned with the first value being the ISO3 code
    and the second showing if the match is exact or not.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # don't put exception param here as we don't want it to throw
    iso3 = cls.get_iso3_country_code(country, use_live=use_live)
    if iso3 is not None:
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # Bug fix: the original called wordlist.remove() while iterating the
        # same list, which skips the element following each removal. Rebuild
        # the list in place instead so every match is removed.
        wordlist[:] = [word for word in wordlist if word_or_part not in word]

    # fuzzy matching: score every known country name against each expansion
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                for word in removed_words:
                    if word in countryname:
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        # Penalise query words missing from the candidate name
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                for word in words:
                    # Penalise candidate-name words not matched by the query
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False
    # regex alias lookup as a last resort
    for iso3, regex in countriesdata['aliases'].items():
        if re.search(regex, country.upper()) is not None:
            return iso3, False
    if exception is not None:
        raise exception
    return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
    # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
    """Get countries (ISO3 codes) in region

    Args:
        region (Union[int,str]): Three digit UNStats M49 region code or region name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.

    Returns:
        List(str): Sorted list of ISO3 country names
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    if isinstance(region, int):
        regioncode = region
    else:
        regioncode = countriesdata['regionnames2codes'].get(region.upper())
    if regioncode is not None:
        # Bug fix: an unknown numeric region code previously raised KeyError
        # here instead of following the exception/empty-list contract below.
        countries = countriesdata['regioncodes2countries'].get(regioncode)
        if countries is not None:
            return countries
    if exception is not None:
        raise exception
    return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_country_name_from_iso3 | python | def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None | Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L208-L223 | [
"def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):\n # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]\n \"\"\"Get country information from ISO3 code\n\n Args:\n iso3 (str): ISO3 code for which to get country information\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\n Returns:\n Optional[Dict[str]]: country information\n \"\"\"\n countriesdata = cls.countriesdata(use_live=use_live)\n country = countriesdata['countries'].get(iso3.upper())\n if country is not None:\n return country\n\n if exception is not None:\n raise exception\n return None\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
    # type: (str, hxl.Row) -> None
    """Index a single country row into the class-level lookup tables.

    Args:
        iso3 (str): ISO3 code for country
        country (hxl.Row): Country information

    Returns:
        None
    """
    countryname = country.get('#country+name+preferred')
    cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
    iso2 = country.get('#country+code+v_iso2')
    if iso2:
        cls._countriesdata['iso2iso3'][iso2] = iso3
        # different types so keys won't clash
        cls._countriesdata['iso2iso3'][iso3] = iso2
    m49 = country.get('#country+code+num+v_m49')
    if m49:
        m49 = int(m49)
        cls._countriesdata['m49iso3'][m49] = iso3
        # different types so keys won't clash
        cls._countriesdata['m49iso3'][iso3] = m49
    cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
    regionname = country.get('#region+main+name+preferred')
    sub_regionname = country.get('#region+sub+name+preferred')
    intermediate_regionname = country.get('#region+intermediate+name+preferred')
    regionid = country.get('#region+main+code')
    if regionid:
        regionid = int(regionid)
    sub_regionid = country.get('#region+sub+code')
    if sub_regionid:
        sub_regionid = int(sub_regionid)
    intermediate_regionid = country.get('#region+intermediate+code')
    if intermediate_regionid:
        intermediate_regionid = int(intermediate_regionid)

    # region, subregion and intermediate region codes do not clash so only need one dict
    def add_country_to_set(colname, idval, iso3):
        value = cls._countriesdata[colname].get(idval)
        if value is None:
            value = set()
            # Fix: write back under colname, not a hard-coded
            # 'regioncodes2countries' key (worked only by coincidence).
            cls._countriesdata[colname][idval] = value
        value.add(iso3)

    if regionname:
        add_country_to_set('regioncodes2countries', regionid, iso3)
        cls._countriesdata['regioncodes2names'][regionid] = regionname
        cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
    if sub_regionname:
        add_country_to_set('regioncodes2countries', sub_regionid, iso3)
        cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
        cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
    if intermediate_regionname:
        add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
        cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
        cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
            intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
    # type: (str) -> None
    """Populate the class-level lookup tables from an iterable of HXL country rows.

    Args:
        countries (str): Countries data in HTML format provided by UNStats

    Returns:
        None
    """
    cls._countriesdata = dict()
    for key in ('countries', 'iso2iso3', 'm49iso3', 'countrynames2iso3',
                'regioncodes2countries', 'regioncodes2names',
                'regionnames2codes', 'aliases'):
        cls._countriesdata[key] = dict()
    for row in countries:
        iso3 = row.get('#country+code+v_iso3')
        if not iso3:
            # Rows without an ISO3 code cannot be indexed
            continue
        iso3 = iso3.upper()
        cls._add_countriesdata(iso3, row)
        cls._countriesdata['countries'][iso3] = row.dictionary
    # Deterministic ordering for the region membership lists
    region_map = cls._countriesdata['regioncodes2countries']
    for regioncode, members in region_map.items():
        region_map[regioncode] = sorted(members)
@classmethod
def countriesdata(cls, use_live=True):
    # type: (bool) -> List[Dict[Dict]]
    """
    Read countries data from OCHA countries feed (falling back to file)

    Args:
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.

    Returns:
        List[Dict[Dict]]: Countries dictionaries
    """
    # Lazily loaded and cached at class level: only the first call hits the
    # network (or the bundled file); later calls return the cached tables.
    if cls._countriesdata is None:
        countries = None
        if use_live:
            try:
                countries = hxl.data(cls._ochaurl)
            except IOError:
                logger.exception('Download from OCHA feed failed! Falling back to stored file.')
        if countries is None:
            # Offline fallback: CSV shipped inside the package
            countries = hxl.data(
                script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
                                     Country), allow_local=True)
        cls.set_countriesdata(countries)
    return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
    # type: (str) -> None
    """
    Set the OCHA url from which to retrieve countries data

    Args:
        url (str): OCHA url from which to retrieve countries data. Defaults to internal value.

    Returns:
        None
    """
    if url is None:
        url = cls._ochaurl_int
    cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: country information
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    info = countriesdata['countries'].get(iso3.upper())
    if info is None:
        if exception is not None:
            raise exception
        return None
    return info
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    # Restored method: the original text had a stray doubled @classmethod
    # decorator here where this method had been elided; sibling methods
    # (get_country_name_from_iso2, get_country_name_from_m49) call it.
    countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    if countryinfo is not None:
        return countryinfo.get('#country+name+preferred')
    return None

@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO2 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    iso2 = countriesdata['iso2iso3'].get(iso3.upper())
    if iso2 is not None:
        return iso2
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    result = countriesdata['iso2iso3'].get(iso2.upper())
    if result is None and exception is not None:
        raise exception
    return result
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    # Resolve ISO2 -> ISO3, then delegate the information lookup
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Bug fix: propagate use_live; previously it silently reverted to True
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
    """Get M49 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get M49 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[int]: M49 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Bug fix: upper-case the key — the m49iso3 table stores ISO3 codes upper
    # case and every sibling ISO3-keyed method upper-cases its input.
    m49 = countriesdata['m49iso3'].get(iso3.upper())
    if m49 is not None:
        return m49
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from M49 code

    Args:
        m49 (int): M49 numeric code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    result = countriesdata['m49iso3'].get(m49)
    if result is None and exception is not None:
        raise exception
    return result
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Bug fix: propagate use_live; previously it silently reverted to True
        return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Bug fix: propagate use_live; previously it silently reverted to True
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates
@classmethod
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.

    Strips anything after a comma or colon, parenthesised qualifiers, known
    abbreviations and their expansions, then keeps the first remaining word.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # Keep only text before any ',' or ':' qualifier
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Drop parenthesised qualifiers eg. "(the)".
    # Fix: raw string — '\(' in a plain string is an invalid escape sequence
    # (DeprecationWarning now, SyntaxError in future Pythons).
    countryupper = re.sub(r'\(.+?\)', '', countryupper)
    remove = copy.deepcopy(cls.simplifications)
    for abbreviation, expansion in cls.abbreviations.items():
        countryupper = countryupper.replace(abbreviation, '')
        remove.append(expansion)
    for abbreviation, expansions in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(abbreviation, '')
        remove.extend(expansions)
    # Remove all descriptive words in one case-insensitive pass
    regex = re.compile(r'\b(' + '|'.join(remove) + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper).strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        # More than one word survived: keep just the first as the simplified name
        countryupper = countryupper_words[0]
    if countryupper:
        words.remove(countryupper)
    return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for a country. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    candidate = country.upper()
    if len(candidate) == 3:
        # May already be an ISO3 code
        if candidate in countriesdata['countries']:
            return candidate
    elif len(candidate) == 2:
        # May be an ISO2 code
        iso3 = countriesdata['iso2iso3'].get(candidate)
        if iso3 is not None:
            return iso3
    # Exact name match, then exact match on each abbreviation expansion
    iso3 = countriesdata['countrynames2iso3'].get(candidate)
    if iso3 is None:
        for expansion in cls.expand_countryname_abbrevs(candidate):
            iso3 = countriesdata['countrynames2iso3'].get(expansion)
            if iso3 is not None:
                break
    if iso3 is not None:
        return iso3
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
    """Get ISO3 code for a country. A tuple is returned with the first value being the ISO3 code
    and the second showing if the match is exact or not.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # don't put exception param here as we don't want it to throw
    iso3 = cls.get_iso3_country_code(country, use_live=use_live)
    if iso3 is not None:
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # Bug fix: the original called wordlist.remove() while iterating the
        # same list, which skips the element following each removal. Rebuild
        # the list in place instead so every match is removed.
        wordlist[:] = [word for word in wordlist if word_or_part not in word]

    # fuzzy matching: score every known country name against each expansion
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                for word in removed_words:
                    if word in countryname:
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        # Penalise query words missing from the candidate name
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                for word in words:
                    # Penalise candidate-name words not matched by the query
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False
    # regex alias lookup as a last resort
    for iso3, regex in countriesdata['aliases'].items():
        if re.search(regex, country.upper()) is not None:
            return iso3, False
    if exception is not None:
        raise exception
    return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
    # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
    """Get countries (ISO3 codes) in region

    Args:
        region (Union[int,str]): Three digit UNStats M49 region code or region name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.

    Returns:
        List(str): Sorted list of ISO3 country names
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    if isinstance(region, int):
        regioncode = region
    else:
        regioncode = countriesdata['regionnames2codes'].get(region.upper())
    if regioncode is not None:
        # Bug fix: an unknown numeric region code previously raised KeyError
        # here instead of following the exception/empty-list contract below.
        countries = countriesdata['regioncodes2countries'].get(regioncode)
        if countries is not None:
            return countries
    if exception is not None:
        raise exception
    return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_iso2_from_iso3 | python | def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None | Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L226-L245 | [
"def countriesdata(cls, use_live=True):\n # type: (bool) -> List[Dict[Dict]]\n \"\"\"\n Read countries data from OCHA countries feed (falling back to file)\n\n Args:\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n\n Returns:\n List[Dict[Dict]]: Countries dictionaries\n \"\"\"\n if cls._countriesdata is None:\n countries = None\n if use_live:\n try:\n countries = hxl.data(cls._ochaurl)\n except IOError:\n logger.exception('Download from OCHA feed failed! Falling back to stored file.')\n if countries is None:\n countries = hxl.data(\n script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',\n Country), allow_local=True)\n cls.set_countriesdata(countries)\n return cls._countriesdata\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
    # type: (str, hxl.Row) -> None
    """Index a single country row into the class-level lookup tables.

    Args:
        iso3 (str): ISO3 code for country
        country (hxl.Row): Country information

    Returns:
        None
    """
    countryname = country.get('#country+name+preferred')
    cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
    iso2 = country.get('#country+code+v_iso2')
    if iso2:
        cls._countriesdata['iso2iso3'][iso2] = iso3
        # different types so keys won't clash
        cls._countriesdata['iso2iso3'][iso3] = iso2
    m49 = country.get('#country+code+num+v_m49')
    if m49:
        m49 = int(m49)
        cls._countriesdata['m49iso3'][m49] = iso3
        # different types so keys won't clash
        cls._countriesdata['m49iso3'][iso3] = m49
    cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
    regionname = country.get('#region+main+name+preferred')
    sub_regionname = country.get('#region+sub+name+preferred')
    intermediate_regionname = country.get('#region+intermediate+name+preferred')
    regionid = country.get('#region+main+code')
    if regionid:
        regionid = int(regionid)
    sub_regionid = country.get('#region+sub+code')
    if sub_regionid:
        sub_regionid = int(sub_regionid)
    intermediate_regionid = country.get('#region+intermediate+code')
    if intermediate_regionid:
        intermediate_regionid = int(intermediate_regionid)

    # region, subregion and intermediate region codes do not clash so only need one dict
    def add_country_to_set(colname, idval, iso3):
        value = cls._countriesdata[colname].get(idval)
        if value is None:
            value = set()
            # Fix: write back under colname, not a hard-coded
            # 'regioncodes2countries' key (worked only by coincidence).
            cls._countriesdata[colname][idval] = value
        value.add(iso3)

    if regionname:
        add_country_to_set('regioncodes2countries', regionid, iso3)
        cls._countriesdata['regioncodes2names'][regionid] = regionname
        cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
    if sub_regionname:
        add_country_to_set('regioncodes2countries', sub_regionid, iso3)
        cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
        cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
    if intermediate_regionname:
        add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
        cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
        cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
            intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
    # type: (hxl.Dataset) -> None
    """
    Set up countries data from HXLated countries data (iterable of hxl.Row)

    Args:
        countries (hxl.Dataset): Countries data as HXLated rows (one row per country)

    Returns:
        None
    """
    # Rebuild every lookup table from scratch; cls._countriesdata is the
    # class-level cache that all lookup methods read from.
    cls._countriesdata = dict()
    cls._countriesdata['countries'] = dict()
    cls._countriesdata['iso2iso3'] = dict()
    cls._countriesdata['m49iso3'] = dict()
    cls._countriesdata['countrynames2iso3'] = dict()
    cls._countriesdata['regioncodes2countries'] = dict()
    cls._countriesdata['regioncodes2names'] = dict()
    cls._countriesdata['regionnames2codes'] = dict()
    cls._countriesdata['aliases'] = dict()
    for country in countries:
        iso3 = country.get('#country+code+v_iso3')
        if not iso3:
            # Rows without an ISO3 code cannot be indexed - skip them
            continue
        iso3 = iso3.upper()
        cls._add_countriesdata(iso3, country)
        cls._countriesdata['countries'][iso3] = country.dictionary

    def sort_list(colname):
        # Convert each set of ISO3 codes into a deterministic sorted list
        for idval in cls._countriesdata[colname]:
            cls._countriesdata[colname][idval] = \
                sorted(list(cls._countriesdata[colname][idval]))

    sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
    # type: (bool) -> Dict[str, Dict]
    """
    Read countries data from OCHA countries feed (falling back to file)

    Args:
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.

    Returns:
        Dict[str, Dict]: Dictionary of countries lookup tables
    """
    # Lazily populate the class-level cache on first use; subsequent calls
    # return the cached tables regardless of use_live.
    if cls._countriesdata is None:
        countries = None
        if use_live:
            try:
                countries = hxl.data(cls._ochaurl)
            except IOError:
                # Best effort: log and fall through to the bundled CSV file
                logger.exception('Download from OCHA feed failed! Falling back to stored file.')
        if countries is None:
            # Offline fallback: CSV shipped inside the package next to this module
            countries = hxl.data(
                script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
                                     Country), allow_local=True)
        cls.set_countriesdata(countries)
    return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
    # type: (Optional[str]) -> None
    """
    Set the OCHA feed url from which to retrieve countries data

    Args:
        url (Optional[str]): Url to use. Defaults to the internal OCHA feed url.

    Returns:
        None
    """
    cls._ochaurl = cls._ochaurl_int if url is None else url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: country information
    """
    countries = cls.countriesdata(use_live=use_live)['countries']
    info = countries.get(iso3.upper())
    if info is None and exception is not None:
        raise exception
    return info
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    info = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return info.get('#country+name+preferred') if info is not None else None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    # Fix: the original stacked @classmethod twice; a classmethod wrapping a
    # classmethod is not a callable bound method on modern Python versions,
    # so the duplicate decorator is removed.
    countriesdata = cls.countriesdata(use_live=use_live)
    iso3 = countriesdata['iso2iso3'].get(iso2.upper())
    if iso3 is not None:
        return iso3

    if exception is not None:
        raise exception
    return None
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Consistency fix: propagate use_live to the inner lookup (the
        # original dropped it, unlike get_country_info_from_iso2)
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
    """Get M49 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get M49 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[int]: M49 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Consistency fix: keys are stored uppercased (see set_countriesdata),
    # so normalize like every other ISO3 lookup; previously a lowercase
    # iso3 silently missed.
    m49 = countriesdata['m49iso3'].get(iso3.upper())
    if m49 is not None:
        return m49

    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from M49 code

    Args:
        m49 (int): M49 numeric code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    result = cls.countriesdata(use_live=use_live)['m49iso3'].get(m49)
    if result is not None:
        return result
    if exception is not None:
        raise exception
    return None
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Consistency fix: propagate use_live to the inner lookup (the
        # original dropped it, unlike get_country_info_from_iso2)
        return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Consistency fix: propagate use_live to the inner lookup
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def expand_countryname_abbrevs(cls, country):
    # type: (str) -> List[str]
    """Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)

    Args:
        country (str): Country with abbreviation(s) to expand

    Returns:
        List[str]: Uppercase country name with abbreviation(s) expanded in various ways
    """
    def substitute(text, abbrev, full):
        return text.replace(abbrev, '%s ' % full).replace(' ', ' ').strip()

    name = country.upper()
    # Single-expansion abbreviations are applied in place
    for abbrev, full in cls.abbreviations.items():
        name = substitute(name, abbrev, full)
    variants = [name]
    # Multi-expansion abbreviations produce one extra candidate per expansion
    for abbrev, expansions in cls.multiple_abbreviations.items():
        if abbrev not in name:
            continue
        for full in expansions:
            variants.append(substitute(name, abbrev, full))
    return variants
@classmethod
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # Drop qualifiers after a comma or colon (eg. 'KOREA, REPUBLIC OF')
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Strip parenthesised qualifiers. Fix: use a raw string - the original
    # non-raw '\(.+?\)' relies on an invalid escape sequence
    # (DeprecationWarning); the compiled pattern is identical.
    regex = re.compile(r'\(.+?\)')
    countryupper = regex.sub('', countryupper)
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    # Remove all collected filler words in one whole-word, case-insensitive pass
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        # Keep only the first remaining word as the core name
        countryupper = countryupper_words[0]
    if countryupper:
        words.remove(countryupper)
    return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for cls. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    countryupper = country.upper()
    len_countryupper = len(countryupper)
    if len_countryupper == 3:
        # 3-character input may already be an ISO3 code
        if countryupper in countriesdata['countries']:
            return countryupper
    elif len_countryupper == 2:
        # 2-character input may be an ISO2 code
        iso3 = countriesdata['iso2iso3'].get(countryupper)
        if iso3 is not None:
            return iso3

    # Exact match on the preferred country name
    iso3 = countriesdata['countrynames2iso3'].get(countryupper)
    if iso3 is not None:
        return iso3

    # Exact match after expanding abbreviations (eg. DEM. -> DEMOCRATIC)
    for candidate in cls.expand_countryname_abbrevs(countryupper):
        iso3 = countriesdata['countrynames2iso3'].get(candidate)
        if iso3 is not None:
            return iso3

    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
    """Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
    showing if the match is exact or not.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # First try the exact-match lookup (ISO3 / ISO2 / name / expanded abbrevs)
    iso3 = cls.get_iso3_country_code(country,
                                     use_live=use_live)  # don't put exception param here as we don't want it to throw
    if iso3 is not None:
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # NOTE(review): mutates wordlist while iterating it, so a matching
        # word immediately following another match can be skipped - confirm
        # this is intended behaviour before changing.
        for word in wordlist:
            if word_or_part in word:
                wordlist.remove(word)

    # fuzzy matching: score every known country name against every expanded
    # candidate form of the input
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                # Score: +32 for the core name, +4 per matched removed word,
                # penalties for unmatched words (heavier for major
                # differentiators such as NORTH/SOUTH/DEMOCRATIC)
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                for word in removed_words:
                    if word in countryname:
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                for word in words:
                    # Any word of the known name left unaccounted for lowers the score
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    # New best score: discard previously collected candidates
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)

    # Accept only an unambiguous, sufficiently strong fuzzy match
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False

    # regex lookup against the per-country alias patterns as a last resort
    for iso3, regex in countriesdata['aliases'].items():
        index = re.search(regex, country.upper())
        if index is not None:
            return iso3, False

    if exception is not None:
        raise exception
    return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
    # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
    """Get countries (ISO3 codes) in region

    Args:
        region (Union[int,str]): Three digit UNStats M49 region code or region name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.

    Returns:
        List(str): Sorted list of ISO3 country names
    """
    data = cls.countriesdata(use_live=use_live)
    # Numeric input is already a region code; string input is a region name
    if isinstance(region, int):
        code = region
    else:
        code = data['regionnames2codes'].get(region.upper())
    if code is not None:
        return data['regioncodes2countries'][code]
    if exception is not None:
        raise exception
    return []
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_country_info_from_iso2 | python | def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
return None | Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L270-L285 | [
"def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):\n # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]\n \"\"\"Get country information from ISO3 code\n\n Args:\n iso3 (str): ISO3 code for which to get country information\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\n Returns:\n Optional[Dict[str]]: country information\n \"\"\"\n countriesdata = cls.countriesdata(use_live=use_live)\n country = countriesdata['countries'].get(iso3.upper())\n if country is not None:\n return country\n\n if exception is not None:\n raise exception\n return None\n",
"def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):\n # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]\n \"\"\"Get ISO3 from ISO2 code\n\n Args:\n iso2 (str): ISO2 code for which to get ISO3 code\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\n Returns:\n Optional[str]: ISO3 code\n \"\"\"\n countriesdata = cls.countriesdata(use_live=use_live)\n iso3 = countriesdata['iso2iso3'].get(iso2.upper())\n if iso3 is not None:\n return iso3\n\n if exception is not None:\n raise exception\n return None\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
    # type: (str, hxl.Row) -> None
    """
    Index one country row into the class-level lookup tables

    Args:
        iso3 (str): ISO3 code for country (already uppercased by caller)
        country (hxl.Row): Country information

    Returns:
        None
    """
    countryname = country.get('#country+name+preferred')
    cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
    iso2 = country.get('#country+code+v_iso2')
    if iso2:
        # Bidirectional mapping stored in one dict
        cls._countriesdata['iso2iso3'][iso2] = iso3
        # different types so keys won't clash
        cls._countriesdata['iso2iso3'][iso3] = iso2
    m49 = country.get('#country+code+num+v_m49')
    if m49:
        m49 = int(m49)
        cls._countriesdata['m49iso3'][m49] = iso3
        # different types so keys won't clash
        cls._countriesdata['m49iso3'][iso3] = m49
    # Case-insensitive alias pattern used by the fuzzy regex lookup
    cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
    regionname = country.get('#region+main+name+preferred')
    sub_regionname = country.get('#region+sub+name+preferred')
    intermediate_regionname = country.get('#region+intermediate+name+preferred')
    regionid = country.get('#region+main+code')
    if regionid:
        regionid = int(regionid)
    sub_regionid = country.get('#region+sub+code')
    if sub_regionid:
        sub_regionid = int(sub_regionid)
    intermediate_regionid = country.get('#region+intermediate+code')
    if intermediate_regionid:
        intermediate_regionid = int(intermediate_regionid)

    # region, subregion and intermediate region codes do not clash so only need one dict
    def add_country_to_set(colname, idval, iso3):
        # NOTE(review): reads via colname but always stores into
        # 'regioncodes2countries'; harmless today since every caller passes
        # that key, but confirm before reusing with another column.
        value = cls._countriesdata[colname].get(idval)
        if value is None:
            value = set()
            cls._countriesdata['regioncodes2countries'][idval] = value
        value.add(iso3)

    if regionname:
        add_country_to_set('regioncodes2countries', regionid, iso3)
        cls._countriesdata['regioncodes2names'][regionid] = regionname
        cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
    if sub_regionname:
        add_country_to_set('regioncodes2countries', sub_regionid, iso3)
        cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
        cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
    if intermediate_regionname:
        add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
        cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
        cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
            intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
    # type: (hxl.Dataset) -> None
    """
    Set up countries data from HXLated countries data (iterable of hxl.Row)

    Args:
        countries (hxl.Dataset): Countries data as HXLated rows (one row per country)

    Returns:
        None
    """
    # Rebuild every lookup table from scratch; cls._countriesdata is the
    # class-level cache that all lookup methods read from.
    cls._countriesdata = dict()
    cls._countriesdata['countries'] = dict()
    cls._countriesdata['iso2iso3'] = dict()
    cls._countriesdata['m49iso3'] = dict()
    cls._countriesdata['countrynames2iso3'] = dict()
    cls._countriesdata['regioncodes2countries'] = dict()
    cls._countriesdata['regioncodes2names'] = dict()
    cls._countriesdata['regionnames2codes'] = dict()
    cls._countriesdata['aliases'] = dict()
    for country in countries:
        iso3 = country.get('#country+code+v_iso3')
        if not iso3:
            # Rows without an ISO3 code cannot be indexed - skip them
            continue
        iso3 = iso3.upper()
        cls._add_countriesdata(iso3, country)
        cls._countriesdata['countries'][iso3] = country.dictionary

    def sort_list(colname):
        # Convert each set of ISO3 codes into a deterministic sorted list
        for idval in cls._countriesdata[colname]:
            cls._countriesdata[colname][idval] = \
                sorted(list(cls._countriesdata[colname][idval]))

    sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
    # type: (bool) -> Dict[str, Dict]
    """
    Read countries data from OCHA countries feed (falling back to file)

    Args:
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.

    Returns:
        Dict[str, Dict]: Dictionary of countries lookup tables
    """
    # Lazily populate the class-level cache on first use; subsequent calls
    # return the cached tables regardless of use_live.
    if cls._countriesdata is None:
        countries = None
        if use_live:
            try:
                countries = hxl.data(cls._ochaurl)
            except IOError:
                # Best effort: log and fall through to the bundled CSV file
                logger.exception('Download from OCHA feed failed! Falling back to stored file.')
        if countries is None:
            # Offline fallback: CSV shipped inside the package next to this module
            countries = hxl.data(
                script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
                                     Country), allow_local=True)
        cls.set_countriesdata(countries)
    return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
    # type: (Optional[str]) -> None
    """
    Set the OCHA feed url from which to retrieve countries data

    Args:
        url (Optional[str]): Url to use. Defaults to the internal OCHA feed url.

    Returns:
        None
    """
    cls._ochaurl = cls._ochaurl_int if url is None else url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: country information
    """
    countries = cls.countriesdata(use_live=use_live)['countries']
    info = countries.get(iso3.upper())
    if info is None and exception is not None:
        raise exception
    return info
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    info = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return info.get('#country+name+preferred') if info is not None else None
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO2 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code
    """
    # The iso2iso3 table maps in both directions (iso2 -> iso3 and iso3 -> iso2)
    result = cls.countriesdata(use_live=use_live)['iso2iso3'].get(iso3.upper())
    if result is not None:
        return result
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    result = cls.countriesdata(use_live=use_live)['iso2iso3'].get(iso2.upper())
    if result is not None:
        return result
    if exception is not None:
        raise exception
    return None
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    # Fix: the original stacked @classmethod twice, which breaks invocation
    # on modern Python versions - a single decorator is correct.
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Consistency fix: propagate use_live to the inner lookup
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
    """Get M49 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get M49 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[int]: M49 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Consistency fix: keys are stored uppercased (see set_countriesdata),
    # so normalize like every other ISO3 lookup; previously a lowercase
    # iso3 silently missed.
    m49 = countriesdata['m49iso3'].get(iso3.upper())
    if m49 is not None:
        return m49

    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from M49 code

    Args:
        m49 (int): M49 numeric code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    result = cls.countriesdata(use_live=use_live)['m49iso3'].get(m49)
    if result is not None:
        return result
    if exception is not None:
        raise exception
    return None
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Consistency fix: propagate use_live to the inner lookup
        return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Consistency fix: propagate use_live to the inner lookup
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates
@classmethod
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # Drop qualifiers after a comma or colon (eg. 'KOREA, REPUBLIC OF')
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Strip parenthesised qualifiers. Fix: use a raw string - the original
    # non-raw '\(.+?\)' relies on an invalid escape sequence
    # (DeprecationWarning); the compiled pattern is identical.
    regex = re.compile(r'\(.+?\)')
    countryupper = regex.sub('', countryupper)
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    # Remove all collected filler words in one whole-word, case-insensitive pass
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        # Keep only the first remaining word as the core name
        countryupper = countryupper_words[0]
    if countryupper:
        words.remove(countryupper)
    return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for a country. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    countryupper = country.upper()
    len_countryupper = len(countryupper)
    # Three characters: treat as a possible ISO3 code.
    if len_countryupper == 3:
        if countryupper in countriesdata['countries']:
            return countryupper
    # Two characters: treat as a possible ISO2 code and translate.
    elif len_countryupper == 2:
        iso3 = countriesdata['iso2iso3'].get(countryupper)
        if iso3 is not None:
            return iso3
    # Fall through to exact name lookup: first the uppercased input itself...
    iso3 = countriesdata['countrynames2iso3'].get(countryupper)
    if iso3 is not None:
        return iso3
    # ...then each expansion of any abbreviations it contains.
    for candidate in cls.expand_countryname_abbrevs(countryupper):
        iso3 = countriesdata['countrynames2iso3'].get(candidate)
        if iso3 is not None:
            return iso3
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
    """Get ISO3 code for a country. A tuple is returned with the first value being the ISO3 code and the second
    showing if the match is exact or not.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    iso3 = cls.get_iso3_country_code(country,
                                     use_live=use_live)  # don't put exception param here as we don't want it to throw

    if iso3 is not None:
        # Exact match: report it as such.
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # NOTE(review): this removes from `wordlist` while iterating it, which
        # skips the element immediately after each removal, so consecutive
        # matching words are not all removed — confirm whether intended.
        for word in wordlist:
            if word_or_part in word:
                wordlist.remove(word)

    # fuzzy matching: score every known country name against each abbreviation expansion
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    # Core name matched: large positive score.
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                for word in removed_words:
                    if word in countryname:
                        # Stripped filler word also present: small bonus.
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        # Missing word: heavy penalty if it is a differentiator
                        # (eg. NORTH vs SOUTH), light penalty otherwise.
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                for word in words:
                    # Leftover unmatched words in the candidate name penalise too.
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)
    # Accept only a single unambiguous winner above the threshold.
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False
    # regex lookup over the per-country alias patterns
    for iso3, regex in countriesdata['aliases'].items():
        index = re.search(regex, country.upper())
        if index is not None:
            return iso3, False
    if exception is not None:
        raise exception
    return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
    # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
    """Get countries (ISO3 codes) in region

    Args:
        region (Union[int,str]): Three digit UNStats M49 region code or region name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.

    Returns:
        List(str): Sorted list of ISO3 country names
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    if isinstance(region, int):
        # Already an M49 numeric region code.
        regioncode = region
    else:
        # Region name: look up its numeric code (case-insensitive).
        regionupper = region.upper()
        regioncode = countriesdata['regionnames2codes'].get(regionupper)
    if regioncode is not None:
        # NOTE(review): an unknown numeric code raises KeyError here rather
        # than falling through to `exception` — confirm this is intended.
        return countriesdata['regioncodes2countries'][regioncode]
    if exception is not None:
        raise exception
    return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_country_name_from_iso2 | python | def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None | Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L288-L303 | [
"def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):\n # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]\n \"\"\"Get country name from ISO3 code\n\n Args:\n iso3 (str): ISO3 code for which to get country name\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\n Returns:\n Optional[str]: Country name\n \"\"\"\n countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)\n if countryinfo is not None:\n return countryinfo.get('#country+name+preferred')\n return None\n",
"def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):\n # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]\n \"\"\"Get ISO3 from ISO2 code\n\n Args:\n iso2 (str): ISO2 code for which to get ISO3 code\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\n Returns:\n Optional[str]: ISO3 code\n \"\"\"\n countriesdata = cls.countriesdata(use_live=use_live)\n iso3 = countriesdata['iso2iso3'].get(iso2.upper())\n if iso3 is not None:\n return iso3\n\n if exception is not None:\n raise exception\n return None\n"
class Country(object):
    """Location class with various methods to help with countries and regions. Uses OCHA countries feed which
    supplies data in form:
    ::
        ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
        #meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
        1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
    """
    # Single-expansion abbreviations used when expanding/simplifying names.
    abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
                     'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
    # Words that strongly distinguish otherwise similar names in fuzzy matching.
    major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
    # Abbreviations with several possible expansions: each generates a candidate.
    multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
                              'ISL.': ['ISLAND', 'ISLANDS'],
                              'S.': ['SOUTH', 'STATES'],
                              'TERR.': ['TERRITORY', 'TERRITORIES']}
    # Filler words stripped out when simplifying a country name.
    simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
                       'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
    # Class-level cache of all lookup tables, populated lazily by countriesdata().
    _countriesdata = None
    # Default OCHA countries feed url (CSV export of the taxonomy spreadsheet).
    _ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
    _ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
"""
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
    # type: (str) -> None
    """
    Set up countries data from the OCHA feed rows

    Args:
        countries: Iterable of country rows (hxl Dataset) from the OCHA feed

    Returns:
        None
    """
    # Rebuild every lookup table from scratch.
    cls._countriesdata = dict()
    cls._countriesdata['countries'] = dict()  # ISO3 -> full country record
    cls._countriesdata['iso2iso3'] = dict()  # two-way ISO2 <-> ISO3 mapping
    cls._countriesdata['m49iso3'] = dict()  # two-way M49 (int) <-> ISO3 (str) mapping
    cls._countriesdata['countrynames2iso3'] = dict()  # uppercase name -> ISO3
    cls._countriesdata['regioncodes2countries'] = dict()  # region code -> ISO3s
    cls._countriesdata['regioncodes2names'] = dict()  # region code -> region name
    cls._countriesdata['regionnames2codes'] = dict()  # uppercase region name -> code
    cls._countriesdata['aliases'] = dict()  # ISO3 -> compiled alias regex
    for country in countries:
        iso3 = country.get('#country+code+v_iso3')
        if not iso3:
            # Skip rows without an ISO3 code (eg. malformed or header rows).
            continue
        iso3 = iso3.upper()
        cls._add_countriesdata(iso3, country)
        cls._countriesdata['countries'][iso3] = country.dictionary

    def sort_list(colname):
        # Convert each set of ISO3 codes into a sorted list for stable output.
        for idval in cls._countriesdata[colname]:
            cls._countriesdata[colname][idval] = \
                sorted(list(cls._countriesdata[colname][idval]))

    sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
    # type: (bool) -> Dict[str, Dict]
    """
    Read countries data from OCHA countries feed (falling back to file)

    Args:
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.

    Returns:
        Dict[str, Dict]: Dictionary of the countries lookup tables
    """
    # Lazily populated class-level cache: the feed is read at most once per process.
    if cls._countriesdata is None:
        countries = None
        if use_live:
            try:
                countries = hxl.data(cls._ochaurl)
            except IOError:
                # Network failure is non-fatal: fall back to the bundled snapshot.
                logger.exception('Download from OCHA feed failed! Falling back to stored file.')
        if countries is None:
            # Offline fallback: CSV snapshot shipped inside the package.
            countries = hxl.data(
                script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
                                     Country), allow_local=True)
        cls.set_countriesdata(countries)
    return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
"""
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from ISO2 code
Args:
iso2 (str): ISO2 code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['iso2iso3'].get(iso2.upper())
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
return None
@classmethod
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
"""Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
m49 = countriesdata['m49iso3'].get(iso3)
if m49 is not None:
return m49
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from M49 code
Args:
m49 (int): M49 numeric code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['m49iso3'].get(m49)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates
@classmethod
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # Keep only the text before any comma or colon (eg. 'CONGO, DEM. REP.' -> 'CONGO').
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Strip any parenthesised qualifier. Raw string fixes the invalid escape
    # sequence ('\(' in a non-raw literal) which warns on modern Pythons.
    regex = re.compile(r'\(.+?\)')
    countryupper = regex.sub('', countryupper)
    # Build the full list of filler words/expansions to strip out.
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        # Keep only the first remaining word as the simplified name.
        countryupper = countryupper_words[0]
    if countryupper:
        # NOTE(review): assumes the simplified word is always one of the
        # original words; words.remove raises ValueError otherwise — confirm.
        words.remove(countryupper)
    return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
"""
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata['countries']:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata['iso2iso3'].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata['countrynames2iso3'].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata['countrynames2iso3'].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
    """Get ISO3 code for a country. A tuple is returned with the first value being the ISO3 code
    and the second showing if the match is exact or not.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    iso3 = cls.get_iso3_country_code(country,
                                     use_live=use_live)  # don't put exception param here as we don't want it to throw

    if iso3 is not None:
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # Fix: iterate over a snapshot — removing from the list being iterated
        # skipped the element immediately after each removal, so consecutive
        # matching words were not all removed.
        for word in list(wordlist):
            if word_or_part in word:
                wordlist.remove(word)

    # fuzzy matching: score every known country name against each abbreviation expansion
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    # Core name matched: large positive score.
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                for word in removed_words:
                    if word in countryname:
                        # Stripped filler word also present: small bonus.
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        # Missing word: heavy penalty for differentiators
                        # (eg. NORTH vs SOUTH), light penalty otherwise.
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                for word in words:
                    # Leftover unmatched words in the candidate name penalise too.
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)
    # Accept only a single unambiguous winner above the threshold.
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False
    # regex lookup over the per-country alias patterns
    for iso3, regex in countriesdata['aliases'].items():
        index = re.search(regex, country.upper())
        if index is not None:
            return iso3, False
    if exception is not None:
        raise exception
    return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
# type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
"""Get countries (ISO3 codes) in region
Args:
region (Union[int,str]): Three digit UNStats M49 region code or region name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.
Returns:
List(str): Sorted list of ISO3 country names
"""
countriesdata = cls.countriesdata(use_live=use_live)
if isinstance(region, int):
regioncode = region
else:
regionupper = region.upper()
regioncode = countriesdata['regionnames2codes'].get(regionupper)
if regioncode is not None:
return countriesdata['regioncodes2countries'][regioncode]
if exception is not None:
raise exception
return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_m49_from_iso3 | python | def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
countriesdata = cls.countriesdata(use_live=use_live)
m49 = countriesdata['m49iso3'].get(iso3)
if m49 is not None:
return m49
if exception is not None:
raise exception
return None | Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L306-L325 | [
"def countriesdata(cls, use_live=True):\n # type: (bool) -> List[Dict[Dict]]\n \"\"\"\n Read countries data from OCHA countries feed (falling back to file)\n\n Args:\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n\n Returns:\n List[Dict[Dict]]: Countries dictionaries\n \"\"\"\n if cls._countriesdata is None:\n countries = None\n if use_live:\n try:\n countries = hxl.data(cls._ochaurl)\n except IOError:\n logger.exception('Download from OCHA feed failed! Falling back to stored file.')\n if countries is None:\n countries = hxl.data(\n script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',\n Country), allow_local=True)\n cls.set_countriesdata(countries)\n return cls._countriesdata\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
    # type: (str, hxl.Row) -> None
    """
    Set up countries data from data in form provided by UNStats and World Bank

    Populates the lookup tables in cls._countriesdata (name->ISO3,
    ISO2<->ISO3, M49<->ISO3, regex aliases and region tables) for one country.

    Args:
        iso3 (str): ISO3 code for country
        country (hxl.Row): Country information

    Returns:
        None
    """
    countryname = country.get('#country+name+preferred')
    cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
    iso2 = country.get('#country+code+v_iso2')
    if iso2:
        # Bidirectional mapping held in one dict:
        cls._countriesdata['iso2iso3'][iso2] = iso3
        # different types so keys won't clash
        cls._countriesdata['iso2iso3'][iso3] = iso2
    m49 = country.get('#country+code+num+v_m49')
    if m49:
        m49 = int(m49)
        # Bidirectional mapping: int M49 -> str ISO3 and str ISO3 -> int M49.
        cls._countriesdata['m49iso3'][m49] = iso3
        # different types so keys won't clash
        cls._countriesdata['m49iso3'][iso3] = m49
    # Compiled once per country; used for the regex fallback in
    # get_iso3_country_code_fuzzy.
    cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
    regionname = country.get('#region+main+name+preferred')
    sub_regionname = country.get('#region+sub+name+preferred')
    intermediate_regionname = country.get('#region+intermediate+name+preferred')
    regionid = country.get('#region+main+code')
    if regionid:
        regionid = int(regionid)
    sub_regionid = country.get('#region+sub+code')
    if sub_regionid:
        sub_regionid = int(sub_regionid)
    intermediate_regionid = country.get('#region+intermediate+code')
    if intermediate_regionid:
        intermediate_regionid = int(intermediate_regionid)
    # region, subregion and intermediate region codes do not clash so only need one dict

    def add_country_to_set(colname, idval, iso3):
        # Add iso3 to the set of countries registered under region code idval,
        # creating the set on first sight of the code.
        # NOTE(review): the lookup uses `colname` but the assignment hardcodes
        # 'regioncodes2countries'; all current callers pass that column so
        # behaviour matches — confirm before reusing with another column.
        value = cls._countriesdata[colname].get(idval)
        if value is None:
            value = set()
            cls._countriesdata['regioncodes2countries'][idval] = value
        value.add(iso3)

    if regionname:
        add_country_to_set('regioncodes2countries', regionid, iso3)
        cls._countriesdata['regioncodes2names'][regionid] = regionname
        cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
    if sub_regionname:
        add_country_to_set('regioncodes2countries', sub_regionid, iso3)
        cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
        cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
    if intermediate_regionname:
        add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
        cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
        cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
            intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
    # type: (str) -> None
    """
    Set up countries data from data in form provided by UNStats and World Bank

    Rebuilds all lookup tables from scratch, skipping rows without an ISO3
    code, then sorts the per-region country sets into lists.

    Args:
        countries (str): Countries data in HTML format provided by UNStats

    Returns:
        None
    """
    cls._countriesdata = dict()
    cls._countriesdata['countries'] = dict()
    cls._countriesdata['iso2iso3'] = dict()
    cls._countriesdata['m49iso3'] = dict()
    cls._countriesdata['countrynames2iso3'] = dict()
    cls._countriesdata['regioncodes2countries'] = dict()
    cls._countriesdata['regioncodes2names'] = dict()
    cls._countriesdata['regionnames2codes'] = dict()
    cls._countriesdata['aliases'] = dict()
    for country in countries:
        iso3 = country.get('#country+code+v_iso3')
        if not iso3:
            # Rows without an ISO3 code cannot be indexed; ignore them.
            continue
        iso3 = iso3.upper()
        cls._add_countriesdata(iso3, country)
        cls._countriesdata['countries'][iso3] = country.dictionary

    def sort_list(colname):
        # Convert each set of ISO3 codes in the named table to a sorted list.
        for idval in cls._countriesdata[colname]:
            cls._countriesdata[colname][idval] = \
                sorted(list(cls._countriesdata[colname][idval]))

    sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
    # type: (bool) -> List[Dict[Dict]]
    """
    Read countries data from OCHA countries feed (falling back to file)

    The result is cached on the class; the feed/file is only read on the
    first call (or after the cache is reset).

    Args:
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.

    Returns:
        List[Dict[Dict]]: Countries dictionaries
    """
    if cls._countriesdata is None:
        countries = None
        if use_live:
            try:
                countries = hxl.data(cls._ochaurl)
            except IOError:
                # Best effort: log and fall through to the bundled CSV below.
                logger.exception('Download from OCHA feed failed! Falling back to stored file.')
        if countries is None:
            # Offline fallback: CSV shipped alongside this module.
            countries = hxl.data(
                script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
                                     Country), allow_local=True)
        cls.set_countriesdata(countries)
    return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
    # type: (str) -> None
    """
    Set the OCHA url from which to retrieve countries data

    Args:
        url (str): OCHA url from which to retrieve countries data. Defaults to internal value.

    Returns:
        None
    """
    cls._ochaurl = cls._ochaurl_int if url is None else url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: country information
    """
    countries = cls.countriesdata(use_live=use_live)['countries']
    info = countries.get(iso3.upper())
    if info is None and exception is not None:
        raise exception
    return info
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    info = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None if info is None else info.get('#country+name+preferred')
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO2 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code
    """
    mapping = cls.countriesdata(use_live=use_live)['iso2iso3']
    iso2 = mapping.get(iso3.upper())
    if iso2 is None and exception is not None:
        raise exception
    return iso2
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    mapping = cls.countriesdata(use_live=use_live)['iso2iso3']
    iso3 = mapping.get(iso2.upper())
    if iso3 is None and exception is not None:
        raise exception
    return iso3
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is not None:
        return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    return cls.get_country_name_from_iso3(iso3, exception=exception)
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from M49 code

    Note: a duplicated @classmethod decorator was removed here; stacking
    classmethod on a classmethod is an error.

    Args:
        m49 (int): M49 numeric code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # m49iso3 maps int M49 -> str ISO3 (and str ISO3 -> int M49; types keep
    # the two directions from clashing).
    iso3 = countriesdata['m49iso3'].get(m49)
    if iso3 is not None:
        return iso3
    if exception is not None:
        raise exception
    return None
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Propagate use_live so both lookups use the same data source
        # (previously only the first lookup honoured it).
        return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Propagate use_live so both lookups use the same data source
        # (previously only the first lookup honoured it).
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def expand_countryname_abbrevs(cls, country):
    # type: (str) -> List[str]
    """Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)

    Args:
        country (str): Country with abbreviation(s) to expand

    Returns:
        List[str]: Uppercase country name with abbreviation(s) expanded in various ways
    """
    def replace_ensure_space(word, replace, replacement):
        # Substitute the abbreviation with "<expansion> " then tidy spacing.
        # NOTE(review): as written the second replace substitutes a space for
        # a space (a no-op); presumably it was intended to collapse double
        # spaces — confirm against the upstream source.
        return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
    countryupper = country.upper()
    # Unambiguous abbreviations are expanded in place, unconditionally.
    for abbreviation in cls.abbreviations:
        countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
    candidates = [countryupper]
    # Ambiguous abbreviations yield one extra candidate per possible expansion.
    for abbreviation in cls.multiple_abbreviations:
        if abbreviation in countryupper:
            for expanded in cls.multiple_abbreviations[abbreviation]:
                candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
    return candidates
@classmethod
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # Drop any qualifier after a comma or colon eg. "KOREA, REPUBLIC OF" -> "KOREA".
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Remove parenthesised qualifiers. Raw string fixes the invalid '\('
    # escape sequence, which raises a warning on recent Python versions.
    regex = re.compile(r'\(.+?\)')
    countryupper = regex.sub('', countryupper)
    # Collect all words to strip: generic simplifications plus every
    # abbreviation expansion.
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    # One whole-word, case-insensitive pass removes all collected words.
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    # If several words survive, keep only the first as the simplified name.
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        countryupper = countryupper_words[0]
    if countryupper:
        words.remove(countryupper)
    return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for cls. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    data = cls.countriesdata(use_live=use_live)
    lookup = country.upper()
    # A three letter input may already be a valid ISO3 code.
    if len(lookup) == 3 and lookup in data['countries']:
        return lookup
    # A two letter input may be an ISO2 code.
    if len(lookup) == 2:
        match = data['iso2iso3'].get(lookup)
        if match is not None:
            return match
    # Otherwise try the preferred names: as given, then with abbreviations
    # expanded in each possible way.
    names2iso3 = data['countrynames2iso3']
    for candidate in [lookup] + cls.expand_countryname_abbrevs(lookup):
        match = names2iso3.get(candidate)
        if match is not None:
            return match
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
    """Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
    showing if the match is exact or not.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    iso3 = cls.get_iso3_country_code(country,
                                     use_live=use_live)  # don't put exception param here as we don't want it to throw
    if iso3 is not None:
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # Remove every word containing word_or_part. Iterate over a copy:
        # removing from the list being iterated skips the following element,
        # so adjacent matches were previously left behind.
        for word in wordlist[:]:
            if word_or_part in word:
                wordlist.remove(word)

    # fuzzy matching: score every known country name against every expanded
    # candidate, keeping the set of ISO3 codes with the highest score
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                for word in removed_words:
                    if word in countryname:
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        # Penalise missing words; much more for words that
                        # differentiate similar names (eg. NORTH vs SOUTH).
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                # Penalise words of the known name not accounted for above.
                for word in words:
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)
    # Accept only an unambiguous, sufficiently strong fuzzy match.
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False
    # regex lookup against the per-country alias patterns
    for iso3, regex in countriesdata['aliases'].items():
        index = re.search(regex, country.upper())
        if index is not None:
            return iso3, False
    if exception is not None:
        raise exception
    return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
    # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
    """Get countries (ISO3 codes) in region

    Args:
        region (Union[int,str]): Three digit UNStats M49 region code or region name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.

    Returns:
        List(str): Sorted list of ISO3 country names
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    if isinstance(region, int):
        regioncode = region
    else:
        regioncode = countriesdata['regionnames2codes'].get(region.upper())
    if regioncode is not None:
        # .get rather than [] so that an unknown numeric region code falls
        # through to the exception/empty-list handling below instead of
        # raising a bare KeyError.
        countries = countriesdata['regioncodes2countries'].get(regioncode)
        if countries is not None:
            return countries
    if exception is not None:
        raise exception
    return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_country_info_from_m49 | python | def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, exception=exception)
return None | Get country information from M49 code
Args:
m49 (int): M49 numeric code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L350-L365 | [
"def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):\n # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]\n \"\"\"Get country information from ISO3 code\n\n Args:\n iso3 (str): ISO3 code for which to get country information\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\n Returns:\n Optional[Dict[str]]: country information\n \"\"\"\n countriesdata = cls.countriesdata(use_live=use_live)\n country = countriesdata['countries'].get(iso3.upper())\n if country is not None:\n return country\n\n if exception is not None:\n raise exception\n return None\n",
"def get_iso3_from_m49(cls, m49, use_live=True, exception=None):\n # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]\n \"\"\"Get ISO3 from M49 code\n\n Args:\n m49 (int): M49 numeric code for which to get ISO3 code\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\n Returns:\n Optional[str]: ISO3 code\n \"\"\"\n countriesdata = cls.countriesdata(use_live=use_live)\n iso3 = countriesdata['m49iso3'].get(m49)\n if iso3 is not None:\n return iso3\n\n if exception is not None:\n raise exception\n return None\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
"""
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
# type: (str) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None
"""
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
# type: (bool) -> List[Dict[Dict]]
"""
Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries
"""
if cls._countriesdata is None:
countries = None
if use_live:
try:
countries = hxl.data(cls._ochaurl)
except IOError:
logger.exception('Download from OCHA feed failed! Falling back to stored file.')
if countries is None:
countries = hxl.data(
script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
Country), allow_local=True)
cls.set_countriesdata(countries)
return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
"""
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from ISO2 code
Args:
iso2 (str): ISO2 code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['iso2iso3'].get(iso2.upper())
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is not None:
        return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
    """Get M49 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get M49 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[int]: M49 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Uppercase the key: set_countriesdata stores ISO3 keys uppercased, and
    # every other ISO3 lookup in this class normalises its input the same way.
    m49 = countriesdata['m49iso3'].get(iso3.upper())
    if m49 is not None:
        return m49
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from M49 code
Args:
m49 (int): M49 numeric code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['m49iso3'].get(m49)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from M49 code

    Note: a duplicated @classmethod decorator was removed here; stacking
    classmethod on a classmethod is an error.

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Propagate use_live so both lookups use the same data source.
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates
@classmethod
def simplify_countryname(cls, country):
# type: (str) -> (str, List[str])
"""Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
"""
countryupper = country.upper()
words = get_words_in_sentence(countryupper)
index = countryupper.find(',')
if index != -1:
countryupper = countryupper[:index]
index = countryupper.find(':')
if index != -1:
countryupper = countryupper[:index]
regex = re.compile('\(.+?\)')
countryupper = regex.sub('', countryupper)
remove = copy.deepcopy(cls.simplifications)
for simplification1, simplification2 in cls.abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
remove.append(simplification2)
for simplification1, simplifications in cls.multiple_abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
for simplification2 in simplifications:
remove.append(simplification2)
remove = '|'.join(remove)
regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
countryupper = regex.sub('', countryupper)
countryupper = countryupper.strip()
countryupper_words = get_words_in_sentence(countryupper)
if len(countryupper_words) > 1:
countryupper = countryupper_words[0]
if countryupper:
words.remove(countryupper)
return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
"""
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata['countries']:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata['iso2iso3'].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata['countrynames2iso3'].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata['countrynames2iso3'].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
"""Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = cls.get_iso3_country_code(country,
use_live=use_live) # don't put exception param here as we don't want it to throw
if iso3 is not None:
return iso3, True
def remove_matching_from_list(wordlist, word_or_part):
for word in wordlist:
if word_or_part in word:
wordlist.remove(word)
# fuzzy matching
expanded_country_candidates = cls.expand_countryname_abbrevs(country)
match_strength = 0
matches = set()
for countryname in sorted(countriesdata['countrynames2iso3']):
for candidate in expanded_country_candidates:
simplified_country, removed_words = cls.simplify_countryname(candidate)
if simplified_country in countryname:
words = get_words_in_sentence(countryname)
new_match_strength = 0
if simplified_country:
remove_matching_from_list(words, simplified_country)
new_match_strength += 32
for word in removed_words:
if word in countryname:
remove_matching_from_list(words, word)
new_match_strength += 4
else:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
for word in words:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
iso3 = countriesdata['countrynames2iso3'][countryname]
if new_match_strength > match_strength:
match_strength = new_match_strength
matches = set()
if new_match_strength == match_strength:
matches.add(iso3)
if len(matches) == 1 and match_strength > 16:
return matches.pop(), False
# regex lookup
for iso3, regex in countriesdata['aliases'].items():
index = re.search(regex, country.upper())
if index is not None:
return iso3, False
if exception is not None:
raise exception
return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
# type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
"""Get countries (ISO3 codes) in region
Args:
region (Union[int,str]): Three digit UNStats M49 region code or region name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.
Returns:
List(str): Sorted list of ISO3 country names
"""
countriesdata = cls.countriesdata(use_live=use_live)
if isinstance(region, int):
regioncode = region
else:
regionupper = region.upper()
regioncode = countriesdata['regionnames2codes'].get(regionupper)
if regioncode is not None:
return countriesdata['regioncodes2countries'][regioncode]
if exception is not None:
raise exception
return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.get_country_name_from_m49 | python | def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None | Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L368-L383 | [
"def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):\n # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]\n \"\"\"Get country name from ISO3 code\n\n Args:\n iso3 (str): ISO3 code for which to get country name\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\n Returns:\n Optional[str]: Country name\n \"\"\"\n countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)\n if countryinfo is not None:\n return countryinfo.get('#country+name+preferred')\n return None\n",
"def get_iso3_from_m49(cls, m49, use_live=True, exception=None):\n # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]\n \"\"\"Get ISO3 from M49 code\n\n Args:\n m49 (int): M49 numeric code for which to get ISO3 code\n use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.\n exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.\n\n Returns:\n Optional[str]: ISO3 code\n \"\"\"\n countriesdata = cls.countriesdata(use_live=use_live)\n iso3 = countriesdata['m49iso3'].get(m49)\n if iso3 is not None:\n return iso3\n\n if exception is not None:\n raise exception\n return None\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
"""
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
# type: (str) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None
"""
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
# type: (bool) -> List[Dict[Dict]]
"""
Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries
"""
if cls._countriesdata is None:
countries = None
if use_live:
try:
countries = hxl.data(cls._ochaurl)
except IOError:
logger.exception('Download from OCHA feed failed! Falling back to stored file.')
if countries is None:
countries = hxl.data(
script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
Country), allow_local=True)
cls.set_countriesdata(countries)
return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
"""
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from ISO2 code
Args:
iso2 (str): ISO2 code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['iso2iso3'].get(iso2.upper())
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
return None
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
"""Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
m49 = countriesdata['m49iso3'].get(iso3)
if m49 is not None:
return m49
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from M49 code
Args:
m49 (int): M49 numeric code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['m49iso3'].get(m49)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, exception=exception)
return None
@classmethod
@classmethod
def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates
@classmethod
def simplify_countryname(cls, country):
# type: (str) -> (str, List[str])
"""Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
"""
countryupper = country.upper()
words = get_words_in_sentence(countryupper)
index = countryupper.find(',')
if index != -1:
countryupper = countryupper[:index]
index = countryupper.find(':')
if index != -1:
countryupper = countryupper[:index]
regex = re.compile('\(.+?\)')
countryupper = regex.sub('', countryupper)
remove = copy.deepcopy(cls.simplifications)
for simplification1, simplification2 in cls.abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
remove.append(simplification2)
for simplification1, simplifications in cls.multiple_abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
for simplification2 in simplifications:
remove.append(simplification2)
remove = '|'.join(remove)
regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
countryupper = regex.sub('', countryupper)
countryupper = countryupper.strip()
countryupper_words = get_words_in_sentence(countryupper)
if len(countryupper_words) > 1:
countryupper = countryupper_words[0]
if countryupper:
words.remove(countryupper)
return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
"""
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata['countries']:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata['iso2iso3'].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata['countrynames2iso3'].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata['countrynames2iso3'].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
"""Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
showing if the match is exact or not.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = cls.get_iso3_country_code(country,
use_live=use_live) # don't put exception param here as we don't want it to throw
if iso3 is not None:
return iso3, True
def remove_matching_from_list(wordlist, word_or_part):
for word in wordlist:
if word_or_part in word:
wordlist.remove(word)
# fuzzy matching
expanded_country_candidates = cls.expand_countryname_abbrevs(country)
match_strength = 0
matches = set()
for countryname in sorted(countriesdata['countrynames2iso3']):
for candidate in expanded_country_candidates:
simplified_country, removed_words = cls.simplify_countryname(candidate)
if simplified_country in countryname:
words = get_words_in_sentence(countryname)
new_match_strength = 0
if simplified_country:
remove_matching_from_list(words, simplified_country)
new_match_strength += 32
for word in removed_words:
if word in countryname:
remove_matching_from_list(words, word)
new_match_strength += 4
else:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
for word in words:
if word in cls.major_differentiators:
new_match_strength -= 16
else:
new_match_strength -= 1
iso3 = countriesdata['countrynames2iso3'][countryname]
if new_match_strength > match_strength:
match_strength = new_match_strength
matches = set()
if new_match_strength == match_strength:
matches.add(iso3)
if len(matches) == 1 and match_strength > 16:
return matches.pop(), False
# regex lookup
for iso3, regex in countriesdata['aliases'].items():
index = re.search(regex, country.upper())
if index is not None:
return iso3, False
if exception is not None:
raise exception
return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
# type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
"""Get countries (ISO3 codes) in region
Args:
region (Union[int,str]): Three digit UNStats M49 region code or region name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.
Returns:
List(str): Sorted list of ISO3 country names
"""
countriesdata = cls.countriesdata(use_live=use_live)
if isinstance(region, int):
regioncode = region
else:
regionupper = region.upper()
regioncode = countriesdata['regionnames2codes'].get(regionupper)
if regioncode is not None:
return countriesdata['regioncodes2countries'][regioncode]
if exception is not None:
raise exception
return list()
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.expand_countryname_abbrevs | python | def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates | Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L386-L406 | [
"def replace_ensure_space(word, replace, replacement):\n return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()\n"
] | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
    # type: (str, hxl.Row) -> None
    """Populate the class-level lookup tables with one country's data.

    Args:
        iso3 (str): ISO3 code for country
        country (hxl.Row): Country information

    Returns:
        None
    """
    countryname = country.get('#country+name+preferred')
    cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
    iso2 = country.get('#country+code+v_iso2')
    if iso2:
        # One dict holds both directions: ISO2 and ISO3 keys cannot clash
        # because they differ in length.
        cls._countriesdata['iso2iso3'][iso2] = iso3
        cls._countriesdata['iso2iso3'][iso3] = iso2
    m49 = country.get('#country+code+num+v_m49')
    if m49:
        m49 = int(m49)
        # int and str keys cannot clash, so one dict holds both directions
        cls._countriesdata['m49iso3'][m49] = iso3
        cls._countriesdata['m49iso3'][iso3] = m49
    cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
    regionname = country.get('#region+main+name+preferred')
    sub_regionname = country.get('#region+sub+name+preferred')
    intermediate_regionname = country.get('#region+intermediate+name+preferred')
    regionid = country.get('#region+main+code')
    if regionid:
        regionid = int(regionid)
    sub_regionid = country.get('#region+sub+code')
    if sub_regionid:
        sub_regionid = int(sub_regionid)
    intermediate_regionid = country.get('#region+intermediate+code')
    if intermediate_regionid:
        intermediate_regionid = int(intermediate_regionid)

    # Region, sub-region and intermediate region codes do not clash, so a
    # single dict keyed on the numeric code serves all three levels.
    def add_country_to_set(colname, idval, iso3):
        # Fix: the newly created set was previously stored under the
        # hard-coded 'regioncodes2countries' key while being looked up via
        # colname; use colname consistently (behaviour is unchanged for the
        # current callers, which all pass 'regioncodes2countries').
        value = cls._countriesdata[colname].get(idval)
        if value is None:
            value = set()
            cls._countriesdata[colname][idval] = value
        value.add(iso3)

    if regionname:
        add_country_to_set('regioncodes2countries', regionid, iso3)
        cls._countriesdata['regioncodes2names'][regionid] = regionname
        cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
    if sub_regionname:
        add_country_to_set('regioncodes2countries', sub_regionid, iso3)
        cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
        cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
    if intermediate_regionname:
        add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
        cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
        cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
            intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
    # type: (str) -> None
    """Initialise all country lookup tables from the supplied HXL rows.

    Args:
        countries (str): Countries data in HTML format provided by UNStats

    Returns:
        None
    """
    # One sub-dict per lookup table, all starting empty.
    cls._countriesdata = {table: dict() for table in
                          ('countries', 'iso2iso3', 'm49iso3',
                           'countrynames2iso3', 'regioncodes2countries',
                           'regioncodes2names', 'regionnames2codes', 'aliases')}
    for row in countries:
        code = row.get('#country+code+v_iso3')
        if not code:
            # Rows without an ISO3 code cannot be indexed - skip them.
            continue
        code = code.upper()
        cls._add_countriesdata(code, row)
        cls._countriesdata['countries'][code] = row.dictionary
    # Turn each set of ISO3 codes per region into a sorted list so callers
    # get deterministic ordering.
    regiondict = cls._countriesdata['regioncodes2countries']
    for regioncode, iso3s in regiondict.items():
        regiondict[regioncode] = sorted(iso3s)
@classmethod
def countriesdata(cls, use_live=True):
    # type: (bool) -> List[Dict[Dict]]
    """Return the cached countries data, loading it on first use.

    Tries the OCHA live feed first (when use_live is True) and falls back
    to the CSV file shipped with the package.

    Args:
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.

    Returns:
        List[Dict[Dict]]: Countries dictionaries
    """
    if cls._countriesdata is not None:
        return cls._countriesdata
    countries = None
    if use_live:
        try:
            countries = hxl.data(cls._ochaurl)
        except IOError:
            logger.exception('Download from OCHA feed failed! Falling back to stored file.')
    if countries is None:
        # Live feed disabled or unreachable: use the bundled snapshot.
        countries = hxl.data(
            script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
                                 Country), allow_local=True)
    cls.set_countriesdata(countries)
    return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country information from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: country information
    """
    info = cls.countriesdata(use_live=use_live)['countries'].get(iso3.upper())
    if info is None and exception is not None:
        raise exception
    return info
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    info = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None if info is None else info.get('#country+name+preferred')
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO2 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get ISO2 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO2 code
    """
    # iso2iso3 maps in both directions; an ISO3 key yields the ISO2 code.
    iso2 = cls.countriesdata(use_live=use_live)['iso2iso3'].get(iso3.upper())
    if iso2 is None and exception is not None:
        raise exception
    return iso2
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    # iso2iso3 maps in both directions; an ISO2 key yields the ISO3 code.
    iso3 = cls.countriesdata(use_live=use_live)['iso2iso3'].get(iso2.upper())
    if iso3 is None and exception is not None:
        raise exception
    return iso3
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country name from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from ISO2 code

    Args:
        iso2 (str): ISO2 code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Fix: use_live is now forwarded (it was previously dropped here,
        # unlike in get_country_info_from_iso2). The data is already cached
        # by the call above, so this only makes the delegation consistent.
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
    """Get M49 from ISO3 code

    Args:
        iso3 (str): ISO3 code for which to get M49 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[int]: M49 code
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # Fix: uppercase the ISO3 code before lookup, as every other ISO3-keyed
    # method does (the table keys are uppercase, so a lowercase input
    # previously always missed).
    m49 = countriesdata['m49iso3'].get(iso3.upper())
    if m49 is not None:
        return m49
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 from M49 code

    Args:
        m49 (int): M49 numeric code for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 code
    """
    # m49iso3 maps in both directions; an int M49 key yields the ISO3 code.
    iso3 = cls.countriesdata(use_live=use_live)['m49iso3'].get(m49)
    if iso3 is None and exception is not None:
        raise exception
    return iso3
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country name from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Fix: forward use_live (previously dropped), consistent with
        # get_country_info_from_iso2; data is already cached at this point.
        return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get country name from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: Country name
    """
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is not None:
        # Fix: forward use_live (previously dropped); data is already cached
        # at this point, so this only makes the delegation consistent.
        return cls.get_country_name_from_iso3(iso3, use_live=use_live, exception=exception)
    return None
@classmethod
def expand_countryname_abbrevs(cls, country):
    # type: (str) -> List[str]
    """Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)

    Restored: this chunk had a dangling second ``@classmethod`` where this
    method belonged, even though get_iso3_country_code calls it.

    Args:
        country (str): Country with abbreviation(s) to expand

    Returns:
        List[str]: Uppercase country name with abbreviation(s) expanded in various ways
    """
    def replace_ensure_space(word, replace, replacement):
        # Expand, then collapse any doubled spaces introduced by the expansion.
        return word.replace(replace, '%s ' % replacement).replace('  ', ' ').strip()

    countryupper = country.upper()
    for abbreviation in cls.abbreviations:
        countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
    candidates = [countryupper]
    for abbreviation in cls.multiple_abbreviations:
        if abbreviation in countryupper:
            for expanded in cls.multiple_abbreviations[abbreviation]:
                candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
    return candidates

@classmethod
def simplify_countryname(cls, country):
    # type: (str) -> (str, List[str])
    """Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.

    Args:
        country (str): Country name to simplify

    Returns:
        Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
    """
    countryupper = country.upper()
    words = get_words_in_sentence(countryupper)
    # Cut everything after a comma or colon (eg. 'KOREA, REPUBLIC OF').
    index = countryupper.find(',')
    if index != -1:
        countryupper = countryupper[:index]
    index = countryupper.find(':')
    if index != -1:
        countryupper = countryupper[:index]
    # Drop parenthesised qualifiers. Raw string fixes the invalid escape
    # sequence in the previous non-raw pattern '\(.+?\)'.
    regex = re.compile(r'\(.+?\)')
    countryupper = regex.sub('', countryupper)
    remove = copy.deepcopy(cls.simplifications)
    for simplification1, simplification2 in cls.abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        remove.append(simplification2)
    for simplification1, simplifications in cls.multiple_abbreviations.items():
        countryupper = countryupper.replace(simplification1, '')
        for simplification2 in simplifications:
            remove.append(simplification2)
    remove = '|'.join(remove)
    regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
    countryupper = regex.sub('', countryupper)
    countryupper = countryupper.strip()
    countryupper_words = get_words_in_sentence(countryupper)
    if len(countryupper_words) > 1:
        countryupper = countryupper_words[0]
    if countryupper:
        # NOTE(review): assumes the simplified name is always one of the
        # original words; list.remove would raise ValueError otherwise - confirm.
        words.remove(countryupper)
    return countryupper, words
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Get ISO3 code for cls. Only exact matches or None are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    countryupper = country.upper()
    # Three characters: maybe it already is an ISO3 code.
    if len(countryupper) == 3 and countryupper in countriesdata['countries']:
        return countryupper
    # Two characters: maybe it is an ISO2 code.
    if len(countryupper) == 2:
        iso3 = countriesdata['iso2iso3'].get(countryupper)
        if iso3 is not None:
            return iso3
    # Otherwise match by (possibly abbreviated) country name.
    names2iso3 = countriesdata['countrynames2iso3']
    iso3 = names2iso3.get(countryupper)
    if iso3 is None:
        for candidate in cls.expand_countryname_abbrevs(countryupper):
            iso3 = names2iso3.get(candidate)
            if iso3 is not None:
                break
    if iso3 is not None:
        return iso3
    if exception is not None:
        raise exception
    return None
@classmethod
def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[[Optional[str], bool]]
    """Get ISO3 code for cls. A tuple is returned with the first value being the ISO3 code and the second
    showing if the match is exact or not.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Tuple[[Optional[str], bool]]: ISO3 code and if the match is exact or (None, False).
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    # don't put exception param here as we don't want it to throw
    iso3 = cls.get_iso3_country_code(country, use_live=use_live)
    if iso3 is not None:
        return iso3, True

    def remove_matching_from_list(wordlist, word_or_part):
        # Fix: iterate over a copy. Removing from the list being iterated
        # advances past the element following each removal, so adjacent
        # matching words were silently skipped.
        for word in list(wordlist):
            if word_or_part in word:
                wordlist.remove(word)

    # Fuzzy matching: score every known country name against every expansion
    # of the input; keep the ISO3 code(s) with the best heuristic score.
    expanded_country_candidates = cls.expand_countryname_abbrevs(country)
    match_strength = 0
    matches = set()
    for countryname in sorted(countriesdata['countrynames2iso3']):
        for candidate in expanded_country_candidates:
            simplified_country, removed_words = cls.simplify_countryname(candidate)
            if simplified_country in countryname:
                words = get_words_in_sentence(countryname)
                new_match_strength = 0
                if simplified_country:
                    # Core name matched: large positive weight.
                    remove_matching_from_list(words, simplified_country)
                    new_match_strength += 32
                for word in removed_words:
                    if word in countryname:
                        remove_matching_from_list(words, word)
                        new_match_strength += 4
                    else:
                        # Missing differentiators (NORTH/SOUTH etc.) are
                        # penalised much harder than filler words.
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                # Penalise words of the known name left unaccounted for.
                for word in words:
                    if word in cls.major_differentiators:
                        new_match_strength -= 16
                    else:
                        new_match_strength -= 1
                iso3 = countriesdata['countrynames2iso3'][countryname]
                if new_match_strength > match_strength:
                    match_strength = new_match_strength
                    matches = set()
                if new_match_strength == match_strength:
                    matches.add(iso3)
    # Only accept a fuzzy result when it is unambiguous and strong enough.
    if len(matches) == 1 and match_strength > 16:
        return matches.pop(), False
    # Fall back to each country's alias regex.
    for iso3, regex in countriesdata['aliases'].items():
        index = re.search(regex, country.upper())
        if index is not None:
            return iso3, False
    if exception is not None:
        raise exception
    return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
    # type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
    """Get countries (ISO3 codes) in region

    Args:
        region (Union[int,str]): Three digit UNStats M49 region code or region name
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.

    Returns:
        List(str): Sorted list of ISO3 country names
    """
    countriesdata = cls.countriesdata(use_live=use_live)
    if isinstance(region, int):
        regioncode = region
    else:
        # Region given by name: resolve it to its numeric code.
        regioncode = countriesdata['regionnames2codes'].get(region.upper())
    if regioncode is None:
        if exception is not None:
            raise exception
        return []
    return countriesdata['regioncodes2countries'][regioncode]
|
OCHA-DAP/hdx-python-country | src/hdx/location/country.py | Country.simplify_countryname | python | def simplify_countryname(cls, country):
# type: (str) -> (str, List[str])
countryupper = country.upper()
words = get_words_in_sentence(countryupper)
index = countryupper.find(',')
if index != -1:
countryupper = countryupper[:index]
index = countryupper.find(':')
if index != -1:
countryupper = countryupper[:index]
regex = re.compile('\(.+?\)')
countryupper = regex.sub('', countryupper)
remove = copy.deepcopy(cls.simplifications)
for simplification1, simplification2 in cls.abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
remove.append(simplification2)
for simplification1, simplifications in cls.multiple_abbreviations.items():
countryupper = countryupper.replace(simplification1, '')
for simplification2 in simplifications:
remove.append(simplification2)
remove = '|'.join(remove)
regex = re.compile(r'\b(' + remove + r')\b', flags=re.IGNORECASE)
countryupper = regex.sub('', countryupper)
countryupper = countryupper.strip()
countryupper_words = get_words_in_sentence(countryupper)
if len(countryupper_words) > 1:
countryupper = countryupper_words[0]
if countryupper:
words.remove(countryupper)
return countryupper, words | Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc.
Args:
country (str): Country name to simplify
Returns:
Tuple[str, List[str]]: Uppercase simplified country name and list of removed words | train | https://github.com/OCHA-DAP/hdx-python-country/blob/e86a0b5f182a5d010c4cd7faa36a213cfbcc01f6/src/hdx/location/country.py#L409-L446 | null | class Country(object):
"""Location class with various methods to help with countries and regions. Uses OCHA countries feed which
supplies data in form:
::
ID,HRinfo ID,RW ID,m49 numerical code,FTS API ID,Appears in UNTERM list,Appears in DGACM list,ISO 3166-1 Alpha 2-Codes,ISO 3166-1 Alpha 3-Codes,x Alpha2 codes,x Alpha3 codes,Preferred Term,m49 Alt Term,ISO Alt Term,UNTERM Alt Term,FTS Alt Term,HRinfo Alt Term,RW Short Name,RW API Alt Term,English Short,French Short,Spanish Short,Russian Short,Chinese Short,Arabic Short,Admin Level,Latitude,Longitude,Region Code,Region Name,Sub-region Code,Sub-region Name,Intermediate Region Code,Intermediate Region Name,Regex,Concatenation
#meta +id,#country +code +v_hrinfo_country,#country +code +v_reliefweb,#country +code +num +v_m49,#country +code +v_fts,,,#country +code +v_iso2,#country +code +v_iso3,,,#country +name +preferred,#country +name +alt +v_m49,#country +name +alt +v_iso,#country +name +alt +v_unterm,#country +name +alt +v_fts,#country +name +alt +v_hrinfo_country,#country +name +short +v_reliefweb,#country +name +alt +v_reliefweb,#country +name +i_en +alt +v_unterm,#country +name +i_fr +alt +v_unterm,#country +name +i_es +alt +v_unterm,#country +name +i_ru +alt +v_unterm,#country +name +i_zh +alt +v_unterm,#country +name +i_ar +alt +v_unterm,#geo +admin_level,#geo +lat,#geo +lon,#region +main +code,#region +main +name +preferred,#region +sub +code,#region +sub +name +preferred,#region +intermediate +code,#region +intermediate +name +preferred,#country +regex,
1,181,13,4,1,Y,Y,AF,AFG,,,Afghanistan,,,,,,,,Afghanistan,Afghanistan (l') [masc.],Afganistán (el),Афганистан,阿富汗,أفغانستان,0,33.83147477,66.02621828,142,Asia,34,Southern Asia,,,afghan,
"""
abbreviations = {'DEM.': 'DEMOCRATIC', 'FMR.': 'FORMER', 'PROV.': 'PROVINCE', 'REP.': 'REPUBLIC', 'ST.': 'SAINT',
'UTD.': 'UNITED', 'U.': 'UNITED', 'N.': 'NORTH', 'E.': 'EAST', 'W.': 'WEST', 'K.': 'KINGDOM'}
major_differentiators = ['DEMOCRATIC', 'NORTH', 'SOUTH', 'EAST', 'WEST', 'STATES']
multiple_abbreviations = {'FED.': ['FEDERATION', 'FEDERAL', 'FEDERATED'],
'ISL.': ['ISLAND', 'ISLANDS'],
'S.': ['SOUTH', 'STATES'],
'TERR.': ['TERRITORY', 'TERRITORIES']}
simplifications = ['THE', 'OF', 'ISLAMIC', 'STATES', 'BOLIVARIAN', 'PLURINATIONAL', "PEOPLE'S",
'DUTCH PART', 'FRENCH PART', 'MALVINAS', 'YUGOSLAV', 'KINGDOM', 'PROTECTORATE']
_countriesdata = None
_ochaurl_int = 'https://docs.google.com/spreadsheets/d/1NjSI2LaS3SqbgYc0HdD8oIb7lofGtiHgoKKATCpwVdY/export?format=csv&gid=1088874596'
_ochaurl = _ochaurl_int
@classmethod
def _add_countriesdata(cls, iso3, country):
# type: (str, hxl.Row) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
iso3 (str): ISO3 code for country
country (hxl.Row): Country information
Returns:
None
"""
countryname = country.get('#country+name+preferred')
cls._countriesdata['countrynames2iso3'][countryname.upper()] = iso3
iso2 = country.get('#country+code+v_iso2')
if iso2:
cls._countriesdata['iso2iso3'][iso2] = iso3
# different types so keys won't clash
cls._countriesdata['iso2iso3'][iso3] = iso2
m49 = country.get('#country+code+num+v_m49')
if m49:
m49 = int(m49)
cls._countriesdata['m49iso3'][m49] = iso3
# different types so keys won't clash
cls._countriesdata['m49iso3'][iso3] = m49
cls._countriesdata['aliases'][iso3] = re.compile(country.get('#country+regex'), re.IGNORECASE)
regionname = country.get('#region+main+name+preferred')
sub_regionname = country.get('#region+sub+name+preferred')
intermediate_regionname = country.get('#region+intermediate+name+preferred')
regionid = country.get('#region+main+code')
if regionid:
regionid = int(regionid)
sub_regionid = country.get('#region+sub+code')
if sub_regionid:
sub_regionid = int(sub_regionid)
intermediate_regionid = country.get('#region+intermediate+code')
if intermediate_regionid:
intermediate_regionid = int(intermediate_regionid)
# region, subregion and intermediate region codes do not clash so only need one dict
def add_country_to_set(colname, idval, iso3):
value = cls._countriesdata[colname].get(idval)
if value is None:
value = set()
cls._countriesdata['regioncodes2countries'][idval] = value
value.add(iso3)
if regionname:
add_country_to_set('regioncodes2countries', regionid, iso3)
cls._countriesdata['regioncodes2names'][regionid] = regionname
cls._countriesdata['regionnames2codes'][regionname.upper()] = regionid
if sub_regionname:
add_country_to_set('regioncodes2countries', sub_regionid, iso3)
cls._countriesdata['regioncodes2names'][sub_regionid] = sub_regionname
cls._countriesdata['regionnames2codes'][sub_regionname.upper()] = sub_regionid
if intermediate_regionname:
add_country_to_set('regioncodes2countries', intermediate_regionid, iso3)
cls._countriesdata['regioncodes2names'][intermediate_regionid] = intermediate_regionname
cls._countriesdata['regionnames2codes'][intermediate_regionname.upper()] = \
intermediate_regionid
@classmethod
def set_countriesdata(cls, countries):
# type: (str) -> None
"""
Set up countries data from data in form provided by UNStats and World Bank
Args:
countries (str): Countries data in HTML format provided by UNStats
Returns:
None
"""
cls._countriesdata = dict()
cls._countriesdata['countries'] = dict()
cls._countriesdata['iso2iso3'] = dict()
cls._countriesdata['m49iso3'] = dict()
cls._countriesdata['countrynames2iso3'] = dict()
cls._countriesdata['regioncodes2countries'] = dict()
cls._countriesdata['regioncodes2names'] = dict()
cls._countriesdata['regionnames2codes'] = dict()
cls._countriesdata['aliases'] = dict()
for country in countries:
iso3 = country.get('#country+code+v_iso3')
if not iso3:
continue
iso3 = iso3.upper()
cls._add_countriesdata(iso3, country)
cls._countriesdata['countries'][iso3] = country.dictionary
def sort_list(colname):
for idval in cls._countriesdata[colname]:
cls._countriesdata[colname][idval] = \
sorted(list(cls._countriesdata[colname][idval]))
sort_list('regioncodes2countries')
@classmethod
def countriesdata(cls, use_live=True):
# type: (bool) -> List[Dict[Dict]]
"""
Read countries data from OCHA countries feed (falling back to file)
Args:
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
Returns:
List[Dict[Dict]]: Countries dictionaries
"""
if cls._countriesdata is None:
countries = None
if use_live:
try:
countries = hxl.data(cls._ochaurl)
except IOError:
logger.exception('Download from OCHA feed failed! Falling back to stored file.')
if countries is None:
countries = hxl.data(
script_dir_plus_file('Countries & Territories Taxonomy MVP - C&T Taxonomy with HXL Tags.csv',
Country), allow_local=True)
cls.set_countriesdata(countries)
return cls._countriesdata
@classmethod
def set_ocha_url(cls, url=None):
# type: (str) -> None
"""
Set World Bank url from which to retrieve countries data
Args:
url (str): World Bank url from which to retrieve countries data. Defaults to internal value.
Returns:
None
"""
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url
@classmethod
def get_country_info_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country information from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: country information
"""
countriesdata = cls.countriesdata(use_live=use_live)
country = countriesdata['countries'].get(iso3.upper())
if country is not None:
return country
if exception is not None:
raise exception
return None
@classmethod
def get_country_name_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO3 code
Args:
iso3 (str): ISO3 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
countryinfo = cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
if countryinfo is not None:
return countryinfo.get('#country+name+preferred')
return None
@classmethod
def get_iso2_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO2 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get ISO2 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO2 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso2 = countriesdata['iso2iso3'].get(iso3.upper())
if iso2 is not None:
return iso2
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from ISO2 code
Args:
iso2 (str): ISO2 code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['iso2iso3'].get(iso2.upper())
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, use_live=use_live, exception=exception)
return None
@classmethod
def get_country_name_from_iso2(cls, iso2, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from ISO2 code
Args:
iso2 (str): ISO2 code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_iso2(iso2, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_m49_from_iso3(cls, iso3, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[int]
"""Get M49 from ISO3 code
Args:
iso3 (str): ISO3 code for which to get M49 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[int]: M49 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
m49 = countriesdata['m49iso3'].get(iso3)
if m49 is not None:
return m49
if exception is not None:
raise exception
return None
@classmethod
def get_iso3_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 from M49 code
Args:
m49 (int): M49 numeric code for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 code
"""
countriesdata = cls.countriesdata(use_live=use_live)
iso3 = countriesdata['m49iso3'].get(m49)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
@classmethod
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country information
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[Dict[str]]: Country information
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_info_from_iso3(iso3, exception=exception)
return None
@classmethod
def get_country_name_from_m49(cls, m49, use_live=True, exception=None):
# type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get country name from M49 code
Args:
m49 (int): M49 numeric code for which to get country name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: Country name
"""
iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
if iso3 is not None:
return cls.get_country_name_from_iso3(iso3, exception=exception)
return None
@classmethod
def expand_countryname_abbrevs(cls, country):
# type: (str) -> List[str]
"""Expands abbreviation(s) in country name in various ways (eg. FED -> FEDERATED, FEDERAL etc.)
Args:
country (str): Country with abbreviation(s)to expand
Returns:
List[str]: Uppercase country name with abbreviation(s) expanded in various ways
"""
def replace_ensure_space(word, replace, replacement):
return word.replace(replace, '%s ' % replacement).replace(' ', ' ').strip()
countryupper = country.upper()
for abbreviation in cls.abbreviations:
countryupper = replace_ensure_space(countryupper, abbreviation, cls.abbreviations[abbreviation])
candidates = [countryupper]
for abbreviation in cls.multiple_abbreviations:
if abbreviation in countryupper:
for expanded in cls.multiple_abbreviations[abbreviation]:
candidates.append(replace_ensure_space(countryupper, abbreviation, expanded))
return candidates
@classmethod
@classmethod
def get_iso3_country_code(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
"""
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata['countries']:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata['iso2iso3'].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata['countrynames2iso3'].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata['countrynames2iso3'].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None
    @classmethod
    def get_iso3_country_code_fuzzy(cls, country, use_live=True, exception=None):
        # type: (str, bool, Optional[ExceptionUpperBound]) -> Tuple[Optional[str], bool]
        """Get ISO3 code for country. A tuple is returned with the first value being the ISO3 code and the second
        showing if the match is exact or not.
        Args:
            country (str): Country for which to get ISO3 code
            use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
            exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
        Returns:
            Tuple[Optional[str], bool]: ISO3 code and if the match is exact or (None, False).
        """
        countriesdata = cls.countriesdata(use_live=use_live)
        # First try an exact lookup (code or name, abbreviations expanded)
        iso3 = cls.get_iso3_country_code(country,
        use_live=use_live)  # don't put exception param here as we don't want it to throw
        if iso3 is not None:
            return iso3, True
        def remove_matching_from_list(wordlist, word_or_part):
            # Remove every word that contains the given fragment.
            # NOTE(review): mutates wordlist while iterating over it, which can
            # skip the element following a removal - confirm this is intended.
            for word in wordlist:
                if word_or_part in word:
                    wordlist.remove(word)
        # fuzzy matching: score each known country name against every
        # abbreviation-expanded form of the input and keep the best scorer(s).
        # Scoring: +32 when the simplified input is contained in the name,
        # +4 per removed input word also present in the name, -16 per
        # mismatched "major differentiator" word (words that distinguish
        # otherwise-similar names), -1 per other unaccounted-for word.
        expanded_country_candidates = cls.expand_countryname_abbrevs(country)
        match_strength = 0
        matches = set()
        for countryname in sorted(countriesdata['countrynames2iso3']):
            for candidate in expanded_country_candidates:
                simplified_country, removed_words = cls.simplify_countryname(candidate)
                if simplified_country in countryname:
                    words = get_words_in_sentence(countryname)
                    new_match_strength = 0
                    if simplified_country:
                        remove_matching_from_list(words, simplified_country)
                        new_match_strength += 32
                    for word in removed_words:
                        if word in countryname:
                            remove_matching_from_list(words, word)
                            new_match_strength += 4
                        else:
                            if word in cls.major_differentiators:
                                new_match_strength -= 16
                            else:
                                new_match_strength -= 1
                    # Penalise words left in the official name that the input
                    # did not account for
                    for word in words:
                        if word in cls.major_differentiators:
                            new_match_strength -= 16
                        else:
                            new_match_strength -= 1
                    iso3 = countriesdata['countrynames2iso3'][countryname]
                    if new_match_strength > match_strength:
                        # New best score: discard previous, weaker matches
                        match_strength = new_match_strength
                        matches = set()
                    if new_match_strength == match_strength:
                        matches.add(iso3)
        # Accept only an unambiguous winner with a sufficiently high score
        if len(matches) == 1 and match_strength > 16:
            return matches.pop(), False
        # regex lookup: fall back to per-country alias patterns
        for iso3, regex in countriesdata['aliases'].items():
            index = re.search(regex, country.upper())
            if index is not None:
                return iso3, False
        if exception is not None:
            raise exception
        return None, False
@classmethod
def get_countries_in_region(cls, region, use_live=True, exception=None):
# type: (Union[int,str], bool, Optional[ExceptionUpperBound]) -> List[str]
"""Get countries (ISO3 codes) in region
Args:
region (Union[int,str]): Three digit UNStats M49 region code or region name
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if region not found. Defaults to None.
Returns:
List(str): Sorted list of ISO3 country names
"""
countriesdata = cls.countriesdata(use_live=use_live)
if isinstance(region, int):
regioncode = region
else:
regionupper = region.upper()
regioncode = countriesdata['regionnames2codes'].get(regionupper)
if regioncode is not None:
return countriesdata['regioncodes2countries'][regioncode]
if exception is not None:
raise exception
return list()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.