repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.prepare_params
|
python
|
def prepare_params(self):
    """
    Prepare the parameters passed to the templatetag.

    Resolves (or stringifies) the fragment name, strips surrounding quotes,
    computes the expire time, the version (if versioning is on) and the
    resolved ``vary_on`` values.

    Raises
    ------
    ValueError
        If the fragment name is quoted on only one side.
    """
    if self.options.resolve_fragment:
        # Resolve the name as a template variable against the context
        self.fragment_name = self.node.fragment_name.resolve(self.context)
    else:
        # Take the name literally as written in the template
        self.fragment_name = str(self.node.fragment_name)
        # Remove quotes that surround the name
        for char in '\'\"':
            if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
                if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
                    # Matched pair of quotes: strip them and stop looking
                    self.fragment_name = self.fragment_name[1:-1]
                    break
                else:
                    # Quote on one side only: malformed name
                    raise ValueError('Number of quotes around the fragment name is incoherent')
    self.expire_time = self.get_expire_time()
    if self.options.versioning:
        self.version = force_bytes(self.get_version())
    # Resolve every extra templatetag argument against the current context
    self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
|
Prepare the parameters passed to the templatetag
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L234-L256
|
[
"def get_expire_time(self):\n \"\"\"Update the expiry time with the multiplicator.\"\"\"\n expiry_time = super(TestCacheTag, self).get_expire_time()\n return self.multiplicator * expiry_time\n",
"def get_expire_time(self):\n \"\"\"\n Return the expire time passed to the templatetag.\n Must be None or an integer.\n \"\"\"\n try:\n expire_time = self.node.expire_time.resolve(self.context)\n except template.VariableDoesNotExist:\n raise template.TemplateSyntaxError('\"%s\" tag got an unknown variable: %r' %\n (self.node.nodename, self.node.expire_time.var))\n try:\n if expire_time is not None:\n expire_time = str(expire_time)\n if not expire_time.isdigit():\n raise TypeError\n expire_time = int(expire_time)\n except (ValueError, TypeError):\n raise template.TemplateSyntaxError(\n '\"%s\" tag got a non-integer (or None) timeout value: %r' % (\n self.node.nodename, expire_time\n )\n )\n\n return expire_time\n",
"def get_version(self):\n \"\"\"\n Return the stringified version passed to the templatetag.\n \"\"\"\n if not self.node.version:\n return None\n try:\n version = smart_str('%s' % self.node.version.resolve(self.context))\n except template.VariableDoesNotExist:\n raise template.TemplateSyntaxError('\"%s\" tag got an unknown variable: %r' %\n (self.node.nodename, self.node.version.var))\n\n return '%s' % version\n"
] |
class CacheTag(object, metaclass=CacheTagMetaClass):
    """
    The main class of `django-adv-cache-tag` which does all the work.
    To change its behaviour, simply change one or more of these settings
    (see in the `Meta` class for details) :
    * ADV_CACHE_VERSIONING
    * ADV_CACHE_COMPRESS
    * ADV_CACHE_COMPRESS_SPACES
    * ADV_CACHE_INCLUDE_PK
    * ADV_CACHE_BACKEND
    * ADV_CACHE_VERSION
    * ADV_CACHE_RESOLVE_NAME
    Or inherit from this class and don't forget to register your tag :
    from adv_cache_tag.tag import CacheTag
    register = template.Library()
    class MyCacheTag(CacheTag):
        # change something
    MyCacheTag.register(register, 'my_cache')
    By inheriting you can change many things as CacheTag implements a lot of
    small methods
    """

    # Will change if the algorithm changes
    INTERNAL_VERSION = '1'

    # Used to separate internal version, template version, and the content
    VERSION_SEPARATOR = '::'

    # Regex used to reduce spaces/blanks (many spaces into one)
    RE_SPACELESS = re.compile(r'\s\s+')

    # generate a token for this site, based on the secret_key
    RAW_TOKEN = 'RAW_' + hashlib.sha1(
        b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
            b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
        ).hexdigest())
    ).hexdigest()

    # tokens to use around the already parsed parts of the cached template
    RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
    RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END

    # internal use only: keep reference to templatetags functions, keyed by class
    _templatetags = {}
    # internal use only: name of the templatetags module to load for this class and subclasses
    _templatetags_modules = {}

    # set by the metaclass from the `Meta` inner class
    options = None
    Node = Node

    class Meta:
        """
        Options of this class. Accessible via cls.options or self.options.
        To force (and/or add) options in your own class, simply redefine a
        `Meta` class in your own main cache class with updated/added values
        """
        # If versioning is activated (internal versioning is always on)
        versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
        # If the content will be compressed before caching
        compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
        # If many spaces/blanks will be converted into one
        compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
        # If a "pk" (you can pass what you want) will be added to the cache key
        include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
        # The cache backend to use (or use the "default" one)
        cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
        # Part of the INTERNAL_VERSION configurable via settings
        internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
        # If the fragment name should be resolved or taken as is
        resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)

    # Use a metaclass to use the right class in the Node class, and assign Meta to options
    def __init__(self, node, context):
        """
        Constructor of the Cache class:
        * preparing fields to be used later,
        * prepare the templatetag parameters
        * create the cache key
        """
        super(CacheTag, self).__init__()

        # the actual Node object
        self.node = node
        # the context used for the rendering
        self.context = context

        # indicate that we force regenerating the cache, even if it exists
        self.regenerate = bool(self.context.get('__regenerate__', False))
        # indicate if we only want html without parsing the nocache parts
        self.partial = bool(self.context.get('__partial__', False))

        # the content of the template, will be used through the whole process
        self.content = ''
        # the version used in the cached templatetag
        self.content_version = None

        # Final "INTERNAL_VERSION" (as bytes, since it is joined with byte content)
        if self.options.internal_version:
            self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
                                                self.options.internal_version)
        else:
            self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
        self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)

        # prepare all parameters passed to the templatetag
        self.expire_time = None
        self.version = None
        self.prepare_params()

        # get the cache and cache key
        self.cache = self.get_cache_object()
        self.cache_key = self.get_cache_key()

    def prepare_params(self):
        """
        Prepare the parameters passed to the templatetag.

        NOTE(review): this method is called by ``__init__`` but was missing
        from this rendering of the class; restored from the other occurrence
        of the same source in this file.
        """
        if self.options.resolve_fragment:
            self.fragment_name = self.node.fragment_name.resolve(self.context)
        else:
            self.fragment_name = str(self.node.fragment_name)
            # Remove quotes that surround the name
            for char in '\'\"':
                if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
                    if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
                        self.fragment_name = self.fragment_name[1:-1]
                        break
                    else:
                        raise ValueError('Number of quotes around the fragment name is incoherent')
        self.expire_time = self.get_expire_time()
        if self.options.versioning:
            self.version = force_bytes(self.get_version())
        self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]

    def get_expire_time(self):
        """
        Return the expire time passed to the templatetag.
        Must be None or an integer.

        Raises
        ------
        template.TemplateSyntaxError
            If the variable cannot be resolved, or if it resolves to
            something that is neither None nor an integer.
        """
        try:
            expire_time = self.node.expire_time.resolve(self.context)
        except template.VariableDoesNotExist:
            raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
                                               (self.node.nodename, self.node.expire_time.var))
        try:
            if expire_time is not None:
                # Only accept values whose string form is purely digits
                expire_time = str(expire_time)
                if not expire_time.isdigit():
                    raise TypeError
                expire_time = int(expire_time)
        except (ValueError, TypeError):
            raise template.TemplateSyntaxError(
                '"%s" tag got a non-integer (or None) timeout value: %r' % (
                    self.node.nodename, expire_time
                )
            )
        return expire_time

    def get_version(self):
        """
        Return the stringified version passed to the templatetag,
        or None if the templatetag got no version argument.
        """
        if not self.node.version:
            return None
        try:
            version = smart_str('%s' % self.node.version.resolve(self.context))
        except template.VariableDoesNotExist:
            raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
                                               (self.node.nodename, self.node.version.var))
        return '%s' % version

    def hash_args(self):
        """
        Take all the arguments passed after the fragment name and return a
        hashed version which will be used in the cache key
        """
        # md5 is used for key dispersion only, not for security
        return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()

    def get_pk(self):
        """
        Return the pk to use in the cache key. It's the first value of the
        templatetag arguments after the fragment name
        """
        return self.vary_on[0]

    def get_base_cache_key(self):
        """
        Return a string with format placeholder used as a source to compute the
        final cache key.
        Placeholders are :
        * %(nodename)s : the name of the templatetag
        * %(name)s : the fragment name passed to the templatetag
        * %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
        * %(hash)s : the return of the `hash_args` method
        """
        if self.options.include_pk:
            return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
        else:
            return 'template.%(nodename)s.%(name)s.%(hash)s'

    def get_cache_key_args(self):
        """
        Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
        """
        cache_key_args = dict(
            nodename=self.node.nodename,
            name=self.fragment_name,
            hash=self.hash_args(),
        )
        if self.options.include_pk:
            cache_key_args['pk'] = self.get_pk()
        return cache_key_args

    def get_cache_key(self):
        """
        Compute and return the final cache key, using return values of
        `get_base_cache_key` and `get_cache_key_args`.
        """
        return self.get_base_cache_key() % self.get_cache_key_args()

    def get_cache_object(self):
        """
        Return the cache object to be used to set and get the values in cache.
        By default it's the default cache defined by django, but it can be
        every object with a `get` and a `set` method (or not, if `cache_get`
        and `cache_set` methods are overridden)
        """
        return get_cache(self.node.cache_backend or self.options.cache_backend)

    def cache_get(self):
        """
        Get content from the cache
        """
        return self.cache.get(self.cache_key)

    def cache_set(self, to_cache):
        """
        Set content into the cache
        """
        self.cache.set(self.cache_key, to_cache, self.expire_time)

    def join_content_version(self, to_cache):
        """
        Add the version(s) to the content to cache : internal version at first
        and then the template version if versioning is activated.
        Each version, and the content, are separated with `VERSION_SEPARATOR`.
        This method is called after the encoding (if "compress" or
        "compress_spaces" options are on)
        """
        parts = [self.INTERNAL_VERSION]
        if self.options.versioning:
            parts.append(force_bytes(self.version))
        parts.append(force_bytes(to_cache))
        return self.VERSION_SEPARATOR.join(parts)

    def split_content_version(self):
        """
        Remove and return the version(s) from the cached content. First the
        internal version, and if versioning is activated, the template one.
        And finally save the content, but only if all versions match.
        The content saved is the encoded one (if "compress" or
        "compress_spaces" options are on). By doing so, we avoid decoding if
        the versions didn't match, to save some cpu cycles.
        """
        try:
            nb_parts = 2
            if self.options.versioning:
                nb_parts = 3
            # split at most nb_parts-1 times so the content itself may
            # safely contain VERSION_SEPARATOR
            parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
            assert len(parts) == nb_parts
            self.content_internal_version = parts[0]
            if self.options.versioning:
                self.content_version = parts[1]
            self.content = parts[-1]
        except Exception:
            # any mismatch invalidates the cached entry
            self.content = None

    def decode_content(self):
        """
        Decode (decompress...) the content got from the cache, to the final
        html
        """
        # NOTE(review): pickle.loads on cached data — safe only as long as
        # the cache backend cannot be written by untrusted parties
        self.content = pickle.loads(zlib.decompress(self.content))

    def encode_content(self):
        """
        Encode (compress...) the html to the data to be cached
        """
        return zlib.compress(pickle.dumps(self.content))

    def render_node(self):
        """
        Render the template and save the generated content
        """
        self.content = self.node.nodelist.render(self.context)

    def create_content(self):
        """
        Render the template, apply options on it, and save it to the cache.
        """
        self.render_node()

        if self.options.compress_spaces:
            self.content = self.RE_SPACELESS.sub(' ', self.content)

        if self.options.compress:
            to_cache = self.encode_content()
        else:
            to_cache = self.content

        to_cache = self.join_content_version(to_cache)

        try:
            self.cache_set(to_cache)
        except Exception:
            # caching is best-effort: log, unless template debug is on
            if is_template_debug_activated():
                raise
            logger.exception('Error when saving the cached template fragment')

    def load_content(self):
        """
        It's the main method of the class.
        Try to load the template from cache, get the versions and decode the
        content.
        If something was wrong during this process (or if we had a
        `__regenerate__` value to True in the context), create new content and
        save it in cache.
        """
        self.content = None
        if not self.regenerate:
            try:
                self.content = self.cache_get()
            except Exception:
                if is_template_debug_activated():
                    raise
                logger.exception('Error when getting the cached template fragment')
        try:
            # every failed assert below falls through to create_content()
            assert self.content
            self.split_content_version()
            assert self.content
            if self.content_internal_version != self.INTERNAL_VERSION or (
                    self.options.versioning and self.content_version != self.version):
                self.content = None
            assert self.content
            if self.options.compress:
                self.decode_content()
        except Exception:
            self.create_content()
        self.content = smart_str(self.content)

    def render(self):
        """
        Try to load content (from cache or by rendering the template).
        If it fails, return an empty string or raise the exception if it's a
        TemplateSyntaxError.
        With this, we can now parse and render the content included in the
        {% nocache %} blocks, but only if we have this tag and if we don't
        have `__partial__` set to True in the context (in this case we simply
        return the html with the {% nocache %} block not parsed).
        """
        try:
            self.load_content()
        except template.TemplateSyntaxError:
            raise
        except Exception:
            if is_template_debug_activated():
                raise
            logger.exception('Error when rendering template fragment')
            return ''
        if self.partial or self.RAW_TOKEN_START not in self.content:
            return self.content
        return self.render_nocache()

    @staticmethod
    def get_all_tags_and_filters_by_function():
        """
        Return a dict with all the template tags (in the `tags` entry) and filters (in the
        `filters` entry) that are available.
        Both entries are a dict with the function as key, and a tuple with (library name, function
        name) as value.
        This is cached after the first call.
        """
        libraries = get_template_libraries()
        force = False
        # We'll force the update of the cache if new libraries were added
        # (NOTE(review): a changed set of the same size is not detected)
        if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
            if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
                force = True
        if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
            CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
            available_tags = {}
            available_filters = {}
            for lib_name, lib in libraries.items():
                available_tags.update(
                    (function, (lib_name, tag_name))
                    for tag_name, function
                    in lib.tags.items()
                )
                available_filters.update(
                    (function, (lib_name, filter_name))
                    for filter_name, function
                    in lib.filters.items()
                )
            CacheTag.get_all_tags_and_filters_by_function._cache = {
                'tags': available_tags,
                'filters': available_filters
            }
        return CacheTag.get_all_tags_and_filters_by_function._cache

    @classmethod
    def get_templatetag_module(cls):
        """
        Return the templatetags module name for which the current class is used.
        It's used to render the nocache blocks by loading the correct module
        """
        if cls not in CacheTag._templatetags_modules:
            # find the library including the main templatetag of the current class
            all_tags = cls.get_all_tags_and_filters_by_function()['tags']
            CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
        return CacheTag._templatetags_modules[cls]

    def render_nocache(self):
        """
        Render the `nocache` blocks of the content and return the whole
        html
        """
        tmpl = template.Template(''.join([
            # start by loading the cache library
            template.BLOCK_TAG_START,
            'load %s' % self.get_templatetag_module(),
            template.BLOCK_TAG_END,
            # and surround the cached template by "raw" tags
            self.RAW_TOKEN_START,
            self.content,
            self.RAW_TOKEN_END,
        ]))
        return tmpl.render(self.context)

    @classmethod
    def get_template_node_arguments(cls, tokens):
        """
        Return the arguments taken from the templatetag that will be used to the
        Node class.
        Take a list of all tokens and return a list of real tokens. Here
        should be done some validations (number of tokens...) and eventually
        some parsing...
        """
        if len(tokens) < 3:
            raise template.TemplateSyntaxError(
                "'%r' tag requires at least 2 arguments." % tokens[0])
        return tokens[1], tokens[2], tokens[3:]

    @classmethod
    def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
        """
        Register all needed templatetags, with these parameters :
        * library_register : the `register` object (result of
          `template.Library()`) in your templatetag module
        * nodename : the node to use for the cache templatetag (the default
          is "cache")
        * nocache_nodename : the node to use for the nocache templatetag
        """
        if cls in CacheTag._templatetags:
            raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
        CacheTag._templatetags[cls] = {}

        def templatetag_cache(parser, token):
            """
            Return a new Node object for the main cache templatetag
            """
            nodelist = parser.parse(('end%s' % nodename,))
            parser.delete_first_token()
            args = cls.get_template_node_arguments(token.contents.split())
            return cls.Node(nodename, nodelist, *args)

        library_register.tag(nodename, templatetag_cache)
        CacheTag._templatetags[cls]['cache'] = templatetag_cache

        def templatetag_raw(parser, token):
            """
            Return a TextNode with all html not parsed, used for templatetags
            that need to not be parsed : the `nocache` one and the `RAW` one,
            used to surround cached html (to be not parsed again)
            Based on http://www.holovaty.com/writing/django-two-phased-rendering/
            """
            # Whatever is between {% nocache %} and {% endnocache %} will be preserved as
            # raw, un-rendered template code.
            text = []
            parse_until = 'end%s' % token.contents
            tag_mapping = {
                TOKEN_TEXT: ('', ''),
                TOKEN_VAR: ('{{', '}}'),
                TOKEN_BLOCK: ('{%', '%}'),
                TOKEN_COMMENT: ('{#', '#}'),
            }
            # By the time this template tag is called, the template system has already
            # lexed the template into tokens. Here, we loop over the tokens until
            # {% endraw %} and parse them to TextNodes. We have to add the start and
            # end bits (e.g. "{{" for variables) because those have already been
            # stripped off in a previous part of the template-parsing process.
            while parser.tokens:
                token = parser.next_token()
                if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
                    return template.TextNode(''.join(text))
                start, end = tag_mapping[token.token_type]
                text.append('%s%s%s' % (start, token.contents, end))
            parser.unclosed_block_tag(parse_until)

        library_register.tag(cls.RAW_TOKEN, templatetag_raw)
        CacheTag._templatetags[cls]['raw'] = templatetag_raw

        def templatetag_nocache(parser, token):
            """
            Return a TextNode with raw html from the `nocache` templatetag,
            and surround it with `endRAW` and `RAW` (precisely
            `cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
            So for
                {% nocache %}foo{% endnocache %}
            we get
                {% endRAW... %}foo{% RAW... %}
            When the main cache templatetag content will be loaded from cache,
            it will be surrounded by the same templatetags, reversed.
            So if at first we had
                {% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
            The cached version will be
                bar{% endRAW... %}foo{% RAW... %}baz
            And the final html to be rendered will be
                {% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
            And the html within `RAW` and `endRAW` will not be parsed, as wanted
            """
            # We'll load in the no-cache part all template tags and filters loaded in the main
            # template, to be able to use it when the no-cache will be rendered
            all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
            available_tags = all_tags_and_filters['tags']
            available_filters = all_tags_and_filters['filters']
            needed = {}
            current_module = cls.get_templatetag_module()
            for function in parser.tags.values():
                if function in available_tags:
                    lib, name = available_tags[function]
                    if lib == current_module:
                        continue
                    needed.setdefault(lib, set()).add(name)
            for function in parser.filters.values():
                if function in available_filters:
                    lib, name = available_filters[function]
                    if lib == current_module:
                        continue
                    needed.setdefault(lib, set()).add(name)
            load_string = ''.join(
                '%sload %s from %s%s' % (
                    template.BLOCK_TAG_START,
                    ' '.join(names),
                    lib,
                    template.BLOCK_TAG_END,
                )
                for lib, names in needed.items()
            )
            node = templatetag_raw(parser, token)
            node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
            return node

        library_register.tag(nocache_nodename, templatetag_nocache)
        # FIX: key by class like 'cache' and 'raw' above — the original
        # stored this under CacheTag._templatetags['nocache'], polluting the
        # per-class registry and colliding across registered classes
        CacheTag._templatetags[cls]['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.get_expire_time
|
python
|
def get_expire_time(self):
    """
    Return the expire time passed to the templatetag.
    Must be None or an integer.

    Raises
    ------
    template.TemplateSyntaxError
        If the expire-time variable cannot be resolved in the context, or
        if it resolves to something that is neither None nor an integer.
    """
    try:
        expire_time = self.node.expire_time.resolve(self.context)
    except template.VariableDoesNotExist:
        raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
                                           (self.node.nodename, self.node.expire_time.var))
    try:
        if expire_time is not None:
            # accept only values whose string form is purely digits
            expire_time = str(expire_time)
            if not expire_time.isdigit():
                raise TypeError
            expire_time = int(expire_time)
    except (ValueError, TypeError):
        raise template.TemplateSyntaxError(
            '"%s" tag got a non-integer (or None) timeout value: %r' % (
                self.node.nodename, expire_time
            )
        )
    return expire_time
|
Return the expire time passed to the templatetag.
Must be None or an integer.
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L258-L281
| null |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
"""
Constructor of the Cache class:
* preparing fields to be used later,
* prepare the templatetag parameters
* create the cache key
"""
super(CacheTag, self).__init__()
# the actual Node object
self.node = node
# the context used for the rendering
self.context = context
# indicate that we force regenerating the cache, even if it exists
self.regenerate = bool(self.context.get('__regenerate__', False))
# indicate if we only want html without parsing the nocache parts
self.partial = bool(self.context.get('__partial__', False))
# the content of the template, will be used through the whole process
self.content = ''
# the version used in the cached templatetag
self.content_version = None
# Final "INTERNAL_VERSION"
if self.options.internal_version:
self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
self.options.internal_version)
else:
self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
# prepare all parameters passed to the templatetag
self.expire_time = None
self.version = None
self.prepare_params()
# get the cache and cache key
self.cache = self.get_cache_object()
self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
def get_cache_key(self):
"""
Compute and return the final cache key, using return values of
`get_base_cache_key` and `get_cache_key_args`.
"""
return self.get_base_cache_key() % self.get_cache_key_args()
def get_cache_object(self):
"""
Return the cache object to be used to set and get the values in cache.
By default it's the default cache defined by django, but it can be
every object with a `get` and a `set` method (or not, if `cache_get`
and `cache_set` methods are overridden)
"""
return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
"""
Get content from the cache
"""
return self.cache.get(self.cache_key)
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
"""
Add the version(s) to the content to cache : internal version at first
and then the template version if versioning is activated.
Each version, and the content, are separated with `VERSION_SEPARATOR`.
This method is called after the encoding (if "compress" or
"compress_spaces" options are on)
"""
parts = [self.INTERNAL_VERSION]
if self.options.versioning:
parts.append(force_bytes(self.version))
parts.append(force_bytes(to_cache))
return self.VERSION_SEPARATOR.join(parts)
def split_content_version(self):
"""
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
"""
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content))
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def render_node(self):
"""
Render the template and save the generated content
"""
self.content = self.node.nodelist.render(self.context)
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
def load_content(self):
"""
It's the main method of the class.
Try to load the template from cache, get the versions and decode the
content.
If something was wrong during this process (or if we had a
`__regenerate__` value to True in the context), create new content and
save it in cache.
"""
self.content = None
if not self.regenerate:
try:
self.content = self.cache_get()
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when getting the cached template fragment')
try:
assert self.content
self.split_content_version()
assert self.content
if self.content_internal_version != self.INTERNAL_VERSION or (
self.options.versioning and self.content_version != self.version):
self.content = None
assert self.content
if self.options.compress:
self.decode_content()
except Exception:
self.create_content()
self.content = smart_str(self.content)
def render(self):
    """
    Try to load content (from cache or by rendering the template).
    If it fails, return an empty string, or raise the exception if it's a
    TemplateSyntaxError.
    With this, we can now parse and render the content included in the
    {% nocache %} blocks, but only if we have this tag and if we don't
    have `__partial__` set to True in the context (in this case we simply
    return the html with the {% nocache %} block not parsed).
    """
    try:
        # Fills `self.content`, from cache or by rendering the fragment.
        self.load_content()
    except template.TemplateSyntaxError:
        # Template errors must always surface to the developer.
        raise
    except Exception:
        if is_template_debug_activated():
            raise
        logger.exception('Error when rendering template fragment')
        return ''
    # No RAW marker in the content means there is no {% nocache %} part
    # left to render; `__partial__` callers explicitly want the raw html.
    if self.partial or self.RAW_TOKEN_START not in self.content:
        return self.content
    return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
    """
    Return a dict with all the template tags (in the `tags` entry) and filters (in the
    `filters` entry) that are available.
    Both entries are a dict with the function as key, and a tuple with (library name, function
    name) as value.
    This is cached after the first call.
    """
    libraries = get_template_libraries()
    force = False
    # We'll force the update of the cache if new libraries were added.
    # NOTE(review): invalidation only compares the *number* of libraries,
    # so replacing one library with another of the same count keeps the
    # stale cache — confirm this is acceptable.
    if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
        if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
            force = True
    if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
        # The cache is stored as attributes on the function itself.
        CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
        available_tags = {}
        available_filters = {}
        for lib_name, lib in libraries.items():
            # Reverse mapping: function -> (library name, registered name)
            available_tags.update(
                (function, (lib_name, tag_name))
                for tag_name, function
                in lib.tags.items()
            )
            available_filters.update(
                (function, (lib_name, filter_name))
                for filter_name, function
                in lib.filters.items()
            )
        CacheTag.get_all_tags_and_filters_by_function._cache = {
            'tags': available_tags,
            'filters': available_filters
        }
    return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
    """Return (and memoize per class) the name of the templatetags
    module that registered this class's main cache tag."""
    if cls not in CacheTag._templatetags_modules:
        # Look up which library the main cache templatetag belongs to.
        tags = cls.get_all_tags_and_filters_by_function()['tags']
        cache_function = CacheTag._templatetags[cls]['cache']
        CacheTag._templatetags_modules[cls] = tags[cache_function][0]
    return CacheTag._templatetags_modules[cls]
def render_nocache(self):
    """Render the {% nocache %} parts of the cached html.

    Builds a throwaway template that loads our templatetag module, then
    wraps the cached content in RAW markers so the already-rendered
    parts are not parsed again, and renders it with the current context.
    """
    source = ''.join([
        # load the cache library first
        template.BLOCK_TAG_START,
        'load %s' % self.get_templatetag_module(),
        template.BLOCK_TAG_END,
        # then the cached content, protected by "raw" tags
        self.RAW_TOKEN_START,
        self.content,
        self.RAW_TOKEN_END,
    ])
    return template.Template(source).render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
    """
    Validate and split the raw templatetag tokens.

    `tokens` is the whitespace-split contents of the tag, e.g.
    ['cache', '500', 'fragment_name', 'obj.pk'].

    Returns a (expire_time, fragment_name, vary_on) tuple to be passed
    to the Node class.

    Raises template.TemplateSyntaxError when fewer than two arguments
    follow the tag name.
    """
    if len(tokens) < 3:
        # %r already adds quotes around the value; the previous "'%r'"
        # produced a doubly-quoted tag name in the error message.
        raise template.TemplateSyntaxError(
            "%r tag requires at least 2 arguments." % tokens[0])
    return tokens[1], tokens[2], tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
    """
    Register all needed templatetags, with these parameters :
    * library_register : the `register` object (result of
      `template.Library()`) in your templatetag module
    * nodename : the node to use for the cache templatetag (the default
      is "cache")
    * nocache_nodename : the node to use for the nocache templatetag
    """
    if cls in CacheTag._templatetags:
        raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
    CacheTag._templatetags[cls] = {}

    def templatetag_cache(parser, token):
        """
        Return a new Node object for the main cache templatetag
        """
        nodelist = parser.parse(('end%s' % nodename,))
        parser.delete_first_token()
        args = cls.get_template_node_arguments(token.contents.split())
        return cls.Node(nodename, nodelist, *args)

    library_register.tag(nodename, templatetag_cache)
    CacheTag._templatetags[cls]['cache'] = templatetag_cache

    def templatetag_raw(parser, token):
        """
        Return a TextNode with all html not parsed, used for templatetags
        that need to not be parsed : the `nocache` one and the `RAW` one,
        used to surround cached html (to be not parsed again)
        Based on http://www.holovaty.com/writing/django-two-phased-rendering/
        """
        # Whatever is between {% nocache %} and {% endnocache %} will be
        # preserved as raw, un-rendered template code.
        text = []
        parse_until = 'end%s' % token.contents
        tag_mapping = {
            TOKEN_TEXT: ('', ''),
            TOKEN_VAR: ('{{', '}}'),
            TOKEN_BLOCK: ('{%', '%}'),
            TOKEN_COMMENT: ('{#', '#}'),
        }
        # By the time this template tag is called, the template system has
        # already lexed the template into tokens. Here, we loop over the
        # tokens until the end tag and serialize them back to text. We have
        # to re-add the start and end bits (e.g. "{{" for variables) because
        # those were stripped off in a previous part of template parsing.
        while parser.tokens:
            token = parser.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
                return template.TextNode(''.join(text))
            start, end = tag_mapping[token.token_type]
            text.append('%s%s%s' % (start, token.contents, end))
        parser.unclosed_block_tag(parse_until)

    library_register.tag(cls.RAW_TOKEN, templatetag_raw)
    CacheTag._templatetags[cls]['raw'] = templatetag_raw

    def templatetag_nocache(parser, token):
        """
        Return a TextNode with raw html from the `nocache` templatetag,
        surrounded with `endRAW` and `RAW` (precisely `cls.RAW_TOKEN_END`
        and `cls.RAW_TOKEN_START`).
        So for
            {% nocache %}foo{% endnocache %}
        we get
            {% endRAW... %}foo{% RAW... %}
        When the main cache templatetag content is loaded from cache, it
        is surrounded by the same templatetags, reversed, so the cached
        parts are protected from re-parsing while the nocache parts are
        rendered.
        """
        # We'll load in the no-cache part all template tags and filters
        # loaded in the main template, so they are usable when the
        # no-cache part is rendered.
        all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
        available_tags = all_tags_and_filters['tags']
        available_filters = all_tags_and_filters['filters']
        needed = {}
        current_module = cls.get_templatetag_module()
        for function in parser.tags.values():
            if function in available_tags:
                lib, name = available_tags[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        for function in parser.filters.values():
            if function in available_filters:
                lib, name = available_filters[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        load_string = ''.join(
            '%sload %s from %s%s' % (
                template.BLOCK_TAG_START,
                ' '.join(names),
                lib,
                template.BLOCK_TAG_END,
            )
            for lib, names in needed.items()
        )
        node = templatetag_raw(parser, token)
        node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
        return node

    library_register.tag(nocache_nodename, templatetag_nocache)
    # BUG FIX: was `CacheTag._templatetags['nocache'] = ...`, which stored
    # the nocache tag under a *string* key in the class-keyed registry
    # instead of in this class's own entry (compare the 'cache' and 'raw'
    # entries above).
    CacheTag._templatetags[cls]['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.get_version
|
python
|
def get_version(self):
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
|
Return the stringified version passed to the templatetag.
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L283-L295
| null |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
    """
    Constructor of the Cache class:
    * preparing fields to be used later,
    * prepare the templatetag parameters
    * create the cache key
    """
    super(CacheTag, self).__init__()
    # the actual Node object
    self.node = node
    # the context used for the rendering
    self.context = context
    # indicate that we force regenerating the cache, even if it exists
    self.regenerate = bool(self.context.get('__regenerate__', False))
    # indicate if we only want html without parsing the nocache parts
    self.partial = bool(self.context.get('__partial__', False))
    # the content of the template, will be used through the whole process
    self.content = ''
    # the version used in the cached templatetag
    self.content_version = None
    # Final "INTERNAL_VERSION", always stored as bytes.
    if self.options.internal_version:
        # BUG FIX: the previous `b'%s|%s' % (...)` raised TypeError on
        # Python 3, because both operands are `str` and bytes
        # %-formatting (PEP 461) only accepts bytes-like values for %s.
        # Format as text first, then encode.
        self.INTERNAL_VERSION = force_bytes('%s|%s' % (
            self.__class__.INTERNAL_VERSION, self.options.internal_version))
    else:
        self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
    self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
    # prepare all parameters passed to the templatetag
    self.expire_time = None
    self.version = None
    self.prepare_params()
    # get the cache and cache key
    self.cache = self.get_cache_object()
    self.cache_key = self.get_cache_key()
def prepare_params(self):
    """
    Prepare the parameters passed to the templatetag
    """
    # The fragment name is either resolved against the context
    # (ADV_CACHE_RESOLVE_NAME) or taken literally.
    if self.options.resolve_fragment:
        self.fragment_name = self.node.fragment_name.resolve(self.context)
    else:
        self.fragment_name = str(self.node.fragment_name)
    # Remove quotes that surround the name; a name quoted on only one
    # side is rejected.
    for char in '\'\"':
        if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
            if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
                self.fragment_name = self.fragment_name[1:-1]
                break
            else:
                raise ValueError('Number of quotes around the fragment name is incoherent')
    self.expire_time = self.get_expire_time()
    if self.options.versioning:
        # Stored as bytes so it can be compared to the cached prefix.
        self.version = force_bytes(self.get_version())
    # All remaining arguments are resolved template variables.
    self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
    """Resolve and validate the expiry argument of the templatetag.

    Returns None or a non-negative int; raises
    template.TemplateSyntaxError for unresolvable variables or
    non-integer values.
    """
    try:
        value = self.node.expire_time.resolve(self.context)
    except template.VariableDoesNotExist:
        raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
                                           (self.node.nodename, self.node.expire_time.var))
    if value is None:
        return None
    try:
        value = str(value)
        if not value.isdigit():
            raise TypeError
        value = int(value)
    except (ValueError, TypeError):
        raise template.TemplateSyntaxError(
            '"%s" tag got a non-integer (or None) timeout value: %r' % (
                self.node.nodename, value
            )
        )
    return value
def hash_args(self):
    """Return an md5 hex digest of all vary-on arguments, used as the
    discriminating part of the cache key."""
    quoted = [urlquote(force_bytes(var)) for var in self.vary_on]
    joined = ':'.join(quoted)
    return hashlib.md5(force_bytes(joined)).hexdigest()
def get_pk(self):
    """Return the value used as "pk" in the cache key: the first
    templatetag argument after the fragment name."""
    vary_on = self.vary_on
    return vary_on[0]
def get_base_cache_key(self):
    """Return the cache-key template string.

    Placeholders (filled by `get_cache_key_args`): %(nodename)s,
    %(name)s, %(hash)s and — only when `include_pk` is on — %(pk)s.
    """
    key = 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
    if not self.options.include_pk:
        key = 'template.%(nodename)s.%(name)s.%(hash)s'
    return key
def get_cache_key_args(self):
    """Build the mapping interpolated into `get_base_cache_key`'s
    template: nodename, fragment name, args hash and optionally pk."""
    args = {
        'nodename': self.node.nodename,
        'name': self.fragment_name,
        'hash': self.hash_args(),
    }
    if self.options.include_pk:
        args['pk'] = self.get_pk()
    return args
def get_cache_key(self):
    """Interpolate the key arguments into the base key template and
    return the final cache key."""
    base = self.get_base_cache_key()
    return base % self.get_cache_key_args()
def get_cache_object(self):
    """Return the cache backend to use for get/set.

    The backend named on the templatetag itself wins; otherwise the
    class-level `cache_backend` option is used. Any object exposing
    `get` and `set` works (or override `cache_get`/`cache_set`).
    """
    backend_name = self.node.cache_backend or self.options.cache_backend
    return get_cache(backend_name)
def cache_get(self):
    """Fetch the raw cached payload for this fragment's key."""
    return self.cache.get(self.cache_key)
def cache_set(self, to_cache):
    """Store `to_cache` under this fragment's key, expiring after
    `self.expire_time`."""
    key = self.cache_key
    self.cache.set(key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
    """Prefix the (already encoded) payload with the internal version
    and, when versioning is enabled, the template version — all joined
    with `VERSION_SEPARATOR`."""
    prefix = [self.INTERNAL_VERSION]
    if self.options.versioning:
        prefix.append(force_bytes(self.version))
    return self.VERSION_SEPARATOR.join(prefix + [force_bytes(to_cache)])
def split_content_version(self):
    """Pop the version prefix(es) off the cached payload.

    Leaves the (still encoded) payload in `self.content`, the internal
    version in `content_internal_version` and, when versioning is on,
    the template version in `content_version`. On any parsing problem
    `self.content` is reset to None so the caller regenerates the
    fragment. Decoding is deferred so a version mismatch costs nothing.
    """
    try:
        expected = 3 if self.options.versioning else 2
        pieces = self.content.split(self.VERSION_SEPARATOR, expected - 1)
        assert len(pieces) == expected
        self.content_internal_version = pieces[0]
        if self.options.versioning:
            self.content_version = pieces[1]
        self.content = pieces[-1]
    except Exception:
        self.content = None
def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content))
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def render_node(self):
"""
Render the template and save the generated content
"""
self.content = self.node.nodelist.render(self.context)
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
def load_content(self):
"""
It's the main method of the class.
Try to load the template from cache, get the versions and decode the
content.
If something was wrong during this process (or if we had a
`__regenerate__` value to True in the context), create new content and
save it in cache.
"""
self.content = None
if not self.regenerate:
try:
self.content = self.cache_get()
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when getting the cached template fragment')
try:
assert self.content
self.split_content_version()
assert self.content
if self.content_internal_version != self.INTERNAL_VERSION or (
self.options.versioning and self.content_version != self.version):
self.content = None
assert self.content
if self.options.compress:
self.decode_content()
except Exception:
self.create_content()
self.content = smart_str(self.content)
def render(self):
"""
Try to load content (from cache or by rendering the template).
If it fails, return an empty string or raise the exception if it's a
TemplateSyntaxError.
With this, we can no parse and render the content included in the
{% nocache %} blocks, but only if we have have this tag and if we don't
have `__partial__` to True in the context (in this case we simple
return the html with the {% nocache %} block not parsed.
"""
try:
self.load_content()
except template.TemplateSyntaxError:
raise
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when rendering template fragment')
return ''
if self.partial or self.RAW_TOKEN_START not in self.content:
return self.content
return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
"""
Return a dict with all the template tags (in the `tags` entry) and filters (in the
`filters` entry) that are available.
Both entries are a dict with the function as key, and a tuple with (library name, function
name) as value.
This is cached after the first call.
"""
libraries = get_template_libraries()
force = False
# We'll force the update of the cache if new libraries where added
if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
force = True
if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
available_tags = {}
available_filters = {}
for lib_name, lib in libraries.items():
available_tags.update(
(function, (lib_name, tag_name))
for tag_name, function
in lib.tags.items()
)
available_filters.update(
(function, (lib_name, filter_name))
for filter_name, function
in lib.filters.items()
)
CacheTag.get_all_tags_and_filters_by_function._cache = {
'tags': available_tags,
'filters': available_filters
}
return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
"""
Return the templatetags module name for which the current class is used.
It's used to render the nocache blocks by loading the correct module
"""
if cls not in CacheTag._templatetags_modules:
# find the library including the main templatetag of the current class
all_tags = cls.get_all_tags_and_filters_by_function()['tags']
CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
return CacheTag._templatetags_modules[cls]
def render_nocache(self):
"""
Render the `nocache` blocks of the content and return the whole
html
"""
tmpl = template.Template(''.join([
# start by loading the cache library
template.BLOCK_TAG_START,
'load %s' % self.get_templatetag_module(),
template.BLOCK_TAG_END,
# and surround the cached template by "raw" tags
self.RAW_TOKEN_START,
self.content,
self.RAW_TOKEN_END,
]))
return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
    """
    Validate and split the raw templatetag tokens.

    `tokens` is the whitespace-split contents of the tag, e.g.
    ['cache', '500', 'fragment_name', 'obj.pk'].

    Returns a (expire_time, fragment_name, vary_on) tuple to be passed
    to the Node class.

    Raises template.TemplateSyntaxError when fewer than two arguments
    follow the tag name.
    """
    if len(tokens) < 3:
        # %r already adds quotes around the value; the previous "'%r'"
        # produced a doubly-quoted tag name in the error message.
        raise template.TemplateSyntaxError(
            "%r tag requires at least 2 arguments." % tokens[0])
    return tokens[1], tokens[2], tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
    """
    Register all needed templatetags, with these parameters :
    * library_register : the `register` object (result of
      `template.Library()`) in your templatetag module
    * nodename : the node to use for the cache templatetag (the default
      is "cache")
    * nocache_nodename : the node to use for the nocache templatetag
    """
    if cls in CacheTag._templatetags:
        raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
    CacheTag._templatetags[cls] = {}

    def templatetag_cache(parser, token):
        """
        Return a new Node object for the main cache templatetag
        """
        nodelist = parser.parse(('end%s' % nodename,))
        parser.delete_first_token()
        args = cls.get_template_node_arguments(token.contents.split())
        return cls.Node(nodename, nodelist, *args)

    library_register.tag(nodename, templatetag_cache)
    CacheTag._templatetags[cls]['cache'] = templatetag_cache

    def templatetag_raw(parser, token):
        """
        Return a TextNode with all html not parsed, used for templatetags
        that need to not be parsed : the `nocache` one and the `RAW` one,
        used to surround cached html (to be not parsed again)
        Based on http://www.holovaty.com/writing/django-two-phased-rendering/
        """
        # Whatever is between {% nocache %} and {% endnocache %} will be
        # preserved as raw, un-rendered template code.
        text = []
        parse_until = 'end%s' % token.contents
        tag_mapping = {
            TOKEN_TEXT: ('', ''),
            TOKEN_VAR: ('{{', '}}'),
            TOKEN_BLOCK: ('{%', '%}'),
            TOKEN_COMMENT: ('{#', '#}'),
        }
        # By the time this template tag is called, the template system has
        # already lexed the template into tokens. Here, we loop over the
        # tokens until the end tag and serialize them back to text. We have
        # to re-add the start and end bits (e.g. "{{" for variables) because
        # those were stripped off in a previous part of template parsing.
        while parser.tokens:
            token = parser.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
                return template.TextNode(''.join(text))
            start, end = tag_mapping[token.token_type]
            text.append('%s%s%s' % (start, token.contents, end))
        parser.unclosed_block_tag(parse_until)

    library_register.tag(cls.RAW_TOKEN, templatetag_raw)
    CacheTag._templatetags[cls]['raw'] = templatetag_raw

    def templatetag_nocache(parser, token):
        """
        Return a TextNode with raw html from the `nocache` templatetag,
        surrounded with `endRAW` and `RAW` (precisely `cls.RAW_TOKEN_END`
        and `cls.RAW_TOKEN_START`).
        So for
            {% nocache %}foo{% endnocache %}
        we get
            {% endRAW... %}foo{% RAW... %}
        When the main cache templatetag content is loaded from cache, it
        is surrounded by the same templatetags, reversed, so the cached
        parts are protected from re-parsing while the nocache parts are
        rendered.
        """
        # We'll load in the no-cache part all template tags and filters
        # loaded in the main template, so they are usable when the
        # no-cache part is rendered.
        all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
        available_tags = all_tags_and_filters['tags']
        available_filters = all_tags_and_filters['filters']
        needed = {}
        current_module = cls.get_templatetag_module()
        for function in parser.tags.values():
            if function in available_tags:
                lib, name = available_tags[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        for function in parser.filters.values():
            if function in available_filters:
                lib, name = available_filters[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        load_string = ''.join(
            '%sload %s from %s%s' % (
                template.BLOCK_TAG_START,
                ' '.join(names),
                lib,
                template.BLOCK_TAG_END,
            )
            for lib, names in needed.items()
        )
        node = templatetag_raw(parser, token)
        node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
        return node

    library_register.tag(nocache_nodename, templatetag_nocache)
    # BUG FIX: was `CacheTag._templatetags['nocache'] = ...`, which stored
    # the nocache tag under a *string* key in the class-keyed registry
    # instead of in this class's own entry (compare the 'cache' and 'raw'
    # entries above).
    CacheTag._templatetags[cls]['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.hash_args
|
python
|
def hash_args(self):
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
|
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L297-L302
| null |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
    """
    Constructor of the Cache class:
    * preparing fields to be used later,
    * prepare the templatetag parameters
    * create the cache key
    """
    super(CacheTag, self).__init__()
    # the actual Node object
    self.node = node
    # the context used for the rendering
    self.context = context
    # indicate that we force regenerating the cache, even if it exists
    self.regenerate = bool(self.context.get('__regenerate__', False))
    # indicate if we only want html without parsing the nocache parts
    self.partial = bool(self.context.get('__partial__', False))
    # the content of the template, will be used through the whole process
    self.content = ''
    # the version used in the cached templatetag
    self.content_version = None
    # Final "INTERNAL_VERSION", always stored as bytes.
    if self.options.internal_version:
        # BUG FIX: the previous `b'%s|%s' % (...)` raised TypeError on
        # Python 3, because both operands are `str` and bytes
        # %-formatting (PEP 461) only accepts bytes-like values for %s.
        # Format as text first, then encode.
        self.INTERNAL_VERSION = force_bytes('%s|%s' % (
            self.__class__.INTERNAL_VERSION, self.options.internal_version))
    else:
        self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
    self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
    # prepare all parameters passed to the templatetag
    self.expire_time = None
    self.version = None
    self.prepare_params()
    # get the cache and cache key
    self.cache = self.get_cache_object()
    self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
"""
Return the expire time passed to the templatetag.
Must be None or an integer.
"""
try:
expire_time = self.node.expire_time.resolve(self.context)
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.expire_time.var))
try:
if expire_time is not None:
expire_time = str(expire_time)
if not expire_time.isdigit():
raise TypeError
expire_time = int(expire_time)
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
'"%s" tag got a non-integer (or None) timeout value: %r' % (
self.node.nodename, expire_time
)
)
return expire_time
def get_version(self):
    """
    Return the stringified version passed to the templatetag, or None if
    no version expression was given.

    Raises TemplateSyntaxError when the version expression cannot be
    resolved against the current context.
    """
    version_var = self.node.version
    if not version_var:
        return None
    try:
        resolved = version_var.resolve(self.context)
    except template.VariableDoesNotExist:
        raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
                                           (self.node.nodename, version_var.var))
    return smart_str('%s' % resolved)
def get_pk(self):
    """
    Return the "pk" to include in the cache key.

    By convention it is the first of the templatetag arguments given after
    the fragment name (raises IndexError when there is none).
    """
    vary_on = self.vary_on
    return vary_on[0]
def get_base_cache_key(self):
    """
    Return the cache-key template (``%``-style placeholders) used to build
    the final cache key.

    Available placeholders:
        * %(nodename)s : name of the templatetag
        * %(name)s : fragment name passed to the templatetag
        * %(pk)s : result of `get_pk` (present only when `include_pk` is on)
        * %(hash)s : result of `hash_args`
    """
    pk_part = '%(pk)s.' if self.options.include_pk else ''
    return 'template.%(nodename)s.%(name)s.' + pk_part + '%(hash)s'
def get_cache_key_args(self):
    """
    Return the dict of values to interpolate into the string returned by
    `get_base_cache_key`.
    """
    args = {
        'nodename': self.node.nodename,
        'name': self.fragment_name,
        'hash': self.hash_args(),
    }
    # The pk entry is only needed when the base key contains %(pk)s.
    if self.options.include_pk:
        args['pk'] = self.get_pk()
    return args
def get_cache_key(self):
    """
    Build and return the final cache key by interpolating the values from
    `get_cache_key_args` into the format from `get_base_cache_key`.
    """
    key_format = self.get_base_cache_key()
    return key_format % self.get_cache_key_args()
def get_cache_object(self):
    """
    Return the cache object to be used to set and get the values in cache.

    By default it's a django cache backend, but it can be any object with
    a `get` and a `set` method (or not even that, if `cache_get` and
    `cache_set` methods are overridden).
    """
    # The backend name passed to the templatetag, if any, takes precedence
    # over the one configured in the options.
    return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
    """
    Fetch and return whatever the cache backend holds for our key.
    """
    cache_backend = self.cache
    return cache_backend.get(self.cache_key)
def cache_set(self, to_cache):
    """
    Set content into the cache, under our key, with the configured timeout.
    """
    # expire_time may be None; the backend decides what that means
    # (django's cache backends treat None as "never expire").
    self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
    """
    Prefix `to_cache` with the version(s): the internal version first,
    then the template version when versioning is enabled, every part
    joined with `VERSION_SEPARATOR`.

    Called after the encoding step (when the "compress" or
    "compress_spaces" options are on).
    """
    if self.options.versioning:
        parts = (self.INTERNAL_VERSION, force_bytes(self.version), force_bytes(to_cache))
    else:
        parts = (self.INTERNAL_VERSION, force_bytes(to_cache))
    return self.VERSION_SEPARATOR.join(parts)
def split_content_version(self):
    """
    Remove and store the version(s) from the cached content: first the
    internal version, and, if versioning is activated, the template one.
    The remaining payload is kept in `self.content`; on any failure the
    content is discarded (set to None) so it gets regenerated.

    The content saved is still the encoded one (if "compress" or
    "compress_spaces" options are on): decoding is deferred so we avoid
    its cost when the versions don't match.
    """
    try:
        # Expected layout: "internal SEP content", or with versioning on,
        # "internal SEP version SEP content".
        nb_parts = 2
        if self.options.versioning:
            nb_parts = 3
        # maxsplit ensures a separator occurring inside the content itself
        # is left untouched.
        parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
        assert len(parts) == nb_parts
        self.content_internal_version = parts[0]
        if self.options.versioning:
            self.content_version = parts[1]
        self.content = parts[-1]
    except Exception:
        # Malformed cached payload: signal the caller via a None content.
        self.content = None
def decode_content(self):
    """
    Decode the payload fetched from the cache back into the final html:
    zlib-decompress, then unpickle.
    """
    raw = zlib.decompress(self.content)
    self.content = pickle.loads(raw)
def encode_content(self):
    """
    Encode the html into the payload to be cached: pickle, then
    zlib-compress.
    """
    pickled = pickle.dumps(self.content)
    return zlib.compress(pickled)
def render_node(self):
    """
    Render the template and save the generated content.
    """
    # Renders the inner {% cache %}...{% endcache %} nodelist with the
    # current context; nocache blocks were turned into raw-marker text
    # nodes at parse time, so they come out unrendered here.
    self.content = self.node.nodelist.render(self.context)
def create_content(self):
    """
    Render the template, apply options on it, and save it to the cache.
    """
    self.render_node()
    if self.options.compress_spaces:
        # Collapse every run of whitespace into a single space.
        self.content = self.RE_SPACELESS.sub(' ', self.content)
    if self.options.compress:
        to_cache = self.encode_content()
    else:
        to_cache = self.content
    # Versions are prepended *after* encoding so they can be checked
    # without decoding the payload.
    to_cache = self.join_content_version(to_cache)
    try:
        self.cache_set(to_cache)
    except Exception:
        # A cache-backend failure must not break rendering: the freshly
        # rendered html is still in self.content. Re-raise only in debug.
        if is_template_debug_activated():
            raise
        logger.exception('Error when saving the cached template fragment')
def load_content(self):
    """
    It's the main method of the class.

    Try to load the template from cache, check the versions and decode
    the content. If anything is wrong in this process (or if
    `__regenerate__` is True in the context), create new content and
    save it in cache.
    """
    self.content = None
    if not self.regenerate:
        try:
            self.content = self.cache_get()
        except Exception:
            if is_template_debug_activated():
                raise
            logger.exception('Error when getting the cached template fragment')
    # The asserts below implement a "bail out to regeneration" path: any
    # failure (empty content, version mismatch, decode error) falls into
    # the except clause, which regenerates the content.
    # NOTE(review): `assert` statements are stripped when python runs with
    # -O, which would skip these checks — confirm -O is never used.
    try:
        assert self.content
        self.split_content_version()
        assert self.content
        if self.content_internal_version != self.INTERNAL_VERSION or (
                self.options.versioning and self.content_version != self.version):
            # Stale entry: cached by another algorithm/template version.
            self.content = None
        assert self.content
        if self.options.compress:
            self.decode_content()
    except Exception:
        self.create_content()
    self.content = smart_str(self.content)
def render(self):
    """
    Try to load content (from cache or by rendering the template).

    If it fails, return an empty string, or re-raise the exception if it
    is a TemplateSyntaxError (or if template debugging is activated).

    Then parse and render the content included in the {% nocache %}
    blocks, but only if such a block is present and `__partial__` is not
    True in the context (in which case we simply return the html with the
    {% nocache %} blocks left unparsed).
    """
    try:
        self.load_content()
    except template.TemplateSyntaxError:
        raise
    except Exception:
        if is_template_debug_activated():
            raise
        logger.exception('Error when rendering template fragment')
        return ''
    # Fast path: nothing to post-process when there is no raw marker.
    if self.partial or self.RAW_TOKEN_START not in self.content:
        return self.content
    return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
    """
    Return a dict with all the template tags (in the `tags` entry) and filters (in the
    `filters` entry) that are available.
    Both entries are a dict with the function as key, and a tuple with (library name, function
    name) as value.
    This is cached after the first call.
    """
    libraries = get_template_libraries()
    force = False
    # We'll force the update of the cache if new libraries were added.
    # NOTE(review): only the *count* of libraries is compared, so replacing
    # one library by another without changing the total would not
    # invalidate this cache.
    if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
        if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
            force = True
    if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
        CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
        available_tags = {}
        available_filters = {}
        for lib_name, lib in libraries.items():
            available_tags.update(
                (function, (lib_name, tag_name))
                for tag_name, function
                in lib.tags.items()
            )
            available_filters.update(
                (function, (lib_name, filter_name))
                for filter_name, function
                in lib.filters.items()
            )
        # The cache is stored on the function object itself.
        CacheTag.get_all_tags_and_filters_by_function._cache = {
            'tags': available_tags,
            'filters': available_filters
        }
    return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
    """
    Return the templatetags module name for which the current class is used.
    It's used to render the nocache blocks by loading the correct module.
    """
    if cls not in CacheTag._templatetags_modules:
        # Find the library including the main templatetag of the current
        # class: look up the registered `cache` function, whose value in
        # `all_tags` is a (library name, tag name) tuple.
        all_tags = cls.get_all_tags_and_filters_by_function()['tags']
        CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
    return CacheTag._templatetags_modules[cls]
def render_nocache(self):
    """
    Render the `nocache` blocks of the cached content and return the
    whole html.
    """
    # Build a one-off template: a {% load %} of our templatetags module
    # (needed for the RAW tags), then the cached content wrapped in RAW
    # markers so that the already-rendered parts are NOT parsed again —
    # only the {% nocache %} parts (stored as endRAW...RAW) get rendered.
    tmpl = template.Template(''.join([
        # start by loading the cache library
        template.BLOCK_TAG_START,
        'load %s' % self.get_templatetag_module(),
        template.BLOCK_TAG_END,
        # and surround the cached template by "raw" tags
        self.RAW_TOKEN_START,
        self.content,
        self.RAW_TOKEN_END,
    ]))
    return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
    """
    Validate the raw templatetag tokens and return the positional
    arguments to pass to the Node class.

    `tokens` is the full split of the tag contents, tag name included.
    Presumably the order is (expire_time, fragment_name, vary_on tokens),
    matching django's own cache tag — confirm against the Node class.
    Subclasses can override this to add validation or extra parsing.
    """
    if len(tokens) < 3:
        raise template.TemplateSyntaxError(
            "'%r' tag requires at least 2 arguments." % tokens[0])
    _tag_name, first_arg, second_arg, *remaining = tokens
    return first_arg, second_arg, remaining
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
    """
    Register all needed templatetags, with these parameters :
        * library_register : the `register` object (result of
          `template.Library()`) in your templatetag module
        * nodename : the node to use for the cache templatetag (the
          default is "cache")
        * nocache_nodename : the node to use for the nocache templatetag
    """
    if cls in CacheTag._templatetags:
        raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
    # Per-class registry of the three templatetag functions.
    CacheTag._templatetags[cls] = {}

    def templatetag_cache(parser, token):
        """
        Return a new Node object for the main cache templatetag.
        """
        nodelist = parser.parse(('end%s' % nodename,))
        parser.delete_first_token()
        args = cls.get_template_node_arguments(token.contents.split())
        return cls.Node(nodename, nodelist, *args)

    library_register.tag(nodename, templatetag_cache)
    CacheTag._templatetags[cls]['cache'] = templatetag_cache

    def templatetag_raw(parser, token):
        """
        Return a TextNode with all html not parsed, used for templatetags
        that need to not be parsed : the `nocache` one and the `RAW` one,
        used to surround cached html (to be not parsed again).
        Based on http://www.holovaty.com/writing/django-two-phased-rendering/
        """
        # Whatever is between {% nocache %} and {% endnocache %} will be
        # preserved as raw, un-rendered template code.
        text = []
        parse_until = 'end%s' % token.contents
        tag_mapping = {
            TOKEN_TEXT: ('', ''),
            TOKEN_VAR: ('{{', '}}'),
            TOKEN_BLOCK: ('{%', '%}'),
            TOKEN_COMMENT: ('{#', '#}'),
        }
        # By the time this template tag is called, the template system has
        # already lexed the template into tokens. Here, we loop over the
        # tokens until the closing tag and rebuild them as text. We have to
        # add the start and end bits (e.g. "{{" for variables) because those
        # have already been stripped off in a previous part of the
        # template-parsing process.
        while parser.tokens:
            token = parser.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
                return template.TextNode(''.join(text))
            start, end = tag_mapping[token.token_type]
            text.append('%s%s%s' % (start, token.contents, end))
        # NOTE(review): django's `Parser.unclosed_block_tag` expects a list
        # of tag names; passing the bare string only degrades the error
        # message, on the error path — confirm before changing.
        parser.unclosed_block_tag(parse_until)

    library_register.tag(cls.RAW_TOKEN, templatetag_raw)
    CacheTag._templatetags[cls]['raw'] = templatetag_raw

    def templatetag_nocache(parser, token):
        """
        Return a TextNode with raw html from the `nocache` templatetag,
        and surround it with `endRAW` and `RAW` (precisely
        `cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
        So for
            {% nocache %}foo{% endnocache %}
        we get
            {% endRAW... %}foo{% RAW... %}
        When the main cache templatetag content will be loaded from cache,
        it will be surrounded by the same templatetags, reversed.
        So if at first we had
            {% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
        The cached version will be
            bar{% endRAW... %}foo{% RAW... %}baz
        And the final html to be rendered will be
            {% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
        And the html within `RAW` and `endRAW` will not be parsed, as wanted.
        """
        # We'll load in the no-cache part all template tags and filters
        # loaded in the main template, to be able to use them when the
        # no-cache part will be rendered.
        all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
        available_tags = all_tags_and_filters['tags']
        available_filters = all_tags_and_filters['filters']
        needed = {}
        current_module = cls.get_templatetag_module()
        for function in parser.tags.values():
            if function in available_tags:
                lib, name = available_tags[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        for function in parser.filters.values():
            if function in available_filters:
                lib, name = available_filters[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        load_string = ''.join(
            '%sload %s from %s%s' % (
                template.BLOCK_TAG_START,
                ' '.join(names),
                lib,
                template.BLOCK_TAG_END,
            )
            for lib, names in needed.items()
        )
        node = templatetag_raw(parser, token)
        node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
        return node

    library_register.tag(nocache_nodename, templatetag_nocache)
    # Bug fix: store the function in this class's own entry, like 'cache'
    # and 'raw' above. The previous code wrote it under
    # `CacheTag._templatetags['nocache']`, polluting the class-keyed
    # registry and being clobbered by the next registered class.
    CacheTag._templatetags[cls]['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.get_cache_key_args
|
python
|
def get_cache_key_args(self):
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
|
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L326-L338
|
[
"def hash_args(self):\n \"\"\"\n Take all the arguments passed after the fragment name and return a\n hashed version which will be used in the cache key\n \"\"\"\n return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()\n",
"def get_pk(self):\n \"\"\"\n Return the pk to use in the cache key. It's the first version of the\n templatetag arguments after the fragment name\n \"\"\"\n return self.vary_on[0]\n"
] |
class CacheTag(object, metaclass=CacheTagMetaClass):
    """
    The main class of `django-adv-cache-tag` which does all the work.

    To change its behaviour, simply change one or more of these settings
    (see in the `Meta` class for details) :
        * ADV_CACHE_VERSIONING
        * ADV_CACHE_COMPRESS
        * ADV_CACHE_COMPRESS_SPACES
        * ADV_CACHE_INCLUDE_PK
        * ADV_CACHE_BACKEND
        * ADV_CACHE_VERSION
        * ADV_CACHE_RESOLVE_NAME

    Or inherit from this class and don't forget to register your tag :

        from adv_cache_tag.tag import CacheTag
        register = template.Library()
        class MyCacheTag(CacheTag):
            # change something
        MyCacheTag.register(register, 'my_cache')

    By inheriting you can change many things as CacheTag implements a lot of
    small methods
    """

    # Will change if the algorithm changes
    INTERNAL_VERSION = '1'

    # Used to separate internal version, template version, and the content
    VERSION_SEPARATOR = '::'

    # Regex used to reduce spaces/blanks (many spaces into one)
    RE_SPACELESS = re.compile(r'\s\s+')

    # generate a token for this site, based on the secret_key
    RAW_TOKEN = 'RAW_' + hashlib.sha1(
        b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
            b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
        ).hexdigest())
    ).hexdigest()

    # tokens to use around the already parsed parts of the cached template
    RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
    RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END

    # internal use only: keep reference to templatetags functions
    _templatetags = {}
    # internal use only: name of the templatetags module to load for this class and subclasses
    _templatetags_modules = {}

    options = None
    Node = Node

    class Meta:
        """
        Options of this class. Accessible via cls.options or self.options.
        To force (and/or add) options in your own class, simply redefine a
        `Meta` class in your own main cache class with updated/added values.
        """
        # If versioning is activated (internal versioning is always on)
        versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
        # If the content will be compressed before caching
        compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
        # If many spaces/blanks will be converted into one
        compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
        # If a "pk" (you can pass what you want) will be added to the cache key
        include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
        # The cache backend to use (or use the "default" one)
        cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
        # Part of the INTERNAL_VERSION configurable via settings
        internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
        # If the fragment name should be resolved or taken as is
        resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)

    # Use a metaclass to use the right class in the Node class, and assign Meta to options

    def __init__(self, node, context):
        """
        Constructor of the Cache class:
            * preparing fields to be used later,
            * prepare the templatetag parameters
            * create the cache key
        """
        super(CacheTag, self).__init__()
        # the actual Node object
        self.node = node
        # the context used for the rendering
        self.context = context
        # indicate that we force regenerating the cache, even if it exists
        self.regenerate = bool(self.context.get('__regenerate__', False))
        # indicate if we only want html without parsing the nocache parts
        self.partial = bool(self.context.get('__partial__', False))
        # the content of the template, will be used through the whole process
        self.content = ''
        # the version used in the cached templatetag
        self.content_version = None
        # Final "INTERNAL_VERSION"
        if self.options.internal_version:
            # Bug fix: both parts must be bytes — under python 3,
            # ``b'%s' % a_str`` raises a TypeError (PEP 461), and both
            # INTERNAL_VERSION and the setting are str by default.
            self.INTERNAL_VERSION = b'%s|%s' % (
                force_bytes(self.__class__.INTERNAL_VERSION),
                force_bytes(self.options.internal_version))
        else:
            self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
        self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
        # prepare all parameters passed to the templatetag
        self.expire_time = None
        self.version = None
        self.prepare_params()
        # get the cache and cache key
        self.cache = self.get_cache_object()
        self.cache_key = self.get_cache_key()

    def prepare_params(self):
        """
        Prepare the parameters passed to the templatetag
        """
        if self.options.resolve_fragment:
            self.fragment_name = self.node.fragment_name.resolve(self.context)
        else:
            self.fragment_name = str(self.node.fragment_name)
            # Remove quotes that surround the name
            for char in '\'\"':
                if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
                    if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
                        self.fragment_name = self.fragment_name[1:-1]
                        break
                    else:
                        raise ValueError('Number of quotes around the fragment name is incoherent')
        self.expire_time = self.get_expire_time()
        if self.options.versioning:
            self.version = force_bytes(self.get_version())
        self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]

    def get_expire_time(self):
        """
        Return the expire time passed to the templatetag.
        Must be None or an integer.
        """
        try:
            expire_time = self.node.expire_time.resolve(self.context)
        except template.VariableDoesNotExist:
            raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
                                               (self.node.nodename, self.node.expire_time.var))
        try:
            if expire_time is not None:
                expire_time = str(expire_time)
                if not expire_time.isdigit():
                    raise TypeError
                expire_time = int(expire_time)
        except (ValueError, TypeError):
            raise template.TemplateSyntaxError(
                '"%s" tag got a non-integer (or None) timeout value: %r' % (
                    self.node.nodename, expire_time
                )
            )
        return expire_time

    def get_version(self):
        """
        Return the stringified version passed to the templatetag.
        """
        if not self.node.version:
            return None
        try:
            version = smart_str('%s' % self.node.version.resolve(self.context))
        except template.VariableDoesNotExist:
            raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
                                               (self.node.nodename, self.node.version.var))
        return '%s' % version

    def hash_args(self):
        """
        Take all the arguments passed after the fragment name and return a
        hashed version which will be used in the cache key
        """
        return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()

    def get_pk(self):
        """
        Return the pk to use in the cache key. It's the first version of the
        templatetag arguments after the fragment name
        """
        return self.vary_on[0]

    def get_base_cache_key(self):
        """
        Return a string with format placeholder used as a source to compute the
        final cache key.
        Placeholders are :
            * %(nodename)s : the name of the templatetag
            * %(name)s : the fragment name passed to the templatetag
            * %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
            * %(hash)s : the return of the `hash_args` method
        """
        if self.options.include_pk:
            return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
        else:
            return 'template.%(nodename)s.%(name)s.%(hash)s'

    def get_cache_key_args(self):
        """
        Return the arguments to be passed to the base cache key returned by
        `get_base_cache_key`.
        """
        # Consistency fix: this method is called by `get_cache_key` below and
        # must be defined on the class.
        cache_key_args = dict(
            nodename=self.node.nodename,
            name=self.fragment_name,
            hash=self.hash_args(),
        )
        if self.options.include_pk:
            cache_key_args['pk'] = self.get_pk()
        return cache_key_args

    def get_cache_key(self):
        """
        Compute and return the final cache key, using return values of
        `get_base_cache_key` and `get_cache_key_args`.
        """
        return self.get_base_cache_key() % self.get_cache_key_args()

    def get_cache_object(self):
        """
        Return the cache object to be used to set and get the values in cache.
        By default it's the default cache defined by django, but it can be
        every object with a `get` and a `set` method (or not, if `cache_get`
        and `cache_set` methods are overridden)
        """
        return get_cache(self.node.cache_backend or self.options.cache_backend)

    def cache_get(self):
        """
        Get content from the cache
        """
        return self.cache.get(self.cache_key)

    def cache_set(self, to_cache):
        """
        Set content into the cache
        """
        self.cache.set(self.cache_key, to_cache, self.expire_time)

    def join_content_version(self, to_cache):
        """
        Add the version(s) to the content to cache : internal version at first
        and then the template version if versioning is activated.
        Each version, and the content, are separated with `VERSION_SEPARATOR`.
        This method is called after the encoding (if "compress" or
        "compress_spaces" options are on)
        """
        parts = [self.INTERNAL_VERSION]
        if self.options.versioning:
            parts.append(force_bytes(self.version))
        parts.append(force_bytes(to_cache))
        return self.VERSION_SEPARATOR.join(parts)

    def split_content_version(self):
        """
        Remove and return the version(s) from the cached content. First the
        internal version, and if versioning is activated, the template one.
        And finally save the content, but only if all versions match.
        The content saved is the encoded one (if "compress" or
        "compress_spaces" options are on). By doing so, we avoid decoding if
        the versions didn't match, to save some cpu cycles.
        """
        try:
            nb_parts = 2
            if self.options.versioning:
                nb_parts = 3
            parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
            assert len(parts) == nb_parts
            self.content_internal_version = parts[0]
            if self.options.versioning:
                self.content_version = parts[1]
            self.content = parts[-1]
        except Exception:
            self.content = None

    def decode_content(self):
        """
        Decode (decompress...) the content got from the cache, to the final
        html
        """
        self.content = pickle.loads(zlib.decompress(self.content))

    def encode_content(self):
        """
        Encode (compress...) the html to the data to be cached
        """
        return zlib.compress(pickle.dumps(self.content))

    def render_node(self):
        """
        Render the template and save the generated content
        """
        self.content = self.node.nodelist.render(self.context)

    def create_content(self):
        """
        Render the template, apply options on it, and save it to the cache.
        """
        self.render_node()
        if self.options.compress_spaces:
            self.content = self.RE_SPACELESS.sub(' ', self.content)
        if self.options.compress:
            to_cache = self.encode_content()
        else:
            to_cache = self.content
        to_cache = self.join_content_version(to_cache)
        try:
            self.cache_set(to_cache)
        except Exception:
            if is_template_debug_activated():
                raise
            logger.exception('Error when saving the cached template fragment')

    def load_content(self):
        """
        It's the main method of the class.
        Try to load the template from cache, get the versions and decode the
        content.
        If something was wrong during this process (or if we had a
        `__regenerate__` value to True in the context), create new content and
        save it in cache.
        """
        self.content = None
        if not self.regenerate:
            try:
                self.content = self.cache_get()
            except Exception:
                if is_template_debug_activated():
                    raise
                logger.exception('Error when getting the cached template fragment')
        try:
            assert self.content
            self.split_content_version()
            assert self.content
            if self.content_internal_version != self.INTERNAL_VERSION or (
                    self.options.versioning and self.content_version != self.version):
                self.content = None
            assert self.content
            if self.options.compress:
                self.decode_content()
        except Exception:
            self.create_content()
        self.content = smart_str(self.content)

    def render(self):
        """
        Try to load content (from cache or by rendering the template).

        If it fails, return an empty string, or re-raise the exception if
        it's a TemplateSyntaxError (or if template debugging is activated).

        Then parse and render the content included in the {% nocache %}
        blocks, but only if such a block is present and `__partial__` is
        not True in the context (in which case we simply return the html
        with the {% nocache %} blocks left unparsed).
        """
        try:
            self.load_content()
        except template.TemplateSyntaxError:
            raise
        except Exception:
            if is_template_debug_activated():
                raise
            logger.exception('Error when rendering template fragment')
            return ''
        if self.partial or self.RAW_TOKEN_START not in self.content:
            return self.content
        return self.render_nocache()

    @staticmethod
    def get_all_tags_and_filters_by_function():
        """
        Return a dict with all the template tags (in the `tags` entry) and filters (in the
        `filters` entry) that are available.
        Both entries are a dict with the function as key, and a tuple with (library name, function
        name) as value.
        This is cached after the first call.
        """
        libraries = get_template_libraries()
        force = False
        # We'll force the update of the cache if new libraries were added
        if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
            if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
                force = True
        if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
            CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
            available_tags = {}
            available_filters = {}
            for lib_name, lib in libraries.items():
                available_tags.update(
                    (function, (lib_name, tag_name))
                    for tag_name, function
                    in lib.tags.items()
                )
                available_filters.update(
                    (function, (lib_name, filter_name))
                    for filter_name, function
                    in lib.filters.items()
                )
            CacheTag.get_all_tags_and_filters_by_function._cache = {
                'tags': available_tags,
                'filters': available_filters
            }
        return CacheTag.get_all_tags_and_filters_by_function._cache

    @classmethod
    def get_templatetag_module(cls):
        """
        Return the templatetags module name for which the current class is used.
        It's used to render the nocache blocks by loading the correct module
        """
        if cls not in CacheTag._templatetags_modules:
            # find the library including the main templatetag of the current class
            all_tags = cls.get_all_tags_and_filters_by_function()['tags']
            CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
        return CacheTag._templatetags_modules[cls]

    def render_nocache(self):
        """
        Render the `nocache` blocks of the content and return the whole
        html
        """
        tmpl = template.Template(''.join([
            # start by loading the cache library
            template.BLOCK_TAG_START,
            'load %s' % self.get_templatetag_module(),
            template.BLOCK_TAG_END,
            # and surround the cached template by "raw" tags
            self.RAW_TOKEN_START,
            self.content,
            self.RAW_TOKEN_END,
        ]))
        return tmpl.render(self.context)

    @classmethod
    def get_template_node_arguments(cls, tokens):
        """
        Return the arguments taken from the templatetag that will be used to the
        Node class.
        Take a list of all tokens and return a list of real tokens. Here
        should be done some validations (number of tokens...) and eventually
        some parsing...
        """
        if len(tokens) < 3:
            raise template.TemplateSyntaxError(
                "'%r' tag requires at least 2 arguments." % tokens[0])
        return tokens[1], tokens[2], tokens[3:]

    @classmethod
    def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
        """
        Register all needed templatetags, with these parameters :
            * library_register : the `register` object (result of
              `template.Library()`) in your templatetag module
            * nodename : the node to use for the cache templatetag (the default
              is "cache")
            * nocache_nodename : the node to use for the nocache templatetag
        """
        if cls in CacheTag._templatetags:
            raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
        CacheTag._templatetags[cls] = {}

        def templatetag_cache(parser, token):
            """
            Return a new Node object for the main cache templatetag
            """
            nodelist = parser.parse(('end%s' % nodename,))
            parser.delete_first_token()
            args = cls.get_template_node_arguments(token.contents.split())
            return cls.Node(nodename, nodelist, *args)

        library_register.tag(nodename, templatetag_cache)
        CacheTag._templatetags[cls]['cache'] = templatetag_cache

        def templatetag_raw(parser, token):
            """
            Return a TextNode with all html not parsed, used for templatetags
            that need to not be parsed : the `nocache` one and the `RAW` one,
            used to surround cached html (to be not parsed again)
            Based on http://www.holovaty.com/writing/django-two-phased-rendering/
            """
            # Whatever is between {% nocache %} and {% endnocache %} will be preserved as
            # raw, un-rendered template code.
            text = []
            parse_until = 'end%s' % token.contents
            tag_mapping = {
                TOKEN_TEXT: ('', ''),
                TOKEN_VAR: ('{{', '}}'),
                TOKEN_BLOCK: ('{%', '%}'),
                TOKEN_COMMENT: ('{#', '#}'),
            }
            # By the time this template tag is called, the template system has already
            # lexed the template into tokens. Here, we loop over the tokens until
            # {% endraw %} and parse them to TextNodes. We have to add the start and
            # end bits (e.g. "{{" for variables) because those have already been
            # stripped off in a previous part of the template-parsing process.
            while parser.tokens:
                token = parser.next_token()
                if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
                    return template.TextNode(''.join(text))
                start, end = tag_mapping[token.token_type]
                text.append('%s%s%s' % (start, token.contents, end))
            parser.unclosed_block_tag(parse_until)

        library_register.tag(cls.RAW_TOKEN, templatetag_raw)
        CacheTag._templatetags[cls]['raw'] = templatetag_raw

        def templatetag_nocache(parser, token):
            """
            Return a TextNode with raw html from the `nocache` templatetag,
            and surround it with `endRAW` and `RAW` (precisely
            `cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
            So for
                {% nocache %}foo{% endnocache %}
            we get
                {% endRAW... %}foo{% RAW... %}
            When the main cache templatetag content will be loaded from cache,
            it will be surrounded by the same templatetags, reversed.
            So if at first we had
                {% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
            The cached version will be
                bar{% endRAW... %}foo{% RAW... %}baz
            And the final html to be rendered will be
                {% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
            And the html within `RAW` and `endRAW` will not be parsed, as wanted
            """
            # We'll load in the no-cache part all template tags and filters loaded in the main
            # template, to be able to use it when the no-cache will be rendered
            all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
            available_tags = all_tags_and_filters['tags']
            available_filters = all_tags_and_filters['filters']
            needed = {}
            current_module = cls.get_templatetag_module()
            for function in parser.tags.values():
                if function in available_tags:
                    lib, name = available_tags[function]
                    if lib == current_module:
                        continue
                    needed.setdefault(lib, set()).add(name)
            for function in parser.filters.values():
                if function in available_filters:
                    lib, name = available_filters[function]
                    if lib == current_module:
                        continue
                    needed.setdefault(lib, set()).add(name)
            load_string = ''.join(
                '%sload %s from %s%s' % (
                    template.BLOCK_TAG_START,
                    ' '.join(names),
                    lib,
                    template.BLOCK_TAG_END,
                )
                for lib, names in needed.items()
            )
            node = templatetag_raw(parser, token)
            node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
            return node

        library_register.tag(nocache_nodename, templatetag_nocache)
        # Bug fix: store the function in this class's own entry, like
        # 'cache' and 'raw' above. Writing `CacheTag._templatetags['nocache']`
        # polluted the class-keyed registry and was clobbered by the next
        # registered class.
        CacheTag._templatetags[cls]['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.cache_set
|
python
|
def cache_set(self, to_cache):
self.cache.set(self.cache_key, to_cache, self.expire_time)
|
Set content into the cache
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L362-L366
| null |
class CacheTag(object, metaclass=CacheTagMetaClass):
    """
    The main class of `django-adv-cache-tag` which does all the work.

    To change its behaviour, simply change one or more of these settings
    (see the `Meta` class for details):
      * ADV_CACHE_VERSIONING
      * ADV_CACHE_COMPRESS
      * ADV_CACHE_COMPRESS_SPACES
      * ADV_CACHE_INCLUDE_PK
      * ADV_CACHE_BACKEND
      * ADV_CACHE_VERSION
      * ADV_CACHE_RESOLVE_NAME

    Or inherit from this class and don't forget to register your tag:

        from adv_cache_tag.tag import CacheTag
        register = template.Library()

        class MyCacheTag(CacheTag):
            # change something

        MyCacheTag.register(register, 'my_cache')

    By inheriting you can change many things as CacheTag implements a lot of
    small methods.
    """

    # Will change if the algorithm changes
    INTERNAL_VERSION = '1'

    # Used to separate internal version, template version, and the content
    VERSION_SEPARATOR = '::'

    # Regex used to reduce spaces/blanks (many spaces into one)
    RE_SPACELESS = re.compile(r'\s\s+')

    # Generate a site-specific token, derived from SECRET_KEY via a double
    # salted sha1, so the raw markers cannot be guessed from template content
    RAW_TOKEN = 'RAW_' + hashlib.sha1(
        b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
            b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
        ).hexdigest())
    ).hexdigest()

    # Tokens to use around the already-parsed parts of the cached template
    RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
    RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END

    # Internal use only: keep references to templatetag functions
    _templatetags = {}

    # Internal use only: name of the templatetags module to load for this class and subclasses
    _templatetags_modules = {}

    # Set from the `Meta` class below (see the metaclass note at the end of this block)
    options = None

    Node = Node

    class Meta:
        """
        Options of this class. Accessible via cls.options or self.options.

        To force (and/or add) options in your own class, simply redefine a
        `Meta` class in your own main cache class with updated/added values.
        """

        # If versioning is activated (internal versioning is always on)
        versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)

        # If the content will be compressed before caching
        compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)

        # If many spaces/blanks will be converted into one
        compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)

        # If a "pk" (you can pass what you want) will be added to the cache key
        include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)

        # The cache backend to use (or use the "default" one)
        cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')

        # Part of the INTERNAL_VERSION configurable via settings
        internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')

        # If the fragment name should be resolved or taken as is
        resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)

    # Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
    """
    Constructor of the Cache class:
      * prepare fields to be used later,
      * prepare the templatetag parameters,
      * create the cache key.
    """
    super(CacheTag, self).__init__()
    # The actual Node object
    self.node = node
    # The context used for the rendering
    self.context = context
    # Indicate that we force regenerating the cache, even if it exists
    self.regenerate = bool(self.context.get('__regenerate__', False))
    # Indicate if we only want html without parsing the nocache parts
    self.partial = bool(self.context.get('__partial__', False))
    # The content of the template, used through the whole process
    self.content = ''
    # The version used in the cached templatetag
    self.content_version = None
    # Final "INTERNAL_VERSION": combine the class algorithm version with the
    # settings-provided one (if any), kept as bytes for the cache payload.
    if self.options.internal_version:
        # NOTE(review): b'%s' formatting requires bytes-like operands (PEP 461);
        # confirm INTERNAL_VERSION / internal_version are bytes on this branch.
        self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
                                            self.options.internal_version)
    else:
        self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
    self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
    # Prepare all parameters passed to the templatetag
    self.expire_time = None
    self.version = None
    self.prepare_params()
    # Get the cache and cache key
    self.cache = self.get_cache_object()
    self.cache_key = self.get_cache_key()
def prepare_params(self):
    """
    Prepare the parameters passed to the templatetag: the fragment name
    (resolved against the context or taken literally depending on the
    `resolve_fragment` option), the expiry time, the version (when the
    `versioning` option is on) and the resolved "vary on" arguments.
    """
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    # quote-stripping is taken to apply only to the unresolved literal name.
    if self.options.resolve_fragment:
        self.fragment_name = self.node.fragment_name.resolve(self.context)
    else:
        self.fragment_name = str(self.fragment_name) if False else str(self.node.fragment_name)
        # Remove quotes that surround the name, but only when present on BOTH sides
        for char in '\'\"':
            if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
                if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
                    self.fragment_name = self.fragment_name[1:-1]
                    break
                else:
                    # A quote on a single side is an error, not something to ignore
                    raise ValueError('Number of quotes around the fragment name is incoherent')
    self.expire_time = self.get_expire_time()
    if self.options.versioning:
        self.version = force_bytes(self.get_version())
    # Resolve each extra argument against the current context
    self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
    """Resolve and validate the timeout argument of the templatetag.

    Returns ``None`` (meaning "no timeout") or a non-negative integer
    number of seconds. Raises ``TemplateSyntaxError`` when the variable
    is unknown or the value is not a valid integer.
    """
    try:
        timeout = self.node.expire_time.resolve(self.context)
    except template.VariableDoesNotExist:
        raise template.TemplateSyntaxError(
            '"%s" tag got an unknown variable: %r'
            % (self.node.nodename, self.node.expire_time.var))
    try:
        if timeout is not None:
            # Only strings of digits are accepted (so no negatives, no floats)
            timeout = str(timeout)
            if not timeout.isdigit():
                raise TypeError
            timeout = int(timeout)
    except (ValueError, TypeError):
        raise template.TemplateSyntaxError(
            '"%s" tag got a non-integer (or None) timeout value: %r'
            % (self.node.nodename, timeout))
    return timeout
def get_version(self):
    """Return the version argument as a string, or None when it was not given."""
    if not self.node.version:
        return None
    try:
        resolved = self.node.version.resolve(self.context)
    except template.VariableDoesNotExist:
        raise template.TemplateSyntaxError(
            '"%s" tag got an unknown variable: %r'
            % (self.node.nodename, self.node.version.var))
    return '%s' % smart_str('%s' % resolved)
def hash_args(self):
    """Return an md5 hex digest of all "vary on" arguments, for use in the cache key."""
    quoted = (urlquote(force_bytes(value)) for value in self.vary_on)
    return hashlib.md5(force_bytes(':'.join(quoted))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
    """Return the cache-key format string.

    Placeholders:
      * %(nodename)s : name of the templatetag
      * %(name)s     : fragment name passed to the templatetag
      * %(pk)s       : result of `get_pk` (only when `include_pk` is on)
      * %(hash)s     : result of `hash_args`
    """
    base = 'template.%(nodename)s.%(name)s.'
    if self.options.include_pk:
        base += '%(pk)s.'
    return base + '%(hash)s'
def get_cache_key_args(self):
    """Return the values to interpolate into the format string of `get_base_cache_key`."""
    key_args = {
        'nodename': self.node.nodename,
        'name': self.fragment_name,
        'hash': self.hash_args(),
    }
    if self.options.include_pk:
        key_args['pk'] = self.get_pk()
    return key_args
def get_cache_key(self):
    """Interpolate `get_cache_key_args` into `get_base_cache_key` to get the final key."""
    template_key = self.get_base_cache_key()
    return template_key % self.get_cache_key_args()
def get_cache_object(self):
    """Return the cache object used to get/set values.

    The backend passed to the templatetag wins over the one in the options.
    Any object with `get` and `set` methods works (or not even that, if
    `cache_get`/`cache_set` are overridden).
    """
    backend = self.node.cache_backend or self.options.cache_backend
    return get_cache(backend)
def cache_get(self):
    """Fetch the cached fragment stored under our cache key."""
    return self.cache.get(self.cache_key)
def join_content_version(self, to_cache):
    """Prefix *to_cache* with the internal version (and the template version
    when versioning is on), all joined with `VERSION_SEPARATOR`.

    Called after encoding (when "compress" / "compress_spaces" are on).
    """
    head = [self.INTERNAL_VERSION]
    if self.options.versioning:
        head.append(force_bytes(self.version))
    return self.VERSION_SEPARATOR.join(head + [force_bytes(to_cache)])
def split_content_version(self):
"""
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
"""
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content))
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def render_node(self):
    """Render the inner nodelist with the current context and keep the html."""
    self.content = self.node.nodelist.render(self.context)
def create_content(self):
    """Render the fragment, apply the compression options, and store it in cache."""
    self.render_node()
    if self.options.compress_spaces:
        self.content = self.RE_SPACELESS.sub(' ', self.content)
    payload = self.encode_content() if self.options.compress else self.content
    payload = self.join_content_version(payload)
    try:
        self.cache_set(payload)
    except Exception:
        # A broken cache backend must not break the page, except in debug mode
        if is_template_debug_activated():
            raise
        logger.exception('Error when saving the cached template fragment')
def load_content(self):
    """
    Main method of the class.

    Try to load the fragment from cache, check the versions and decode the
    content. If anything goes wrong in that process (or if `__regenerate__`
    is True in the context), create new content and save it in the cache.
    """
    self.content = None
    if not self.regenerate:
        try:
            self.content = self.cache_get()
        except Exception:
            # A failing cache backend must not break the page, except in debug
            if is_template_debug_activated():
                raise
            logger.exception('Error when getting the cached template fragment')
    try:
        # The asserts funnel every "miss or invalid cache entry" case into the
        # except clause below, which regenerates and re-caches the content.
        assert self.content
        self.split_content_version()
        assert self.content
        # Any internal or template version mismatch invalidates the entry
        if self.content_internal_version != self.INTERNAL_VERSION or (
                self.options.versioning and self.content_version != self.version):
            self.content = None
        assert self.content
        if self.options.compress:
            self.decode_content()
    except Exception:
        self.create_content()
    self.content = smart_str(self.content)
def render(self):
    """
    Try to load content (from cache or by rendering the template).

    If it fails, return an empty string, or re-raise when the exception is a
    TemplateSyntaxError (or when template debugging is activated).

    With this, we can now parse and render the content included in the
    {% nocache %} blocks, but only if we have this tag and if we don't
    have `__partial__` set to True in the context (in that case we simply
    return the html with the {% nocache %} blocks not parsed).
    """
    try:
        self.load_content()
    except template.TemplateSyntaxError:
        raise
    except Exception:
        if is_template_debug_activated():
            raise
        logger.exception('Error when rendering template fragment')
        return ''
    # Fast path: no post-processing when partial rendering was asked for,
    # or when the cached html contains no raw-token markers at all
    if self.partial or self.RAW_TOKEN_START not in self.content:
        return self.content
    return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
    """
    Return a dict with all the available template tags (in the `tags` entry)
    and filters (in the `filters` entry).

    Both entries are dicts with the function as key, and a tuple of
    (library name, function name) as value.

    The result is memoized as attributes on this very function after the
    first call, and rebuilt when the number of known libraries changes.
    """
    libraries = get_template_libraries()
    force = False
    # We'll force the update of the cache if new libraries were added
    if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
        if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
            force = True
    if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
        CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
        available_tags = {}
        available_filters = {}
        for lib_name, lib in libraries.items():
            available_tags.update(
                (function, (lib_name, tag_name))
                for tag_name, function
                in lib.tags.items()
            )
            available_filters.update(
                (function, (lib_name, filter_name))
                for filter_name, function
                in lib.filters.items()
            )
        CacheTag.get_all_tags_and_filters_by_function._cache = {
            'tags': available_tags,
            'filters': available_filters
        }
    return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
    """Return (and memoize) the name of the templatetags library that
    registered the main cache templatetag of the current class.

    Used when rendering the nocache blocks, to load the correct module.
    """
    try:
        return CacheTag._templatetags_modules[cls]
    except KeyError:
        # Find the library that includes this class's main cache templatetag
        all_tags = cls.get_all_tags_and_filters_by_function()['tags']
        module = all_tags[CacheTag._templatetags[cls]['cache']][0]
        CacheTag._templatetags_modules[cls] = module
        return module
def render_nocache(self):
    """Parse only the {% nocache %} parts of the cached html and return the
    fully rendered result."""
    pieces = [
        # Start by loading the cache library...
        template.BLOCK_TAG_START,
        'load %s' % self.get_templatetag_module(),
        template.BLOCK_TAG_END,
        # ...then surround the cached template with the "raw" tags
        self.RAW_TOKEN_START,
        self.content,
        self.RAW_TOKEN_END,
    ]
    return template.Template(''.join(pieces)).render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
    """Validate the templatetag tokens and split them into Node arguments.

    Takes the full token list (tag name included) and returns the two first
    arguments plus the rest as a list. Validations (number of tokens, ...)
    and any extra parsing belong here.
    """
    if len(tokens) < 3:
        raise template.TemplateSyntaxError(
            "'%r' tag requires at least 2 arguments." % tokens[0])
    first, second, rest = tokens[1], tokens[2], tokens[3:]
    return first, second, rest
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
    """
    Register all needed templatetags.

    Parameters:
      * library_register: the `register` object (result of `template.Library()`)
        in your templatetag module
      * nodename: name to use for the main cache templatetag (default "cache")
      * nocache_nodename: name to use for the nocache templatetag
    """
    if cls in CacheTag._templatetags:
        raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
    CacheTag._templatetags[cls] = {}

    def templatetag_cache(parser, token):
        """
        Return a new Node object for the main cache templatetag.
        """
        nodelist = parser.parse(('end%s' % nodename,))
        parser.delete_first_token()
        args = cls.get_template_node_arguments(token.contents.split())
        return cls.Node(nodename, nodelist, *args)

    library_register.tag(nodename, templatetag_cache)
    CacheTag._templatetags[cls]['cache'] = templatetag_cache

    def templatetag_raw(parser, token):
        """
        Return a TextNode with all html not parsed, used for templatetags
        that must not be parsed: the `nocache` one and the `RAW` one,
        used to surround cached html (so it is not parsed again).

        Based on http://www.holovaty.com/writing/django-two-phased-rendering/
        """
        # Whatever is between {% nocache %} and {% endnocache %} will be preserved as
        # raw, un-rendered template code.
        text = []
        parse_until = 'end%s' % token.contents
        tag_mapping = {
            TOKEN_TEXT: ('', ''),
            TOKEN_VAR: ('{{', '}}'),
            TOKEN_BLOCK: ('{%', '%}'),
            TOKEN_COMMENT: ('{#', '#}'),
        }
        # By the time this template tag is called, the template system has already
        # lexed the template into tokens. Here, we loop over the tokens until the
        # end tag and turn them back into text, re-adding the start and end bits
        # (e.g. "{{" for variables) stripped by the lexing step.
        while parser.tokens:
            token = parser.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
                return template.TextNode(''.join(text))
            start, end = tag_mapping[token.token_type]
            text.append('%s%s%s' % (start, token.contents, end))
        parser.unclosed_block_tag(parse_until)

    library_register.tag(cls.RAW_TOKEN, templatetag_raw)
    CacheTag._templatetags[cls]['raw'] = templatetag_raw

    def templatetag_nocache(parser, token):
        """
        Return a TextNode with raw html from the `nocache` templatetag,
        surrounded by `endRAW` and `RAW` (precisely `cls.RAW_TOKEN_END`
        and `cls.RAW_TOKEN_START`), so that the nocache part IS parsed
        when the cached html (itself wrapped in RAW.../endRAW...) is
        rendered, while everything around it stays unparsed.
        """
        # Load in the no-cache part all template tags and filters loaded in the
        # main template, to be able to use them when the no-cache part renders
        all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
        available_tags = all_tags_and_filters['tags']
        available_filters = all_tags_and_filters['filters']
        needed = {}
        current_module = cls.get_templatetag_module()
        for function in parser.tags.values():
            if function in available_tags:
                lib, name = available_tags[function]
                if lib == current_module:
                    # Our own library is loaded explicitly by `render_nocache`
                    continue
                needed.setdefault(lib, set()).add(name)
        for function in parser.filters.values():
            if function in available_filters:
                lib, name = available_filters[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        load_string = ''.join(
            '%sload %s from %s%s' % (
                template.BLOCK_TAG_START,
                ' '.join(names),
                lib,
                template.BLOCK_TAG_END,
            )
            for lib, names in needed.items()
        )
        node = templatetag_raw(parser, token)
        node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
        return node

    library_register.tag(nocache_nodename, templatetag_nocache)
    # Bug fix: store the nocache function under the per-class dict, consistent
    # with the 'cache' and 'raw' entries above (was a top-level key, which
    # would collide between different registered CacheTag subclasses).
    CacheTag._templatetags[cls]['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.join_content_version
|
python
|
def join_content_version(self, to_cache):
parts = [self.INTERNAL_VERSION]
if self.options.versioning:
parts.append(force_bytes(self.version))
parts.append(force_bytes(to_cache))
return self.VERSION_SEPARATOR.join(parts)
|
Add the version(s) to the content to cache : internal version at first
and then the template version if versioning is activated.
Each version, and the content, are separated with `VERSION_SEPARATOR`.
This method is called after the encoding (if "compress" or
"compress_spaces" options are on)
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L368-L381
| null |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
"""
Constructor of the Cache class:
* preparing fields to be used later,
* prepare the templatetag parameters
* create the cache key
"""
super(CacheTag, self).__init__()
# the actual Node object
self.node = node
# the context used for the rendering
self.context = context
# indicate that we force regenerating the cache, even if it exists
self.regenerate = bool(self.context.get('__regenerate__', False))
# indicate if we only want html without parsing the nocache parts
self.partial = bool(self.context.get('__partial__', False))
# the content of the template, will be used through the whole process
self.content = ''
# the version used in the cached templatetag
self.content_version = None
# Final "INTERNAL_VERSION"
if self.options.internal_version:
self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
self.options.internal_version)
else:
self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
# prepare all parameters passed to the templatetag
self.expire_time = None
self.version = None
self.prepare_params()
# get the cache and cache key
self.cache = self.get_cache_object()
self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
"""
Return the expire time passed to the templatetag.
Must be None or an integer.
"""
try:
expire_time = self.node.expire_time.resolve(self.context)
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.expire_time.var))
try:
if expire_time is not None:
expire_time = str(expire_time)
if not expire_time.isdigit():
raise TypeError
expire_time = int(expire_time)
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
'"%s" tag got a non-integer (or None) timeout value: %r' % (
self.node.nodename, expire_time
)
)
return expire_time
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
def get_cache_key(self):
"""
Compute and return the final cache key, using return values of
`get_base_cache_key` and `get_cache_key_args`.
"""
return self.get_base_cache_key() % self.get_cache_key_args()
def get_cache_object(self):
"""
Return the cache object to be used to set and get the values in cache.
By default it's the default cache defined by django, but it can be
every object with a `get` and a `set` method (or not, if `cache_get`
and `cache_set` methods are overridden)
"""
return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
"""
Get content from the cache
"""
return self.cache.get(self.cache_key)
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time)
def split_content_version(self):
"""
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
"""
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content))
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def render_node(self):
"""
Render the template and save the generated content
"""
self.content = self.node.nodelist.render(self.context)
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
def load_content(self):
"""
It's the main method of the class.
Try to load the template from cache, get the versions and decode the
content.
If something was wrong during this process (or if we had a
`__regenerate__` value to True in the context), create new content and
save it in cache.
"""
self.content = None
if not self.regenerate:
try:
self.content = self.cache_get()
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when getting the cached template fragment')
try:
assert self.content
self.split_content_version()
assert self.content
if self.content_internal_version != self.INTERNAL_VERSION or (
self.options.versioning and self.content_version != self.version):
self.content = None
assert self.content
if self.options.compress:
self.decode_content()
except Exception:
self.create_content()
self.content = smart_str(self.content)
def render(self):
"""
Try to load content (from cache or by rendering the template).
If it fails, return an empty string or raise the exception if it's a
TemplateSyntaxError.
With this, we can no parse and render the content included in the
{% nocache %} blocks, but only if we have have this tag and if we don't
have `__partial__` to True in the context (in this case we simple
return the html with the {% nocache %} block not parsed.
"""
try:
self.load_content()
except template.TemplateSyntaxError:
raise
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when rendering template fragment')
return ''
if self.partial or self.RAW_TOKEN_START not in self.content:
return self.content
return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
"""
Return a dict with all the template tags (in the `tags` entry) and filters (in the
`filters` entry) that are available.
Both entries are a dict with the function as key, and a tuple with (library name, function
name) as value.
This is cached after the first call.
"""
libraries = get_template_libraries()
force = False
# We'll force the update of the cache if new libraries where added
if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
force = True
if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
available_tags = {}
available_filters = {}
for lib_name, lib in libraries.items():
available_tags.update(
(function, (lib_name, tag_name))
for tag_name, function
in lib.tags.items()
)
available_filters.update(
(function, (lib_name, filter_name))
for filter_name, function
in lib.filters.items()
)
CacheTag.get_all_tags_and_filters_by_function._cache = {
'tags': available_tags,
'filters': available_filters
}
return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
"""
Return the templatetags module name for which the current class is used.
It's used to render the nocache blocks by loading the correct module
"""
if cls not in CacheTag._templatetags_modules:
# find the library including the main templatetag of the current class
all_tags = cls.get_all_tags_and_filters_by_function()['tags']
CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
return CacheTag._templatetags_modules[cls]
def render_nocache(self):
"""
Render the `nocache` blocks of the content and return the whole
html
"""
tmpl = template.Template(''.join([
# start by loading the cache library
template.BLOCK_TAG_START,
'load %s' % self.get_templatetag_module(),
template.BLOCK_TAG_END,
# and surround the cached template by "raw" tags
self.RAW_TOKEN_START,
self.content,
self.RAW_TOKEN_END,
]))
return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
"""
Return the arguments taken from the templatetag that will be used to the
Node class.
Take a list of all tokens and return a list of real tokens. Here
should be done some validations (number of tokens...) and eventually
some parsing...
"""
if len(tokens) < 3:
raise template.TemplateSyntaxError(
"'%r' tag requires at least 2 arguments." % tokens[0])
return tokens[1], tokens[2], tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
"""
Register all needed templatetags, with these parameters :
* library_register : the `register` object (result of
`template.Library()`) in your templatetag module
* nodename : the node to use for the cache templatetag (the default
is "cache")
* nocache_nodename : the node to use for the nocache templatetag
"""
if cls in CacheTag._templatetags:
raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
CacheTag._templatetags[cls] = {}
def templatetag_cache(parser, token):
"""
Return a new Node object for the main cache templatetag
"""
nodelist = parser.parse(('end%s' % nodename,))
parser.delete_first_token()
args = cls.get_template_node_arguments(token.contents.split())
return cls.Node(nodename, nodelist, *args)
library_register.tag(nodename, templatetag_cache)
CacheTag._templatetags[cls]['cache'] = templatetag_cache
def templatetag_raw(parser, token):
"""
Return a TextNode with all html not parsed, used for templatetags
that need to not be parsed : the `nocache` one and the `RAW` one,
used to surround cached html (to be not parsed again)
Based on http://www.holovaty.com/writing/django-two-phased-rendering/
"""
# Whatever is between {% nocache %} and {% endnocache %} will be preserved as
# raw, un-rendered template code.
text = []
parse_until = 'end%s' % token.contents
tag_mapping = {
TOKEN_TEXT: ('', ''),
TOKEN_VAR: ('{{', '}}'),
TOKEN_BLOCK: ('{%', '%}'),
TOKEN_COMMENT: ('{#', '#}'),
}
# By the time this template tag is called, the template system has already
# lexed the template into tokens. Here, we loop over the tokens until
# {% endraw %} and parse them to TextNodes. We have to add the start and
# end bits (e.g. "{{" for variables) because those have already been
# stripped off in a previous part of the template-parsing process.
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
return template.TextNode(''.join(text))
start, end = tag_mapping[token.token_type]
text.append('%s%s%s' % (start, token.contents, end))
parser.unclosed_block_tag(parse_until)
library_register.tag(cls.RAW_TOKEN, templatetag_raw)
CacheTag._templatetags[cls]['raw'] = templatetag_raw
def templatetag_nocache(parser, token):
"""
Return a TextNode with raw html from the `nocache` templatetag,
and surround it with `endRAW` and `RAW` (precisely
`cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
So for
{% nocache %}foo{% endnocache %}
we get
{% endRAW... %}foo{% RAW... %}
When the main cache templatetag content will be loaded from cache,
it will be surrounded by the same templatetags, reversed.
So if at first we had
{% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
The cached version will be
bar{% endRAW... %}foo{% RAW... %}baz
And the final html to be rendered will be
{% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
And the html within `RAW` and `endRAW` will not be parsed, as wanted
"""
# We'll load in the no-cache part all template tags and filters loaded in the main
# template, to be able to use it when the no-cache will be rendered
all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
available_tags = all_tags_and_filters['tags']
available_filters = all_tags_and_filters['filters']
needed = {}
current_module = cls.get_templatetag_module()
for function in parser.tags.values():
if function in available_tags:
lib, name = available_tags[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
for function in parser.filters.values():
if function in available_filters:
lib, name = available_filters[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
load_string = ''.join(
'%sload %s from %s%s' % (
template.BLOCK_TAG_START,
' '.join(names),
lib,
template.BLOCK_TAG_END,
)
for lib, names in needed.items()
)
node = templatetag_raw(parser, token)
node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
return node
library_register.tag(nocache_nodename, templatetag_nocache)
CacheTag._templatetags['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.split_content_version
|
python
|
def split_content_version(self):
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
|
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L383-L406
| null |
class CacheTag(object, metaclass=CacheTagMetaClass):
    """
    The main class of `django-adv-cache-tag` which does all the work.
    To change its behaviour, simply change one or more of these settings
    (see in the `Meta` class for details) :
    * ADV_CACHE_VERSIONING
    * ADV_CACHE_COMPRESS
    * ADV_CACHE_COMPRESS_SPACES
    * ADV_CACHE_INCLUDE_PK
    * ADV_CACHE_BACKEND
    * ADV_CACHE_VERSION
    * ADV_CACHE_RESOLVE_NAME
    Or inherit from this class and don't forget to register your tag :
        from adv_cache_tag.tag import CacheTag
        register = template.Library()
        class MyCacheTag(CacheTag):
            # change something
        MyCacheTag.register(register, 'my_cache')
    By inheriting you can change many things as CacheTag implements a lot of
    small methods
    """
    # Will change if the algorithm changes; invalidates every existing cache entry.
    INTERNAL_VERSION = '1'
    # Used to separate internal version, template version, and the content
    VERSION_SEPARATOR = '::'
    # Regex used to reduce spaces/blanks (many spaces into one)
    RE_SPACELESS = re.compile(r'\s\s+')
    # Generate a site-specific token based on SECRET_KEY (double-hashed with two
    # salts), so cached fragments cannot inject a working {% RAW... %} tag.
    RAW_TOKEN = 'RAW_' + hashlib.sha1(
        b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
            b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
        ).hexdigest())
    ).hexdigest()
    # Tokens to use around the already parsed parts of the cached template,
    # i.e. "{% RAW_xxx %}" and "{% endRAW_xxx %}".
    RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
    RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
    # internal use only: keep reference to templatetags functions, keyed by class
    _templatetags = {}
    # internal use only: name of the templatetags module to load for this class and subclasses
    _templatetags_modules = {}
    # Set by the metaclass from the `Meta` inner class.
    options = None
    # The Node class used by the `register`ed templatetag; overridable in subclasses.
    Node = Node
    class Meta:
        """
        Options of this class. Accessible via cls.options or self.options.
        To force (and/or add) options in your own class, simply redefine a
        `Meta` class in your own main cache class with updated/add values.

        NOTE: these `getattr(settings, ...)` calls run once, when the class
        body is executed at import time; later settings changes do not apply.
        """
        # If versioning is activated (internal versioning is always on)
        versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
        # If the content will be compressed (zlib+pickle) before caching
        compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
        # If many spaces/blanks will be converted into one
        compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
        # If a "pk" (you can pass what you want) will be added to the cache key
        include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
        # The cache backend to use (or use the "default" one)
        cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
        # Part of the INTERNAL_VERSION configurable via settings
        internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
        # If the fragment name should be resolved or taken as is
        resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
    # Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
"""
Constructor of the Cache class:
* preparing fields to be used later,
* prepare the templatetag parameters
* create the cache key
"""
super(CacheTag, self).__init__()
# the actual Node object
self.node = node
# the context used for the rendering
self.context = context
# indicate that we force regenerating the cache, even if it exists
self.regenerate = bool(self.context.get('__regenerate__', False))
# indicate if we only want html without parsing the nocache parts
self.partial = bool(self.context.get('__partial__', False))
# the content of the template, will be used through the whole process
self.content = ''
# the version used in the cached templatetag
self.content_version = None
# Final "INTERNAL_VERSION"
if self.options.internal_version:
self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
self.options.internal_version)
else:
self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
# prepare all parameters passed to the templatetag
self.expire_time = None
self.version = None
self.prepare_params()
# get the cache and cache key
self.cache = self.get_cache_object()
self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
    def get_expire_time(self):
        """
        Return the expire time passed to the templatetag.
        Must be None (cache "forever", per the backend) or an integer number
        of seconds.

        Raises TemplateSyntaxError if the variable cannot be resolved or is
        not None/integer-like.
        """
        try:
            expire_time = self.node.expire_time.resolve(self.context)
        except template.VariableDoesNotExist:
            raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
                                               (self.node.nodename, self.node.expire_time.var))
        try:
            if expire_time is not None:
                # Accept anything whose str() is purely digits; note this
                # rejects negative values ("-1".isdigit() is False).
                expire_time = str(expire_time)
                if not expire_time.isdigit():
                    raise TypeError
                expire_time = int(expire_time)
        except (ValueError, TypeError):
            raise template.TemplateSyntaxError(
                '"%s" tag got a non-integer (or None) timeout value: %r' % (
                    self.node.nodename, expire_time
                )
            )
        # None falls through untouched and is returned as-is.
        return expire_time
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
    def get_cache_key(self):
        """
        Compute and return the final cache key, using return values of
        `get_base_cache_key` and `get_cache_key_args`.
        """
        return self.get_base_cache_key() % self.get_cache_key_args()
    def get_cache_object(self):
        """
        Return the cache object to be used to set and get the values in cache.
        By default it's the django cache named by the templatetag argument
        (falling back on the `cache_backend` option), but it can be
        every object with a `get` and a `set` method (or not, if `cache_get`
        and `cache_set` methods are overridden).
        """
        return get_cache(self.node.cache_backend or self.options.cache_backend)
    def cache_get(self):
        """
        Get the raw (versioned, maybe compressed) content from the cache.
        """
        return self.cache.get(self.cache_key)
    def cache_set(self, to_cache):
        """
        Store `to_cache` into the cache with the templatetag's expire time.
        """
        self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
"""
Add the version(s) to the content to cache : internal version at first
and then the template version if versioning is activated.
Each version, and the content, are separated with `VERSION_SEPARATOR`.
This method is called after the encoding (if "compress" or
"compress_spaces" options are on)
"""
parts = [self.INTERNAL_VERSION]
if self.options.versioning:
parts.append(force_bytes(self.version))
parts.append(force_bytes(to_cache))
return self.VERSION_SEPARATOR.join(parts)
    def decode_content(self):
        """
        Decode (zlib-decompress then unpickle) the content got from the
        cache, back to the final html.

        NOTE(review): `pickle.loads` on cache data is only safe if the cache
        backend is fully trusted — anyone who can write to the cache can
        execute arbitrary code here.
        """
        self.content = pickle.loads(zlib.decompress(self.content))
    def encode_content(self):
        """
        Encode (pickle then zlib-compress) the html into the data to be
        cached; inverse of `decode_content`.
        """
        return zlib.compress(pickle.dumps(self.content))
    def render_node(self):
        """
        Render the template nodelist and save the generated content in
        `self.content`.
        """
        self.content = self.node.nodelist.render(self.context)
    def create_content(self):
        """
        Render the template, apply the "compress_spaces"/"compress" options
        on it, prefix the version(s), and save the result to the cache.
        Cache-write errors are swallowed (just logged) unless template debug
        is activated.
        """
        self.render_node()
        if self.options.compress_spaces:
            self.content = self.RE_SPACELESS.sub(' ', self.content)
        if self.options.compress:
            to_cache = self.encode_content()
        else:
            to_cache = self.content
        to_cache = self.join_content_version(to_cache)
        try:
            self.cache_set(to_cache)
        except Exception:
            if is_template_debug_activated():
                raise
            logger.exception('Error when saving the cached template fragment')
    def load_content(self):
        """
        It's the main method of the class.
        Try to load the template from cache, check the versions and decode
        the content.
        If anything goes wrong during this process (or if we had a
        `__regenerate__` value to True in the context), create new content
        and save it in cache.

        NOTE(review): the `assert` statements below are used as control flow
        (any failure falls through to `create_content`); under ``python -O``
        asserts are stripped, which would change this behavior.
        """
        self.content = None
        if not self.regenerate:
            try:
                self.content = self.cache_get()
            except Exception:
                if is_template_debug_activated():
                    raise
                logger.exception('Error when getting the cached template fragment')
        try:
            # Any falsy/missing content, version mismatch or decode error
            # triggers regeneration via the `except` clause.
            assert self.content
            self.split_content_version()
            assert self.content
            if self.content_internal_version != self.INTERNAL_VERSION or (
                    self.options.versioning and self.content_version != self.version):
                self.content = None
            assert self.content
            if self.options.compress:
                self.decode_content()
        except Exception:
            self.create_content()
        self.content = smart_str(self.content)
    def render(self):
        """
        Try to load content (from cache or by rendering the template).
        If it fails, return an empty string or raise the exception if it's a
        TemplateSyntaxError (or if template debug is activated).
        With this, we can now parse and render the content included in the
        {% nocache %} blocks, but only if we have this tag and if we don't
        have `__partial__` set to True in the context (in this case we simply
        return the html with the {% nocache %} blocks not parsed).
        """
        try:
            self.load_content()
        except template.TemplateSyntaxError:
            raise
        except Exception:
            if is_template_debug_activated():
                raise
            logger.exception('Error when rendering template fragment')
            return ''
        # Skip the second rendering pass when there is nothing to re-parse.
        if self.partial or self.RAW_TOKEN_START not in self.content:
            return self.content
        return self.render_nocache()
    @staticmethod
    def get_all_tags_and_filters_by_function():
        """
        Return a dict with all the template tags (in the `tags` entry) and filters (in the
        `filters` entry) that are available.
        Both entries are a dict with the function as key, and a tuple with (library name, function
        name) as value.
        The result is memoized on the function object itself (`_cache` /
        `_len_libraries` attributes) after the first call.
        """
        libraries = get_template_libraries()
        force = False
        # We'll force the update of the cache if new libraries were added
        # (detected by a change in the number of registered libraries).
        if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
            if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
                force = True
        if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
            CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
            available_tags = {}
            available_filters = {}
            for lib_name, lib in libraries.items():
                available_tags.update(
                    (function, (lib_name, tag_name))
                    for tag_name, function
                    in lib.tags.items()
                )
                available_filters.update(
                    (function, (lib_name, filter_name))
                    for filter_name, function
                    in lib.filters.items()
                )
            CacheTag.get_all_tags_and_filters_by_function._cache = {
                'tags': available_tags,
                'filters': available_filters
            }
        return CacheTag.get_all_tags_and_filters_by_function._cache
    @classmethod
    def get_templatetag_module(cls):
        """
        Return the templatetags module name for which the current class is used.
        It's used to render the nocache blocks by loading the correct module.
        The lookup is memoized per class in `CacheTag._templatetags_modules`.
        """
        if cls not in CacheTag._templatetags_modules:
            # find the library including the main templatetag of the current class
            all_tags = cls.get_all_tags_and_filters_by_function()['tags']
            CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
        return CacheTag._templatetags_modules[cls]
    def render_nocache(self):
        """
        Render the `nocache` blocks of the content and return the whole
        html: the cached content is wrapped in {% RAW %}...{% endRAW %}
        (so the already-rendered parts stay untouched) and rendered as a
        fresh template against the current context.
        """
        tmpl = template.Template(''.join([
            # start by loading the cache library
            template.BLOCK_TAG_START,
            'load %s' % self.get_templatetag_module(),
            template.BLOCK_TAG_END,
            # and surround the cached template by "raw" tags
            self.RAW_TOKEN_START,
            self.content,
            self.RAW_TOKEN_END,
        ]))
        return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
"""
Return the arguments taken from the templatetag that will be used to the
Node class.
Take a list of all tokens and return a list of real tokens. Here
should be done some validations (number of tokens...) and eventually
some parsing...
"""
if len(tokens) < 3:
raise template.TemplateSyntaxError(
"'%r' tag requires at least 2 arguments." % tokens[0])
return tokens[1], tokens[2], tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
"""
Register all needed templatetags, with these parameters :
* library_register : the `register` object (result of
`template.Library()`) in your templatetag module
* nodename : the node to use for the cache templatetag (the default
is "cache")
* nocache_nodename : the node to use for the nocache templatetag
"""
if cls in CacheTag._templatetags:
raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
CacheTag._templatetags[cls] = {}
def templatetag_cache(parser, token):
"""
Return a new Node object for the main cache templatetag
"""
nodelist = parser.parse(('end%s' % nodename,))
parser.delete_first_token()
args = cls.get_template_node_arguments(token.contents.split())
return cls.Node(nodename, nodelist, *args)
library_register.tag(nodename, templatetag_cache)
CacheTag._templatetags[cls]['cache'] = templatetag_cache
def templatetag_raw(parser, token):
"""
Return a TextNode with all html not parsed, used for templatetags
that need to not be parsed : the `nocache` one and the `RAW` one,
used to surround cached html (to be not parsed again)
Based on http://www.holovaty.com/writing/django-two-phased-rendering/
"""
# Whatever is between {% nocache %} and {% endnocache %} will be preserved as
# raw, un-rendered template code.
text = []
parse_until = 'end%s' % token.contents
tag_mapping = {
TOKEN_TEXT: ('', ''),
TOKEN_VAR: ('{{', '}}'),
TOKEN_BLOCK: ('{%', '%}'),
TOKEN_COMMENT: ('{#', '#}'),
}
# By the time this template tag is called, the template system has already
# lexed the template into tokens. Here, we loop over the tokens until
# {% endraw %} and parse them to TextNodes. We have to add the start and
# end bits (e.g. "{{" for variables) because those have already been
# stripped off in a previous part of the template-parsing process.
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
return template.TextNode(''.join(text))
start, end = tag_mapping[token.token_type]
text.append('%s%s%s' % (start, token.contents, end))
parser.unclosed_block_tag(parse_until)
library_register.tag(cls.RAW_TOKEN, templatetag_raw)
CacheTag._templatetags[cls]['raw'] = templatetag_raw
def templatetag_nocache(parser, token):
"""
Return a TextNode with raw html from the `nocache` templatetag,
and surround it with `endRAW` and `RAW` (precisely
`cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
So for
{% nocache %}foo{% endnocache %}
we get
{% endRAW... %}foo{% RAW... %}
When the main cache templatetag content will be loaded from cache,
it will be surrounded by the same templatetags, reversed.
So if at first we had
{% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
The cached version will be
bar{% endRAW... %}foo{% RAW... %}baz
And the final html to be rendered will be
{% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
And the html within `RAW` and `endRAW` will not be parsed, as wanted
"""
# We'll load in the no-cache part all template tags and filters loaded in the main
# template, to be able to use it when the no-cache will be rendered
all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
available_tags = all_tags_and_filters['tags']
available_filters = all_tags_and_filters['filters']
needed = {}
current_module = cls.get_templatetag_module()
for function in parser.tags.values():
if function in available_tags:
lib, name = available_tags[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
for function in parser.filters.values():
if function in available_filters:
lib, name = available_filters[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
load_string = ''.join(
'%sload %s from %s%s' % (
template.BLOCK_TAG_START,
' '.join(names),
lib,
template.BLOCK_TAG_END,
)
for lib, names in needed.items()
)
node = templatetag_raw(parser, token)
node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
return node
library_register.tag(nocache_nodename, templatetag_nocache)
CacheTag._templatetags['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.decode_content
|
python
|
def decode_content(self):
self.content = pickle.loads(zlib.decompress(self.content))
|
Decode (decompress...) the content got from the cache, to the final
html
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L408-L413
| null |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
"""
Constructor of the Cache class:
* preparing fields to be used later,
* prepare the templatetag parameters
* create the cache key
"""
super(CacheTag, self).__init__()
# the actual Node object
self.node = node
# the context used for the rendering
self.context = context
# indicate that we force regenerating the cache, even if it exists
self.regenerate = bool(self.context.get('__regenerate__', False))
# indicate if we only want html without parsing the nocache parts
self.partial = bool(self.context.get('__partial__', False))
# the content of the template, will be used through the whole process
self.content = ''
# the version used in the cached templatetag
self.content_version = None
# Final "INTERNAL_VERSION"
if self.options.internal_version:
self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
self.options.internal_version)
else:
self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
# prepare all parameters passed to the templatetag
self.expire_time = None
self.version = None
self.prepare_params()
# get the cache and cache key
self.cache = self.get_cache_object()
self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
"""
Return the expire time passed to the templatetag.
Must be None or an integer.
"""
try:
expire_time = self.node.expire_time.resolve(self.context)
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.expire_time.var))
try:
if expire_time is not None:
expire_time = str(expire_time)
if not expire_time.isdigit():
raise TypeError
expire_time = int(expire_time)
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
'"%s" tag got a non-integer (or None) timeout value: %r' % (
self.node.nodename, expire_time
)
)
return expire_time
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
def get_cache_key(self):
"""
Compute and return the final cache key, using return values of
`get_base_cache_key` and `get_cache_key_args`.
"""
return self.get_base_cache_key() % self.get_cache_key_args()
def get_cache_object(self):
    """
    Return the cache object to be used to set and get the values in cache.

    The backend name passed to the templatetag (if any) takes precedence
    over the one configured in `Meta`. By default it's the default cache
    defined by django, but it can be any object with a `get` and a `set`
    method (or not, if `cache_get` and `cache_set` methods are overridden).
    """
    return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
    """Fetch the raw cached value stored under `self.cache_key`."""
    backend = self.cache
    return backend.get(self.cache_key)
def cache_set(self, to_cache):
    """Store `to_cache` under `self.cache_key` with the configured expiry."""
    backend = self.cache
    backend.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
    """
    Add the version(s) to the content to cache : internal version at first
    and then the template version if versioning is activated.

    Each version, and the content, are separated with `VERSION_SEPARATOR`.
    This method is called after the encoding (if "compress" or
    "compress_spaces" options are on). The result is bytes: every part is
    passed through `force_bytes` before joining.
    """
    parts = [self.INTERNAL_VERSION]
    if self.options.versioning:
        parts.append(force_bytes(self.version))
    parts.append(force_bytes(to_cache))
    return self.VERSION_SEPARATOR.join(parts)
def split_content_version(self):
    """Strip the version prefix(es) from the cached payload.

    The cached value starts with the internal version, optionally followed
    by the template version (when versioning is enabled), each separated
    with `VERSION_SEPARATOR`. The versions are stored on the instance and
    the remaining (still encoded) payload is kept in `self.content`; on
    any parsing problem `self.content` is reset to None. Decoding is done
    later, so no cpu is wasted when the versions won't match anyway.
    """
    try:
        expected_parts = 3 if self.options.versioning else 2
        parts = self.content.split(self.VERSION_SEPARATOR, expected_parts - 1)
        assert len(parts) == expected_parts
        self.content_internal_version = parts[0]
        if self.options.versioning:
            self.content_version = parts[1]
        self.content = parts[-1]
    except Exception:
        self.content = None
def encode_content(self):
    """Serialize then compress `self.content`, returning the bytes to cache."""
    pickled = pickle.dumps(self.content)
    return zlib.compress(pickled)
def render_node(self):
    """Render the wrapped nodelist with the current context and keep the html."""
    rendered = self.node.nodelist.render(self.context)
    self.content = rendered
def create_content(self):
    """
    Render the template, apply options on it, and save it to the cache.
    """
    self.render_node()
    if self.options.compress_spaces:
        # collapse runs of whitespace into a single space
        self.content = self.RE_SPACELESS.sub(' ', self.content)
    if self.options.compress:
        to_cache = self.encode_content()
    else:
        to_cache = self.content
    # prefix with the internal (and optional template) version(s)
    to_cache = self.join_content_version(to_cache)
    try:
        self.cache_set(to_cache)
    except Exception:
        # a cache write failure is only fatal when template debug is on;
        # otherwise log it and keep the freshly rendered content
        if is_template_debug_activated():
            raise
        logger.exception('Error when saving the cached template fragment')
def load_content(self):
    """
    It's the main method of the class.

    Try to load the template from cache, get the versions and decode the
    content. If something went wrong during this process (or if we had a
    `__regenerate__` value set to True in the context), create new content
    and save it in cache.
    """
    self.content = None
    if not self.regenerate:
        try:
            self.content = self.cache_get()
        except Exception:
            # a cache read failure is only fatal when template debug is on
            if is_template_debug_activated():
                raise
            logger.exception('Error when getting the cached template fragment')
    try:
        # any failed assert (or exception) below falls through to
        # `create_content`, which regenerates and re-caches the fragment
        assert self.content
        self.split_content_version()
        assert self.content
        # drop the cached value when either version doesn't match
        if self.content_internal_version != self.INTERNAL_VERSION or (
                self.options.versioning and self.content_version != self.version):
            self.content = None
        assert self.content
        if self.options.compress:
            self.decode_content()
    except Exception:
        self.create_content()
    self.content = smart_str(self.content)
def render(self):
    """
    Try to load content (from cache or by rendering the template).

    If it fails, return an empty string or raise the exception if it's a
    TemplateSyntaxError (or if template debug is activated).
    With this, we can now parse and render the content included in the
    {% nocache %} blocks, but only if we have this tag and if we don't
    have `__partial__` set to True in the context (in that case we simply
    return the html with the {% nocache %} block not parsed).
    """
    try:
        self.load_content()
    except template.TemplateSyntaxError:
        raise
    except Exception:
        if is_template_debug_activated():
            raise
        logger.exception('Error when rendering template fragment')
        return ''
    # no RAW marker means there is no nocache block to render
    if self.partial or self.RAW_TOKEN_START not in self.content:
        return self.content
    return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
    """
    Return a dict with all the template tags (in the `tags` entry) and filters (in the
    `filters` entry) that are available.

    Both entries are a dict with the function as key, and a tuple with (library name, function
    name) as value.
    This is cached after the first call.
    """
    libraries = get_template_libraries()
    force = False
    # We'll force the update of the cache if new libraries were added.
    # NOTE(review): staleness is detected only through the library *count*;
    # if a library were replaced without the count changing, the cached
    # mapping would not be refreshed — confirm this is acceptable.
    if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
        if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
            force = True
    if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
        CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
        available_tags = {}
        available_filters = {}
        for lib_name, lib in libraries.items():
            # map each tag/filter function back to (library name, registered name)
            available_tags.update(
                (function, (lib_name, tag_name))
                for tag_name, function
                in lib.tags.items()
            )
            available_filters.update(
                (function, (lib_name, filter_name))
                for filter_name, function
                in lib.filters.items()
            )
        CacheTag.get_all_tags_and_filters_by_function._cache = {
            'tags': available_tags,
            'filters': available_filters
        }
    return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
    """
    Return the templatetags module name for which the current class is used.

    It's used to render the nocache blocks by loading the correct module.
    The result is memoized per class in `CacheTag._templatetags_modules`.
    """
    if cls not in CacheTag._templatetags_modules:
        # find the library including the main templatetag of the current class
        all_tags = cls.get_all_tags_and_filters_by_function()['tags']
        CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
    return CacheTag._templatetags_modules[cls]
def render_nocache(self):
    """
    Render the `nocache` blocks of the content and return the whole html.

    Builds a small template that loads this class's templatetags module and
    wraps the cached html with the internal RAW tags, so only the nocache
    parts are actually parsed and rendered.
    """
    tmpl = template.Template(''.join([
        # start by loading the cache library
        template.BLOCK_TAG_START,
        'load %s' % self.get_templatetag_module(),
        template.BLOCK_TAG_END,
        # and surround the cached template by "raw" tags
        self.RAW_TOKEN_START,
        self.content,
        self.RAW_TOKEN_END,
    ]))
    return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
    """Split the raw templatetag tokens into Node constructor arguments.

    Returns a tuple (expire_time_token, fragment_name_token,
    remaining_tokens). Raises TemplateSyntaxError when fewer than two
    arguments follow the tag name. Subclasses may override this to add
    validation or parsing.
    """
    if len(tokens) < 3:
        raise template.TemplateSyntaxError(
            "'%r' tag requires at least 2 arguments." % tokens[0])
    expire_time_token, fragment_name_token = tokens[1], tokens[2]
    return expire_time_token, fragment_name_token, tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
    """
    Register all needed templatetags, with these parameters :

    * library_register : the `register` object (result of
      `template.Library()`) in your templatetag module
    * nodename : the node to use for the cache templatetag (the default
      is "cache")
    * nocache_nodename : the node to use for the nocache templatetag

    Three tags are registered on `library_register`: the main cache tag,
    the internal "raw" tag (protecting already-rendered html from being
    parsed again), and the nocache tag. All three functions are kept in
    `CacheTag._templatetags[cls]`.

    Raises RuntimeError if `cls` has already been registered.
    """
    if cls in CacheTag._templatetags:
        raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
    CacheTag._templatetags[cls] = {}

    def templatetag_cache(parser, token):
        """
        Return a new Node object for the main cache templatetag
        """
        nodelist = parser.parse(('end%s' % nodename,))
        parser.delete_first_token()
        args = cls.get_template_node_arguments(token.contents.split())
        return cls.Node(nodename, nodelist, *args)

    library_register.tag(nodename, templatetag_cache)
    CacheTag._templatetags[cls]['cache'] = templatetag_cache

    def templatetag_raw(parser, token):
        """
        Return a TextNode with all html not parsed, used for templatetags
        that need to not be parsed : the `nocache` one and the `RAW` one,
        used to surround cached html (to be not parsed again)
        Based on http://www.holovaty.com/writing/django-two-phased-rendering/
        """
        # Whatever is between {% nocache %} and {% endnocache %} will be preserved as
        # raw, un-rendered template code.
        text = []
        parse_until = 'end%s' % token.contents
        tag_mapping = {
            TOKEN_TEXT: ('', ''),
            TOKEN_VAR: ('{{', '}}'),
            TOKEN_BLOCK: ('{%', '%}'),
            TOKEN_COMMENT: ('{#', '#}'),
        }
        # By the time this template tag is called, the template system has already
        # lexed the template into tokens. Here, we loop over the tokens until the
        # closing tag and rebuild their original text: we have to re-add the start
        # and end bits (e.g. "{{" for variables) because those have already been
        # stripped off in a previous part of the template-parsing process.
        while parser.tokens:
            token = parser.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
                return template.TextNode(''.join(text))
            start, end = tag_mapping[token.token_type]
            text.append('%s%s%s' % (start, token.contents, end))
        parser.unclosed_block_tag(parse_until)

    library_register.tag(cls.RAW_TOKEN, templatetag_raw)
    CacheTag._templatetags[cls]['raw'] = templatetag_raw

    def templatetag_nocache(parser, token):
        """
        Return a TextNode with raw html from the `nocache` templatetag,
        and surround it with `endRAW` and `RAW` (precisely
        `cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
        So for
            {% nocache %}foo{% endnocache %}
        we get
            {% endRAW... %}foo{% RAW... %}
        When the main cache templatetag content will be loaded from cache,
        it will be surrounded by the same templatetags, reversed.
        So if at first we had
            {% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
        The cached version will be
            bar{% endRAW... %}foo{% RAW... %}baz
        And the final html to be rendered will be
            {% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
        And the html within `RAW` and `endRAW` will not be parsed, as wanted
        """
        # We'll load in the no-cache part all template tags and filters loaded in
        # the main template, to be able to use them when the no-cache part is
        # rendered later
        all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
        available_tags = all_tags_and_filters['tags']
        available_filters = all_tags_and_filters['filters']

        needed = {}
        current_module = cls.get_templatetag_module()

        for function in parser.tags.values():
            if function in available_tags:
                lib, name = available_tags[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        for function in parser.filters.values():
            if function in available_filters:
                lib, name = available_filters[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)

        load_string = ''.join(
            '%sload %s from %s%s' % (
                template.BLOCK_TAG_START,
                ' '.join(names),
                lib,
                template.BLOCK_TAG_END,
            )
            for lib, names in needed.items()
        )

        node = templatetag_raw(parser, token)
        node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
        return node

    library_register.tag(nocache_nodename, templatetag_nocache)
    # Fix: key the nocache function under `cls`, consistently with the
    # 'cache' and 'raw' entries above; the previous top-level 'nocache'
    # key was shared (and overwritten) by every registered subclass.
    CacheTag._templatetags[cls]['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.render_node
|
python
|
def render_node(self):
self.content = self.node.nodelist.render(self.context)
|
Render the template and save the generated content
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L421-L425
| null |
class CacheTag(object, metaclass=CacheTagMetaClass):
    """
    The main class of `django-adv-cache-tag` which does all the work.

    To change its behaviour, simply change one or more of these settings
    (see in the `Meta` class for details) :

    * ADV_CACHE_VERSIONING
    * ADV_CACHE_COMPRESS
    * ADV_CACHE_COMPRESS_SPACES
    * ADV_CACHE_INCLUDE_PK
    * ADV_CACHE_BACKEND
    * ADV_CACHE_VERSION
    * ADV_CACHE_RESOLVE_NAME

    Or inherit from this class and don't forget to register your tag :

        from adv_cache_tag.tag import CacheTag
        register = template.Library()
        class MyCacheTag(CacheTag):
            # change something
        MyCacheTag.register(register, 'my_cache')

    By inheriting you can change many things as CacheTag implements a lot of
    small methods
    """

    # Will change if the algorithm changes
    INTERNAL_VERSION = '1'

    # Used to separate internal version, template version, and the content
    VERSION_SEPARATOR = '::'

    # Regex used to reduce spaces/blanks (many spaces into one)
    RE_SPACELESS = re.compile(r'\s\s+')

    # generate a token for this site, based on the secret_key (double-hashed
    # so the secret itself never appears in cached html)
    RAW_TOKEN = 'RAW_' + hashlib.sha1(
        b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
            b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
        ).hexdigest())
    ).hexdigest()

    # tokens to use around the already parsed parts of the cached template
    RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
    RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END

    # internal use only: keep reference to templatetags functions
    _templatetags = {}
    # internal use only: name of the templatetags module to load for this class and subclasses
    _templatetags_modules = {}

    # filled with the `Meta` class by the metaclass
    options = None
    # Node class used by the tag; may be overridden in subclasses
    Node = Node

    class Meta:
        """
        Options of this class. Accessible via cls.options or self.options.

        To force (and/or add) options in your own class, simply redefine a
        `Meta` class in your own main cache class with updated/added values
        """
        # If versioning is activated (internal versioning is always on)
        versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
        # If the content will be compressed before caching
        compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
        # If many spaces/blanks will be converted into one
        compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
        # If a "pk" (you can pass what you want) will be added to the cache key
        include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
        # The cache backend to use (or use the "default" one)
        cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
        # Part of the INTERNAL_VERSION configurable via settings
        internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
        # If the fragment name should be resolved or taken as is
        resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)

    # Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
    """
    Constructor of the Cache class:

    * preparing fields to be used later,
    * prepare the templatetag parameters
    * create the cache key

    `node` is the template Node object being rendered, `context` the
    template context used for the rendering.
    """
    super(CacheTag, self).__init__()
    # the actual Node object
    self.node = node
    # the context used for the rendering
    self.context = context
    # indicate that we force regenerating the cache, even if it exists
    self.regenerate = bool(self.context.get('__regenerate__', False))
    # indicate if we only want html without parsing the nocache parts
    self.partial = bool(self.context.get('__partial__', False))
    # the content of the template, will be used through the whole process
    self.content = ''
    # the version found in the cached templatetag (set by split_content_version)
    self.content_version = None
    # Final per-instance "INTERNAL_VERSION" as bytes, combining the class
    # version with the settings-provided one when set.
    # NOTE(review): bytes %-formatting needs bytes-like operands; a plain
    # str INTERNAL_VERSION / internal_version would raise TypeError here
    # on Python 3 — confirm both values are bytes-compatible.
    if self.options.internal_version:
        self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
                                            self.options.internal_version)
    else:
        self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
    self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
    # prepare all parameters passed to the templatetag
    self.expire_time = None
    self.version = None
    self.prepare_params()
    # get the cache and cache key
    self.cache = self.get_cache_object()
    self.cache_key = self.get_cache_key()
def prepare_params(self):
    """Resolve and normalize the arguments given to the templatetag.

    Sets `fragment_name` (with any matching surrounding quotes stripped),
    `expire_time`, `version` (only when versioning is enabled) and the
    resolved `vary_on` values. Raises ValueError when the fragment name
    is quoted on one side only.
    """
    if self.options.resolve_fragment:
        self.fragment_name = self.node.fragment_name.resolve(self.context)
    else:
        self.fragment_name = str(self.node.fragment_name)

    # Strip one pair of matching surrounding quotes, if any
    for quote in ('\'', '"'):
        starts = self.fragment_name.startswith(quote)
        ends = self.fragment_name.endswith(quote)
        if starts and ends:
            self.fragment_name = self.fragment_name[1:-1]
            break
        if starts or ends:
            raise ValueError('Number of quotes around the fragment name is incoherent')

    self.expire_time = self.get_expire_time()

    if self.options.versioning:
        self.version = force_bytes(self.get_version())

    self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
"""
Return the expire time passed to the templatetag.
Must be None or an integer.
"""
try:
expire_time = self.node.expire_time.resolve(self.context)
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.expire_time.var))
try:
if expire_time is not None:
expire_time = str(expire_time)
if not expire_time.isdigit():
raise TypeError
expire_time = int(expire_time)
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
'"%s" tag got a non-integer (or None) timeout value: %r' % (
self.node.nodename, expire_time
)
)
return expire_time
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
def get_cache_key(self):
"""
Compute and return the final cache key, using return values of
`get_base_cache_key` and `get_cache_key_args`.
"""
return self.get_base_cache_key() % self.get_cache_key_args()
def get_cache_object(self):
"""
Return the cache object to be used to set and get the values in cache.
By default it's the default cache defined by django, but it can be
every object with a `get` and a `set` method (or not, if `cache_get`
and `cache_set` methods are overridden)
"""
return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
"""
Get content from the cache
"""
return self.cache.get(self.cache_key)
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
"""
Add the version(s) to the content to cache : internal version at first
and then the template version if versioning is activated.
Each version, and the content, are separated with `VERSION_SEPARATOR`.
This method is called after the encoding (if "compress" or
"compress_spaces" options are on)
"""
parts = [self.INTERNAL_VERSION]
if self.options.versioning:
parts.append(force_bytes(self.version))
parts.append(force_bytes(to_cache))
return self.VERSION_SEPARATOR.join(parts)
def split_content_version(self):
"""
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
"""
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
def decode_content(self):
    """Decompress then unpickle the cached payload back into the final html."""
    decompressed = zlib.decompress(self.content)
    self.content = pickle.loads(decompressed)
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
def load_content(self):
"""
It's the main method of the class.
Try to load the template from cache, get the versions and decode the
content.
If something was wrong during this process (or if we had a
`__regenerate__` value to True in the context), create new content and
save it in cache.
"""
self.content = None
if not self.regenerate:
try:
self.content = self.cache_get()
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when getting the cached template fragment')
try:
assert self.content
self.split_content_version()
assert self.content
if self.content_internal_version != self.INTERNAL_VERSION or (
self.options.versioning and self.content_version != self.version):
self.content = None
assert self.content
if self.options.compress:
self.decode_content()
except Exception:
self.create_content()
self.content = smart_str(self.content)
def render(self):
"""
Try to load content (from cache or by rendering the template).
If it fails, return an empty string or raise the exception if it's a
TemplateSyntaxError.
With this, we can no parse and render the content included in the
{% nocache %} blocks, but only if we have have this tag and if we don't
have `__partial__` to True in the context (in this case we simple
return the html with the {% nocache %} block not parsed.
"""
try:
self.load_content()
except template.TemplateSyntaxError:
raise
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when rendering template fragment')
return ''
if self.partial or self.RAW_TOKEN_START not in self.content:
return self.content
return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
"""
Return a dict with all the template tags (in the `tags` entry) and filters (in the
`filters` entry) that are available.
Both entries are a dict with the function as key, and a tuple with (library name, function
name) as value.
This is cached after the first call.
"""
libraries = get_template_libraries()
force = False
# We'll force the update of the cache if new libraries where added
if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
force = True
if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
available_tags = {}
available_filters = {}
for lib_name, lib in libraries.items():
available_tags.update(
(function, (lib_name, tag_name))
for tag_name, function
in lib.tags.items()
)
available_filters.update(
(function, (lib_name, filter_name))
for filter_name, function
in lib.filters.items()
)
CacheTag.get_all_tags_and_filters_by_function._cache = {
'tags': available_tags,
'filters': available_filters
}
return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
"""
Return the templatetags module name for which the current class is used.
It's used to render the nocache blocks by loading the correct module
"""
if cls not in CacheTag._templatetags_modules:
# find the library including the main templatetag of the current class
all_tags = cls.get_all_tags_and_filters_by_function()['tags']
CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
return CacheTag._templatetags_modules[cls]
def render_nocache(self):
"""
Render the `nocache` blocks of the content and return the whole
html
"""
tmpl = template.Template(''.join([
# start by loading the cache library
template.BLOCK_TAG_START,
'load %s' % self.get_templatetag_module(),
template.BLOCK_TAG_END,
# and surround the cached template by "raw" tags
self.RAW_TOKEN_START,
self.content,
self.RAW_TOKEN_END,
]))
return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
"""
Return the arguments taken from the templatetag that will be used to the
Node class.
Take a list of all tokens and return a list of real tokens. Here
should be done some validations (number of tokens...) and eventually
some parsing...
"""
if len(tokens) < 3:
raise template.TemplateSyntaxError(
"'%r' tag requires at least 2 arguments." % tokens[0])
return tokens[1], tokens[2], tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
    """
    Register all needed templatetags, with these parameters :

    * library_register : the `register` object (result of
      `template.Library()`) in your templatetag module
    * nodename : the node to use for the cache templatetag (the default
      is "cache")
    * nocache_nodename : the node to use for the nocache templatetag

    Three tags are registered on `library_register`: the main cache tag,
    the internal "raw" tag (protecting already-rendered html from being
    parsed again), and the nocache tag. All three functions are kept in
    `CacheTag._templatetags[cls]`.

    Raises RuntimeError if `cls` has already been registered.
    """
    if cls in CacheTag._templatetags:
        raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
    CacheTag._templatetags[cls] = {}

    def templatetag_cache(parser, token):
        """
        Return a new Node object for the main cache templatetag
        """
        nodelist = parser.parse(('end%s' % nodename,))
        parser.delete_first_token()
        args = cls.get_template_node_arguments(token.contents.split())
        return cls.Node(nodename, nodelist, *args)

    library_register.tag(nodename, templatetag_cache)
    CacheTag._templatetags[cls]['cache'] = templatetag_cache

    def templatetag_raw(parser, token):
        """
        Return a TextNode with all html not parsed, used for templatetags
        that need to not be parsed : the `nocache` one and the `RAW` one,
        used to surround cached html (to be not parsed again)
        Based on http://www.holovaty.com/writing/django-two-phased-rendering/
        """
        # Whatever is between {% nocache %} and {% endnocache %} will be preserved as
        # raw, un-rendered template code.
        text = []
        parse_until = 'end%s' % token.contents
        tag_mapping = {
            TOKEN_TEXT: ('', ''),
            TOKEN_VAR: ('{{', '}}'),
            TOKEN_BLOCK: ('{%', '%}'),
            TOKEN_COMMENT: ('{#', '#}'),
        }
        # By the time this template tag is called, the template system has already
        # lexed the template into tokens. Here, we loop over the tokens until the
        # closing tag and rebuild their original text: we have to re-add the start
        # and end bits (e.g. "{{" for variables) because those have already been
        # stripped off in a previous part of the template-parsing process.
        while parser.tokens:
            token = parser.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
                return template.TextNode(''.join(text))
            start, end = tag_mapping[token.token_type]
            text.append('%s%s%s' % (start, token.contents, end))
        parser.unclosed_block_tag(parse_until)

    library_register.tag(cls.RAW_TOKEN, templatetag_raw)
    CacheTag._templatetags[cls]['raw'] = templatetag_raw

    def templatetag_nocache(parser, token):
        """
        Return a TextNode with raw html from the `nocache` templatetag,
        and surround it with `endRAW` and `RAW` (precisely
        `cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
        So for
            {% nocache %}foo{% endnocache %}
        we get
            {% endRAW... %}foo{% RAW... %}
        When the main cache templatetag content will be loaded from cache,
        it will be surrounded by the same templatetags, reversed.
        So if at first we had
            {% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
        The cached version will be
            bar{% endRAW... %}foo{% RAW... %}baz
        And the final html to be rendered will be
            {% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
        And the html within `RAW` and `endRAW` will not be parsed, as wanted
        """
        # We'll load in the no-cache part all template tags and filters loaded in
        # the main template, to be able to use them when the no-cache part is
        # rendered later
        all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
        available_tags = all_tags_and_filters['tags']
        available_filters = all_tags_and_filters['filters']

        needed = {}
        current_module = cls.get_templatetag_module()

        for function in parser.tags.values():
            if function in available_tags:
                lib, name = available_tags[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        for function in parser.filters.values():
            if function in available_filters:
                lib, name = available_filters[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)

        load_string = ''.join(
            '%sload %s from %s%s' % (
                template.BLOCK_TAG_START,
                ' '.join(names),
                lib,
                template.BLOCK_TAG_END,
            )
            for lib, names in needed.items()
        )

        node = templatetag_raw(parser, token)
        node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
        return node

    library_register.tag(nocache_nodename, templatetag_nocache)
    # Fix: key the nocache function under `cls`, consistently with the
    # 'cache' and 'raw' entries above; the previous top-level 'nocache'
    # key was shared (and overwritten) by every registered subclass.
    CacheTag._templatetags[cls]['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.create_content
|
python
|
def create_content(self):
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
|
Render the template, apply options on it, and save it to the cache.
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L427-L448
|
[
"def is_template_debug_activated():\n if django_version < (1, 8):\n return settings.TEMPLATE_DEBUG\n\n # not so simple now, it's an option of a template backend\n for template_settings in settings.TEMPLATES:\n if template_settings['BACKEND'] == 'django.template.backends.django.DjangoTemplates':\n return bool(template_settings.get('OPTIONS', {}).get('debug', False))\n\n return False\n",
"def cache_set(self, to_cache):\n \"\"\"\n Set content into the cache\n \"\"\"\n self.cache.set(self.cache_key, to_cache, self.expire_time)\n",
"def join_content_version(self, to_cache):\n \"\"\"\n Add the version(s) to the content to cache : internal version at first\n and then the template version if versioning is activated.\n Each version, and the content, are separated with `VERSION_SEPARATOR`.\n This method is called after the encoding (if \"compress\" or\n \"compress_spaces\" options are on)\n \"\"\"\n parts = [self.INTERNAL_VERSION]\n if self.options.versioning:\n parts.append(force_bytes(self.version))\n parts.append(force_bytes(to_cache))\n\n return self.VERSION_SEPARATOR.join(parts)\n",
"def encode_content(self):\n \"\"\"\n Encode (compress...) the html to the data to be cached\n \"\"\"\n return zlib.compress(pickle.dumps(self.content))\n",
"def render_node(self):\n \"\"\"\n Render the template and save the generated content\n \"\"\"\n self.content = self.node.nodelist.render(self.context)\n"
] |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
"""
Constructor of the Cache class:
* preparing fields to be used later,
* prepare the templatetag parameters
* create the cache key
"""
super(CacheTag, self).__init__()
# the actual Node object
self.node = node
# the context used for the rendering
self.context = context
# indicate that we force regenerating the cache, even if it exists
self.regenerate = bool(self.context.get('__regenerate__', False))
# indicate if we only want html without parsing the nocache parts
self.partial = bool(self.context.get('__partial__', False))
# the content of the template, will be used through the whole process
self.content = ''
# the version used in the cached templatetag
self.content_version = None
# Final "INTERNAL_VERSION"
if self.options.internal_version:
self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
self.options.internal_version)
else:
self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
# prepare all parameters passed to the templatetag
self.expire_time = None
self.version = None
self.prepare_params()
# get the cache and cache key
self.cache = self.get_cache_object()
self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
"""
Return the expire time passed to the templatetag.
Must be None or an integer.
"""
try:
expire_time = self.node.expire_time.resolve(self.context)
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.expire_time.var))
try:
if expire_time is not None:
expire_time = str(expire_time)
if not expire_time.isdigit():
raise TypeError
expire_time = int(expire_time)
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
'"%s" tag got a non-integer (or None) timeout value: %r' % (
self.node.nodename, expire_time
)
)
return expire_time
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
def get_cache_key(self):
"""
Compute and return the final cache key, using return values of
`get_base_cache_key` and `get_cache_key_args`.
"""
return self.get_base_cache_key() % self.get_cache_key_args()
def get_cache_object(self):
"""
Return the cache object to be used to set and get the values in cache.
By default it's the default cache defined by django, but it can be
every object with a `get` and a `set` method (or not, if `cache_get`
and `cache_set` methods are overridden)
"""
return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
"""
Get content from the cache
"""
return self.cache.get(self.cache_key)
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
"""
Add the version(s) to the content to cache : internal version at first
and then the template version if versioning is activated.
Each version, and the content, are separated with `VERSION_SEPARATOR`.
This method is called after the encoding (if "compress" or
"compress_spaces" options are on)
"""
parts = [self.INTERNAL_VERSION]
if self.options.versioning:
parts.append(force_bytes(self.version))
parts.append(force_bytes(to_cache))
return self.VERSION_SEPARATOR.join(parts)
def split_content_version(self):
"""
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
"""
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content))
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def render_node(self):
"""
Render the template and save the generated content
"""
self.content = self.node.nodelist.render(self.context)
def load_content(self):
"""
It's the main method of the class.
Try to load the template from cache, get the versions and decode the
content.
If something was wrong during this process (or if we had a
`__regenerate__` value to True in the context), create new content and
save it in cache.
"""
self.content = None
if not self.regenerate:
try:
self.content = self.cache_get()
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when getting the cached template fragment')
try:
assert self.content
self.split_content_version()
assert self.content
if self.content_internal_version != self.INTERNAL_VERSION or (
self.options.versioning and self.content_version != self.version):
self.content = None
assert self.content
if self.options.compress:
self.decode_content()
except Exception:
self.create_content()
self.content = smart_str(self.content)
def render(self):
"""
Try to load content (from cache or by rendering the template).
If it fails, return an empty string or raise the exception if it's a
TemplateSyntaxError.
With this, we can no parse and render the content included in the
{% nocache %} blocks, but only if we have have this tag and if we don't
have `__partial__` to True in the context (in this case we simple
return the html with the {% nocache %} block not parsed.
"""
try:
self.load_content()
except template.TemplateSyntaxError:
raise
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when rendering template fragment')
return ''
if self.partial or self.RAW_TOKEN_START not in self.content:
return self.content
return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
"""
Return a dict with all the template tags (in the `tags` entry) and filters (in the
`filters` entry) that are available.
Both entries are a dict with the function as key, and a tuple with (library name, function
name) as value.
This is cached after the first call.
"""
libraries = get_template_libraries()
force = False
# We'll force the update of the cache if new libraries where added
if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
force = True
if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
available_tags = {}
available_filters = {}
for lib_name, lib in libraries.items():
available_tags.update(
(function, (lib_name, tag_name))
for tag_name, function
in lib.tags.items()
)
available_filters.update(
(function, (lib_name, filter_name))
for filter_name, function
in lib.filters.items()
)
CacheTag.get_all_tags_and_filters_by_function._cache = {
'tags': available_tags,
'filters': available_filters
}
return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
"""
Return the templatetags module name for which the current class is used.
It's used to render the nocache blocks by loading the correct module
"""
if cls not in CacheTag._templatetags_modules:
# find the library including the main templatetag of the current class
all_tags = cls.get_all_tags_and_filters_by_function()['tags']
CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
return CacheTag._templatetags_modules[cls]
def render_nocache(self):
"""
Render the `nocache` blocks of the content and return the whole
html
"""
tmpl = template.Template(''.join([
# start by loading the cache library
template.BLOCK_TAG_START,
'load %s' % self.get_templatetag_module(),
template.BLOCK_TAG_END,
# and surround the cached template by "raw" tags
self.RAW_TOKEN_START,
self.content,
self.RAW_TOKEN_END,
]))
return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
"""
Return the arguments taken from the templatetag that will be used to the
Node class.
Take a list of all tokens and return a list of real tokens. Here
should be done some validations (number of tokens...) and eventually
some parsing...
"""
if len(tokens) < 3:
raise template.TemplateSyntaxError(
"'%r' tag requires at least 2 arguments." % tokens[0])
return tokens[1], tokens[2], tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
"""
Register all needed templatetags, with these parameters :
* library_register : the `register` object (result of
`template.Library()`) in your templatetag module
* nodename : the node to use for the cache templatetag (the default
is "cache")
* nocache_nodename : the node to use for the nocache templatetag
"""
if cls in CacheTag._templatetags:
raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
CacheTag._templatetags[cls] = {}
def templatetag_cache(parser, token):
"""
Return a new Node object for the main cache templatetag
"""
nodelist = parser.parse(('end%s' % nodename,))
parser.delete_first_token()
args = cls.get_template_node_arguments(token.contents.split())
return cls.Node(nodename, nodelist, *args)
library_register.tag(nodename, templatetag_cache)
CacheTag._templatetags[cls]['cache'] = templatetag_cache
def templatetag_raw(parser, token):
"""
Return a TextNode with all html not parsed, used for templatetags
that need to not be parsed : the `nocache` one and the `RAW` one,
used to surround cached html (to be not parsed again)
Based on http://www.holovaty.com/writing/django-two-phased-rendering/
"""
# Whatever is between {% nocache %} and {% endnocache %} will be preserved as
# raw, un-rendered template code.
text = []
parse_until = 'end%s' % token.contents
tag_mapping = {
TOKEN_TEXT: ('', ''),
TOKEN_VAR: ('{{', '}}'),
TOKEN_BLOCK: ('{%', '%}'),
TOKEN_COMMENT: ('{#', '#}'),
}
# By the time this template tag is called, the template system has already
# lexed the template into tokens. Here, we loop over the tokens until
# {% endraw %} and parse them to TextNodes. We have to add the start and
# end bits (e.g. "{{" for variables) because those have already been
# stripped off in a previous part of the template-parsing process.
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
return template.TextNode(''.join(text))
start, end = tag_mapping[token.token_type]
text.append('%s%s%s' % (start, token.contents, end))
parser.unclosed_block_tag(parse_until)
library_register.tag(cls.RAW_TOKEN, templatetag_raw)
CacheTag._templatetags[cls]['raw'] = templatetag_raw
def templatetag_nocache(parser, token):
"""
Return a TextNode with raw html from the `nocache` templatetag,
and surround it with `endRAW` and `RAW` (precisely
`cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
So for
{% nocache %}foo{% endnocache %}
we get
{% endRAW... %}foo{% RAW... %}
When the main cache templatetag content will be loaded from cache,
it will be surrounded by the same templatetags, reversed.
So if at first we had
{% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
The cached version will be
bar{% endRAW... %}foo{% RAW... %}baz
And the final html to be rendered will be
{% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
And the html within `RAW` and `endRAW` will not be parsed, as wanted
"""
# We'll load in the no-cache part all template tags and filters loaded in the main
# template, to be able to use it when the no-cache will be rendered
all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
available_tags = all_tags_and_filters['tags']
available_filters = all_tags_and_filters['filters']
needed = {}
current_module = cls.get_templatetag_module()
for function in parser.tags.values():
if function in available_tags:
lib, name = available_tags[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
for function in parser.filters.values():
if function in available_filters:
lib, name = available_filters[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
load_string = ''.join(
'%sload %s from %s%s' % (
template.BLOCK_TAG_START,
' '.join(names),
lib,
template.BLOCK_TAG_END,
)
for lib, names in needed.items()
)
node = templatetag_raw(parser, token)
node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
return node
library_register.tag(nocache_nodename, templatetag_nocache)
CacheTag._templatetags['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.load_content
|
python
|
def load_content(self):
self.content = None
if not self.regenerate:
try:
self.content = self.cache_get()
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when getting the cached template fragment')
try:
assert self.content
self.split_content_version()
assert self.content
if self.content_internal_version != self.INTERNAL_VERSION or (
self.options.versioning and self.content_version != self.version):
self.content = None
assert self.content
if self.options.compress:
self.decode_content()
except Exception:
self.create_content()
self.content = smart_str(self.content)
|
It's the main method of the class.
Try to load the template from cache, get the versions and decode the
content.
If something was wrong during this process (or if we had a
`__regenerate__` value to True in the context), create new content and
save it in cache.
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L450-L490
|
[
"def is_template_debug_activated():\n if django_version < (1, 8):\n return settings.TEMPLATE_DEBUG\n\n # not so simple now, it's an option of a template backend\n for template_settings in settings.TEMPLATES:\n if template_settings['BACKEND'] == 'django.template.backends.django.DjangoTemplates':\n return bool(template_settings.get('OPTIONS', {}).get('debug', False))\n\n return False\n",
"def cache_get(self):\n \"\"\"\n Get content from the cache\n \"\"\"\n return self.cache.get(self.cache_key)\n",
"def split_content_version(self):\n \"\"\"\n Remove and return the version(s) from the cached content. First the\n internal version, and if versioning is activated, the template one.\n And finally save the content, but only if all versions match.\n The content saved is the encoded one (if \"compress\" or\n \"compress_spaces\" options are on). By doing so, we avoid decoding if\n the versions didn't match, to save some cpu cycles.\n \"\"\"\n try:\n nb_parts = 2\n if self.options.versioning:\n nb_parts = 3\n\n parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)\n assert len(parts) == nb_parts\n\n self.content_internal_version = parts[0]\n if self.options.versioning:\n self.content_version = parts[1]\n\n self.content = parts[-1]\n except Exception:\n self.content = None\n",
"def decode_content(self):\n \"\"\"\n Decode (decompress...) the content got from the cache, to the final\n html\n \"\"\"\n self.content = pickle.loads(zlib.decompress(self.content))\n",
"def create_content(self):\n \"\"\"\n Render the template, apply options on it, and save it to the cache.\n \"\"\"\n self.render_node()\n\n if self.options.compress_spaces:\n self.content = self.RE_SPACELESS.sub(' ', self.content)\n\n if self.options.compress:\n to_cache = self.encode_content()\n else:\n to_cache = self.content\n\n to_cache = self.join_content_version(to_cache)\n\n try:\n self.cache_set(to_cache)\n except Exception:\n if is_template_debug_activated():\n raise\n logger.exception('Error when saving the cached template fragment')\n"
] |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
"""
Constructor of the Cache class:
* preparing fields to be used later,
* prepare the templatetag parameters
* create the cache key
"""
super(CacheTag, self).__init__()
# the actual Node object
self.node = node
# the context used for the rendering
self.context = context
# indicate that we force regenerating the cache, even if it exists
self.regenerate = bool(self.context.get('__regenerate__', False))
# indicate if we only want html without parsing the nocache parts
self.partial = bool(self.context.get('__partial__', False))
# the content of the template, will be used through the whole process
self.content = ''
# the version used in the cached templatetag
self.content_version = None
# Final "INTERNAL_VERSION"
if self.options.internal_version:
self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
self.options.internal_version)
else:
self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
# prepare all parameters passed to the templatetag
self.expire_time = None
self.version = None
self.prepare_params()
# get the cache and cache key
self.cache = self.get_cache_object()
self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
"""
Return the expire time passed to the templatetag.
Must be None or an integer.
"""
try:
expire_time = self.node.expire_time.resolve(self.context)
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.expire_time.var))
try:
if expire_time is not None:
expire_time = str(expire_time)
if not expire_time.isdigit():
raise TypeError
expire_time = int(expire_time)
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
'"%s" tag got a non-integer (or None) timeout value: %r' % (
self.node.nodename, expire_time
)
)
return expire_time
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
def get_cache_key(self):
"""
Compute and return the final cache key, using return values of
`get_base_cache_key` and `get_cache_key_args`.
"""
return self.get_base_cache_key() % self.get_cache_key_args()
def get_cache_object(self):
"""
Return the cache object to be used to set and get the values in cache.
By default it's the default cache defined by django, but it can be
every object with a `get` and a `set` method (or not, if `cache_get`
and `cache_set` methods are overridden)
"""
return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
"""
Get content from the cache
"""
return self.cache.get(self.cache_key)
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
"""
Add the version(s) to the content to cache : internal version at first
and then the template version if versioning is activated.
Each version, and the content, are separated with `VERSION_SEPARATOR`.
This method is called after the encoding (if "compress" or
"compress_spaces" options are on)
"""
parts = [self.INTERNAL_VERSION]
if self.options.versioning:
parts.append(force_bytes(self.version))
parts.append(force_bytes(to_cache))
return self.VERSION_SEPARATOR.join(parts)
def split_content_version(self):
"""
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
"""
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content))
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def render_node(self):
"""
Render the template and save the generated content
"""
self.content = self.node.nodelist.render(self.context)
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
def render(self):
"""
Try to load content (from cache or by rendering the template).
If it fails, return an empty string or raise the exception if it's a
TemplateSyntaxError.
With this, we can no parse and render the content included in the
{% nocache %} blocks, but only if we have have this tag and if we don't
have `__partial__` to True in the context (in this case we simple
return the html with the {% nocache %} block not parsed.
"""
try:
self.load_content()
except template.TemplateSyntaxError:
raise
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when rendering template fragment')
return ''
if self.partial or self.RAW_TOKEN_START not in self.content:
return self.content
return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
"""
Return a dict with all the template tags (in the `tags` entry) and filters (in the
`filters` entry) that are available.
Both entries are a dict with the function as key, and a tuple with (library name, function
name) as value.
This is cached after the first call.
"""
libraries = get_template_libraries()
force = False
# We'll force the update of the cache if new libraries where added
if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
force = True
if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
available_tags = {}
available_filters = {}
for lib_name, lib in libraries.items():
available_tags.update(
(function, (lib_name, tag_name))
for tag_name, function
in lib.tags.items()
)
available_filters.update(
(function, (lib_name, filter_name))
for filter_name, function
in lib.filters.items()
)
CacheTag.get_all_tags_and_filters_by_function._cache = {
'tags': available_tags,
'filters': available_filters
}
return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
"""
Return the templatetags module name for which the current class is used.
It's used to render the nocache blocks by loading the correct module
"""
if cls not in CacheTag._templatetags_modules:
# find the library including the main templatetag of the current class
all_tags = cls.get_all_tags_and_filters_by_function()['tags']
CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
return CacheTag._templatetags_modules[cls]
def render_nocache(self):
"""
Render the `nocache` blocks of the content and return the whole
html
"""
tmpl = template.Template(''.join([
# start by loading the cache library
template.BLOCK_TAG_START,
'load %s' % self.get_templatetag_module(),
template.BLOCK_TAG_END,
# and surround the cached template by "raw" tags
self.RAW_TOKEN_START,
self.content,
self.RAW_TOKEN_END,
]))
return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
"""
Return the arguments taken from the templatetag that will be used to the
Node class.
Take a list of all tokens and return a list of real tokens. Here
should be done some validations (number of tokens...) and eventually
some parsing...
"""
if len(tokens) < 3:
raise template.TemplateSyntaxError(
"'%r' tag requires at least 2 arguments." % tokens[0])
return tokens[1], tokens[2], tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
"""
Register all needed templatetags, with these parameters :
* library_register : the `register` object (result of
`template.Library()`) in your templatetag module
* nodename : the node to use for the cache templatetag (the default
is "cache")
* nocache_nodename : the node to use for the nocache templatetag
"""
if cls in CacheTag._templatetags:
raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
CacheTag._templatetags[cls] = {}
def templatetag_cache(parser, token):
"""
Return a new Node object for the main cache templatetag
"""
nodelist = parser.parse(('end%s' % nodename,))
parser.delete_first_token()
args = cls.get_template_node_arguments(token.contents.split())
return cls.Node(nodename, nodelist, *args)
library_register.tag(nodename, templatetag_cache)
CacheTag._templatetags[cls]['cache'] = templatetag_cache
def templatetag_raw(parser, token):
"""
Return a TextNode with all html not parsed, used for templatetags
that need to not be parsed : the `nocache` one and the `RAW` one,
used to surround cached html (to be not parsed again)
Based on http://www.holovaty.com/writing/django-two-phased-rendering/
"""
# Whatever is between {% nocache %} and {% endnocache %} will be preserved as
# raw, un-rendered template code.
text = []
parse_until = 'end%s' % token.contents
tag_mapping = {
TOKEN_TEXT: ('', ''),
TOKEN_VAR: ('{{', '}}'),
TOKEN_BLOCK: ('{%', '%}'),
TOKEN_COMMENT: ('{#', '#}'),
}
# By the time this template tag is called, the template system has already
# lexed the template into tokens. Here, we loop over the tokens until
# {% endraw %} and parse them to TextNodes. We have to add the start and
# end bits (e.g. "{{" for variables) because those have already been
# stripped off in a previous part of the template-parsing process.
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
return template.TextNode(''.join(text))
start, end = tag_mapping[token.token_type]
text.append('%s%s%s' % (start, token.contents, end))
parser.unclosed_block_tag(parse_until)
library_register.tag(cls.RAW_TOKEN, templatetag_raw)
CacheTag._templatetags[cls]['raw'] = templatetag_raw
def templatetag_nocache(parser, token):
"""
Return a TextNode with raw html from the `nocache` templatetag,
and surround it with `endRAW` and `RAW` (precisely
`cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
So for
{% nocache %}foo{% endnocache %}
we get
{% endRAW... %}foo{% RAW... %}
When the main cache templatetag content will be loaded from cache,
it will be surrounded by the same templatetags, reversed.
So if at first we had
{% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
The cached version will be
bar{% endRAW... %}foo{% RAW... %}baz
And the final html to be rendered will be
{% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
And the html within `RAW` and `endRAW` will not be parsed, as wanted
"""
# We'll load in the no-cache part all template tags and filters loaded in the main
# template, to be able to use it when the no-cache will be rendered
all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
available_tags = all_tags_and_filters['tags']
available_filters = all_tags_and_filters['filters']
needed = {}
current_module = cls.get_templatetag_module()
for function in parser.tags.values():
if function in available_tags:
lib, name = available_tags[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
for function in parser.filters.values():
if function in available_filters:
lib, name = available_filters[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
load_string = ''.join(
'%sload %s from %s%s' % (
template.BLOCK_TAG_START,
' '.join(names),
lib,
template.BLOCK_TAG_END,
)
for lib, names in needed.items()
)
node = templatetag_raw(parser, token)
node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
return node
library_register.tag(nocache_nodename, templatetag_nocache)
CacheTag._templatetags['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.render
|
python
|
def render(self):
try:
self.load_content()
except template.TemplateSyntaxError:
raise
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when rendering template fragment')
return ''
if self.partial or self.RAW_TOKEN_START not in self.content:
return self.content
return self.render_nocache()
|
Try to load content (from cache or by rendering the template).
If it fails, return an empty string or raise the exception if it's a
TemplateSyntaxError.
With this, we can no parse and render the content included in the
{% nocache %} blocks, but only if we have have this tag and if we don't
have `__partial__` to True in the context (in this case we simple
return the html with the {% nocache %} block not parsed.
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L493-L516
|
[
"def is_template_debug_activated():\n if django_version < (1, 8):\n return settings.TEMPLATE_DEBUG\n\n # not so simple now, it's an option of a template backend\n for template_settings in settings.TEMPLATES:\n if template_settings['BACKEND'] == 'django.template.backends.django.DjangoTemplates':\n return bool(template_settings.get('OPTIONS', {}).get('debug', False))\n\n return False\n",
"def load_content(self):\n \"\"\"\n It's the main method of the class.\n Try to load the template from cache, get the versions and decode the\n content.\n If something was wrong during this process (or if we had a\n `__regenerate__` value to True in the context), create new content and\n save it in cache.\n \"\"\"\n\n self.content = None\n\n if not self.regenerate:\n try:\n self.content = self.cache_get()\n except Exception:\n if is_template_debug_activated():\n raise\n logger.exception('Error when getting the cached template fragment')\n\n try:\n\n assert self.content\n\n self.split_content_version()\n\n assert self.content\n\n if self.content_internal_version != self.INTERNAL_VERSION or (\n self.options.versioning and self.content_version != self.version):\n self.content = None\n\n assert self.content\n\n if self.options.compress:\n self.decode_content()\n\n except Exception:\n self.create_content()\n\n self.content = smart_str(self.content)\n",
"def render_nocache(self):\n \"\"\"\n Render the `nocache` blocks of the content and return the whole\n html\n \"\"\"\n tmpl = template.Template(''.join([\n # start by loading the cache library\n template.BLOCK_TAG_START,\n 'load %s' % self.get_templatetag_module(),\n template.BLOCK_TAG_END,\n # and surround the cached template by \"raw\" tags\n self.RAW_TOKEN_START,\n self.content,\n self.RAW_TOKEN_END,\n ]))\n return tmpl.render(self.context)\n"
] |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
"""
Constructor of the Cache class:
* preparing fields to be used later,
* prepare the templatetag parameters
* create the cache key
"""
super(CacheTag, self).__init__()
# the actual Node object
self.node = node
# the context used for the rendering
self.context = context
# indicate that we force regenerating the cache, even if it exists
self.regenerate = bool(self.context.get('__regenerate__', False))
# indicate if we only want html without parsing the nocache parts
self.partial = bool(self.context.get('__partial__', False))
# the content of the template, will be used through the whole process
self.content = ''
# the version used in the cached templatetag
self.content_version = None
# Final "INTERNAL_VERSION"
if self.options.internal_version:
self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
self.options.internal_version)
else:
self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
# prepare all parameters passed to the templatetag
self.expire_time = None
self.version = None
self.prepare_params()
# get the cache and cache key
self.cache = self.get_cache_object()
self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
"""
Return the expire time passed to the templatetag.
Must be None or an integer.
"""
try:
expire_time = self.node.expire_time.resolve(self.context)
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.expire_time.var))
try:
if expire_time is not None:
expire_time = str(expire_time)
if not expire_time.isdigit():
raise TypeError
expire_time = int(expire_time)
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
'"%s" tag got a non-integer (or None) timeout value: %r' % (
self.node.nodename, expire_time
)
)
return expire_time
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
def get_cache_key(self):
"""
Compute and return the final cache key, using return values of
`get_base_cache_key` and `get_cache_key_args`.
"""
return self.get_base_cache_key() % self.get_cache_key_args()
def get_cache_object(self):
"""
Return the cache object to be used to set and get the values in cache.
By default it's the default cache defined by django, but it can be
every object with a `get` and a `set` method (or not, if `cache_get`
and `cache_set` methods are overridden)
"""
return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
"""
Get content from the cache
"""
return self.cache.get(self.cache_key)
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
"""
Add the version(s) to the content to cache : internal version at first
and then the template version if versioning is activated.
Each version, and the content, are separated with `VERSION_SEPARATOR`.
This method is called after the encoding (if "compress" or
"compress_spaces" options are on)
"""
parts = [self.INTERNAL_VERSION]
if self.options.versioning:
parts.append(force_bytes(self.version))
parts.append(force_bytes(to_cache))
return self.VERSION_SEPARATOR.join(parts)
def split_content_version(self):
"""
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
"""
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content))
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def render_node(self):
"""
Render the template and save the generated content
"""
self.content = self.node.nodelist.render(self.context)
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
def load_content(self):
"""
It's the main method of the class.
Try to load the template from cache, get the versions and decode the
content.
If something was wrong during this process (or if we had a
`__regenerate__` value to True in the context), create new content and
save it in cache.
"""
self.content = None
if not self.regenerate:
try:
self.content = self.cache_get()
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when getting the cached template fragment')
try:
assert self.content
self.split_content_version()
assert self.content
if self.content_internal_version != self.INTERNAL_VERSION or (
self.options.versioning and self.content_version != self.version):
self.content = None
assert self.content
if self.options.compress:
self.decode_content()
except Exception:
self.create_content()
self.content = smart_str(self.content)
@staticmethod
def get_all_tags_and_filters_by_function():
"""
Return a dict with all the template tags (in the `tags` entry) and filters (in the
`filters` entry) that are available.
Both entries are a dict with the function as key, and a tuple with (library name, function
name) as value.
This is cached after the first call.
"""
libraries = get_template_libraries()
force = False
# We'll force the update of the cache if new libraries where added
if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
force = True
if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
available_tags = {}
available_filters = {}
for lib_name, lib in libraries.items():
available_tags.update(
(function, (lib_name, tag_name))
for tag_name, function
in lib.tags.items()
)
available_filters.update(
(function, (lib_name, filter_name))
for filter_name, function
in lib.filters.items()
)
CacheTag.get_all_tags_and_filters_by_function._cache = {
'tags': available_tags,
'filters': available_filters
}
return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
"""
Return the templatetags module name for which the current class is used.
It's used to render the nocache blocks by loading the correct module
"""
if cls not in CacheTag._templatetags_modules:
# find the library including the main templatetag of the current class
all_tags = cls.get_all_tags_and_filters_by_function()['tags']
CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
return CacheTag._templatetags_modules[cls]
def render_nocache(self):
"""
Render the `nocache` blocks of the content and return the whole
html
"""
tmpl = template.Template(''.join([
# start by loading the cache library
template.BLOCK_TAG_START,
'load %s' % self.get_templatetag_module(),
template.BLOCK_TAG_END,
# and surround the cached template by "raw" tags
self.RAW_TOKEN_START,
self.content,
self.RAW_TOKEN_END,
]))
return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
"""
Return the arguments taken from the templatetag that will be used to the
Node class.
Take a list of all tokens and return a list of real tokens. Here
should be done some validations (number of tokens...) and eventually
some parsing...
"""
if len(tokens) < 3:
raise template.TemplateSyntaxError(
"'%r' tag requires at least 2 arguments." % tokens[0])
return tokens[1], tokens[2], tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
"""
Register all needed templatetags, with these parameters :
* library_register : the `register` object (result of
`template.Library()`) in your templatetag module
* nodename : the node to use for the cache templatetag (the default
is "cache")
* nocache_nodename : the node to use for the nocache templatetag
"""
if cls in CacheTag._templatetags:
raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
CacheTag._templatetags[cls] = {}
def templatetag_cache(parser, token):
"""
Return a new Node object for the main cache templatetag
"""
nodelist = parser.parse(('end%s' % nodename,))
parser.delete_first_token()
args = cls.get_template_node_arguments(token.contents.split())
return cls.Node(nodename, nodelist, *args)
library_register.tag(nodename, templatetag_cache)
CacheTag._templatetags[cls]['cache'] = templatetag_cache
def templatetag_raw(parser, token):
"""
Return a TextNode with all html not parsed, used for templatetags
that need to not be parsed : the `nocache` one and the `RAW` one,
used to surround cached html (to be not parsed again)
Based on http://www.holovaty.com/writing/django-two-phased-rendering/
"""
# Whatever is between {% nocache %} and {% endnocache %} will be preserved as
# raw, un-rendered template code.
text = []
parse_until = 'end%s' % token.contents
tag_mapping = {
TOKEN_TEXT: ('', ''),
TOKEN_VAR: ('{{', '}}'),
TOKEN_BLOCK: ('{%', '%}'),
TOKEN_COMMENT: ('{#', '#}'),
}
# By the time this template tag is called, the template system has already
# lexed the template into tokens. Here, we loop over the tokens until
# {% endraw %} and parse them to TextNodes. We have to add the start and
# end bits (e.g. "{{" for variables) because those have already been
# stripped off in a previous part of the template-parsing process.
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
return template.TextNode(''.join(text))
start, end = tag_mapping[token.token_type]
text.append('%s%s%s' % (start, token.contents, end))
parser.unclosed_block_tag(parse_until)
library_register.tag(cls.RAW_TOKEN, templatetag_raw)
CacheTag._templatetags[cls]['raw'] = templatetag_raw
def templatetag_nocache(parser, token):
"""
Return a TextNode with raw html from the `nocache` templatetag,
and surround it with `endRAW` and `RAW` (precisely
`cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
So for
{% nocache %}foo{% endnocache %}
we get
{% endRAW... %}foo{% RAW... %}
When the main cache templatetag content will be loaded from cache,
it will be surrounded by the same templatetags, reversed.
So if at first we had
{% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
The cached version will be
bar{% endRAW... %}foo{% RAW... %}baz
And the final html to be rendered will be
{% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
And the html within `RAW` and `endRAW` will not be parsed, as wanted
"""
# We'll load in the no-cache part all template tags and filters loaded in the main
# template, to be able to use it when the no-cache will be rendered
all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
available_tags = all_tags_and_filters['tags']
available_filters = all_tags_and_filters['filters']
needed = {}
current_module = cls.get_templatetag_module()
for function in parser.tags.values():
if function in available_tags:
lib, name = available_tags[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
for function in parser.filters.values():
if function in available_filters:
lib, name = available_filters[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
load_string = ''.join(
'%sload %s from %s%s' % (
template.BLOCK_TAG_START,
' '.join(names),
lib,
template.BLOCK_TAG_END,
)
for lib, names in needed.items()
)
node = templatetag_raw(parser, token)
node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
return node
library_register.tag(nocache_nodename, templatetag_nocache)
CacheTag._templatetags['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.get_all_tags_and_filters_by_function
|
python
|
def get_all_tags_and_filters_by_function():
libraries = get_template_libraries()
force = False
# We'll force the update of the cache if new libraries where added
if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
force = True
if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
available_tags = {}
available_filters = {}
for lib_name, lib in libraries.items():
available_tags.update(
(function, (lib_name, tag_name))
for tag_name, function
in lib.tags.items()
)
available_filters.update(
(function, (lib_name, filter_name))
for filter_name, function
in lib.filters.items()
)
CacheTag.get_all_tags_and_filters_by_function._cache = {
'tags': available_tags,
'filters': available_filters
}
return CacheTag.get_all_tags_and_filters_by_function._cache
|
Return a dict with all the template tags (in the `tags` entry) and filters (in the
`filters` entry) that are available.
Both entries are a dict with the function as key, and a tuple with (library name, function
name) as value.
This is cached after the first call.
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L519-L559
|
[
"def get_template_libraries():\n try:\n from django.template.base import libraries\n except ImportError:\n # Django >= 1.9\n from django.template import engines\n libraries = engines['django'].engine.template_libraries\n\n return libraries\n"
] |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
"""
Constructor of the Cache class:
* preparing fields to be used later,
* prepare the templatetag parameters
* create the cache key
"""
super(CacheTag, self).__init__()
# the actual Node object
self.node = node
# the context used for the rendering
self.context = context
# indicate that we force regenerating the cache, even if it exists
self.regenerate = bool(self.context.get('__regenerate__', False))
# indicate if we only want html without parsing the nocache parts
self.partial = bool(self.context.get('__partial__', False))
# the content of the template, will be used through the whole process
self.content = ''
# the version used in the cached templatetag
self.content_version = None
# Final "INTERNAL_VERSION"
if self.options.internal_version:
self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
self.options.internal_version)
else:
self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
# prepare all parameters passed to the templatetag
self.expire_time = None
self.version = None
self.prepare_params()
# get the cache and cache key
self.cache = self.get_cache_object()
self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
"""
Return the expire time passed to the templatetag.
Must be None or an integer.
"""
try:
expire_time = self.node.expire_time.resolve(self.context)
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.expire_time.var))
try:
if expire_time is not None:
expire_time = str(expire_time)
if not expire_time.isdigit():
raise TypeError
expire_time = int(expire_time)
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
'"%s" tag got a non-integer (or None) timeout value: %r' % (
self.node.nodename, expire_time
)
)
return expire_time
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
    def get_pk(self):
        """
        Return the pk to use in the cache key. It's the first value of the
        templatetag arguments after the fragment name.
        """
        # note: raises IndexError if the tag was called with no extra argument
        return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
    def get_cache_key(self):
        """
        Compute and return the final cache key, using return values of
        `get_base_cache_key` and `get_cache_key_args`.
        """
        return self.get_base_cache_key() % self.get_cache_key_args()
    def get_cache_object(self):
        """
        Return the cache object to be used to set and get the values in cache.
        By default it's the default cache defined by django, but it can be
        every object with a `get` and a `set` method (or not, if `cache_get`
        and `cache_set` methods are overridden)
        """
        # `get_cache` is defined elsewhere in this module; presumably it wraps
        # Django's cache lookup by backend alias -- verify.
        return get_cache(self.node.cache_backend or self.options.cache_backend)
    def cache_get(self):
        """
        Get content from the cache.
        """
        return self.cache.get(self.cache_key)
    def cache_set(self, to_cache):
        """
        Set content into the cache.
        """
        # NOTE(review): expire_time may be None -- check how the configured
        # backend treats a None timeout before relying on it.
        self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
"""
Add the version(s) to the content to cache : internal version at first
and then the template version if versioning is activated.
Each version, and the content, are separated with `VERSION_SEPARATOR`.
This method is called after the encoding (if "compress" or
"compress_spaces" options are on)
"""
parts = [self.INTERNAL_VERSION]
if self.options.versioning:
parts.append(force_bytes(self.version))
parts.append(force_bytes(to_cache))
return self.VERSION_SEPARATOR.join(parts)
    def split_content_version(self):
        """
        Remove and return the version(s) from the cached content. First the
        internal version, and if versioning is activated, the template one.
        And finally save the content, but only if all versions match.
        The content saved is the encoded one (if "compress" or
        "compress_spaces" options are on). By doing so, we avoid decoding if
        the versions didn't match, to save some cpu cycles.
        """
        try:
            nb_parts = 2
            if self.options.versioning:
                nb_parts = 3
            # split only on the leading separators so the content itself may
            # legitimately contain `VERSION_SEPARATOR`
            parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
            assert len(parts) == nb_parts
            self.content_internal_version = parts[0]
            if self.options.versioning:
                self.content_version = parts[1]
            self.content = parts[-1]
        except Exception:
            # deliberately broad: any malformed cached value invalidates the
            # entry; a None content triggers a regeneration in `load_content`
            self.content = None
    def decode_content(self):
        """
        Decode (decompress...) the content got from the cache, to the final
        html.
        """
        # NOTE(review): pickle.loads on cached data is only safe while the
        # cache backend cannot be written by untrusted parties -- verify.
        self.content = pickle.loads(zlib.decompress(self.content))
    def encode_content(self):
        """
        Encode (compress...) the html to the data to be cached.
        """
        return zlib.compress(pickle.dumps(self.content))
    def render_node(self):
        """
        Render the tag's nodelist with the current context and save the
        generated html in `self.content`.
        """
        self.content = self.node.nodelist.render(self.context)
    def create_content(self):
        """
        Render the template, apply options on it, and save it to the cache.
        """
        self.render_node()
        if self.options.compress_spaces:
            # collapse runs of whitespace into a single space
            self.content = self.RE_SPACELESS.sub(' ', self.content)
        if self.options.compress:
            to_cache = self.encode_content()
        else:
            to_cache = self.content
        # prefix with the internal (and optional template) version(s)
        to_cache = self.join_content_version(to_cache)
        try:
            self.cache_set(to_cache)
        except Exception:
            # best-effort: a cache-write failure must not break rendering,
            # except when template debugging is activated
            if is_template_debug_activated():
                raise
            logger.exception('Error when saving the cached template fragment')
    def load_content(self):
        """
        It's the main method of the class.
        Try to load the template from cache, get the versions and decode the
        content.
        If something was wrong during this process (or if we had a
        `__regenerate__` value to True in the context), create new content and
        save it in cache.
        """
        self.content = None
        if not self.regenerate:
            try:
                self.content = self.cache_get()
            except Exception:
                if is_template_debug_activated():
                    raise
                logger.exception('Error when getting the cached template fragment')
        try:
            # any failure below (empty value, bad format, version mismatch...)
            # falls through to `create_content`, which regenerates the fragment
            assert self.content
            self.split_content_version()
            assert self.content
            if self.content_internal_version != self.INTERNAL_VERSION or (
                    self.options.versioning and self.content_version != self.version):
                self.content = None
            assert self.content
            if self.options.compress:
                self.decode_content()
        except Exception:
            self.create_content()
        self.content = smart_str(self.content)
    def render(self):
        """
        Try to load content (from cache or by rendering the template).
        If it fails, return an empty string or raise the exception if it's a
        TemplateSyntaxError.
        With this, we can now parse and render the content included in the
        {% nocache %} blocks, but only if we have this tag and if we don't
        have `__partial__` to True in the context (in this case we simply
        return the html with the {% nocache %} block not parsed).
        """
        try:
            self.load_content()
        except template.TemplateSyntaxError:
            raise
        except Exception:
            if is_template_debug_activated():
                raise
            logger.exception('Error when rendering template fragment')
            return ''
        # nothing to post-process if no nocache block is present (or wanted)
        if self.partial or self.RAW_TOKEN_START not in self.content:
            return self.content
        return self.render_nocache()
@staticmethod
@classmethod
def get_templatetag_module(cls):
"""
Return the templatetags module name for which the current class is used.
It's used to render the nocache blocks by loading the correct module
"""
if cls not in CacheTag._templatetags_modules:
# find the library including the main templatetag of the current class
all_tags = cls.get_all_tags_and_filters_by_function()['tags']
CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
return CacheTag._templatetags_modules[cls]
    def render_nocache(self):
        """
        Render the `nocache` blocks of the content and return the whole
        html. The cached html is wrapped in RAW tags so only the nocache
        parts are parsed again.
        """
        tmpl = template.Template(''.join([
            # start by loading the cache library
            template.BLOCK_TAG_START,
            'load %s' % self.get_templatetag_module(),
            template.BLOCK_TAG_END,
            # and surround the cached template by "raw" tags
            self.RAW_TOKEN_START,
            self.content,
            self.RAW_TOKEN_END,
        ]))
        return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
"""
Return the arguments taken from the templatetag that will be used to the
Node class.
Take a list of all tokens and return a list of real tokens. Here
should be done some validations (number of tokens...) and eventually
some parsing...
"""
if len(tokens) < 3:
raise template.TemplateSyntaxError(
"'%r' tag requires at least 2 arguments." % tokens[0])
return tokens[1], tokens[2], tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
"""
Register all needed templatetags, with these parameters :
* library_register : the `register` object (result of
`template.Library()`) in your templatetag module
* nodename : the node to use for the cache templatetag (the default
is "cache")
* nocache_nodename : the node to use for the nocache templatetag
"""
if cls in CacheTag._templatetags:
raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
CacheTag._templatetags[cls] = {}
def templatetag_cache(parser, token):
"""
Return a new Node object for the main cache templatetag
"""
nodelist = parser.parse(('end%s' % nodename,))
parser.delete_first_token()
args = cls.get_template_node_arguments(token.contents.split())
return cls.Node(nodename, nodelist, *args)
library_register.tag(nodename, templatetag_cache)
CacheTag._templatetags[cls]['cache'] = templatetag_cache
def templatetag_raw(parser, token):
"""
Return a TextNode with all html not parsed, used for templatetags
that need to not be parsed : the `nocache` one and the `RAW` one,
used to surround cached html (to be not parsed again)
Based on http://www.holovaty.com/writing/django-two-phased-rendering/
"""
# Whatever is between {% nocache %} and {% endnocache %} will be preserved as
# raw, un-rendered template code.
text = []
parse_until = 'end%s' % token.contents
tag_mapping = {
TOKEN_TEXT: ('', ''),
TOKEN_VAR: ('{{', '}}'),
TOKEN_BLOCK: ('{%', '%}'),
TOKEN_COMMENT: ('{#', '#}'),
}
# By the time this template tag is called, the template system has already
# lexed the template into tokens. Here, we loop over the tokens until
# {% endraw %} and parse them to TextNodes. We have to add the start and
# end bits (e.g. "{{" for variables) because those have already been
# stripped off in a previous part of the template-parsing process.
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
return template.TextNode(''.join(text))
start, end = tag_mapping[token.token_type]
text.append('%s%s%s' % (start, token.contents, end))
parser.unclosed_block_tag(parse_until)
library_register.tag(cls.RAW_TOKEN, templatetag_raw)
CacheTag._templatetags[cls]['raw'] = templatetag_raw
def templatetag_nocache(parser, token):
"""
Return a TextNode with raw html from the `nocache` templatetag,
and surround it with `endRAW` and `RAW` (precisely
`cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
So for
{% nocache %}foo{% endnocache %}
we get
{% endRAW... %}foo{% RAW... %}
When the main cache templatetag content will be loaded from cache,
it will be surrounded by the same templatetags, reversed.
So if at first we had
{% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
The cached version will be
bar{% endRAW... %}foo{% RAW... %}baz
And the final html to be rendered will be
{% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
And the html within `RAW` and `endRAW` will not be parsed, as wanted
"""
# We'll load in the no-cache part all template tags and filters loaded in the main
# template, to be able to use it when the no-cache will be rendered
all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
available_tags = all_tags_and_filters['tags']
available_filters = all_tags_and_filters['filters']
needed = {}
current_module = cls.get_templatetag_module()
for function in parser.tags.values():
if function in available_tags:
lib, name = available_tags[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
for function in parser.filters.values():
if function in available_filters:
lib, name = available_filters[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
load_string = ''.join(
'%sload %s from %s%s' % (
template.BLOCK_TAG_START,
' '.join(names),
lib,
template.BLOCK_TAG_END,
)
for lib, names in needed.items()
)
node = templatetag_raw(parser, token)
node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
return node
library_register.tag(nocache_nodename, templatetag_nocache)
CacheTag._templatetags['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.get_templatetag_module
|
python
|
def get_templatetag_module(cls):
if cls not in CacheTag._templatetags_modules:
# find the library including the main templatetag of the current class
all_tags = cls.get_all_tags_and_filters_by_function()['tags']
CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
return CacheTag._templatetags_modules[cls]
|
Return the templatetags module name for which the current class is used.
It's used to render the nocache blocks by loading the correct module
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L562-L571
|
[
"def get_all_tags_and_filters_by_function():\n \"\"\"\n Return a dict with all the template tags (in the `tags` entry) and filters (in the\n `filters` entry) that are available.\n Both entries are a dict with the function as key, and a tuple with (library name, function\n name) as value.\n This is cached after the first call.\n \"\"\"\n\n libraries = get_template_libraries()\n\n force = False\n\n # We'll force the update of the cache if new libraries where added\n if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):\n if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:\n force = True\n\n if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):\n CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)\n available_tags = {}\n available_filters = {}\n\n for lib_name, lib in libraries.items():\n available_tags.update(\n (function, (lib_name, tag_name))\n for tag_name, function\n in lib.tags.items()\n )\n available_filters.update(\n (function, (lib_name, filter_name))\n for filter_name, function\n in lib.filters.items()\n )\n\n CacheTag.get_all_tags_and_filters_by_function._cache = {\n 'tags': available_tags,\n 'filters': available_filters\n }\n\n return CacheTag.get_all_tags_and_filters_by_function._cache\n"
] |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
"""
Constructor of the Cache class:
* preparing fields to be used later,
* prepare the templatetag parameters
* create the cache key
"""
super(CacheTag, self).__init__()
# the actual Node object
self.node = node
# the context used for the rendering
self.context = context
# indicate that we force regenerating the cache, even if it exists
self.regenerate = bool(self.context.get('__regenerate__', False))
# indicate if we only want html without parsing the nocache parts
self.partial = bool(self.context.get('__partial__', False))
# the content of the template, will be used through the whole process
self.content = ''
# the version used in the cached templatetag
self.content_version = None
# Final "INTERNAL_VERSION"
if self.options.internal_version:
self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
self.options.internal_version)
else:
self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
# prepare all parameters passed to the templatetag
self.expire_time = None
self.version = None
self.prepare_params()
# get the cache and cache key
self.cache = self.get_cache_object()
self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
"""
Return the expire time passed to the templatetag.
Must be None or an integer.
"""
try:
expire_time = self.node.expire_time.resolve(self.context)
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.expire_time.var))
try:
if expire_time is not None:
expire_time = str(expire_time)
if not expire_time.isdigit():
raise TypeError
expire_time = int(expire_time)
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
'"%s" tag got a non-integer (or None) timeout value: %r' % (
self.node.nodename, expire_time
)
)
return expire_time
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
def get_cache_key(self):
"""
Compute and return the final cache key, using return values of
`get_base_cache_key` and `get_cache_key_args`.
"""
return self.get_base_cache_key() % self.get_cache_key_args()
def get_cache_object(self):
"""
Return the cache object to be used to set and get the values in cache.
By default it's the default cache defined by django, but it can be
every object with a `get` and a `set` method (or not, if `cache_get`
and `cache_set` methods are overridden)
"""
return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
"""
Get content from the cache
"""
return self.cache.get(self.cache_key)
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
"""
Add the version(s) to the content to cache : internal version at first
and then the template version if versioning is activated.
Each version, and the content, are separated with `VERSION_SEPARATOR`.
This method is called after the encoding (if "compress" or
"compress_spaces" options are on)
"""
parts = [self.INTERNAL_VERSION]
if self.options.versioning:
parts.append(force_bytes(self.version))
parts.append(force_bytes(to_cache))
return self.VERSION_SEPARATOR.join(parts)
def split_content_version(self):
"""
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
"""
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content))
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def render_node(self):
"""
Render the template and save the generated content
"""
self.content = self.node.nodelist.render(self.context)
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
def load_content(self):
"""
It's the main method of the class.
Try to load the template from cache, get the versions and decode the
content.
If something was wrong during this process (or if we had a
`__regenerate__` value to True in the context), create new content and
save it in cache.
"""
self.content = None
if not self.regenerate:
try:
self.content = self.cache_get()
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when getting the cached template fragment')
try:
assert self.content
self.split_content_version()
assert self.content
if self.content_internal_version != self.INTERNAL_VERSION or (
self.options.versioning and self.content_version != self.version):
self.content = None
assert self.content
if self.options.compress:
self.decode_content()
except Exception:
self.create_content()
self.content = smart_str(self.content)
def render(self):
"""
Try to load content (from cache or by rendering the template).
If it fails, return an empty string or raise the exception if it's a
TemplateSyntaxError.
With this, we can no parse and render the content included in the
{% nocache %} blocks, but only if we have have this tag and if we don't
have `__partial__` to True in the context (in this case we simple
return the html with the {% nocache %} block not parsed.
"""
try:
self.load_content()
except template.TemplateSyntaxError:
raise
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when rendering template fragment')
return ''
if self.partial or self.RAW_TOKEN_START not in self.content:
return self.content
return self.render_nocache()
    @staticmethod
    def get_all_tags_and_filters_by_function():
        """
        Return a dict with all the template tags (in the `tags` entry) and filters (in the
        `filters` entry) that are available.
        Both entries are a dict with the function as key, and a tuple with (library name, function
        name) as value.
        This is cached after the first call.
        """
        libraries = get_template_libraries()
        force = False
        # We'll force the update of the cache if new libraries were added
        if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
            if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
                force = True
        if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
            # the cache is stored as function attributes so it is shared by
            # all CacheTag subclasses
            CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
            available_tags = {}
            available_filters = {}
            for lib_name, lib in libraries.items():
                available_tags.update(
                    (function, (lib_name, tag_name))
                    for tag_name, function
                    in lib.tags.items()
                )
                available_filters.update(
                    (function, (lib_name, filter_name))
                    for filter_name, function
                    in lib.filters.items()
                )
            CacheTag.get_all_tags_and_filters_by_function._cache = {
                'tags': available_tags,
                'filters': available_filters
            }
        return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def render_nocache(self):
"""
Render the `nocache` blocks of the content and return the whole
html
"""
tmpl = template.Template(''.join([
# start by loading the cache library
template.BLOCK_TAG_START,
'load %s' % self.get_templatetag_module(),
template.BLOCK_TAG_END,
# and surround the cached template by "raw" tags
self.RAW_TOKEN_START,
self.content,
self.RAW_TOKEN_END,
]))
return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
"""
Return the arguments taken from the templatetag that will be used to the
Node class.
Take a list of all tokens and return a list of real tokens. Here
should be done some validations (number of tokens...) and eventually
some parsing...
"""
if len(tokens) < 3:
raise template.TemplateSyntaxError(
"'%r' tag requires at least 2 arguments." % tokens[0])
return tokens[1], tokens[2], tokens[3:]
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
"""
Register all needed templatetags, with these parameters :
* library_register : the `register` object (result of
`template.Library()`) in your templatetag module
* nodename : the node to use for the cache templatetag (the default
is "cache")
* nocache_nodename : the node to use for the nocache templatetag
"""
if cls in CacheTag._templatetags:
raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
CacheTag._templatetags[cls] = {}
def templatetag_cache(parser, token):
"""
Return a new Node object for the main cache templatetag
"""
nodelist = parser.parse(('end%s' % nodename,))
parser.delete_first_token()
args = cls.get_template_node_arguments(token.contents.split())
return cls.Node(nodename, nodelist, *args)
library_register.tag(nodename, templatetag_cache)
CacheTag._templatetags[cls]['cache'] = templatetag_cache
def templatetag_raw(parser, token):
"""
Return a TextNode with all html not parsed, used for templatetags
that need to not be parsed : the `nocache` one and the `RAW` one,
used to surround cached html (to be not parsed again)
Based on http://www.holovaty.com/writing/django-two-phased-rendering/
"""
# Whatever is between {% nocache %} and {% endnocache %} will be preserved as
# raw, un-rendered template code.
text = []
parse_until = 'end%s' % token.contents
tag_mapping = {
TOKEN_TEXT: ('', ''),
TOKEN_VAR: ('{{', '}}'),
TOKEN_BLOCK: ('{%', '%}'),
TOKEN_COMMENT: ('{#', '#}'),
}
# By the time this template tag is called, the template system has already
# lexed the template into tokens. Here, we loop over the tokens until
# {% endraw %} and parse them to TextNodes. We have to add the start and
# end bits (e.g. "{{" for variables) because those have already been
# stripped off in a previous part of the template-parsing process.
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
return template.TextNode(''.join(text))
start, end = tag_mapping[token.token_type]
text.append('%s%s%s' % (start, token.contents, end))
parser.unclosed_block_tag(parse_until)
library_register.tag(cls.RAW_TOKEN, templatetag_raw)
CacheTag._templatetags[cls]['raw'] = templatetag_raw
def templatetag_nocache(parser, token):
"""
Return a TextNode with raw html from the `nocache` templatetag,
and surround it with `endRAW` and `RAW` (precisely
`cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
So for
{% nocache %}foo{% endnocache %}
we get
{% endRAW... %}foo{% RAW... %}
When the main cache templatetag content will be loaded from cache,
it will be surrounded by the same templatetags, reversed.
So if at first we had
{% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
The cached version will be
bar{% endRAW... %}foo{% RAW... %}baz
And the final html to be rendered will be
{% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
And the html within `RAW` and `endRAW` will not be parsed, as wanted
"""
# We'll load in the no-cache part all template tags and filters loaded in the main
# template, to be able to use it when the no-cache will be rendered
all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
available_tags = all_tags_and_filters['tags']
available_filters = all_tags_and_filters['filters']
needed = {}
current_module = cls.get_templatetag_module()
for function in parser.tags.values():
if function in available_tags:
lib, name = available_tags[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
for function in parser.filters.values():
if function in available_filters:
lib, name = available_filters[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
load_string = ''.join(
'%sload %s from %s%s' % (
template.BLOCK_TAG_START,
' '.join(names),
lib,
template.BLOCK_TAG_END,
)
for lib, names in needed.items()
)
node = templatetag_raw(parser, token)
node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
return node
library_register.tag(nocache_nodename, templatetag_nocache)
CacheTag._templatetags['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.render_nocache
|
python
|
def render_nocache(self):
tmpl = template.Template(''.join([
# start by loading the cache library
template.BLOCK_TAG_START,
'load %s' % self.get_templatetag_module(),
template.BLOCK_TAG_END,
# and surround the cached template by "raw" tags
self.RAW_TOKEN_START,
self.content,
self.RAW_TOKEN_END,
]))
return tmpl.render(self.context)
|
Render the `nocache` blocks of the content and return the whole
html
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L573-L588
|
[
"def get_templatetag_module(cls):\n \"\"\"\n Return the templatetags module name for which the current class is used.\n It's used to render the nocache blocks by loading the correct module\n \"\"\"\n if cls not in CacheTag._templatetags_modules:\n # find the library including the main templatetag of the current class\n all_tags = cls.get_all_tags_and_filters_by_function()['tags']\n CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]\n return CacheTag._templatetags_modules[cls]\n"
] |
class CacheTag(object, metaclass=CacheTagMetaClass):
    """
    The main class of `django-adv-cache-tag` which does all the work.

    To change its behaviour, simply change one or more of these settings
    (see in the `Meta` class for details) :

    * ADV_CACHE_VERSIONING
    * ADV_CACHE_COMPRESS
    * ADV_CACHE_COMPRESS_SPACES
    * ADV_CACHE_INCLUDE_PK
    * ADV_CACHE_BACKEND
    * ADV_CACHE_VERSION
    * ADV_CACHE_RESOLVE_NAME

    Or inherit from this class and don't forget to register your tag :

        from adv_cache_tag.tag import CacheTag
        register = template.Library()
        class MyCacheTag(CacheTag):
            # change something
        MyCacheTag.register(register, 'my_cache')

    By inheriting you can change many things as CacheTag implements a lot of
    small methods.
    """
    # Will change if the algorithm changes
    INTERNAL_VERSION = '1'
    # Used to separate internal version, template version, and the content
    VERSION_SEPARATOR = '::'
    # Regex used to reduce spaces/blanks (many spaces into one)
    RE_SPACELESS = re.compile(r'\s\s+')
    # generate a token for this site, based on the secret_key
    # (double sha1 with two salts, so the token cannot be guessed without SECRET_KEY)
    RAW_TOKEN = 'RAW_' + hashlib.sha1(
        b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
            b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
        ).hexdigest())
    ).hexdigest()
    # tokens to use around the already parsed parts of the cached template
    RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
    RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
    # internal use only: keep reference to templatetags functions
    _templatetags = {}
    # internal use only: name of the templatetags module to load for this class and subclasses
    _templatetags_modules = {}
    # filled by the metaclass with the `Meta` options (see comment below)
    options = None
    # default Node class; presumably rebound per subclass by the metaclass — TODO confirm
    Node = Node

    class Meta:
        """
        Options of this class. Accessible via cls.options or self.options.
        To force (and/or add) options in your own class, simply redefine a
        `Meta` class in your own main cache class with updated/added values.
        """
        # If versioning is activated (internal versioning is always on)
        versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
        # If the content will be compressed before caching
        compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
        # If many spaces/blanks will be converted into one
        compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
        # If a "pk" (you can pass what you want) will be added to the cache key
        include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
        # The cache backend to use (or use the "default" one)
        cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
        # Part of the INTERNAL_VERSION configurable via settings
        internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
        # If the fragment name should be resolved or taken as is
        resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)

    # Use a metaclass to use the right class in the Node class, and assign Meta to options
    def __init__(self, node, context):
        """
        Constructor of the Cache class:
        * preparing fields to be used later,
        * prepare the templatetag parameters
        * create the cache key
        """
        super(CacheTag, self).__init__()
        # the actual Node object
        self.node = node
        # the context used for the rendering
        self.context = context
        # indicate that we force regenerating the cache, even if it exists
        self.regenerate = bool(self.context.get('__regenerate__', False))
        # indicate if we only want html without parsing the nocache parts
        self.partial = bool(self.context.get('__partial__', False))
        # the content of the template, will be used through the whole process
        self.content = ''
        # the version used in the cached templatetag
        self.content_version = None
        # Final "INTERNAL_VERSION": combine the class version with the settings
        # version if present (always converted to bytes for cache storage)
        if self.options.internal_version:
            self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
                                                self.options.internal_version)
        else:
            self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
        self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
        # prepare all parameters passed to the templatetag
        self.expire_time = None
        self.version = None
        self.prepare_params()
        # get the cache and cache key
        self.cache = self.get_cache_object()
        self.cache_key = self.get_cache_key()

    def prepare_params(self):
        """
        Prepare the parameters passed to the templatetag: fragment name
        (resolved or literal, with surrounding quotes stripped), expire time,
        optional version, and the resolved `vary_on` variables.
        """
        if self.options.resolve_fragment:
            self.fragment_name = self.node.fragment_name.resolve(self.context)
        else:
            self.fragment_name = str(self.node.fragment_name)
            # Remove quotes that surround the name
            for char in '\'\"':
                if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
                    if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
                        self.fragment_name = self.fragment_name[1:-1]
                        break
                    else:
                        # A quote on only one side is malformed input
                        raise ValueError('Number of quotes around the fragment name is incoherent')
        self.expire_time = self.get_expire_time()
        if self.options.versioning:
            self.version = force_bytes(self.get_version())
        self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]

    def get_expire_time(self):
        """
        Return the expire time passed to the templatetag.
        Must be None or an integer.
        """
        try:
            expire_time = self.node.expire_time.resolve(self.context)
        except template.VariableDoesNotExist:
            raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
                                               (self.node.nodename, self.node.expire_time.var))
        try:
            if expire_time is not None:
                expire_time = str(expire_time)
                # isdigit() rejects negatives and floats, not only non-numbers
                if not expire_time.isdigit():
                    raise TypeError
                expire_time = int(expire_time)
        except (ValueError, TypeError):
            raise template.TemplateSyntaxError(
                '"%s" tag got a non-integer (or None) timeout value: %r' % (
                    self.node.nodename, expire_time
                )
            )
        return expire_time

    def get_version(self):
        """
        Return the stringified version passed to the templatetag,
        or None if the templatetag got no version argument.
        """
        if not self.node.version:
            return None
        try:
            version = smart_str('%s' % self.node.version.resolve(self.context))
        except template.VariableDoesNotExist:
            raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
                                               (self.node.nodename, self.node.version.var))
        return '%s' % version

    def hash_args(self):
        """
        Take all the arguments passed after the fragment name and return a
        hashed version which will be used in the cache key
        """
        return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()

    def get_pk(self):
        """
        Return the pk to use in the cache key. It's the first value of the
        templatetag arguments after the fragment name
        """
        return self.vary_on[0]

    def get_base_cache_key(self):
        """
        Return a string with format placeholders used as a source to compute the
        final cache key.
        Placeholders are :
        * %(nodename)s : the name of the templatetag
        * %(name)s : the fragment name passed to the templatetag
        * %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
        * %(hash)s : the return of the `hash_args` method
        """
        if self.options.include_pk:
            return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
        else:
            return 'template.%(nodename)s.%(name)s.%(hash)s'

    def get_cache_key_args(self):
        """
        Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
        """
        cache_key_args = dict(
            nodename=self.node.nodename,
            name=self.fragment_name,
            hash=self.hash_args(),
        )
        if self.options.include_pk:
            cache_key_args['pk'] = self.get_pk()
        return cache_key_args

    def get_cache_key(self):
        """
        Compute and return the final cache key, using return values of
        `get_base_cache_key` and `get_cache_key_args`.
        """
        return self.get_base_cache_key() % self.get_cache_key_args()

    def get_cache_object(self):
        """
        Return the cache object to be used to set and get the values in cache.
        By default it's the default cache defined by django, but it can be
        every object with a `get` and a `set` method (or not, if `cache_get`
        and `cache_set` methods are overridden)
        """
        return get_cache(self.node.cache_backend or self.options.cache_backend)

    def cache_get(self):
        """
        Get content from the cache
        """
        return self.cache.get(self.cache_key)

    def cache_set(self, to_cache):
        """
        Set content into the cache
        """
        self.cache.set(self.cache_key, to_cache, self.expire_time)

    def join_content_version(self, to_cache):
        """
        Add the version(s) to the content to cache : internal version at first
        and then the template version if versioning is activated.
        Each version, and the content, are separated with `VERSION_SEPARATOR`.
        This method is called after the encoding (if "compress" or
        "compress_spaces" options are on)
        """
        parts = [self.INTERNAL_VERSION]
        if self.options.versioning:
            parts.append(force_bytes(self.version))
        parts.append(force_bytes(to_cache))
        return self.VERSION_SEPARATOR.join(parts)

    def split_content_version(self):
        """
        Remove and return the version(s) from the cached content. First the
        internal version, and if versioning is activated, the template one.
        And finally save the content, but only if all versions match.
        The content saved is the encoded one (if "compress" or
        "compress_spaces" options are on). By doing so, we avoid decoding if
        the versions didn't match, to save some cpu cycles.
        """
        try:
            nb_parts = 2
            if self.options.versioning:
                nb_parts = 3
            # split at most nb_parts-1 times so the content itself may
            # contain VERSION_SEPARATOR without being truncated
            parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
            assert len(parts) == nb_parts
            self.content_internal_version = parts[0]
            if self.options.versioning:
                self.content_version = parts[1]
            self.content = parts[-1]
        except Exception:
            # any malformed cached payload invalidates the content
            self.content = None

    def decode_content(self):
        """
        Decode (decompress...) the content got from the cache, to the final
        html
        """
        self.content = pickle.loads(zlib.decompress(self.content))

    def encode_content(self):
        """
        Encode (compress...) the html to the data to be cached
        """
        return zlib.compress(pickle.dumps(self.content))

    def render_node(self):
        """
        Render the template and save the generated content
        """
        self.content = self.node.nodelist.render(self.context)

    def create_content(self):
        """
        Render the template, apply options on it, and save it to the cache.
        """
        self.render_node()
        if self.options.compress_spaces:
            self.content = self.RE_SPACELESS.sub(' ', self.content)
        if self.options.compress:
            to_cache = self.encode_content()
        else:
            to_cache = self.content
        to_cache = self.join_content_version(to_cache)
        try:
            self.cache_set(to_cache)
        except Exception:
            if is_template_debug_activated():
                raise
            logger.exception('Error when saving the cached template fragment')

    def load_content(self):
        """
        It's the main method of the class.
        Try to load the template from cache, get the versions and decode the
        content.
        If something was wrong during this process (or if we had a
        `__regenerate__` value to True in the context), create new content and
        save it in cache.
        """
        self.content = None
        if not self.regenerate:
            try:
                self.content = self.cache_get()
            except Exception:
                if is_template_debug_activated():
                    raise
                logger.exception('Error when getting the cached template fragment')
        try:
            # asserts act as "bail out to regeneration" triggers via the
            # surrounding except clause
            assert self.content
            self.split_content_version()
            assert self.content
            if self.content_internal_version != self.INTERNAL_VERSION or (
                    self.options.versioning and self.content_version != self.version):
                self.content = None
            assert self.content
            if self.options.compress:
                self.decode_content()
        except Exception:
            self.create_content()
        self.content = smart_str(self.content)

    def render(self):
        """
        Try to load content (from cache or by rendering the template).
        If it fails, return an empty string or raise the exception if it's a
        TemplateSyntaxError.
        With this, we can now parse and render the content included in the
        {% nocache %} blocks, but only if we have this tag and if we don't
        have `__partial__` to True in the context (in this case we simply
        return the html with the {% nocache %} block not parsed).
        """
        try:
            self.load_content()
        except template.TemplateSyntaxError:
            raise
        except Exception:
            if is_template_debug_activated():
                raise
            logger.exception('Error when rendering template fragment')
            return ''
        if self.partial or self.RAW_TOKEN_START not in self.content:
            return self.content
        # render_nocache is defined on this class (not visible in this excerpt)
        return self.render_nocache()

    @staticmethod
    def get_all_tags_and_filters_by_function():
        """
        Return a dict with all the template tags (in the `tags` entry) and filters (in the
        `filters` entry) that are available.
        Both entries are a dict with the function as key, and a tuple with (library name, function
        name) as value.
        This is cached after the first call.
        """
        libraries = get_template_libraries()
        force = False
        # We'll force the update of the cache if new libraries were added
        if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
            if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
                force = True
        if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
            CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
            available_tags = {}
            available_filters = {}
            for lib_name, lib in libraries.items():
                available_tags.update(
                    (function, (lib_name, tag_name))
                    for tag_name, function
                    in lib.tags.items()
                )
                available_filters.update(
                    (function, (lib_name, filter_name))
                    for filter_name, function
                    in lib.filters.items()
                )
            # cache stored as a function attribute (shared by all subclasses)
            CacheTag.get_all_tags_and_filters_by_function._cache = {
                'tags': available_tags,
                'filters': available_filters
            }
        return CacheTag.get_all_tags_and_filters_by_function._cache

    @classmethod
    def get_templatetag_module(cls):
        """
        Return the templatetags module name for which the current class is used.
        It's used to render the nocache blocks by loading the correct module
        """
        if cls not in CacheTag._templatetags_modules:
            # find the library including the main templatetag of the current class
            all_tags = cls.get_all_tags_and_filters_by_function()['tags']
            CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
        return CacheTag._templatetags_modules[cls]

    @classmethod
    def get_template_node_arguments(cls, tokens):
        """
        Return the arguments taken from the templatetag that will be used by the
        Node class.
        Take a list of all tokens and return a list of real tokens. Here
        should be done some validations (number of tokens...) and eventually
        some parsing...
        """
        if len(tokens) < 3:
            raise template.TemplateSyntaxError(
                "'%r' tag requires at least 2 arguments." % tokens[0])
        # (expire_time, fragment_name, vary_on-args)
        return tokens[1], tokens[2], tokens[3:]

    @classmethod
    def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
        """
        Register all needed templatetags, with these parameters :
        * library_register : the `register` object (result of
          `template.Library()`) in your templatetag module
        * nodename : the node to use for the cache templatetag (the default
          is "cache")
        * nocache_nodename : the node to use for the nocache templatetag
        """
        if cls in CacheTag._templatetags:
            raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
        CacheTag._templatetags[cls] = {}

        def templatetag_cache(parser, token):
            """
            Return a new Node object for the main cache templatetag
            """
            nodelist = parser.parse(('end%s' % nodename,))
            parser.delete_first_token()
            args = cls.get_template_node_arguments(token.contents.split())
            return cls.Node(nodename, nodelist, *args)

        library_register.tag(nodename, templatetag_cache)
        CacheTag._templatetags[cls]['cache'] = templatetag_cache

        def templatetag_raw(parser, token):
            """
            Return a TextNode with all html not parsed, used for templatetags
            that need to not be parsed : the `nocache` one and the `RAW` one,
            used to surround cached html (to be not parsed again)
            Based on http://www.holovaty.com/writing/django-two-phased-rendering/
            """
            # Whatever is between {% nocache %} and {% endnocache %} will be preserved as
            # raw, un-rendered template code.
            text = []
            parse_until = 'end%s' % token.contents
            tag_mapping = {
                TOKEN_TEXT: ('', ''),
                TOKEN_VAR: ('{{', '}}'),
                TOKEN_BLOCK: ('{%', '%}'),
                TOKEN_COMMENT: ('{#', '#}'),
            }
            # By the time this template tag is called, the template system has already
            # lexed the template into tokens. Here, we loop over the tokens until
            # {% endraw %} and parse them to TextNodes. We have to add the start and
            # end bits (e.g. "{{" for variables) because those have already been
            # stripped off in a previous part of the template-parsing process.
            while parser.tokens:
                token = parser.next_token()
                if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
                    return template.TextNode(''.join(text))
                start, end = tag_mapping[token.token_type]
                text.append('%s%s%s' % (start, token.contents, end))
            parser.unclosed_block_tag(parse_until)

        library_register.tag(cls.RAW_TOKEN, templatetag_raw)
        CacheTag._templatetags[cls]['raw'] = templatetag_raw

        def templatetag_nocache(parser, token):
            """
            Return a TextNode with raw html from the `nocache` templatetag,
            and surround it with `endRAW` and `RAW` (precisely
            `cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
            So for
                {% nocache %}foo{% endnocache %}
            we get
                {% endRAW... %}foo{% RAW... %}
            When the main cache templatetag content will be loaded from cache,
            it will be surrounded by the same templatetags, reversed.
            So if at first we had
                {% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
            The cached version will be
                bar{% endRAW... %}foo{% RAW... %}baz
            And the final html to be rendered will be
                {% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
            And the html within `RAW` and `endRAW` will not be parsed, as wanted
            """
            # We'll load in the no-cache part all template tags and filters loaded in the main
            # template, to be able to use it when the no-cache will be rendered
            all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
            available_tags = all_tags_and_filters['tags']
            available_filters = all_tags_and_filters['filters']
            needed = {}
            current_module = cls.get_templatetag_module()
            for function in parser.tags.values():
                if function in available_tags:
                    lib, name = available_tags[function]
                    if lib == current_module:
                        continue
                    needed.setdefault(lib, set()).add(name)
            for function in parser.filters.values():
                if function in available_filters:
                    lib, name = available_filters[function]
                    if lib == current_module:
                        continue
                    needed.setdefault(lib, set()).add(name)
            load_string = ''.join(
                '%sload %s from %s%s' % (
                    template.BLOCK_TAG_START,
                    ' '.join(names),
                    lib,
                    template.BLOCK_TAG_END,
                )
                for lib, names in needed.items()
            )
            node = templatetag_raw(parser, token)
            node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
            return node

        library_register.tag(nocache_nodename, templatetag_nocache)
        # NOTE(review): stored under the plain string key 'nocache' while the
        # 'cache' and 'raw' entries above go under _templatetags[cls] — this
        # looks inconsistent (shared across subclasses); confirm it is intended.
        CacheTag._templatetags['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.get_template_node_arguments
|
python
|
def get_template_node_arguments(cls, tokens):
if len(tokens) < 3:
raise template.TemplateSyntaxError(
"'%r' tag requires at least 2 arguments." % tokens[0])
return tokens[1], tokens[2], tokens[3:]
|
Return the arguments taken from the templatetag that will be used to the
Node class.
Take a list of all tokens and return a list of real tokens. Here
should be done some validations (number of tokens...) and eventually
some parsing...
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L591-L602
| null |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
"""
Constructor of the Cache class:
* preparing fields to be used later,
* prepare the templatetag parameters
* create the cache key
"""
super(CacheTag, self).__init__()
# the actual Node object
self.node = node
# the context used for the rendering
self.context = context
# indicate that we force regenerating the cache, even if it exists
self.regenerate = bool(self.context.get('__regenerate__', False))
# indicate if we only want html without parsing the nocache parts
self.partial = bool(self.context.get('__partial__', False))
# the content of the template, will be used through the whole process
self.content = ''
# the version used in the cached templatetag
self.content_version = None
# Final "INTERNAL_VERSION"
if self.options.internal_version:
self.INTERNAL_VERSION = b'%s|%s' % (self.__class__.INTERNAL_VERSION,
self.options.internal_version)
else:
self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)
# prepare all parameters passed to the templatetag
self.expire_time = None
self.version = None
self.prepare_params()
# get the cache and cache key
self.cache = self.get_cache_object()
self.cache_key = self.get_cache_key()
def prepare_params(self):
"""
Prepare the parameters passed to the templatetag
"""
if self.options.resolve_fragment:
self.fragment_name = self.node.fragment_name.resolve(self.context)
else:
self.fragment_name = str(self.node.fragment_name)
# Remove quotes that surround the name
for char in '\'\"':
if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
self.fragment_name = self.fragment_name[1:-1]
break
else:
raise ValueError('Number of quotes around the fragment name is incoherent')
self.expire_time = self.get_expire_time()
if self.options.versioning:
self.version = force_bytes(self.get_version())
self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
"""
Return the expire time passed to the templatetag.
Must be None or an integer.
"""
try:
expire_time = self.node.expire_time.resolve(self.context)
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.expire_time.var))
try:
if expire_time is not None:
expire_time = str(expire_time)
if not expire_time.isdigit():
raise TypeError
expire_time = int(expire_time)
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
'"%s" tag got a non-integer (or None) timeout value: %r' % (
self.node.nodename, expire_time
)
)
return expire_time
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
"""
Take all the arguments passed after the fragment name and return a
hashed version which will be used in the cache key
"""
return hashlib.md5(force_bytes(':'.join([urlquote(force_bytes(var)) for var in self.vary_on]))).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
def get_cache_key(self):
"""
Compute and return the final cache key, using return values of
`get_base_cache_key` and `get_cache_key_args`.
"""
return self.get_base_cache_key() % self.get_cache_key_args()
def get_cache_object(self):
"""
Return the cache object to be used to set and get the values in cache.
By default it's the default cache defined by django, but it can be
every object with a `get` and a `set` method (or not, if `cache_get`
and `cache_set` methods are overridden)
"""
return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
"""
Get content from the cache
"""
return self.cache.get(self.cache_key)
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
"""
Add the version(s) to the content to cache : internal version at first
and then the template version if versioning is activated.
Each version, and the content, are separated with `VERSION_SEPARATOR`.
This method is called after the encoding (if "compress" or
"compress_spaces" options are on)
"""
parts = [self.INTERNAL_VERSION]
if self.options.versioning:
parts.append(force_bytes(self.version))
parts.append(force_bytes(to_cache))
return self.VERSION_SEPARATOR.join(parts)
def split_content_version(self):
"""
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
"""
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content))
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def render_node(self):
"""
Render the template and save the generated content
"""
self.content = self.node.nodelist.render(self.context)
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
def load_content(self):
    """
    It's the main method of the class.
    Try to load the template from cache, get the versions and decode the
    content.
    If something was wrong during this process (or if we had a
    `__regenerate__` value to True in the context), create new content and
    save it in cache.
    """
    self.content = None
    if not self.regenerate:
        try:
            self.content = self.cache_get()
        except Exception:
            # backend errors are non-fatal unless template debugging is on
            if is_template_debug_activated():
                raise
            logger.exception('Error when getting the cached template fragment')
    try:
        # The asserts funnel every "cache miss / stale entry" case into
        # the except branch below, which regenerates the fragment.
        assert self.content
        self.split_content_version()
        assert self.content
        # a version mismatch (internal or template) invalidates the entry
        if self.content_internal_version != self.INTERNAL_VERSION or (
                self.options.versioning and self.content_version != self.version):
            self.content = None
        assert self.content
        if self.options.compress:
            self.decode_content()
    except Exception:
        self.create_content()
    self.content = smart_str(self.content)
def render(self):
    """
    Try to load content (from cache or by rendering the template).
    If it fails, return an empty string, or raise the exception if it's a
    TemplateSyntaxError.
    With this, we can now parse and render the content included in the
    {% nocache %} blocks, but only if we have this tag and if we don't
    have `__partial__` set to True in the context (in that case we simply
    return the html with the {% nocache %} blocks not parsed).
    """
    try:
        self.load_content()
    except template.TemplateSyntaxError:
        # template syntax errors always surface: they are developer mistakes
        raise
    except Exception:
        if is_template_debug_activated():
            raise
        logger.exception('Error when rendering template fragment')
        return ''
    if self.partial or self.RAW_TOKEN_START not in self.content:
        # nothing to post-process: partial rendering asked, or no nocache block
        return self.content
    return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
    """
    Return a dict with all the template tags (in the `tags` entry) and filters (in the
    `filters` entry) that are available.
    Both entries are a dict with the function as key, and a tuple with (library name, function
    name) as value.
    This is cached after the first call.
    """
    libraries = get_template_libraries()
    force = False
    # We'll force the update of the cache if new libraries were added
    # (the cache is stored as attributes on this very function)
    if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
        if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
            force = True
    if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
        CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
        available_tags = {}
        available_filters = {}
        for lib_name, lib in libraries.items():
            # invert the library mapping: function -> (library name, tag name)
            available_tags.update(
                (function, (lib_name, tag_name))
                for tag_name, function
                in lib.tags.items()
            )
            available_filters.update(
                (function, (lib_name, filter_name))
                for filter_name, function
                in lib.filters.items()
            )
        CacheTag.get_all_tags_and_filters_by_function._cache = {
            'tags': available_tags,
            'filters': available_filters
        }
    return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
    """
    Return the templatetags module name for which the current class is used
    (cached per class). Used to render the nocache blocks by loading the
    correct module.
    """
    try:
        return CacheTag._templatetags_modules[cls]
    except KeyError:
        # resolve the library holding the main cache templatetag of this class
        tags_by_function = cls.get_all_tags_and_filters_by_function()['tags']
        cache_function = CacheTag._templatetags[cls]['cache']
        module_name = tags_by_function[cache_function][0]
        CacheTag._templatetags_modules[cls] = module_name
        return module_name
def render_nocache(self):
    """
    Render the `nocache` blocks of the cached content and return the whole
    html.
    """
    # load the cache library first, then protect the already-rendered parts
    # of the cached html behind "raw" tags
    load_tag = '%sload %s%s' % (
        template.BLOCK_TAG_START,
        self.get_templatetag_module(),
        template.BLOCK_TAG_END,
    )
    wrapped = load_tag + self.RAW_TOKEN_START + self.content + self.RAW_TOKEN_END
    return template.Template(wrapped).render(self.context)
@classmethod
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
    """
    Register all needed templatetags, with these parameters :

        * library_register : the `register` object (result of
          `template.Library()`) in your templatetag module
        * nodename : the node to use for the cache templatetag (the default
          is "cache")
        * nocache_nodename : the node to use for the nocache templatetag

    Raises RuntimeError if this class was already registered.
    """
    # BUG FIX: the decorator was duplicated (`@classmethod` twice), which
    # wraps a classmethod in a classmethod and breaks the descriptor.
    if cls in CacheTag._templatetags:
        raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
    CacheTag._templatetags[cls] = {}

    def templatetag_cache(parser, token):
        """Return a new Node object for the main cache templatetag."""
        nodelist = parser.parse(('end%s' % nodename,))
        parser.delete_first_token()
        args = cls.get_template_node_arguments(token.contents.split())
        return cls.Node(nodename, nodelist, *args)

    library_register.tag(nodename, templatetag_cache)
    CacheTag._templatetags[cls]['cache'] = templatetag_cache

    def templatetag_raw(parser, token):
        """
        Return a TextNode with all html not parsed, used for templatetags
        that need to not be parsed : the `nocache` one and the `RAW` one,
        used to surround cached html (to be not parsed again)
        Based on http://www.holovaty.com/writing/django-two-phased-rendering/
        """
        # Whatever is between {% nocache %} and {% endnocache %} will be
        # preserved as raw, un-rendered template code.
        text = []
        parse_until = 'end%s' % token.contents
        tag_mapping = {
            TOKEN_TEXT: ('', ''),
            TOKEN_VAR: ('{{', '}}'),
            TOKEN_BLOCK: ('{%', '%}'),
            TOKEN_COMMENT: ('{#', '#}'),
        }
        # By the time this template tag is called, the template system has
        # already lexed the template into tokens. Loop over the tokens until
        # the matching end tag and re-serialize them to text, re-adding the
        # start/end bits (e.g. "{{" for variables) stripped by the lexer.
        while parser.tokens:
            token = parser.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
                return template.TextNode(''.join(text))
            start, end = tag_mapping[token.token_type]
            text.append('%s%s%s' % (start, token.contents, end))
        parser.unclosed_block_tag(parse_until)

    library_register.tag(cls.RAW_TOKEN, templatetag_raw)
    CacheTag._templatetags[cls]['raw'] = templatetag_raw

    def templatetag_nocache(parser, token):
        """
        Return a TextNode with raw html from the `nocache` templatetag,
        and surround it with `endRAW` and `RAW` (precisely
        `cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
        So for
            {% nocache %}foo{% endnocache %}
        we get
            {% endRAW... %}foo{% RAW... %}
        When the main cache templatetag content will be loaded from cache,
        it will be surrounded by the same templatetags, reversed.
        So if at first we had
            {% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
        The cached version will be
            bar{% endRAW... %}foo{% RAW... %}baz
        And the final html to be rendered will be
            {% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
        And the html within `RAW` and `endRAW` will not be parsed, as wanted
        """
        # We'll load in the no-cache part all template tags and filters loaded
        # in the main template, to be able to use them when the no-cache part
        # is rendered later.
        all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
        available_tags = all_tags_and_filters['tags']
        available_filters = all_tags_and_filters['filters']
        needed = {}
        current_module = cls.get_templatetag_module()
        for function in parser.tags.values():
            if function in available_tags:
                lib, name = available_tags[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        for function in parser.filters.values():
            if function in available_filters:
                lib, name = available_filters[function]
                if lib == current_module:
                    continue
                needed.setdefault(lib, set()).add(name)
        load_string = ''.join(
            '%sload %s from %s%s' % (
                template.BLOCK_TAG_START,
                ' '.join(names),
                lib,
                template.BLOCK_TAG_END,
            )
            for lib, names in needed.items()
        )
        node = templatetag_raw(parser, token)
        node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
        return node

    library_register.tag(nocache_nodename, templatetag_nocache)
    # BUG FIX: was `CacheTag._templatetags['nocache'] = ...`, which keyed the
    # top-level per-class registry by the string 'nocache' instead of storing
    # the function under this class's entry like 'cache' and 'raw' above.
    CacheTag._templatetags[cls]['nocache'] = templatetag_nocache
|
twidi/django-adv-cache-tag
|
adv_cache_tag/tag.py
|
CacheTag.register
|
python
|
def register(cls, library_register, nodename='cache', nocache_nodename='nocache'):
if cls in CacheTag._templatetags:
raise RuntimeError('The adv-cache-tag class %s is already registered' % cls)
CacheTag._templatetags[cls] = {}
def templatetag_cache(parser, token):
"""
Return a new Node object for the main cache templatetag
"""
nodelist = parser.parse(('end%s' % nodename,))
parser.delete_first_token()
args = cls.get_template_node_arguments(token.contents.split())
return cls.Node(nodename, nodelist, *args)
library_register.tag(nodename, templatetag_cache)
CacheTag._templatetags[cls]['cache'] = templatetag_cache
def templatetag_raw(parser, token):
"""
Return a TextNode with all html not parsed, used for templatetags
that need to not be parsed : the `nocache` one and the `RAW` one,
used to surround cached html (to be not parsed again)
Based on http://www.holovaty.com/writing/django-two-phased-rendering/
"""
# Whatever is between {% nocache %} and {% endnocache %} will be preserved as
# raw, un-rendered template code.
text = []
parse_until = 'end%s' % token.contents
tag_mapping = {
TOKEN_TEXT: ('', ''),
TOKEN_VAR: ('{{', '}}'),
TOKEN_BLOCK: ('{%', '%}'),
TOKEN_COMMENT: ('{#', '#}'),
}
# By the time this template tag is called, the template system has already
# lexed the template into tokens. Here, we loop over the tokens until
# {% endraw %} and parse them to TextNodes. We have to add the start and
# end bits (e.g. "{{" for variables) because those have already been
# stripped off in a previous part of the template-parsing process.
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == parse_until:
return template.TextNode(''.join(text))
start, end = tag_mapping[token.token_type]
text.append('%s%s%s' % (start, token.contents, end))
parser.unclosed_block_tag(parse_until)
library_register.tag(cls.RAW_TOKEN, templatetag_raw)
CacheTag._templatetags[cls]['raw'] = templatetag_raw
def templatetag_nocache(parser, token):
"""
Return a TextNode with raw html from the `nocache` templatetag,
and surround it with `endRAW` and `RAW` (precisely
`cls.RAW_TOKEN_END` and `cls.RAW_TOKEN_START`).
So for
{% nocache %}foo{% endnocache %}
we get
{% endRAW... %}foo{% RAW... %}
When the main cache templatetag content will be loaded from cache,
it will be surrounded by the same templatetags, reversed.
So if at first we had
{% cache %}bar{% nocache %}foo{% endnocache %}baz{% endcache %}
The cached version will be
bar{% endRAW... %}foo{% RAW... %}baz
And the final html to be rendered will be
{% RAW... %}bar{% endRAW... %}foo{% RAW... %}baz{% endRAW... %}
And the html within `RAW` and `endRAW` will not be parsed, as wanted
"""
# We'll load in the no-cache part all template tags and filters loaded in the main
# template, to be able to use it when the no-cache will be rendered
all_tags_and_filters = cls.get_all_tags_and_filters_by_function()
available_tags = all_tags_and_filters['tags']
available_filters = all_tags_and_filters['filters']
needed = {}
current_module = cls.get_templatetag_module()
for function in parser.tags.values():
if function in available_tags:
lib, name = available_tags[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
for function in parser.filters.values():
if function in available_filters:
lib, name = available_filters[function]
if lib == current_module:
continue
needed.setdefault(lib, set()).add(name)
load_string = ''.join(
'%sload %s from %s%s' % (
template.BLOCK_TAG_START,
' '.join(names),
lib,
template.BLOCK_TAG_END,
)
for lib, names in needed.items()
)
node = templatetag_raw(parser, token)
node.s = cls.RAW_TOKEN_END + load_string + node.s + cls.RAW_TOKEN_START
return node
library_register.tag(nocache_nodename, templatetag_nocache)
CacheTag._templatetags['nocache'] = templatetag_nocache
|
Register all needed templatetags, with these parameters :
* library_register : the `register` object (result of
`template.Library()`) in your templatetag module
* nodename : the node to use for the cache templatetag (the default
is "cache")
* nocache_nodename : the node to use for the nocache templatetag
|
train
|
https://github.com/twidi/django-adv-cache-tag/blob/811f8db4dac73667c7d2fe0ea97a24969593eb8a/adv_cache_tag/tag.py#L605-L726
| null |
class CacheTag(object, metaclass=CacheTagMetaClass):
"""
The main class of `django-adv-cache-tag` which does all the work.
To change its behaviour, simply change one or more of these settings
(see in the `Meta` class for details) :
* ADV_CACHE_VERSIONING
* ADV_CACHE_COMPRESS
* ADV_CACHE_COMPRESS_SPACES
* ADV_CACHE_INCLUDE_PK
* ADV_CACHE_BACKEND
* ADV_CACHE_VERSION
* ADV_CACHE_RESOLVE_NAME
Or inherit from this class and don't forget to register your tag :
from adv_cache_tag.tag import CacheTag
register = template.Library()
class MyCacheTag(CacheTag):
# change something
MyCacheTag.register(register, 'my_cache')
By inheriting you can change many things as CacheTag implements a lot of
small methods
"""
# Will change if the algorithm changes
INTERNAL_VERSION = '1'
# Used to separate internal version, template version, and the content
VERSION_SEPARATOR = '::'
# Regex used to reduce spaces/blanks (many spaces into one)
RE_SPACELESS = re.compile(r'\s\s+')
# generate a token for this site, based on the secret_key
RAW_TOKEN = 'RAW_' + hashlib.sha1(
b'RAW_TOKEN_SALT1' + force_bytes(hashlib.sha1(
b'RAW_TOKEN_SALT2' + force_bytes(settings.SECRET_KEY)
).hexdigest())
).hexdigest()
# tokens to use around the already parsed parts of the cached template
RAW_TOKEN_START = template.BLOCK_TAG_START + RAW_TOKEN + template.BLOCK_TAG_END
RAW_TOKEN_END = template.BLOCK_TAG_START + 'end' + RAW_TOKEN + template.BLOCK_TAG_END
# internal use only: keep reference to templatetags functions
_templatetags = {}
# internal use only: name of the templatetags module to load for this class and subclasses
_templatetags_modules = {}
options = None
Node = Node
class Meta:
"""
Options of this class. Accessible via cls.options or self.options.
To force (and/or add) options in your own class, simply redefine a
`Meta` class in your own main cache class with updated/add values
"""
# If versioning is activated (internal versioning is always on)
versioning = getattr(settings, 'ADV_CACHE_VERSIONING', False)
# If the content will be compressed before caching
compress = getattr(settings, 'ADV_CACHE_COMPRESS', False)
# If many spaces/blanks will be converted into one
compress_spaces = getattr(settings, 'ADV_CACHE_COMPRESS_SPACES', False)
# If a "pk" (you can pass what you want) will be added to the cache key
include_pk = getattr(settings, 'ADV_CACHE_INCLUDE_PK', False)
# The cache backend to use (or use the "default" one)
cache_backend = getattr(settings, 'ADV_CACHE_BACKEND', 'default')
# Part of the INTERNAL_VERSION configurable via settings
internal_version = getattr(settings, 'ADV_CACHE_VERSION', '')
# If the fragment name should be resolved or taken as is
resolve_fragment = getattr(settings, 'ADV_CACHE_RESOLVE_NAME', False)
# Use a metaclass to use the right class in the Node class, and assign Meta to options
def __init__(self, node, context):
    """
    Constructor of the Cache class:

        * preparing fields to be used later,
        * prepare the templatetag parameters
        * create the cache key
    """
    super(CacheTag, self).__init__()

    # the actual Node object
    self.node = node
    # the context used for the rendering
    self.context = context

    # indicate that we force regenerating the cache, even if it exists
    self.regenerate = bool(self.context.get('__regenerate__', False))
    # indicate if we only want html without parsing the nocache parts
    self.partial = bool(self.context.get('__partial__', False))

    # the content of the template, will be used through the whole process
    self.content = ''
    # the version used in the cached templatetag
    self.content_version = None

    # Final "INTERNAL_VERSION"
    if self.options.internal_version:
        # BUG FIX: on Python 3, b'%s' interpolation (PEP 461) requires
        # bytes operands; the class attributes are str, so they must be
        # converted with force_bytes first or a TypeError is raised.
        self.INTERNAL_VERSION = b'%s|%s' % (
            force_bytes(self.__class__.INTERNAL_VERSION),
            force_bytes(self.options.internal_version),
        )
    else:
        self.INTERNAL_VERSION = force_bytes(self.__class__.INTERNAL_VERSION)
    self.VERSION_SEPARATOR = force_bytes(self.__class__.VERSION_SEPARATOR)

    # prepare all parameters passed to the templatetag
    self.expire_time = None
    self.version = None
    self.prepare_params()

    # get the cache and cache key
    self.cache = self.get_cache_object()
    self.cache_key = self.get_cache_key()
def prepare_params(self):
    """
    Prepare the parameters passed to the templatetag: fragment name
    (optionally resolved against the context), expire time, template
    version (when versioning is on), and the resolved vary-on variables.
    """
    if self.options.resolve_fragment:
        self.fragment_name = self.node.fragment_name.resolve(self.context)
    else:
        self.fragment_name = str(self.node.fragment_name)

    # Remove quotes that surround the name
    # NOTE(review): a name quoted on only one side is rejected; a name with
    # no surrounding quotes at all passes through unchanged — confirm this
    # matches the upstream indentation (the dump's nesting is ambiguous).
    for char in '\'\"':
        if self.fragment_name.startswith(char) or self.fragment_name.endswith(char):
            if self.fragment_name.startswith(char) and self.fragment_name.endswith(char):
                self.fragment_name = self.fragment_name[1:-1]
                break
            else:
                raise ValueError('Number of quotes around the fragment name is incoherent')

    self.expire_time = self.get_expire_time()

    if self.options.versioning:
        self.version = force_bytes(self.get_version())

    self.vary_on = [template.Variable(var).resolve(self.context) for var in self.node.vary_on]
def get_expire_time(self):
    """
    Return the expire time passed to the templatetag.
    Must be None or an integer.
    """
    try:
        expire_time = self.node.expire_time.resolve(self.context)
    except template.VariableDoesNotExist:
        raise template.TemplateSyntaxError(
            '"%s" tag got an unknown variable: %r'
            % (self.node.nodename, self.node.expire_time.var))
    # None means "cache forever" and is returned untouched
    if expire_time is None:
        return None
    expire_time = str(expire_time)
    if expire_time.isdigit():
        try:
            return int(expire_time)
        except ValueError:
            pass
    raise template.TemplateSyntaxError(
        '"%s" tag got a non-integer (or None) timeout value: %r' % (
            self.node.nodename, expire_time
        )
    )
def get_version(self):
"""
Return the stringified version passed to the templatetag.
"""
if not self.node.version:
return None
try:
version = smart_str('%s' % self.node.version.resolve(self.context))
except template.VariableDoesNotExist:
raise template.TemplateSyntaxError('"%s" tag got an unknown variable: %r' %
(self.node.nodename, self.node.version.var))
return '%s' % version
def hash_args(self):
    """
    Hash all the arguments passed after the fragment name into an md5 hex
    digest, used as part of the cache key.
    """
    quoted_args = (urlquote(force_bytes(var)) for var in self.vary_on)
    joined = ':'.join(quoted_args)
    return hashlib.md5(force_bytes(joined)).hexdigest()
def get_pk(self):
"""
Return the pk to use in the cache key. It's the first version of the
templatetag arguments after the fragment name
"""
return self.vary_on[0]
def get_base_cache_key(self):
"""
Return a string with format placeholder used as a source to compute the
final cache key.
Placeholders are :
* %(nodename)s : the name of the templatetag
* %(name)s : the fragment name passed to the templatetag
* %(pk)s : the return of the `get_pk` method, passed only if `include_pk` is True
* %(hash)s : the return of the `hash_args` method
"""
if self.options.include_pk:
return 'template.%(nodename)s.%(name)s.%(pk)s.%(hash)s'
else:
return 'template.%(nodename)s.%(name)s.%(hash)s'
def get_cache_key_args(self):
"""
Return the arguments to be passed to the base cache key returned by `get_base_cache_key`.
"""
cache_key_args = dict(
nodename=self.node.nodename,
name=self.fragment_name,
hash=self.hash_args(),
)
if self.options.include_pk:
cache_key_args['pk'] = self.get_pk()
return cache_key_args
def get_cache_key(self):
"""
Compute and return the final cache key, using return values of
`get_base_cache_key` and `get_cache_key_args`.
"""
return self.get_base_cache_key() % self.get_cache_key_args()
def get_cache_object(self):
"""
Return the cache object to be used to set and get the values in cache.
By default it's the default cache defined by django, but it can be
every object with a `get` and a `set` method (or not, if `cache_get`
and `cache_set` methods are overridden)
"""
return get_cache(self.node.cache_backend or self.options.cache_backend)
def cache_get(self):
"""
Get content from the cache
"""
return self.cache.get(self.cache_key)
def cache_set(self, to_cache):
"""
Set content into the cache
"""
self.cache.set(self.cache_key, to_cache, self.expire_time)
def join_content_version(self, to_cache):
    """
    Prefix the content to cache with the internal version and, when
    versioning is activated, the template version, all separated by
    `VERSION_SEPARATOR`. Called after encoding (if "compress" or
    "compress_spaces" options are on).
    """
    version_parts = (
        [self.INTERNAL_VERSION, force_bytes(self.version)]
        if self.options.versioning
        else [self.INTERNAL_VERSION]
    )
    return self.VERSION_SEPARATOR.join(version_parts + [force_bytes(to_cache)])
def split_content_version(self):
"""
Remove and return the version(s) from the cached content. First the
internal version, and if versioning is activated, the template one.
And finally save the content, but only if all versions match.
The content saved is the encoded one (if "compress" or
"compress_spaces" options are on). By doing so, we avoid decoding if
the versions didn't match, to save some cpu cycles.
"""
try:
nb_parts = 2
if self.options.versioning:
nb_parts = 3
parts = self.content.split(self.VERSION_SEPARATOR, nb_parts - 1)
assert len(parts) == nb_parts
self.content_internal_version = parts[0]
if self.options.versioning:
self.content_version = parts[1]
self.content = parts[-1]
except Exception:
self.content = None
def decode_content(self):
"""
Decode (decompress...) the content got from the cache, to the final
html
"""
self.content = pickle.loads(zlib.decompress(self.content))
def encode_content(self):
"""
Encode (compress...) the html to the data to be cached
"""
return zlib.compress(pickle.dumps(self.content))
def render_node(self):
"""
Render the template and save the generated content
"""
self.content = self.node.nodelist.render(self.context)
def create_content(self):
"""
Render the template, apply options on it, and save it to the cache.
"""
self.render_node()
if self.options.compress_spaces:
self.content = self.RE_SPACELESS.sub(' ', self.content)
if self.options.compress:
to_cache = self.encode_content()
else:
to_cache = self.content
to_cache = self.join_content_version(to_cache)
try:
self.cache_set(to_cache)
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when saving the cached template fragment')
def load_content(self):
"""
It's the main method of the class.
Try to load the template from cache, get the versions and decode the
content.
If something was wrong during this process (or if we had a
`__regenerate__` value to True in the context), create new content and
save it in cache.
"""
self.content = None
if not self.regenerate:
try:
self.content = self.cache_get()
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when getting the cached template fragment')
try:
assert self.content
self.split_content_version()
assert self.content
if self.content_internal_version != self.INTERNAL_VERSION or (
self.options.versioning and self.content_version != self.version):
self.content = None
assert self.content
if self.options.compress:
self.decode_content()
except Exception:
self.create_content()
self.content = smart_str(self.content)
def render(self):
"""
Try to load content (from cache or by rendering the template).
If it fails, return an empty string or raise the exception if it's a
TemplateSyntaxError.
With this, we can no parse and render the content included in the
{% nocache %} blocks, but only if we have have this tag and if we don't
have `__partial__` to True in the context (in this case we simple
return the html with the {% nocache %} block not parsed.
"""
try:
self.load_content()
except template.TemplateSyntaxError:
raise
except Exception:
if is_template_debug_activated():
raise
logger.exception('Error when rendering template fragment')
return ''
if self.partial or self.RAW_TOKEN_START not in self.content:
return self.content
return self.render_nocache()
@staticmethod
def get_all_tags_and_filters_by_function():
"""
Return a dict with all the template tags (in the `tags` entry) and filters (in the
`filters` entry) that are available.
Both entries are a dict with the function as key, and a tuple with (library name, function
name) as value.
This is cached after the first call.
"""
libraries = get_template_libraries()
force = False
# We'll force the update of the cache if new libraries where added
if hasattr(CacheTag.get_all_tags_and_filters_by_function, '_len_libraries'):
if len(libraries) != CacheTag.get_all_tags_and_filters_by_function._len_libraries:
force = True
if force or not hasattr(CacheTag.get_all_tags_and_filters_by_function, '_cache'):
CacheTag.get_all_tags_and_filters_by_function._len_libraries = len(libraries)
available_tags = {}
available_filters = {}
for lib_name, lib in libraries.items():
available_tags.update(
(function, (lib_name, tag_name))
for tag_name, function
in lib.tags.items()
)
available_filters.update(
(function, (lib_name, filter_name))
for filter_name, function
in lib.filters.items()
)
CacheTag.get_all_tags_and_filters_by_function._cache = {
'tags': available_tags,
'filters': available_filters
}
return CacheTag.get_all_tags_and_filters_by_function._cache
@classmethod
def get_templatetag_module(cls):
"""
Return the templatetags module name for which the current class is used.
It's used to render the nocache blocks by loading the correct module
"""
if cls not in CacheTag._templatetags_modules:
# find the library including the main templatetag of the current class
all_tags = cls.get_all_tags_and_filters_by_function()['tags']
CacheTag._templatetags_modules[cls] = all_tags[CacheTag._templatetags[cls]['cache']][0]
return CacheTag._templatetags_modules[cls]
def render_nocache(self):
"""
Render the `nocache` blocks of the content and return the whole
html
"""
tmpl = template.Template(''.join([
# start by loading the cache library
template.BLOCK_TAG_START,
'load %s' % self.get_templatetag_module(),
template.BLOCK_TAG_END,
# and surround the cached template by "raw" tags
self.RAW_TOKEN_START,
self.content,
self.RAW_TOKEN_END,
]))
return tmpl.render(self.context)
@classmethod
def get_template_node_arguments(cls, tokens):
    """
    Return the arguments taken from the templatetag that will be used by the
    Node class: the first two tokens after the tag name, plus the remaining
    ones as a list. Token-count validation happens here.
    """
    if len(tokens) < 3:
        raise template.TemplateSyntaxError(
            "'%r' tag requires at least 2 arguments." % tokens[0])
    first, second, *rest = tokens[1:]
    return first, second, rest
@classmethod
|
pyfca/pyfca
|
pyfca/implications.py
|
L
|
python
|
def L(g,i):
g1 = g&(2**i)
if i:
n = Lwidth(i)
Ln = L(g,i-1)
if g1:
return Ln<<(2*n) | Ln<<n | Ln
else:
return int('1'*n,2)<<(2*n) | Ln<<n | Ln
else:
if g1:
return int('000',2)
else:
return int('100',2)
|
recursively constructs L line for g; i = len(g)-1
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L33-L47
|
[
"Lwidth = Hwidth = lambda n: 3**n\n",
"def L(g,i):\n \"\"\"recursively constructs L line for g; i = len(g)-1\"\"\"\n g1 = g&(2**i)\n if i:\n n = Lwidth(i)\n Ln = L(g,i-1)\n if g1:\n return Ln<<(2*n) | Ln<<n | Ln\n else:\n return int('1'*n,2)<<(2*n) | Ln<<n | Ln\n else:\n if g1:\n return int('000',2)\n else:\n return int('100',2)\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i, b, w, c="0123456789abcdefghijklmnopqrstuvwxyz"):
    """Base-*b* digits of *i*, padded with '0' to at least width *w*."""
    if w <= 0 and i == 0:
        return " "
    return istr(i // b, b, w - 1, c).lstrip() + c[i % b]


def digitat(i, a, b):
    """Digit of *i* at position *a* (from the right) in base *b*."""
    return int(istr(i, b, a + 1)[-a], b)


def digitat2(i, a):
    """Bit of *i* at position *a*."""
    return (i >> a) & 1


# concatenate contexts ...
def horizontally(K1, K2, b, w1, w2):
    """Place two base-*b* contexts side by side (digit widths w1 and w2)."""
    return [int(istr(k1, b, w1) + istr(k2, b, w2), b) for k1, k2 in zip(K1, K2)]


def horizontally2(K1, K2, w1, w2):
    """Binary variant of `horizontally`: shift K1 left by w2 bits, OR in K2."""
    return [(k1 << w2) | k2 for k1, k2 in zip(K1, K2)]


def vertically(K1, K2):
    """Stack two contexts: plain list concatenation."""
    return K1 + K2


vertically2 = vertically
def Hwidth(n):
    """Width of an H (or L) line over n attributes: 3**n positions."""
    return 3 ** n


Lwidth = Hwidth


def H(g, i):
    """recursively constructs H line for g; i = len(g)-1"""
    has_bit = g & (2 ** i)
    if not i:
        # base case: a single attribute
        return 0b111 if has_bit else 0b101
    n = Hwidth(i)
    prev = H(g, i - 1)
    if has_bit:
        return prev << (2 * n) | prev << n | prev
    # L() is defined elsewhere in this module
    return int('1' * n, 2) << (2 * n) | L(g, i - 1) << n | prev
def UV_H(Hg,gw):
    """
    Constructs implications and intents based on H

    gw = g width
    Hg = H(g), g is the binary coding of the attribute set

    UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
    K = all closed sets
    """
    lefts = set()  # left sides (V digits zeroed) already emitted, for dedup
    K = []         # closed sets: ternary codes over digits {0, 2} only
    UV = []        # implications: ternary codes containing at least one 1
    p = Hwidth(gw)
    pp = 2**p
    # scan the bits of Hg from the most significant position down to 0
    while p:
        pp = pp>>1
        p = p-1
        if Hg&pp:
            y = istr(p,3,gw)          # ternary code of this bit position
            yy = y.replace('1','0')   # keep only the left side of U->V
            if yy not in lefts:
                if y.find('1') == -1:#y∈{0,2}^n
                    K.append(y)
                else:
                    UV.append(y)
                # NOTE(review): assumed to run for both branches so each left
                # side is emitted once — confirm against upstream indentation
                lefts.add(yy)
    return (UV,K)
def Awidth(n):
    """Width of an A line over n attributes: 2**n positions."""
    return 2 ** n


def A(g, i):
    """recursively constructs A line for g; i = len(g)-1"""
    has_bit = g & (2 ** i)
    if not i:
        # base case: a single attribute
        return 0b00 if has_bit else 0b10
    n = Awidth(i)
    prev = A(g, i - 1)
    if has_bit:
        return prev << n | prev
    return int('1' * n, 2) << n | prev
def Bwidth(n):
    """Width of a B line over n attributes: n * 2**(n-1) positions."""
    return n * 2 ** (n - 1)


def B(g, i):
    """recursively constructs B line for g; i = len(g)-1"""
    has_bit = g & (2 ** i)
    if not i:
        # base case: a single attribute
        return 1 if has_bit else 0
    nA = Awidth(i)
    nB = Bwidth(i)
    prev = B(g, i - 1)
    if has_bit:
        return prev << (nA + nB) | int('1' * nA, 2) << nB | prev
    # A() and Awidth() are defined above in this module
    return int('1' * nB, 2) << (nA + nB) | A(g, i - 1) << nB | prev
def A012(t, i):
    """Ternary coding (digits 0/2) of column *t* of the A line; i counts down to -1."""
    if i < 0:
        return ""
    half = Awidth(i)
    if t < half:
        return "0" + A012(t, i - 1)
    return "2" + A012(t - half, i - 1)
def B012(t, i):
    """
    Constructs ternary implication coding (0=not there, 2=U, 1=V)
    t is B column position
    i = |M|-1 to 0
    """
    if not i:
        return "1"
    nA = Awidth(i)
    nB = Bwidth(i)
    boundary = nB + nA
    if t < nB:
        return "0" + B012(t, i - 1)
    if t < boundary:
        return "1" + A012(t - nB, i - 1)
    return "2" + B012(t - boundary, i - 1)
def UV_B(Bg, gw):
    """
    returns the implications UV based on B
    Bg = B(g), g∈2^M
    gw = |M|, M is the set of all attributes
    """
    # decode every set bit of Bg, from the most significant position down
    UV = []
    for pos in range(Bwidth(gw) - 1, -1, -1):
        if (Bg >> pos) & 1:
            UV.append(B012(pos, gw - 1))
    return UV
def omega(imps):
    r"""
    Calculate a measure for the size of the implication basis: \sum |U||V|.

    Accepts a v_Us_dict, a list of any accepted type, a "U->V" string, or an
    int bit field (counts the set bits). Returns None for other types
    (unchanged legacy behavior).
    """
    if isinstance(imps, v_Us_dict):
        # each entry is a single-conclusion implication, so |V| = 1
        return sum(omega(V) for U, V in imps.items())
    if isinstance(imps, list):
        return sum(omega(x) for x in imps)
    if isinstance(imps, str):
        # BUG FIX: was a bare `except:`; only a malformed "U->V" string
        # (ValueError from unpacking) should fall back to 0.
        try:
            U, V = imps.split("->")
        except ValueError:
            return 0
        Us = U.split(",") if "," in U else U.split()
        Vs = V.split(",") if "," in V else V.split()
        return len(Us) * len(Vs)
    if isinstance(imps, int):
        # popcount of the bit field
        return bin(imps).count('1')
class v_Us_dict(defaultdict):
"""
In an implication U→u, u is the significant component.
U is coded as int.
u is the bit column of the implication's conclusion.
{u:[U1,U2,...]}
"""
def __init__(self,Bg,gw):
    """
    returns the implications {v:Us} based on B
    v is the significant component
    Bg = B(g), g∈2^M
    gw = |M|, M is the set of all attributes

    Bg may also be a list of (v, Us) pairs or a mapping, which are copied
    as-is into the defaultdict.
    """
    self.width = gw
    if isinstance(Bg,int):
        # Bg is a B line: decode every set bit into an implication U->v
        defaultdict.__init__(self,list)
        p = Bwidth(gw)
        pp = 2**p
        # scan bits from the most significant position down to 0
        while p:
            pp = pp>>1
            p = p-1
            if Bg&pp:
                uv = B012(p,gw-1)
                #let's find minima regarding product order
                #{v:[Umin1,Umin2,...]}
                v = uv.find('1')#v=significant
                # U as an int: zero out the v digit, map '2' digits to bits
                u = uv[:v]+'0'+uv[v+1:]
                u = int(u.replace('2','1'),2)
                Umin_s = self[gw-v-1]#bit position from right
                # drop the stored U's that are supersets of the new u
                it = [i for i,U in enumerate(Umin_s) if U&u==u]
                for i in reversed(it):
                    del Umin_s[i]
                # NOTE(review): this for/else has no break, so the else
                # always runs and u is always appended — confirm intended
                else:
                    Umin_s.append(u)
    elif isinstance(Bg,list):
        defaultdict.__init__(self,list)
        for k,v in Bg:
            assert isinstance(v,list)
            self[k] += v
    else:
        # any other mapping: hand it straight to defaultdict
        defaultdict.__init__(self,list,Bg)
def __eq__(self, other):
    """Equal when both map the same v's to the same sets of U's."""
    if len(self) != len(other):
        return False
    return all(
        v in other and set(other[v]) == set(Us)
        for v, Us in self.items()
    )
def Code012(self):
for v,Us in self.items():
vleft = self.width - v - 1
for u in Us:
b = bin(u)[2:]
w0 = self.width-len(b)
c01 = '0'*w0+b
c01 = c01.replace('1','2')
c01 = c01[:vleft]+'1'+c01[vleft+1:]
yield c01
def __str__(self):
return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
def __len__(self):
return sum((len(x) for x in self.values()))
def flatten(self):
for v,Us in self.items():
for u in Us:
yield (v,u)
def __add__(self, other):
res = v_Us_dict([],self.width)
if isinstance(other,tuple):
other = {other[0]:[other[1]]}
keys = set(self)|set(other)
for v in keys:
t = set()
if v in self:
t |= set(self[v])
if v in other:
t |= set(other[v])
if t:
res[v] = list(t)
return res
def __sub__(self, other):
res = v_Us_dict([],self.width)
for v,U in self.items():
r = list(set(U) - set(other[v]))
if r:
res[v] = r
return res
def __mul__(self, other):
"""
This is the o operation in [1]_, that represents the 3rd Armstrong rule.
It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
"""
res = v_Us_dict([],self.width)
if id(self)==id(other):
s = iter(self.items())
try:
while True:
v1, us1 = next(s)
vv1 = 2**v1
s, ss = tee(s)#remember s and iterate with copy ss
try:
while True:
v2, us2 = next(ss)
vv2 = 2**v2
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
except StopIteration:
pass
except StopIteration:
pass
else:
for v1,us1 in self.items():
vv1 = 2**v1
for v2,us2 in other.items():
vv2 = 2**v2
if v1 != v2:
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
for v,U in res.items():
res[v] = list(set(U))#remove duplicates
return res
def __invert__(self):
"""
U->v generated from L=∪ min L_i via the 3rd Armstrong rule
Note, that this can become bigger than L.
"""
Y = self
Yn = Y*Y
while True:
YnplusY = Yn+Y
Yg = Yn*YnplusY
#YgenNotInL = Yg - L
#YgenInL = Yg - YgenNotInL
#Yn1 = Yn + YgenInL
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def __pow__(self, other):
"""
'other' is a (v,u) couple
generates U->v involving 'other'
#other = (0,64)
"""
Y = self
Z = v_Us_dict({other[0]:[other[1]]},self.width)
Yn = Y*Z
while True:
YnplusY = Yn+Y
Yg = Z*YnplusY
#this does not work for test_basis1
#YnplusZ = Yn+Z
#Yg = YnplusZ*YnplusY
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def koenig(self):
"""
This needs to be L = contextg.v_Us_B()
"""
L = self
Y = L - (L*L)
while True:
Ybar = Y + ~Y
take = L - Ybar
if not len(take):
return Y
else:
ZZ = list(set(take)-set(Y))#use significant which is not in Y
if len(ZZ) > 0:
v = ZZ[0]
z=(v,take[v][0])
else:
z = next(take.flatten())
Yzgen = Y**z
Y = (Y - Yzgen) + z #Yn+1
#Lost = Ybar - (Y + ~Y)
#assert len(Lost) == 0
def respects(g,imp):
"""
g is an int, where each bit is an attribute
implication UV is ternary coded 1 = ∈V, 2 = ∈U, 0 otherwise
g and UV have the same number of digits
"""
if isinstance(g,str):
g = int(g,2)
if isinstance(imp,int):
imp = istr(imp,3,g.bit_length())
V = int(imp.replace('1','2').replace('2','1'),2)
U = int(imp.replace('1','0').replace('2','1'),2)
ginU = U&g == U
ginV = V&g == V
return not ginU or ginV
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
C = Context
def C1(w,h):
return Context('\n'.join(['1'*w]*h))
def C0(w,h):
return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
"""constructs the LL context"""
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2
def HH(n):
"""constructs the HH context"""
if (n<=0):return Context('1')
else:
LL1=LL(n-1)
HH1=HH(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
r2 = HH1 - HH1 - HH1
return r1 + r2
def AA(n):
"""constructs the AA context"""
if (n<=1):return Context('10\n00')
else:
AA1=AA(n-1)
r1 = C1(2**(n-1),2**(n-1)) - AA1
r2 = AA1 - AA1
return r1 + r2
def BB(n):
"""constructs the BB context"""
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
H
|
python
|
def H(g,i):
g1 = g&(2**i)
if i:
n = Hwidth(i)
i=i-1
Hn = H(g,i)
if g1:
return Hn<<(2*n) | Hn<<n | Hn
else:
return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
else:
if g1:
return int('111',2)
else:
return int('101',2)
|
recursively constructs H line for g; i = len(g)-1
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L48-L63
|
[
"Lwidth = Hwidth = lambda n: 3**n\n",
"def L(g,i):\n \"\"\"recursively constructs L line for g; i = len(g)-1\"\"\"\n g1 = g&(2**i)\n if i:\n n = Lwidth(i)\n Ln = L(g,i-1)\n if g1:\n return Ln<<(2*n) | Ln<<n | Ln\n else:\n return int('1'*n,2)<<(2*n) | Ln<<n | Ln\n else:\n if g1:\n return int('000',2)\n else:\n return int('100',2)\n",
"def H(g,i):\n \"\"\"recursively constructs H line for g; i = len(g)-1\"\"\"\n g1 = g&(2**i)\n if i:\n n = Hwidth(i)\n i=i-1\n Hn = H(g,i)\n if g1:\n return Hn<<(2*n) | Hn<<n | Hn\n else:\n return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn\n else:\n if g1:\n return int('111',2)\n else:\n return int('101',2)\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i,b,w,c="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((w<=0 and i==0) and " ") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
vertically2 = vertically = lambda K1,K2: K1+K2
Lwidth = Hwidth = lambda n: 3**n
def L(g,i):
"""recursively constructs L line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Lwidth(i)
Ln = L(g,i-1)
if g1:
return Ln<<(2*n) | Ln<<n | Ln
else:
return int('1'*n,2)<<(2*n) | Ln<<n | Ln
else:
if g1:
return int('000',2)
else:
return int('100',2)
def UV_H(Hg,gw):
"""
Constructs implications and intents based on H
gw = g width
Hg = H(g), g is the binary coding of the attribute set
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
"""
lefts = set()
K = []
UV = []
p = Hwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Hg&pp:
y = istr(p,3,gw)
yy = y.replace('1','0')
if yy not in lefts:
if y.find('1') == -1:#y∈{0,2}^n
K.append(y)
else:
UV.append(y)
lefts.add(yy)
return (UV,K)
Awidth = lambda n: 2**n
def A(g,i):
"""recursively constructs A line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2)
Bwidth = lambda n:n*2**(n-1)
def B(g,i):
"""recursively constructs B line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
nA = Awidth(i)
nB = Bwidth(i)
i=i-1
Bn = B(g,i)
if g1:
return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
else:
return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
else:
if g1:
return 1
else:
return 0
def A012(t,i):
if i<0:
return ""
nA = Awidth(i)
if t < nA:
return "0"+A012(t,i-1)
else:
return "2"+A012(t-nA,i-1)
def B012(t,i):
"""
Constructs ternary implication coding (0=not there, 2=U, 1=V)
t is B column position
i = |M|-1 to 0
"""
if not i:
return "1"
nA = Awidth(i)
nB = Bwidth(i)
nBB = nB + nA
if t < nB:
return "0"+B012(t,i-1)
elif t < nBB:
return "1"+A012(t-nB,i-1)
else:
return "2"+B012(t-nBB,i-1)
def UV_B(Bg,gw):
"""
returns the implications UV based on B
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
UV = []
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
UV.append(uv)
return UV
def omega(imps):
"""
Calculates a measure for the size of the implication basis: \sum |U||V|
"""
if isinstance(imps,v_Us_dict):
return sum([omega(V) for U,V in imps.items()])#|V|=1
if isinstance(imps,list):
return sum([omega(x) for x in imps])
if isinstance(imps,str):
#imps = due[-1]
try:
U,V = imps.split("->")
Us = U.split(",") if "," in U else U.split()
Vs = V.split(",") if "," in V else V.split()
res = len(Us)*len(Vs)
return res
except:
return 0
if isinstance(imps,int):
b=bin(imps)[2:]
res = len([x for x in b if x=='1'])
return res
class v_Us_dict(defaultdict):
"""
In an implication U→u, u is the significant component.
U is coded as int.
u is the bit column of the implication's conclusion.
{u:[U1,U2,...]}
"""
def __init__(self,Bg,gw):
"""
returns the implications {v:Us} based on B
v is the significant component
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
self.width = gw
if isinstance(Bg,int):
defaultdict.__init__(self,list)
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
#let's find minima regarding product order
#{v:[Umin1,Umin2,...]}
v = uv.find('1')#v=significant
u = uv[:v]+'0'+uv[v+1:]
u = int(u.replace('2','1'),2)
Umin_s = self[gw-v-1]#bit position from right
it = [i for i,U in enumerate(Umin_s) if U&u==u]
for i in reversed(it):
del Umin_s[i]
else:
Umin_s.append(u)
elif isinstance(Bg,list):
defaultdict.__init__(self,list)
for k,v in Bg:
assert isinstance(v,list)
self[k] += v
else:
defaultdict.__init__(self,list,Bg)
def __eq__(self, other):
if len(self) != len(other):
return False
for v,U in self.items():
if v not in other:
return False
Uo = other[v]
if not set(Uo)==set(U):
return False
return True
def Code012(self):
for v,Us in self.items():
vleft = self.width - v - 1
for u in Us:
b = bin(u)[2:]
w0 = self.width-len(b)
c01 = '0'*w0+b
c01 = c01.replace('1','2')
c01 = c01[:vleft]+'1'+c01[vleft+1:]
yield c01
def __str__(self):
return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
def __len__(self):
return sum((len(x) for x in self.values()))
def flatten(self):
for v,Us in self.items():
for u in Us:
yield (v,u)
def __add__(self, other):
res = v_Us_dict([],self.width)
if isinstance(other,tuple):
other = {other[0]:[other[1]]}
keys = set(self)|set(other)
for v in keys:
t = set()
if v in self:
t |= set(self[v])
if v in other:
t |= set(other[v])
if t:
res[v] = list(t)
return res
def __sub__(self, other):
res = v_Us_dict([],self.width)
for v,U in self.items():
r = list(set(U) - set(other[v]))
if r:
res[v] = r
return res
def __mul__(self, other):
"""
This is the o operation in [1]_, that represents the 3rd Armstrong rule.
It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
"""
res = v_Us_dict([],self.width)
if id(self)==id(other):
s = iter(self.items())
try:
while True:
v1, us1 = next(s)
vv1 = 2**v1
s, ss = tee(s)#remember s and iterate with copy ss
try:
while True:
v2, us2 = next(ss)
vv2 = 2**v2
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
except StopIteration:
pass
except StopIteration:
pass
else:
for v1,us1 in self.items():
vv1 = 2**v1
for v2,us2 in other.items():
vv2 = 2**v2
if v1 != v2:
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
for v,U in res.items():
res[v] = list(set(U))#remove duplicates
return res
def __invert__(self):
"""
U->v generated from L=∪ min L_i via the 3rd Armstrong rule
Note, that this can become bigger than L.
"""
Y = self
Yn = Y*Y
while True:
YnplusY = Yn+Y
Yg = Yn*YnplusY
#YgenNotInL = Yg - L
#YgenInL = Yg - YgenNotInL
#Yn1 = Yn + YgenInL
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def __pow__(self, other):
"""
'other' is a (v,u) couple
generates U->v involving 'other'
#other = (0,64)
"""
Y = self
Z = v_Us_dict({other[0]:[other[1]]},self.width)
Yn = Y*Z
while True:
YnplusY = Yn+Y
Yg = Z*YnplusY
#this does not work for test_basis1
#YnplusZ = Yn+Z
#Yg = YnplusZ*YnplusY
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def koenig(self):
"""
This needs to be L = contextg.v_Us_B()
"""
L = self
Y = L - (L*L)
while True:
Ybar = Y + ~Y
take = L - Ybar
if not len(take):
return Y
else:
ZZ = list(set(take)-set(Y))#use significant which is not in Y
if len(ZZ) > 0:
v = ZZ[0]
z=(v,take[v][0])
else:
z = next(take.flatten())
Yzgen = Y**z
Y = (Y - Yzgen) + z #Yn+1
#Lost = Ybar - (Y + ~Y)
#assert len(Lost) == 0
def respects(g,imp):
"""
g is an int, where each bit is an attribute
implication UV is ternary coded 1 = ∈V, 2 = ∈U, 0 otherwise
g and UV have the same number of digits
"""
if isinstance(g,str):
g = int(g,2)
if isinstance(imp,int):
imp = istr(imp,3,g.bit_length())
V = int(imp.replace('1','2').replace('2','1'),2)
U = int(imp.replace('1','0').replace('2','1'),2)
ginU = U&g == U
ginV = V&g == V
return not ginU or ginV
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
C = Context
def C1(w,h):
return Context('\n'.join(['1'*w]*h))
def C0(w,h):
return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
"""constructs the LL context"""
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2
def HH(n):
"""constructs the HH context"""
if (n<=0):return Context('1')
else:
LL1=LL(n-1)
HH1=HH(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
r2 = HH1 - HH1 - HH1
return r1 + r2
def AA(n):
"""constructs the AA context"""
if (n<=1):return Context('10\n00')
else:
AA1=AA(n-1)
r1 = C1(2**(n-1),2**(n-1)) - AA1
r2 = AA1 - AA1
return r1 + r2
def BB(n):
"""constructs the BB context"""
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
UV_H
|
python
|
def UV_H(Hg,gw):
lefts = set()
K = []
UV = []
p = Hwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Hg&pp:
y = istr(p,3,gw)
yy = y.replace('1','0')
if yy not in lefts:
if y.find('1') == -1:#y∈{0,2}^n
K.append(y)
else:
UV.append(y)
lefts.add(yy)
return (UV,K)
|
Constructs implications and intents based on H
gw = g width
Hg = H(g), g is the binary coding of the attribute set
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L65-L90
|
[
"Lwidth = Hwidth = lambda n: 3**n\n",
"def istr(i,b,w,c=\"0123456789abcdefghijklmnopqrstuvwxyz\"):\n return ((w<=0 and i==0) and \" \") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i,b,w,c="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((w<=0 and i==0) and " ") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
vertically2 = vertically = lambda K1,K2: K1+K2
Lwidth = Hwidth = lambda n: 3**n
def L(g,i):
"""recursively constructs L line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Lwidth(i)
Ln = L(g,i-1)
if g1:
return Ln<<(2*n) | Ln<<n | Ln
else:
return int('1'*n,2)<<(2*n) | Ln<<n | Ln
else:
if g1:
return int('000',2)
else:
return int('100',2)
def H(g,i):
"""recursively constructs H line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Hwidth(i)
i=i-1
Hn = H(g,i)
if g1:
return Hn<<(2*n) | Hn<<n | Hn
else:
return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
else:
if g1:
return int('111',2)
else:
return int('101',2)
Awidth = lambda n: 2**n
def A(g,i):
"""recursively constructs A line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2)
Bwidth = lambda n:n*2**(n-1)
def B(g,i):
"""recursively constructs B line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
nA = Awidth(i)
nB = Bwidth(i)
i=i-1
Bn = B(g,i)
if g1:
return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
else:
return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
else:
if g1:
return 1
else:
return 0
def A012(t,i):
if i<0:
return ""
nA = Awidth(i)
if t < nA:
return "0"+A012(t,i-1)
else:
return "2"+A012(t-nA,i-1)
def B012(t,i):
"""
Constructs ternary implication coding (0=not there, 2=U, 1=V)
t is B column position
i = |M|-1 to 0
"""
if not i:
return "1"
nA = Awidth(i)
nB = Bwidth(i)
nBB = nB + nA
if t < nB:
return "0"+B012(t,i-1)
elif t < nBB:
return "1"+A012(t-nB,i-1)
else:
return "2"+B012(t-nBB,i-1)
def UV_B(Bg,gw):
"""
returns the implications UV based on B
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
UV = []
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
UV.append(uv)
return UV
def omega(imps):
"""
Calculates a measure for the size of the implication basis: \sum |U||V|
"""
if isinstance(imps,v_Us_dict):
return sum([omega(V) for U,V in imps.items()])#|V|=1
if isinstance(imps,list):
return sum([omega(x) for x in imps])
if isinstance(imps,str):
#imps = due[-1]
try:
U,V = imps.split("->")
Us = U.split(",") if "," in U else U.split()
Vs = V.split(",") if "," in V else V.split()
res = len(Us)*len(Vs)
return res
except:
return 0
if isinstance(imps,int):
b=bin(imps)[2:]
res = len([x for x in b if x=='1'])
return res
class v_Us_dict(defaultdict):
"""
In an implication U→u, u is the significant component.
U is coded as int.
u is the bit column of the implication's conclusion.
{u:[U1,U2,...]}
"""
def __init__(self,Bg,gw):
"""
returns the implications {v:Us} based on B
v is the significant component
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
self.width = gw
if isinstance(Bg,int):
defaultdict.__init__(self,list)
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
#let's find minima regarding product order
#{v:[Umin1,Umin2,...]}
v = uv.find('1')#v=significant
u = uv[:v]+'0'+uv[v+1:]
u = int(u.replace('2','1'),2)
Umin_s = self[gw-v-1]#bit position from right
it = [i for i,U in enumerate(Umin_s) if U&u==u]
for i in reversed(it):
del Umin_s[i]
else:
Umin_s.append(u)
elif isinstance(Bg,list):
defaultdict.__init__(self,list)
for k,v in Bg:
assert isinstance(v,list)
self[k] += v
else:
defaultdict.__init__(self,list,Bg)
def __eq__(self, other):
if len(self) != len(other):
return False
for v,U in self.items():
if v not in other:
return False
Uo = other[v]
if not set(Uo)==set(U):
return False
return True
def Code012(self):
for v,Us in self.items():
vleft = self.width - v - 1
for u in Us:
b = bin(u)[2:]
w0 = self.width-len(b)
c01 = '0'*w0+b
c01 = c01.replace('1','2')
c01 = c01[:vleft]+'1'+c01[vleft+1:]
yield c01
def __str__(self):
return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
def __len__(self):
return sum((len(x) for x in self.values()))
def flatten(self):
for v,Us in self.items():
for u in Us:
yield (v,u)
def __add__(self, other):
res = v_Us_dict([],self.width)
if isinstance(other,tuple):
other = {other[0]:[other[1]]}
keys = set(self)|set(other)
for v in keys:
t = set()
if v in self:
t |= set(self[v])
if v in other:
t |= set(other[v])
if t:
res[v] = list(t)
return res
def __sub__(self, other):
res = v_Us_dict([],self.width)
for v,U in self.items():
r = list(set(U) - set(other[v]))
if r:
res[v] = r
return res
def __mul__(self, other):
"""
This is the o operation in [1]_, that represents the 3rd Armstrong rule.
It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
"""
res = v_Us_dict([],self.width)
if id(self)==id(other):
s = iter(self.items())
try:
while True:
v1, us1 = next(s)
vv1 = 2**v1
s, ss = tee(s)#remember s and iterate with copy ss
try:
while True:
v2, us2 = next(ss)
vv2 = 2**v2
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
except StopIteration:
pass
except StopIteration:
pass
else:
for v1,us1 in self.items():
vv1 = 2**v1
for v2,us2 in other.items():
vv2 = 2**v2
if v1 != v2:
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
for v,U in res.items():
res[v] = list(set(U))#remove duplicates
return res
def __invert__(self):
"""
U->v generated from L=∪ min L_i via the 3rd Armstrong rule
Note, that this can become bigger than L.
"""
Y = self
Yn = Y*Y
while True:
YnplusY = Yn+Y
Yg = Yn*YnplusY
#YgenNotInL = Yg - L
#YgenInL = Yg - YgenNotInL
#Yn1 = Yn + YgenInL
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def __pow__(self, other):
"""
'other' is a (v,u) couple
generates U->v involving 'other'
#other = (0,64)
"""
Y = self
Z = v_Us_dict({other[0]:[other[1]]},self.width)
Yn = Y*Z
while True:
YnplusY = Yn+Y
Yg = Z*YnplusY
#this does not work for test_basis1
#YnplusZ = Yn+Z
#Yg = YnplusZ*YnplusY
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def koenig(self):
"""
This needs to be L = contextg.v_Us_B()
"""
L = self
Y = L - (L*L)
while True:
Ybar = Y + ~Y
take = L - Ybar
if not len(take):
return Y
else:
ZZ = list(set(take)-set(Y))#use significant which is not in Y
if len(ZZ) > 0:
v = ZZ[0]
z=(v,take[v][0])
else:
z = next(take.flatten())
Yzgen = Y**z
Y = (Y - Yzgen) + z #Yn+1
#Lost = Ybar - (Y + ~Y)
#assert len(Lost) == 0
def respects(g,imp):
"""
g is an int, where each bit is an attribute
implication UV is ternary coded 1 = ∈V, 2 = ∈U, 0 otherwise
g and UV have the same number of digits
"""
if isinstance(g,str):
g = int(g,2)
if isinstance(imp,int):
imp = istr(imp,3,g.bit_length())
V = int(imp.replace('1','2').replace('2','1'),2)
U = int(imp.replace('1','0').replace('2','1'),2)
ginU = U&g == U
ginV = V&g == V
return not ginU or ginV
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
C = Context
def C1(w,h):
return Context('\n'.join(['1'*w]*h))
def C0(w,h):
return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
"""constructs the LL context"""
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2
def HH(n):
"""constructs the HH context"""
if (n<=0):return Context('1')
else:
LL1=LL(n-1)
HH1=HH(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
r2 = HH1 - HH1 - HH1
return r1 + r2
def AA(n):
"""constructs the AA context"""
if (n<=1):return Context('10\n00')
else:
AA1=AA(n-1)
r1 = C1(2**(n-1),2**(n-1)) - AA1
r2 = AA1 - AA1
return r1 + r2
def BB(n):
"""constructs the BB context"""
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
A
|
python
|
def A(g,i):
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2)
|
recursively constructs A line for g; i = len(g)-1
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L93-L107
|
[
"Awidth = lambda n: 2**n\n",
"def A(g,i):\n \"\"\"recursively constructs A line for g; i = len(g)-1\"\"\"\n g1 = g&(2**i)\n if i:\n n = Awidth(i)\n An = A(g,i-1)\n if g1:\n return An<<n | An\n else:\n return int('1'*n,2)<<n | An\n else:\n if g1:\n return int('00',2)\n else:\n return int('10',2)\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i, b, w, c="0123456789abcdefghijklmnopqrstuvwxyz"):
    """Render ``i`` in base ``b``, zero-padded to at least ``w`` digits.

    The degenerate call (``w <= 0`` with ``i == 0``) yields a single
    space, matching the recursive definition's terminating case.
    """
    if w <= 0 and i == 0:
        return " "
    digits = []
    remaining, width = i, w
    while remaining or width > 0:
        digits.append(c[remaining % b])
        remaining //= b
        width -= 1
    return "".join(reversed(digits))
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)  # digit a (from the right) of i in base b; NOTE(review): a=0 gives index [-0]==[0], the leftmost char — confirm callers never pass a=0
digitat2 = lambda i,a: (i>>a)&1  # bit a of i (binary fast path)
#concatenate...
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]  # join row lists side by side via strings (any base b)
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]  # join row lists side by side via shifts (binary only)
vertically2 = vertically = lambda K1,K2: K1+K2  # stack row lists on top of each other
# Row width of the L (and H) construction grows as 3**n.
Lwidth = Hwidth = lambda n: 3**n
def L(g, i):
    """Build the L line for object row ``g``; ``i`` = len(g)-1.

    Iterative form of the recurrence: the block for bit j consists of
    three copies of the previous block, with the top third replaced by
    all ones when bit j of ``g`` is clear.
    """
    row = 0 if g & 1 else 4  # base case: '000' when bit 0 set, '100' otherwise
    for j in range(1, i + 1):
        n = Lwidth(j)
        lower = row << n | row
        if g & (1 << j):
            row = row << (2 * n) | lower
        else:
            row = ((1 << n) - 1) << (2 * n) | lower
    return row
def H(g,i):
    """recursively constructs H line for g; i = len(g)-1

    Block structure for bit i (each third is 3**i wide): three copies of
    the smaller H line when the bit is set; otherwise all-ones / L line /
    H line.
    """
    g1 = g&(2**i)  # the bit of g currently being expanded
    if i:
        n = Hwidth(i)  # width of one third of the result
        i=i-1
        Hn = H(g,i)
        if g1:
            return Hn<<(2*n) | Hn<<n | Hn
        else:
            return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
    else:
        if g1:
            return int('111',2)
        else:
            return int('101',2)
def UV_H(Hg,gw):
    """
    Constructs implications and intents based on H
    gw = g width
    Hg = H(g), g is the binary coding of the attribute set
    UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
    K = all closed sets
    """
    lefts = set()  # premise-only keys already emitted (one result per premise)
    K = []
    UV = []
    p = Hwidth(gw)
    pp = 2**p  # single-bit probe, walks Hg from the most significant column down
    while p:
        pp = pp>>1
        p = p-1
        if Hg&pp:
            y = istr(p,3,gw)  # the set column index re-read as a ternary word of width gw
            yy = y.replace('1','0')  # key with the V digits dropped
            if yy not in lefts:
                if y.find('1') == -1:#y∈{0,2}^n
                    K.append(y)
                else:
                    UV.append(y)
                lefts.add(yy)
    return (UV,K)
Awidth = lambda n: 2**n  # width of the A line for n attributes
Bwidth = lambda n:n*2**(n-1)  # width of the B line for n attributes
def B(g,i):
    """recursively constructs B line for g; i = len(g)-1

    Width recurrence: Bwidth(i+1) = Bwidth(i) + Awidth(i) + Bwidth(i);
    the middle Awidth(i) segment is all ones (bit i set) or the A line
    (bit i clear).
    """
    g1 = g&(2**i)  # the bit of g currently being expanded
    if i:
        nA = Awidth(i)
        nB = Bwidth(i)
        i=i-1
        Bn = B(g,i)
        if g1:
            return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
        else:
            return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
    else:
        if g1:
            return 1
        else:
            return 0
def A012(t,i):
    """Ternary premise coding for column t of the A line ('0' absent, '2' ∈U)."""
    if i<0:
        return ""
    nA = Awidth(i)
    if t < nA:
        return "0"+A012(t,i-1)  # low half: attribute i not in the premise
    else:
        return "2"+A012(t-nA,i-1)  # high half: attribute i in the premise
def B012(t,i):
    """
    Constructs ternary implication coding (0=not there, 2=U, 1=V)
    t is B column position
    i = |M|-1 to 0
    """
    if not i:
        return "1"  # the last attribute is always the conclusion digit
    nA = Awidth(i)
    nB = Bwidth(i)
    nBB = nB + nA
    if t < nB:
        return "0"+B012(t,i-1)  # low B segment: attribute i not involved
    elif t < nBB:
        return "1"+A012(t-nB,i-1)  # A segment: attribute i is the conclusion
    else:
        return "2"+B012(t-nBB,i-1)  # high B segment: attribute i in the premise
def UV_B(Bg,gw):
    """
    returns the implications UV based on B
    Bg = B(g), g∈2^M
    gw = |M|, M is the set of all attributes
    """
    UV = []
    p = Bwidth(gw)
    pp = 2**p  # single-bit probe, walks Bg from the most significant column down
    while p:
        pp = pp>>1
        p = p-1
        if Bg&pp:
            uv = B012(p,gw-1)  # decode the set column into a ternary implication
            UV.append(uv)
    return UV
def omega(imps):
    """Measure of the size of an implication basis: sum of |U|*|V|.

    Accepts a ternary/arrow string for one implication, an int premise
    bit set (popcount), a list of any of these (summed), or a
    ``v_Us_dict`` (each conclusion's premises summed). Unknown types and
    malformed strings weigh 0.
    """
    if isinstance(imps, list):
        return sum([omega(x) for x in imps])
    if isinstance(imps, str):
        try:
            U, V = imps.split("->")
        except ValueError:  # not exactly one "->": no measurable implication
            return 0
        Us = U.split(",") if "," in U else U.split()
        Vs = V.split(",") if "," in V else V.split()
        return len(Us) * len(Vs)
    if isinstance(imps, int):
        b = bin(imps)[2:]
        return len([x for x in b if x == '1'])
    if isinstance(imps, v_Us_dict):
        # each value is a list of premise ints; |V| == 1 per implication
        return sum([omega(V) for U, V in imps.items()])
    return 0  # previously fell through returning None for unknown types
class v_Us_dict(defaultdict):
    """
    In an implication U→u, u is the significant component.
    U is coded as int.
    u is the bit column of the implication's conclusion.
    {u:[U1,U2,...]}
    """
    def __init__(self,Bg,gw):
        """
        returns the implications {v:Us} based on B
        v is the significant component
        Bg = B(g), g∈2^M
        gw = |M|, M is the set of all attributes
        Bg may also be a list of (v, Us) pairs or another mapping.
        """
        self.width = gw
        if isinstance(Bg,int):
            defaultdict.__init__(self,list)
            p = Bwidth(gw)
            pp = 2**p  # single-bit probe over the B columns, high to low
            while p:
                pp = pp>>1
                p = p-1
                if Bg&pp:
                    uv = B012(p,gw-1)
                    #let's find minima regarding product order
                    #{v:[Umin1,Umin2,...]}
                    v = uv.find('1')#v=significant
                    u = uv[:v]+'0'+uv[v+1:]
                    u = int(u.replace('2','1'),2)  # premise as a bit set
                    Umin_s = self[gw-v-1]#bit position from right
                    # drop stored premises that are supersets of the new one
                    it = [i for i,U in enumerate(Umin_s) if U&u==u]
                    for i in reversed(it):
                        del Umin_s[i]
                    else:
                        # for-else runs unconditionally here (no break), so u is
                        # always appended after the supersets are removed
                        Umin_s.append(u)
        elif isinstance(Bg,list):
            defaultdict.__init__(self,list)
            for k,v in Bg:
                assert isinstance(v,list)
                self[k] += v
        else:
            defaultdict.__init__(self,list,Bg)
    def __eq__(self, other):
        # equal when both hold the same premise sets per conclusion, order-free;
        # NOTE: defining __eq__ makes instances unhashable (__hash__ becomes None)
        if len(self) != len(other):
            return False
        for v,U in self.items():
            if v not in other:
                return False
            Uo = other[v]
            if not set(Uo)==set(U):
                return False
        return True
    def Code012(self):
        """Yield each implication as a ternary string (0 absent, 2 ∈U, 1 =v)."""
        for v,Us in self.items():
            vleft = self.width - v - 1  # conclusion column counted from the left
            for u in Us:
                b = bin(u)[2:]
                w0 = self.width-len(b)
                c01 = '0'*w0+b
                c01 = c01.replace('1','2')  # premise bits become '2'
                c01 = c01[:vleft]+'1'+c01[vleft+1:]
                yield c01
    def __str__(self):
        return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
    def __len__(self):
        # number of implications, not number of distinct conclusions
        return sum((len(x) for x in self.values()))
    def flatten(self):
        """Yield every implication as a (v, U) pair."""
        for v,Us in self.items():
            for u in Us:
                yield (v,u)
    def __add__(self, other):
        """Set-union merge; ``other`` may also be a single (v, U) pair."""
        res = v_Us_dict([],self.width)
        if isinstance(other,tuple):
            other = {other[0]:[other[1]]}
        keys = set(self)|set(other)
        for v in keys:
            t = set()
            if v in self:
                t |= set(self[v])
            if v in other:
                t |= set(other[v])
            if t:
                res[v] = list(t)
        return res
    def __sub__(self, other):
        """Per-conclusion set difference of the premise lists."""
        res = v_Us_dict([],self.width)
        for v,U in self.items():
            r = list(set(U) - set(other[v]))
            if r:
                res[v] = r
        return res
    def __mul__(self, other):
        """
        This is the o operation in [1]_, that represents the 3rd Armstrong rule.
        It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
        """
        res = v_Us_dict([],self.width)
        if id(self)==id(other):
            # self*self: visit each unordered pair once via a forked iterator
            s = iter(self.items())
            try:
                while True:
                    v1, us1 = next(s)
                    vv1 = 2**v1
                    s, ss = tee(s)#remember s and iterate with copy ss
                    try:
                        while True:
                            v2, us2 = next(ss)
                            vv2 = 2**v2
                            for u1 in us1:
                                for u2 in us2:
                                    if vv2&u1 and not vv1&u2:
                                        res[v1].append((u1|u2)&~vv2)
                                    elif vv1&u2 and not vv2&u1:
                                        res[v2].append((u1|u2)&~vv1)
                    except StopIteration:
                        pass
            except StopIteration:
                pass
        else:
            for v1,us1 in self.items():
                vv1 = 2**v1
                for v2,us2 in other.items():
                    vv2 = 2**v2
                    if v1 != v2:
                        for u1 in us1:
                            for u2 in us2:
                                if vv2&u1 and not vv1&u2:
                                    res[v1].append((u1|u2)&~vv2)
                                elif vv1&u2 and not vv2&u1:
                                    res[v2].append((u1|u2)&~vv1)
        for v,U in res.items():
            res[v] = list(set(U))#remove duplicates
        return res
    def __invert__(self):
        """
        U->v generated from L=∪ min L_i via the 3rd Armstrong rule
        Note, that this can become bigger than L.
        """
        Y = self
        Yn = Y*Y
        while True:  # iterate to the fixed point (closure under *)
            YnplusY = Yn+Y
            Yg = Yn*YnplusY
            #YgenNotInL = Yg - L
            #YgenInL = Yg - YgenNotInL
            #Yn1 = Yn + YgenInL
            Yn1 = Yn + Yg
            if Yn1 == Yn:
                break
            Yn = Yn1
        return Yn
    def __pow__(self, other):
        """
        'other' is a (v,u) couple
        generates U->v involving 'other'
        #other = (0,64)
        """
        Y = self
        Z = v_Us_dict({other[0]:[other[1]]},self.width)
        Yn = Y*Z
        while True:  # iterate to the fixed point
            YnplusY = Yn+Y
            Yg = Z*YnplusY
            #this does not work for test_basis1
            #YnplusZ = Yn+Z
            #Yg = YnplusZ*YnplusY
            Yn1 = Yn + Yg
            if Yn1 == Yn:
                break
            Yn = Yn1
        return Yn
    def koenig(self):
        """
        This needs to be L = contextg.v_Us_B()
        Iteratively shrinks L towards an implication basis Y.
        """
        L = self  # L is the implication list here, not the L-line function above
        Y = L - (L*L)
        while True:
            Ybar = Y + ~Y
            take = L - Ybar  # implications of L not yet derivable from Y
            if not len(take):
                return Y
            else:
                ZZ = list(set(take)-set(Y))#use significant which is not in Y
                if len(ZZ) > 0:
                    v = ZZ[0]
                    z=(v,take[v][0])
                else:
                    z = next(take.flatten())
                Yzgen = Y**z
                Y = (Y - Yzgen) + z #Yn+1
                #Lost = Ybar - (Y + ~Y)
                #assert len(Lost) == 0
def respects(g, imp):
    """Does attribute set ``g`` respect the implication ``imp``?

    ``g`` is an int (or 0/1 string) with one bit per attribute; ``imp``
    is ternary-coded with '1' = ∈V, '2' = ∈U, '0' otherwise. An int
    ``imp`` is re-read as a ternary word of ``g``'s bit length.
    """
    if isinstance(g, str):
        g = int(g, 2)
    if isinstance(imp, int):
        imp = istr(imp, 3, g.bit_length())
    # premise mask: the '2' digits; closure mask: the '1' and '2' digits —
    # once g covers U, requiring U∪V ⊆ g is the same as requiring V ⊆ g
    premise = int(imp.replace('1', '0').replace('2', '1'), 2)
    closure = int(imp.replace('1', '2').replace('2', '1'), 2)
    if premise & g != premise:
        return True
    return closure & g == closure
class Context(list):
    """A formal context stored as a list of ints: one int per object row,
    one bit per attribute (bit 0 is the rightmost column)."""
    def __init__(self, *args, **kwargs):
        """Context can be initialized with
        - a rectangular text block of 0s and 1s
        - a list of ints and a "width" keyword argument.
        A "mapping" keyword argument as list associates the bits with objects of any kind.
        """
        if isinstance(args[0],str):
            lines = [s.strip() for s in args[0].splitlines() if s.strip()]
            linelens = [len(tt) for tt in lines]
            self.width = linelens[0]
            samelen = linelens.count(linelens[0])==len(linelens)
            assert samelen, "Context needs all lines to be of same number of 0s and 1s"
            super().__init__([int(s,2) for s in lines])
        else:
            super().__init__(*args)
            self.width = kwargs['width']
        try:
            self.mapping = kwargs['mapping']
        except:
            # no mapping given (KeyError): default to the bit indices
            self.mapping = [i for i in range(self.width)]
    def __add__(self, other):
        """Vertical concatenation: more object rows (widths assumed equal)."""
        c = Context(list.__add__(self,other),width=self.width)
        return c
    def __sub__(self, other):
        """Horizontal concatenation (more attribute columns), despite the name."""
        c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
        return c
    def column(self, i):
        """from right"""
        return ''.join([str(digitat2(r,i)) for r in self])
    def row(self, i):
        """Row i as a 0/1 string; rows past the end read as all zeros."""
        try:
            r = istr(self[i],2,self.width)
        except IndexError:
            r = '0'*self.width
        return r
    def __getitem__(self,xy):
        """ctx[r] -> row int; ctx[r, c] -> single bit (c counted from the right)."""
        if isinstance(xy,tuple):
            return digitat2(list.__getitem__(self,xy[0]),xy[1])
        else:
            return list.__getitem__(self,xy)
    def transpose(self):
        """Swap objects and attributes."""
        cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
        return Context(cs)
    def __str__(self):
        rs='\n'.join([self.row(i) for i in range(len(self))])
        return rs
    def size(self):
        """(width, height) = (number of attributes, number of objects)."""
        return self.width, len(self)
    def UV_H(self):
        """
        UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
        K = all closed sets
        This is UV_H function, but the returned implications are respected by all attribute sets of this context.
        This corresponds to a multiplication or & operation of the Hg sets.
        """
        h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
        return UV_H(h, self.width)
    def UV_B(self):
        """
        returns UV = all respected U->Ux in ternary coding (1=V,2=U)
        """
        h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
        return UV_B(h, self.width)
    def v_Us_B(self):
        """
        returns the implications {v:Us} based on B
        This is L=∪ min L_i in [1]_
        """
        Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
        gw = self.width
        return v_Us_dict(Bg, gw)
    def respects(self, implications):
        """True when every object row respects every given implication."""
        if isinstance(implications,v_Us_dict):
            implications = implications.Code012()
        for g in self:
            for i in implications:
                if not respects(g,i):
                    return False
        return True
    def __call__(self, intOrCode012, right = None):
        """
        mapping from bits to attributes using mapping (which defaults to ints)
        - right, if available, is the conclusion of the implication; used if intOrCode012 is int
        """
        if isinstance(intOrCode012,v_Us_dict):
            return frozenset(self(x,right=i) for i,x in intOrCode012.items())
        if isinstance(intOrCode012,list):
            return frozenset(self(x,right=right) for x in intOrCode012)
        if isinstance(intOrCode012,int):
            res = []
            pp = 1  # probe bit, walking from the right
            for pos in range(self.width):
                if intOrCode012&pp:
                    res.append(self.mapping[-pos-1])
                pp = pp*2
            if right != None:
                return (frozenset(res),frozenset([self.mapping[-right-1]]))
            else:
                return frozenset(res)
        if isinstance(intOrCode012,str):
            left = []
            right = []
            for pos in range(self.width):
                if intOrCode012[pos] == '2':
                    left.append(self.mapping[pos])
                elif intOrCode012[pos] == '1':
                    right.append(self.mapping[pos])
            if left:
                if right:
                    return (frozenset(left),frozenset(right))
                else:
                    return frozenset(left)
            else:
                return frozenset(right)
C = Context  # short alias
def C1(w,h):
    """All-ones context of width w and height h."""
    return Context('\n'.join(['1'*w]*h))
def C0(w,h):
    """All-zeros context of width w and height h."""
    return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
    """constructs the LL context (``-`` concatenates horizontally, ``+`` vertically)"""
    if (n<=0):return Context('0')
    else:
        LL1=LL(n-1)
        r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
        r2 = LL1 - LL1 - LL1
        return r1 + r2
def HH(n):
    """constructs the HH context (``-`` concatenates horizontally, ``+`` vertically)"""
    if (n<=0):return Context('1')
    else:
        LL1=LL(n-1)
        HH1=HH(n-1)
        r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
        r2 = HH1 - HH1 - HH1
        return r1 + r2
def AA(n):
    """constructs the AA context (``-`` concatenates horizontally, ``+`` vertically)"""
    if (n<=1):return Context('10\n00')
    else:
        AA1=AA(n-1)
        r1 = C1(2**(n-1),2**(n-1)) - AA1
        r2 = AA1 - AA1
        return r1 + r2
def BB(n):
    """constructs the BB context (``-`` concatenates horizontally, ``+`` vertically)"""
    if (n<=1):return Context('0\n1')
    else:
        BB1=BB(n-1)
        AA1=AA(n-1)
        r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
        r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
        return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
B
|
python
|
def B(g,i):
g1 = g&(2**i)
if i:
nA = Awidth(i)
nB = Bwidth(i)
i=i-1
Bn = B(g,i)
if g1:
return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
else:
return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
else:
if g1:
return 1
else:
return 0
|
recursively constructs B line for g; i = len(g)-1
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L109-L125
|
[
"Awidth = lambda n: 2**n\n",
"Bwidth = lambda n:n*2**(n-1)\n",
"def A(g,i):\n \"\"\"recursively constructs A line for g; i = len(g)-1\"\"\"\n g1 = g&(2**i)\n if i:\n n = Awidth(i)\n An = A(g,i-1)\n if g1:\n return An<<n | An\n else:\n return int('1'*n,2)<<n | An\n else:\n if g1:\n return int('00',2)\n else:\n return int('10',2)\n",
"def B(g,i):\n \"\"\"recursively constructs B line for g; i = len(g)-1\"\"\"\n g1 = g&(2**i)\n if i:\n nA = Awidth(i)\n nB = Bwidth(i)\n i=i-1\n Bn = B(g,i)\n if g1:\n return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn\n else:\n return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn\n else:\n if g1:\n return 1\n else:\n return 0\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i,b,w,c="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((w<=0 and i==0) and " ") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
vertically2 = vertically = lambda K1,K2: K1+K2
Lwidth = Hwidth = lambda n: 3**n
def L(g,i):
"""recursively constructs L line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Lwidth(i)
Ln = L(g,i-1)
if g1:
return Ln<<(2*n) | Ln<<n | Ln
else:
return int('1'*n,2)<<(2*n) | Ln<<n | Ln
else:
if g1:
return int('000',2)
else:
return int('100',2)
def H(g,i):
"""recursively constructs H line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Hwidth(i)
i=i-1
Hn = H(g,i)
if g1:
return Hn<<(2*n) | Hn<<n | Hn
else:
return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
else:
if g1:
return int('111',2)
else:
return int('101',2)
def UV_H(Hg,gw):
"""
Constructs implications and intents based on H
gw = g width
Hg = H(g), g is the binary coding of the attribute set
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
"""
lefts = set()
K = []
UV = []
p = Hwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Hg&pp:
y = istr(p,3,gw)
yy = y.replace('1','0')
if yy not in lefts:
if y.find('1') == -1:#y∈{0,2}^n
K.append(y)
else:
UV.append(y)
lefts.add(yy)
return (UV,K)
Awidth = lambda n: 2**n
def A(g,i):
"""recursively constructs A line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2)
Bwidth = lambda n:n*2**(n-1)
def A012(t,i):
if i<0:
return ""
nA = Awidth(i)
if t < nA:
return "0"+A012(t,i-1)
else:
return "2"+A012(t-nA,i-1)
def B012(t,i):
"""
Constructs ternary implication coding (0=not there, 2=U, 1=V)
t is B column position
i = |M|-1 to 0
"""
if not i:
return "1"
nA = Awidth(i)
nB = Bwidth(i)
nBB = nB + nA
if t < nB:
return "0"+B012(t,i-1)
elif t < nBB:
return "1"+A012(t-nB,i-1)
else:
return "2"+B012(t-nBB,i-1)
def UV_B(Bg,gw):
"""
returns the implications UV based on B
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
UV = []
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
UV.append(uv)
return UV
def omega(imps):
"""
Calculates a measure for the size of the implication basis: \sum |U||V|
"""
if isinstance(imps,v_Us_dict):
return sum([omega(V) for U,V in imps.items()])#|V|=1
if isinstance(imps,list):
return sum([omega(x) for x in imps])
if isinstance(imps,str):
#imps = due[-1]
try:
U,V = imps.split("->")
Us = U.split(",") if "," in U else U.split()
Vs = V.split(",") if "," in V else V.split()
res = len(Us)*len(Vs)
return res
except:
return 0
if isinstance(imps,int):
b=bin(imps)[2:]
res = len([x for x in b if x=='1'])
return res
class v_Us_dict(defaultdict):
"""
In an implication U→u, u is the significant component.
U is coded as int.
u is the bit column of the implication's conclusion.
{u:[U1,U2,...]}
"""
def __init__(self,Bg,gw):
"""
returns the implications {v:Us} based on B
v is the significant component
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
self.width = gw
if isinstance(Bg,int):
defaultdict.__init__(self,list)
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
#let's find minima regarding product order
#{v:[Umin1,Umin2,...]}
v = uv.find('1')#v=significant
u = uv[:v]+'0'+uv[v+1:]
u = int(u.replace('2','1'),2)
Umin_s = self[gw-v-1]#bit position from right
it = [i for i,U in enumerate(Umin_s) if U&u==u]
for i in reversed(it):
del Umin_s[i]
else:
Umin_s.append(u)
elif isinstance(Bg,list):
defaultdict.__init__(self,list)
for k,v in Bg:
assert isinstance(v,list)
self[k] += v
else:
defaultdict.__init__(self,list,Bg)
def __eq__(self, other):
if len(self) != len(other):
return False
for v,U in self.items():
if v not in other:
return False
Uo = other[v]
if not set(Uo)==set(U):
return False
return True
def Code012(self):
for v,Us in self.items():
vleft = self.width - v - 1
for u in Us:
b = bin(u)[2:]
w0 = self.width-len(b)
c01 = '0'*w0+b
c01 = c01.replace('1','2')
c01 = c01[:vleft]+'1'+c01[vleft+1:]
yield c01
def __str__(self):
return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
def __len__(self):
return sum((len(x) for x in self.values()))
def flatten(self):
for v,Us in self.items():
for u in Us:
yield (v,u)
def __add__(self, other):
res = v_Us_dict([],self.width)
if isinstance(other,tuple):
other = {other[0]:[other[1]]}
keys = set(self)|set(other)
for v in keys:
t = set()
if v in self:
t |= set(self[v])
if v in other:
t |= set(other[v])
if t:
res[v] = list(t)
return res
def __sub__(self, other):
res = v_Us_dict([],self.width)
for v,U in self.items():
r = list(set(U) - set(other[v]))
if r:
res[v] = r
return res
def __mul__(self, other):
"""
This is the o operation in [1]_, that represents the 3rd Armstrong rule.
It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
"""
res = v_Us_dict([],self.width)
if id(self)==id(other):
s = iter(self.items())
try:
while True:
v1, us1 = next(s)
vv1 = 2**v1
s, ss = tee(s)#remember s and iterate with copy ss
try:
while True:
v2, us2 = next(ss)
vv2 = 2**v2
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
except StopIteration:
pass
except StopIteration:
pass
else:
for v1,us1 in self.items():
vv1 = 2**v1
for v2,us2 in other.items():
vv2 = 2**v2
if v1 != v2:
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
for v,U in res.items():
res[v] = list(set(U))#remove duplicates
return res
def __invert__(self):
"""
U->v generated from L=∪ min L_i via the 3rd Armstrong rule
Note, that this can become bigger than L.
"""
Y = self
Yn = Y*Y
while True:
YnplusY = Yn+Y
Yg = Yn*YnplusY
#YgenNotInL = Yg - L
#YgenInL = Yg - YgenNotInL
#Yn1 = Yn + YgenInL
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def __pow__(self, other):
"""
'other' is a (v,u) couple
generates U->v involving 'other'
#other = (0,64)
"""
Y = self
Z = v_Us_dict({other[0]:[other[1]]},self.width)
Yn = Y*Z
while True:
YnplusY = Yn+Y
Yg = Z*YnplusY
#this does not work for test_basis1
#YnplusZ = Yn+Z
#Yg = YnplusZ*YnplusY
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def koenig(self):
"""
This needs to be L = contextg.v_Us_B()
"""
L = self
Y = L - (L*L)
while True:
Ybar = Y + ~Y
take = L - Ybar
if not len(take):
return Y
else:
ZZ = list(set(take)-set(Y))#use significant which is not in Y
if len(ZZ) > 0:
v = ZZ[0]
z=(v,take[v][0])
else:
z = next(take.flatten())
Yzgen = Y**z
Y = (Y - Yzgen) + z #Yn+1
#Lost = Ybar - (Y + ~Y)
#assert len(Lost) == 0
def respects(g,imp):
"""
g is an int, where each bit is an attribute
implication UV is ternary coded 1 = ∈V, 2 = ∈U, 0 otherwise
g and UV have the same number of digits
"""
if isinstance(g,str):
g = int(g,2)
if isinstance(imp,int):
imp = istr(imp,3,g.bit_length())
V = int(imp.replace('1','2').replace('2','1'),2)
U = int(imp.replace('1','0').replace('2','1'),2)
ginU = U&g == U
ginV = V&g == V
return not ginU or ginV
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
C = Context
def C1(w,h):
return Context('\n'.join(['1'*w]*h))
def C0(w,h):
return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
"""constructs the LL context"""
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2
def HH(n):
"""constructs the HH context"""
if (n<=0):return Context('1')
else:
LL1=LL(n-1)
HH1=HH(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
r2 = HH1 - HH1 - HH1
return r1 + r2
def AA(n):
"""constructs the AA context"""
if (n<=1):return Context('10\n00')
else:
AA1=AA(n-1)
r1 = C1(2**(n-1),2**(n-1)) - AA1
r2 = AA1 - AA1
return r1 + r2
def BB(n):
"""constructs the BB context"""
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
B012
|
python
|
def B012(t,i):
if not i:
return "1"
nA = Awidth(i)
nB = Bwidth(i)
nBB = nB + nA
if t < nB:
return "0"+B012(t,i-1)
elif t < nBB:
return "1"+A012(t-nB,i-1)
else:
return "2"+B012(t-nBB,i-1)
|
Constructs ternary implication coding (0=not there, 2=U, 1=V)
t is B column position
i = |M|-1 to 0
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L135-L151
|
[
"Awidth = lambda n: 2**n\n",
"Bwidth = lambda n:n*2**(n-1)\n",
"def A012(t,i):\n if i<0:\n return \"\"\n nA = Awidth(i)\n if t < nA:\n return \"0\"+A012(t,i-1)\n else:\n return \"2\"+A012(t-nA,i-1)\n",
"def B012(t,i):\n \"\"\"\n Constructs ternary implication coding (0=not there, 2=U, 1=V)\n t is B column position\n i = |M|-1 to 0\n \"\"\"\n if not i:\n return \"1\"\n nA = Awidth(i)\n nB = Bwidth(i)\n nBB = nB + nA\n if t < nB:\n return \"0\"+B012(t,i-1)\n elif t < nBB:\n return \"1\"+A012(t-nB,i-1)\n else:\n return \"2\"+B012(t-nBB,i-1)\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i,b,w,c="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((w<=0 and i==0) and " ") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
vertically2 = vertically = lambda K1,K2: K1+K2
Lwidth = Hwidth = lambda n: 3**n
def L(g,i):
"""recursively constructs L line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Lwidth(i)
Ln = L(g,i-1)
if g1:
return Ln<<(2*n) | Ln<<n | Ln
else:
return int('1'*n,2)<<(2*n) | Ln<<n | Ln
else:
if g1:
return int('000',2)
else:
return int('100',2)
def H(g,i):
"""recursively constructs H line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Hwidth(i)
i=i-1
Hn = H(g,i)
if g1:
return Hn<<(2*n) | Hn<<n | Hn
else:
return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
else:
if g1:
return int('111',2)
else:
return int('101',2)
def UV_H(Hg,gw):
"""
Constructs implications and intents based on H
gw = g width
Hg = H(g), g is the binary coding of the attribute set
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
"""
lefts = set()
K = []
UV = []
p = Hwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Hg&pp:
y = istr(p,3,gw)
yy = y.replace('1','0')
if yy not in lefts:
if y.find('1') == -1:#y∈{0,2}^n
K.append(y)
else:
UV.append(y)
lefts.add(yy)
return (UV,K)
Awidth = lambda n: 2**n
def A(g,i):
"""recursively constructs A line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2)
Bwidth = lambda n:n*2**(n-1)
def B(g,i):
"""recursively constructs B line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
nA = Awidth(i)
nB = Bwidth(i)
i=i-1
Bn = B(g,i)
if g1:
return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
else:
return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
else:
if g1:
return 1
else:
return 0
def A012(t,i):
if i<0:
return ""
nA = Awidth(i)
if t < nA:
return "0"+A012(t,i-1)
else:
return "2"+A012(t-nA,i-1)
def UV_B(Bg,gw):
"""
returns the implications UV based on B
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
UV = []
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
UV.append(uv)
return UV
def omega(imps):
    r"""
    Calculates a measure for the size of the implication basis: \sum |U||V|

    Accepted encodings of *imps*:
      - str "U->V", attributes comma- or space-separated: |U|*|V| (0 if unparsable)
      - list: sum of omega() over its entries
      - int bit field: number of set bits
      - v_Us_dict: sum over all premises (each conclusion has |V| = 1)
    """
    # Check plain stdlib types first so they never reach the project-specific
    # v_Us_dict test (which needs that class to be defined).
    if isinstance(imps,str):
        try:
            U,V = imps.split("->")
        except ValueError:  # not exactly one "->": not an implication string
            return 0
        Us = U.split(",") if "," in U else U.split()
        Vs = V.split(",") if "," in V else V.split()
        return len(Us)*len(Vs)
    if isinstance(imps,list):
        return sum([omega(x) for x in imps])
    if isinstance(imps,int):
        # popcount of the premise bit field
        return bin(imps)[2:].count('1')
    if isinstance(imps,v_Us_dict):
        return sum([omega(V) for U,V in imps.items()])#|V|=1
class v_Us_dict(defaultdict):
"""
In an implication U→u, u is the significant component.
U is coded as int.
u is the bit column of the implication's conclusion.
{u:[U1,U2,...]}
"""
def __init__(self,Bg,gw):
"""
returns the implications {v:Us} based on B
v is the significant component
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
self.width = gw
if isinstance(Bg,int):
defaultdict.__init__(self,list)
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
#let's find minima regarding product order
#{v:[Umin1,Umin2,...]}
v = uv.find('1')#v=significant
u = uv[:v]+'0'+uv[v+1:]
u = int(u.replace('2','1'),2)
Umin_s = self[gw-v-1]#bit position from right
it = [i for i,U in enumerate(Umin_s) if U&u==u]
for i in reversed(it):
del Umin_s[i]
else:
Umin_s.append(u)
elif isinstance(Bg,list):
defaultdict.__init__(self,list)
for k,v in Bg:
assert isinstance(v,list)
self[k] += v
else:
defaultdict.__init__(self,list,Bg)
def __eq__(self, other):
if len(self) != len(other):
return False
for v,U in self.items():
if v not in other:
return False
Uo = other[v]
if not set(Uo)==set(U):
return False
return True
def Code012(self):
for v,Us in self.items():
vleft = self.width - v - 1
for u in Us:
b = bin(u)[2:]
w0 = self.width-len(b)
c01 = '0'*w0+b
c01 = c01.replace('1','2')
c01 = c01[:vleft]+'1'+c01[vleft+1:]
yield c01
def __str__(self):
return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
def __len__(self):
return sum((len(x) for x in self.values()))
def flatten(self):
for v,Us in self.items():
for u in Us:
yield (v,u)
def __add__(self, other):
res = v_Us_dict([],self.width)
if isinstance(other,tuple):
other = {other[0]:[other[1]]}
keys = set(self)|set(other)
for v in keys:
t = set()
if v in self:
t |= set(self[v])
if v in other:
t |= set(other[v])
if t:
res[v] = list(t)
return res
def __sub__(self, other):
res = v_Us_dict([],self.width)
for v,U in self.items():
r = list(set(U) - set(other[v]))
if r:
res[v] = r
return res
def __mul__(self, other):
"""
This is the o operation in [1]_, that represents the 3rd Armstrong rule.
It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
"""
res = v_Us_dict([],self.width)
if id(self)==id(other):
s = iter(self.items())
try:
while True:
v1, us1 = next(s)
vv1 = 2**v1
s, ss = tee(s)#remember s and iterate with copy ss
try:
while True:
v2, us2 = next(ss)
vv2 = 2**v2
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
except StopIteration:
pass
except StopIteration:
pass
else:
for v1,us1 in self.items():
vv1 = 2**v1
for v2,us2 in other.items():
vv2 = 2**v2
if v1 != v2:
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
for v,U in res.items():
res[v] = list(set(U))#remove duplicates
return res
def __invert__(self):
"""
U->v generated from L=∪ min L_i via the 3rd Armstrong rule
Note, that this can become bigger than L.
"""
Y = self
Yn = Y*Y
while True:
YnplusY = Yn+Y
Yg = Yn*YnplusY
#YgenNotInL = Yg - L
#YgenInL = Yg - YgenNotInL
#Yn1 = Yn + YgenInL
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def __pow__(self, other):
"""
'other' is a (v,u) couple
generates U->v involving 'other'
#other = (0,64)
"""
Y = self
Z = v_Us_dict({other[0]:[other[1]]},self.width)
Yn = Y*Z
while True:
YnplusY = Yn+Y
Yg = Z*YnplusY
#this does not work for test_basis1
#YnplusZ = Yn+Z
#Yg = YnplusZ*YnplusY
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def koenig(self):
"""
This needs to be L = contextg.v_Us_B()
"""
L = self
Y = L - (L*L)
while True:
Ybar = Y + ~Y
take = L - Ybar
if not len(take):
return Y
else:
ZZ = list(set(take)-set(Y))#use significant which is not in Y
if len(ZZ) > 0:
v = ZZ[0]
z=(v,take[v][0])
else:
z = next(take.flatten())
Yzgen = Y**z
Y = (Y - Yzgen) + z #Yn+1
#Lost = Ybar - (Y + ~Y)
#assert len(Lost) == 0
def respects(g,imp):
"""
g is an int, where each bit is an attribute
implication UV is ternary coded 1 = ∈V, 2 = ∈U, 0 otherwise
g and UV have the same number of digits
"""
if isinstance(g,str):
g = int(g,2)
if isinstance(imp,int):
imp = istr(imp,3,g.bit_length())
V = int(imp.replace('1','2').replace('2','1'),2)
U = int(imp.replace('1','0').replace('2','1'),2)
ginU = U&g == U
ginV = V&g == V
return not ginU or ginV
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
C = Context
def C1(w,h):
return Context('\n'.join(['1'*w]*h))
def C0(w,h):
return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
"""constructs the LL context"""
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2
def HH(n):
"""constructs the HH context"""
if (n<=0):return Context('1')
else:
LL1=LL(n-1)
HH1=HH(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
r2 = HH1 - HH1 - HH1
return r1 + r2
def AA(n):
"""constructs the AA context"""
if (n<=1):return Context('10\n00')
else:
AA1=AA(n-1)
r1 = C1(2**(n-1),2**(n-1)) - AA1
r2 = AA1 - AA1
return r1 + r2
def BB(n):
"""constructs the BB context"""
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
UV_B
|
python
|
def UV_B(Bg,gw):
UV = []
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
UV.append(uv)
return UV
|
returns the implications UV based on B
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L153-L168
|
[
"Bwidth = lambda n:n*2**(n-1)\n",
"def B012(t,i):\n \"\"\"\n Constructs ternary implication coding (0=not there, 2=U, 1=V)\n t is B column position\n i = |M|-1 to 0\n \"\"\"\n if not i:\n return \"1\"\n nA = Awidth(i)\n nB = Bwidth(i)\n nBB = nB + nA\n if t < nB:\n return \"0\"+B012(t,i-1)\n elif t < nBB:\n return \"1\"+A012(t-nB,i-1)\n else:\n return \"2\"+B012(t-nBB,i-1)\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i,b,w,c="0123456789abcdefghijklmnopqrstuvwxyz"):
    """Render *i* in base *b* with digits from *c*, left-padded with c[0] to width *w* (longer if needed)."""
    # Base case yields " "; each recursive caller's .lstrip() removes it again.
    return ((w<=0 and i==0) and " ") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])
# Digit of i in base b.
# NOTE(review): [-a] selects the digit at position a-1 from the right (and the
# LEFTMOST digit for a == 0), i.e. off by one w.r.t. digitat2 -- confirm intended.
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
# Bit of i at position a (counted from the right).
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
# ...rows of two base-b contexts side by side (string based).
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
# ...rows of two binary contexts side by side (bit-shift based; w1 is unused).
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
# ...two contexts on top of each other (plain list concatenation).
vertically2 = vertically = lambda K1,K2: K1+K2
# Row width of the L/H contexts for n attributes.
Lwidth = Hwidth = lambda n: 3**n
def L(g,i):
    """recursively constructs L line for g; i = len(g)-1"""
    g1 = g&(2**i)  # bit i of g: is attribute i present?
    if i:
        n = Lwidth(i)  # width of the sub-block for i attributes
        Ln = L(g,i-1)
        if g1:
            # attribute present: three copies of the smaller L block
            return Ln<<(2*n) | Ln<<n | Ln
        else:
            # attribute absent: all-ones block, then two copies of the smaller L block
            return int('1'*n,2)<<(2*n) | Ln<<n | Ln
    else:
        if g1:
            return int('000',2)
        else:
            return int('100',2)
def H(g,i):
    """recursively constructs H line for g; i = len(g)-1"""
    g1 = g&(2**i)  # bit i of g: is attribute i present?
    if i:
        n = Hwidth(i)  # width of the sub-block for i attributes
        i=i-1
        Hn = H(g,i)
        if g1:
            # attribute present: three copies of the smaller H block
            return Hn<<(2*n) | Hn<<n | Hn
        else:
            # attribute absent: all-ones block, an L block, then an H block
            return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
    else:
        if g1:
            return int('111',2)
        else:
            return int('101',2)
def UV_H(Hg,gw):
    """
    Constructs implications and intents based on H
    gw = g width
    Hg = H(g), g is the binary coding of the attribute set
    UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
    K = all closed sets
    """
    lefts = set()  # premises already seen (codes with the V digits zeroed)
    K = []
    UV = []
    p = Hwidth(gw)
    pp = 2**p
    # Scan the H bit field from the highest column position downwards.
    while p:
        pp = pp>>1
        p = p-1
        if Hg&pp:
            y = istr(p,3,gw)  # ternary code of this column position
            yy = y.replace('1','0')  # premise-only key: drop the V digits
            if yy not in lefts:
                if y.find('1') == -1:#y∈{0,2}^n
                    K.append(y)
                else:
                    UV.append(y)
                lefts.add(yy)
    return (UV,K)
Awidth = lambda n: 2**n  # row width of the A context for n attributes
def A(g,i):
    """recursively constructs A line for g; i = len(g)-1"""
    g1 = g&(2**i)  # bit i of g: is attribute i present?
    if i:
        n = Awidth(i)
        An = A(g,i-1)
        if g1:
            # attribute present: two copies of the smaller A block
            return An<<n | An
        else:
            # attribute absent: all-ones block, then the smaller A block
            return int('1'*n,2)<<n | An
    else:
        if g1:
            return int('00',2)
        else:
            return int('10',2)
Bwidth = lambda n:n*2**(n-1)  # row width of the B context for n attributes
def B(g,i):
    """recursively constructs B line for g; i = len(g)-1"""
    g1 = g&(2**i)  # bit i of g: is attribute i present?
    if i:
        nA = Awidth(i)
        nB = Bwidth(i)
        i=i-1
        Bn = B(g,i)
        if g1:
            # attribute present: B block, all-ones A-width block, B block
            return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
        else:
            # attribute absent: all-ones B-width block, an A block, a B block
            return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
    else:
        if g1:
            return 1
        else:
            return 0
def A012(t,i):
    """Decode column position *t* of the A block into a {0,2} string over i+1 attributes."""
    digits = []
    pos = t
    level = i
    # Walk the attribute levels from i down to 0; at each level the left half
    # of the block (width 2**level == Awidth(level)) codes '0', the right '2'.
    while level >= 0:
        half = 1 << level
        if pos < half:
            digits.append("0")
        else:
            digits.append("2")
            pos -= half
        level -= 1
    return "".join(digits)
def B012(t,i):
    """
    Constructs ternary implication coding (0=not there, 2=U, 1=V)
    t is B column position
    i = |M|-1 to 0
    """
    if i == 0:
        return "1"
    a_cols = 1 << i            # Awidth(i) == 2**i
    b_cols = i << (i - 1)      # Bwidth(i) == i * 2**(i-1)
    if t < b_cols:
        return "0" + B012(t, i - 1)
    if t < b_cols + a_cols:
        return "1" + A012(t - b_cols, i - 1)
    return "2" + B012(t - b_cols - a_cols, i - 1)
def omega(imps):
    r"""
    Calculates a measure for the size of the implication basis: \sum |U||V|

    Accepted encodings of *imps*:
      - str "U->V", attributes comma- or space-separated: |U|*|V| (0 if unparsable)
      - list: sum of omega() over its entries
      - int bit field: number of set bits
      - v_Us_dict: sum over all premises (each conclusion has |V| = 1)
    """
    # Check plain stdlib types first so they never reach the project-specific
    # v_Us_dict test (which needs that class to be defined).
    if isinstance(imps,str):
        try:
            U,V = imps.split("->")
        except ValueError:  # not exactly one "->": not an implication string
            return 0
        Us = U.split(",") if "," in U else U.split()
        Vs = V.split(",") if "," in V else V.split()
        return len(Us)*len(Vs)
    if isinstance(imps,list):
        return sum([omega(x) for x in imps])
    if isinstance(imps,int):
        # popcount of the premise bit field
        return bin(imps)[2:].count('1')
    if isinstance(imps,v_Us_dict):
        return sum([omega(V) for U,V in imps.items()])#|V|=1
class v_Us_dict(defaultdict):
    """
    In an implication U→u, u is the significant component.
    U is coded as int.
    u is the bit column of the implication's conclusion.
    {u:[U1,U2,...]}
    """
    def __init__(self,Bg,gw):
        """
        returns the implications {v:Us} based on B
        v is the significant component
        Bg = B(g), g∈2^M
        gw = |M|, M is the set of all attributes
        """
        self.width = gw
        if isinstance(Bg,int):
            defaultdict.__init__(self,list)
            p = Bwidth(gw)
            pp = 2**p
            # scan the B bit field column by column, highest position first
            while p:
                pp = pp>>1
                p = p-1
                if Bg&pp:
                    uv = B012(p,gw-1)
                    #let's find minima regarding product order
                    #{v:[Umin1,Umin2,...]}
                    v = uv.find('1')#v=significant
                    u = uv[:v]+'0'+uv[v+1:]
                    u = int(u.replace('2','1'),2)
                    Umin_s = self[gw-v-1]#bit position from right
                    it = [i for i,U in enumerate(Umin_s) if U&u==u]
                    # NOTE(review): for/else -- the else branch always runs (the
                    # loop has no break), so u is appended after pruning its
                    # supersets; confirm u can never be a strict superset of an
                    # already stored minimum at this point.
                    for i in reversed(it):
                        del Umin_s[i]
                    else:
                        Umin_s.append(u)
        elif isinstance(Bg,list):
            defaultdict.__init__(self,list)
            # Bg is a list of (v, Us-list) pairs
            for k,v in Bg:
                assert isinstance(v,list)
                self[k] += v
        else:
            defaultdict.__init__(self,list,Bg)
    def __eq__(self, other):
        # equal iff same conclusions and, per conclusion, the same premise sets
        if len(self) != len(other):
            return False
        for v,U in self.items():
            if v not in other:
                return False
            Uo = other[v]
            if not set(Uo)==set(U):
                return False
        return True
    def Code012(self):
        """Yield each implication as a ternary string (2 = premise, 1 = conclusion)."""
        for v,Us in self.items():
            vleft = self.width - v - 1
            for u in Us:
                b = bin(u)[2:]
                w0 = self.width-len(b)
                c01 = '0'*w0+b
                c01 = c01.replace('1','2')
                c01 = c01[:vleft]+'1'+c01[vleft+1:]
                yield c01
    def __str__(self):
        return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
    def __len__(self):
        # number of implications, not number of distinct conclusions
        return sum((len(x) for x in self.values()))
    def flatten(self):
        """Yield all implications as (v, U) pairs."""
        for v,Us in self.items():
            for u in Us:
                yield (v,u)
    def __add__(self, other):
        """Union of two implication sets; *other* may also be a single (v, U) pair."""
        res = v_Us_dict([],self.width)
        if isinstance(other,tuple):
            other = {other[0]:[other[1]]}
        keys = set(self)|set(other)
        for v in keys:
            t = set()
            if v in self:
                t |= set(self[v])
            if v in other:
                t |= set(other[v])
            if t:
                res[v] = list(t)
        return res
    def __sub__(self, other):
        """Implications of *self* that are not in *other*."""
        res = v_Us_dict([],self.width)
        for v,U in self.items():
            # NOTE(review): other[v] on a defaultdict inserts an empty list for a
            # missing v, i.e. it mutates *other* -- confirm this is acceptable.
            r = list(set(U) - set(other[v]))
            if r:
                res[v] = r
        return res
    def __mul__(self, other):
        """
        This is the o operation in [1]_, that represents the 3rd Armstrong rule.
        It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
        """
        res = v_Us_dict([],self.width)
        if id(self)==id(other):
            # self*self: visit each unordered pair once, via a tee'd iterator
            s = iter(self.items())
            try:
                while True:
                    v1, us1 = next(s)
                    vv1 = 2**v1
                    s, ss = tee(s)#remember s and iterate with copy ss
                    try:
                        while True:
                            v2, us2 = next(ss)
                            vv2 = 2**v2
                            for u1 in us1:
                                for u2 in us2:
                                    if vv2&u1 and not vv1&u2:
                                        res[v1].append((u1|u2)&~vv2)
                                    elif vv1&u2 and not vv2&u1:
                                        res[v2].append((u1|u2)&~vv1)
                    except StopIteration:
                        pass
            except StopIteration:
                pass
        else:
            for v1,us1 in self.items():
                vv1 = 2**v1
                for v2,us2 in other.items():
                    vv2 = 2**v2
                    if v1 != v2:
                        for u1 in us1:
                            for u2 in us2:
                                if vv2&u1 and not vv1&u2:
                                    res[v1].append((u1|u2)&~vv2)
                                elif vv1&u2 and not vv2&u1:
                                    res[v2].append((u1|u2)&~vv1)
        for v,U in res.items():
            res[v] = list(set(U))#remove duplicates
        return res
    def __invert__(self):
        """
        U->v generated from L=∪ min L_i via the 3rd Armstrong rule
        Note, that this can become bigger than L.
        """
        Y = self
        Yn = Y*Y
        # iterate the closure until a fixed point is reached
        while True:
            YnplusY = Yn+Y
            Yg = Yn*YnplusY
            #YgenNotInL = Yg - L
            #YgenInL = Yg - YgenNotInL
            #Yn1 = Yn + YgenInL
            Yn1 = Yn + Yg
            if Yn1 == Yn:
                break
            Yn = Yn1
        return Yn
    def __pow__(self, other):
        """
        'other' is a (v,u) couple
        generates U->v involving 'other'
        #other = (0,64)
        """
        Y = self
        Z = v_Us_dict({other[0]:[other[1]]},self.width)
        Yn = Y*Z
        # fixed-point iteration, seeded with the single implication Z
        while True:
            YnplusY = Yn+Y
            Yg = Z*YnplusY
            #this does not work for test_basis1
            #YnplusZ = Yn+Z
            #Yg = YnplusZ*YnplusY
            Yn1 = Yn + Yg
            if Yn1 == Yn:
                break
            Yn = Yn1
        return Yn
    def koenig(self):
        """
        This needs to be L = contextg.v_Us_B()
        """
        L = self
        Y = L - (L*L)
        # grow Y until every implication of L follows from Y (take empty)
        while True:
            Ybar = Y + ~Y
            take = L - Ybar
            if not len(take):
                return Y
            else:
                ZZ = list(set(take)-set(Y))#use significant which is not in Y
                if len(ZZ) > 0:
                    v = ZZ[0]
                    z=(v,take[v][0])
                else:
                    z = next(take.flatten())
                Yzgen = Y**z
                Y = (Y - Yzgen) + z #Yn+1
        #Lost = Ybar - (Y + ~Y)
        #assert len(Lost) == 0
def respects(g,imp):
    """
    Check whether attribute set *g* respects implication *imp*.

    g   -- int bit field of attributes (or a binary string)
    imp -- ternary coded string (or int): digit 1 = in V, 2 = in U, 0 = absent;
           g and imp cover the same number of attribute positions
    """
    if isinstance(g, str):
        g = int(g, 2)
    if isinstance(imp, int):
        imp = istr(imp, 3, g.bit_length())
    # Premise mask: only the '2' digits of the code.
    premise = int(imp.replace('1', '0').replace('2', '1'), 2)
    # Mask of every digit mentioned (U together with V).
    mentioned = int(imp.replace('1', '2').replace('2', '1'), 2)
    has_premise = premise & g == premise
    has_all = mentioned & g == mentioned
    # g respects the implication iff it misses part of U, or contains U and V.
    return has_all or not has_premise
class Context(list):
    """
    A formal context: a list of rows, each row an int bit field of width
    ``self.width``; ``mapping`` associates bit positions with attribute objects.
    """
    def __init__(self, *args, **kwargs):
        """Context can be initialized with
        - a rectangular text block of 0s and 1s
        - a list of ints and a "width" keyword argument.
        A "mapping" keyword argument as list associates the bits with objects of any kind.
        """
        if isinstance(args[0],str):
            lines = [s.strip() for s in args[0].splitlines() if s.strip()]
            linelens = [len(tt) for tt in lines]
            self.width = linelens[0]
            samelen = linelens.count(linelens[0])==len(linelens)
            assert samelen, "Context needs all lines to be of same number of 0s and 1s"
            super().__init__([int(s,2) for s in lines])
        else:
            super().__init__(*args)
            self.width = kwargs['width']
        try:
            self.mapping = kwargs['mapping']
        except KeyError:  # was a bare except; only a missing kwarg is expected here
            self.mapping = list(range(self.width))
    def __add__(self, other):
        """Vertical concatenation: rows of *other* appended below."""
        c = Context(list.__add__(self,other),width=self.width)
        return c
    def __sub__(self, other):
        """Horizontal concatenation: columns of *other* appended to the right."""
        c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
        return c
    def column(self, i):
        """Column *i* (counted from the right) as a 0/1 string."""
        return ''.join([str(digitat2(r,i)) for r in self])
    def row(self, i):
        """Row *i* as a 0/1 string; rows past the end read as all zeros."""
        try:
            r = istr(self[i],2,self.width)
        except IndexError:
            r = '0'*self.width
        return r
    def __getitem__(self,xy):
        """c[row] -> int bit field; c[row, bit] -> single bit (bit counted from the right)."""
        if isinstance(xy,tuple):
            return digitat2(list.__getitem__(self,xy[0]),xy[1])
        else:
            return list.__getitem__(self,xy)
    def transpose(self):
        # NOTE(review): the transposed context gets a fresh default mapping;
        # a custom ``mapping`` is not carried over -- confirm intended.
        cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
        return Context(cs)
    def __str__(self):
        rs='\n'.join([self.row(i) for i in range(len(self))])
        return rs
    def size(self):
        """(number of columns, number of rows)"""
        return self.width, len(self)
    def UV_H(self):
        """
        UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
        K = all closed sets
        This is UV_H function, but the returned implications are respected by all attribute sets of this context.
        This corresponds to a multiplication or & operation of the Hg sets.
        """
        h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
        return UV_H(h, self.width)
    def UV_B(self):
        """
        returns UV = all respected U->Ux in ternary coding (1=V,2=U)
        """
        h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
        return UV_B(h, self.width)
    def v_Us_B(self):
        """
        returns the implications {v:Us} based on B
        This is L=∪ min L_i in [1]_
        """
        Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
        gw = self.width
        return v_Us_dict(Bg, gw)
    def respects(self, implications):
        """True iff every row of the context respects every given implication."""
        if isinstance(implications,v_Us_dict):
            implications = implications.Code012()
        for g in self:
            for i in implications:
                if not respects(g,i):
                    return False
        return True
    def __call__(self, intOrCode012, right = None):
        """
        mapping from bits to attributes using mapping (which defaults to ints)
        - right, if available, is the conclusion of the implication; used if intOrCode012 is int
        """
        if isinstance(intOrCode012,v_Us_dict):
            return frozenset(self(x,right=i) for i,x in intOrCode012.items())
        if isinstance(intOrCode012,list):
            return frozenset(self(x,right=right) for x in intOrCode012)
        if isinstance(intOrCode012,int):
            res = []
            pp = 1
            for pos in range(self.width):
                if intOrCode012&pp:
                    res.append(self.mapping[-pos-1])
                pp = pp*2
            if right is not None:  # identity-safe None test (was `right != None`)
                return (frozenset(res),frozenset([self.mapping[-right-1]]))
            else:
                return frozenset(res)
        if isinstance(intOrCode012,str):
            # local names chosen so the `right` parameter is no longer shadowed
            premise_attrs = []
            conclusion_attrs = []
            for pos in range(self.width):
                if intOrCode012[pos] == '2':
                    premise_attrs.append(self.mapping[pos])
                elif intOrCode012[pos] == '1':
                    conclusion_attrs.append(self.mapping[pos])
            if premise_attrs:
                if conclusion_attrs:
                    return (frozenset(premise_attrs),frozenset(conclusion_attrs))
                else:
                    return frozenset(premise_attrs)
            else:
                return frozenset(conclusion_attrs)
C = Context  # short alias used by the context constructors below
def C1(w,h):
    """Context of h rows, each of w ones (all-true context)."""
    return Context('\n'.join(['1'*w]*h))
def C0(w,h):
    """Context of h rows, each of w zeros (all-false context)."""
    return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
    """constructs the LL context"""
    if (n<=0):return Context('0')
    else:
        LL1=LL(n-1)
        # '-' concatenates horizontally, '+' vertically (Context.__sub__/__add__)
        r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
        r2 = LL1 - LL1 - LL1
        return r1 + r2
def HH(n):
    """constructs the HH context"""
    if (n<=0):return Context('1')
    else:
        LL1=LL(n-1)
        HH1=HH(n-1)
        # '-' concatenates horizontally, '+' vertically (Context.__sub__/__add__)
        r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
        r2 = HH1 - HH1 - HH1
        return r1 + r2
def AA(n):
    """constructs the AA context"""
    if (n<=1):return Context('10\n00')
    else:
        AA1=AA(n-1)
        # '-' concatenates horizontally, '+' vertically (Context.__sub__/__add__)
        r1 = C1(2**(n-1),2**(n-1)) - AA1
        r2 = AA1 - AA1
        return r1 + r2
def BB(n):
    """constructs the BB context"""
    if (n<=1):return Context('0\n1')
    else:
        BB1=BB(n-1)
        AA1=AA(n-1)
        # '-' concatenates horizontally, '+' vertically (Context.__sub__/__add__)
        r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
        r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
        return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
omega
|
python
|
def omega(imps):
if isinstance(imps,v_Us_dict):
return sum([omega(V) for U,V in imps.items()])#|V|=1
if isinstance(imps,list):
return sum([omega(x) for x in imps])
if isinstance(imps,str):
#imps = due[-1]
try:
U,V = imps.split("->")
Us = U.split(",") if "," in U else U.split()
Vs = V.split(",") if "," in V else V.split()
res = len(Us)*len(Vs)
return res
except:
return 0
if isinstance(imps,int):
b=bin(imps)[2:]
res = len([x for x in b if x=='1'])
return res
|
Calculates a measure for the size of the implication basis: \sum |U||V|
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L170-L191
| null |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i,b,w,c="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((w<=0 and i==0) and " ") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
vertically2 = vertically = lambda K1,K2: K1+K2
Lwidth = Hwidth = lambda n: 3**n
def L(g,i):
"""recursively constructs L line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Lwidth(i)
Ln = L(g,i-1)
if g1:
return Ln<<(2*n) | Ln<<n | Ln
else:
return int('1'*n,2)<<(2*n) | Ln<<n | Ln
else:
if g1:
return int('000',2)
else:
return int('100',2)
def H(g,i):
"""recursively constructs H line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Hwidth(i)
i=i-1
Hn = H(g,i)
if g1:
return Hn<<(2*n) | Hn<<n | Hn
else:
return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
else:
if g1:
return int('111',2)
else:
return int('101',2)
def UV_H(Hg,gw):
"""
Constructs implications and intents based on H
gw = g width
Hg = H(g), g is the binary coding of the attribute set
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
"""
lefts = set()
K = []
UV = []
p = Hwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Hg&pp:
y = istr(p,3,gw)
yy = y.replace('1','0')
if yy not in lefts:
if y.find('1') == -1:#y∈{0,2}^n
K.append(y)
else:
UV.append(y)
lefts.add(yy)
return (UV,K)
Awidth = lambda n: 2**n
def A(g,i):
"""recursively constructs A line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2)
Bwidth = lambda n:n*2**(n-1)
def B(g,i):
"""recursively constructs B line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
nA = Awidth(i)
nB = Bwidth(i)
i=i-1
Bn = B(g,i)
if g1:
return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
else:
return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
else:
if g1:
return 1
else:
return 0
def A012(t,i):
if i<0:
return ""
nA = Awidth(i)
if t < nA:
return "0"+A012(t,i-1)
else:
return "2"+A012(t-nA,i-1)
def B012(t,i):
"""
Constructs ternary implication coding (0=not there, 2=U, 1=V)
t is B column position
i = |M|-1 to 0
"""
if not i:
return "1"
nA = Awidth(i)
nB = Bwidth(i)
nBB = nB + nA
if t < nB:
return "0"+B012(t,i-1)
elif t < nBB:
return "1"+A012(t-nB,i-1)
else:
return "2"+B012(t-nBB,i-1)
def UV_B(Bg,gw):
    """
    returns the implications UV based on B
    Bg = B(g), g∈2^M
    gw = |M|, M is the set of all attributes
    """
    UV = []
    p = Bwidth(gw)
    pp = 2**p
    # Scan the B bit field from the highest column downwards; every set bit
    # corresponds to one respected implication, decoded via B012.
    while p:
        pp = pp>>1
        p = p-1
        if Bg&pp:
            uv = B012(p,gw-1)
            UV.append(uv)
    return UV
class v_Us_dict(defaultdict):
"""
In an implication U→u, u is the significant component.
U is coded as int.
u is the bit column of the implication's conclusion.
{u:[U1,U2,...]}
"""
def __init__(self,Bg,gw):
"""
returns the implications {v:Us} based on B
v is the significant component
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
self.width = gw
if isinstance(Bg,int):
defaultdict.__init__(self,list)
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
#let's find minima regarding product order
#{v:[Umin1,Umin2,...]}
v = uv.find('1')#v=significant
u = uv[:v]+'0'+uv[v+1:]
u = int(u.replace('2','1'),2)
Umin_s = self[gw-v-1]#bit position from right
it = [i for i,U in enumerate(Umin_s) if U&u==u]
for i in reversed(it):
del Umin_s[i]
else:
Umin_s.append(u)
elif isinstance(Bg,list):
defaultdict.__init__(self,list)
for k,v in Bg:
assert isinstance(v,list)
self[k] += v
else:
defaultdict.__init__(self,list,Bg)
def __eq__(self, other):
if len(self) != len(other):
return False
for v,U in self.items():
if v not in other:
return False
Uo = other[v]
if not set(Uo)==set(U):
return False
return True
def Code012(self):
for v,Us in self.items():
vleft = self.width - v - 1
for u in Us:
b = bin(u)[2:]
w0 = self.width-len(b)
c01 = '0'*w0+b
c01 = c01.replace('1','2')
c01 = c01[:vleft]+'1'+c01[vleft+1:]
yield c01
def __str__(self):
return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
def __len__(self):
return sum((len(x) for x in self.values()))
def flatten(self):
for v,Us in self.items():
for u in Us:
yield (v,u)
def __add__(self, other):
res = v_Us_dict([],self.width)
if isinstance(other,tuple):
other = {other[0]:[other[1]]}
keys = set(self)|set(other)
for v in keys:
t = set()
if v in self:
t |= set(self[v])
if v in other:
t |= set(other[v])
if t:
res[v] = list(t)
return res
def __sub__(self, other):
res = v_Us_dict([],self.width)
for v,U in self.items():
r = list(set(U) - set(other[v]))
if r:
res[v] = r
return res
def __mul__(self, other):
"""
This is the o operation in [1]_, that represents the 3rd Armstrong rule.
It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
"""
res = v_Us_dict([],self.width)
if id(self)==id(other):
s = iter(self.items())
try:
while True:
v1, us1 = next(s)
vv1 = 2**v1
s, ss = tee(s)#remember s and iterate with copy ss
try:
while True:
v2, us2 = next(ss)
vv2 = 2**v2
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
except StopIteration:
pass
except StopIteration:
pass
else:
for v1,us1 in self.items():
vv1 = 2**v1
for v2,us2 in other.items():
vv2 = 2**v2
if v1 != v2:
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
for v,U in res.items():
res[v] = list(set(U))#remove duplicates
return res
def __invert__(self):
"""
U->v generated from L=∪ min L_i via the 3rd Armstrong rule
Note, that this can become bigger than L.
"""
Y = self
Yn = Y*Y
while True:
YnplusY = Yn+Y
Yg = Yn*YnplusY
#YgenNotInL = Yg - L
#YgenInL = Yg - YgenNotInL
#Yn1 = Yn + YgenInL
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def __pow__(self, other):
"""
'other' is a (v,u) couple
generates U->v involving 'other'
#other = (0,64)
"""
Y = self
Z = v_Us_dict({other[0]:[other[1]]},self.width)
Yn = Y*Z
while True:
YnplusY = Yn+Y
Yg = Z*YnplusY
#this does not work for test_basis1
#YnplusZ = Yn+Z
#Yg = YnplusZ*YnplusY
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def koenig(self):
"""
This needs to be L = contextg.v_Us_B()
"""
L = self
Y = L - (L*L)
while True:
Ybar = Y + ~Y
take = L - Ybar
if not len(take):
return Y
else:
ZZ = list(set(take)-set(Y))#use significant which is not in Y
if len(ZZ) > 0:
v = ZZ[0]
z=(v,take[v][0])
else:
z = next(take.flatten())
Yzgen = Y**z
Y = (Y - Yzgen) + z #Yn+1
#Lost = Ybar - (Y + ~Y)
#assert len(Lost) == 0
def respects(g,imp):
"""
g is an int, where each bit is an attribute
implication UV is ternary coded 1 = ∈V, 2 = ∈U, 0 otherwise
g and UV have the same number of digits
"""
if isinstance(g,str):
g = int(g,2)
if isinstance(imp,int):
imp = istr(imp,3,g.bit_length())
V = int(imp.replace('1','2').replace('2','1'),2)
U = int(imp.replace('1','0').replace('2','1'),2)
ginU = U&g == U
ginV = V&g == V
return not ginU or ginV
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
C = Context
def C1(w,h):
return Context('\n'.join(['1'*w]*h))
def C0(w,h):
return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
"""constructs the LL context"""
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2
def HH(n):
"""constructs the HH context"""
if (n<=0):return Context('1')
else:
LL1=LL(n-1)
HH1=HH(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
r2 = HH1 - HH1 - HH1
return r1 + r2
def AA(n):
"""constructs the AA context"""
if (n<=1):return Context('10\n00')
else:
AA1=AA(n-1)
r1 = C1(2**(n-1),2**(n-1)) - AA1
r2 = AA1 - AA1
return r1 + r2
def BB(n):
"""constructs the BB context"""
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
respects
|
python
|
def respects(g,imp):
if isinstance(g,str):
g = int(g,2)
if isinstance(imp,int):
imp = istr(imp,3,g.bit_length())
V = int(imp.replace('1','2').replace('2','1'),2)
U = int(imp.replace('1','0').replace('2','1'),2)
ginU = U&g == U
ginV = V&g == V
return not ginU or ginV
|
g is an int, where each bit is an attribute
implication UV is ternary coded 1 = ∈V, 2 = ∈U, 0 otherwise
g and UV have the same number of digits
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L387-L401
|
[
"def istr(i,b,w,c=\"0123456789abcdefghijklmnopqrstuvwxyz\"):\n return ((w<=0 and i==0) and \" \") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i, b, w, c="0123456789abcdefghijklmnopqrstuvwxyz"):
    """Render integer i in base b, zero-padded on the left to width w.

    Digits are taken from the alphabet c.  Width w is a minimum: values
    needing more digits still print in full.  Mirrors the original
    recursive one-liner, including its sentinel: when both the remaining
    width and the value are exhausted it yields a single space, so
    istr(0, b, 0) == " ".
    """
    if w <= 0 and i == 0:
        return " "
    digits = []
    while i or w > 0:
        digits.append(c[i % b])
        i //= b
        w -= 1
    return "".join(reversed(digits))
# digit of i at position a (from the right) in base b, via string rendering
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
# bit of i at position a (from the right)
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
# row-wise horizontal concatenation via base-b string rendering
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
# row-wise horizontal concatenation of bit fields: k1 shifted left by w2
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
# vertical concatenation is plain list concatenation of rows
vertically2 = vertically = lambda K1,K2: K1+K2
# L and H lines for n attributes are 3**n columns wide
Lwidth = Hwidth = lambda n: 3**n
def L(g, i):
    """recursively constructs L line for g; i = len(g)-1

    Returns an int bit field of 3**(i+1) columns, built from three
    thirds of 3**i columns each.
    """
    bit = (g >> i) & 1
    if i == 0:
        # base case: one attribute, three ternary columns
        return 0b000 if bit else 0b100
    width = 3 ** i  # Lwidth(i): column count of the sub-line
    sub = L(g, i - 1)
    # bit set: the sub-line repeats in all three thirds;
    # bit clear: the top third is all ones instead
    top = sub if bit else (1 << width) - 1
    return (top << (2 * width)) | (sub << width) | sub
def H(g,i):
    """recursively constructs H line for g; i = len(g)-1

    Returns an int bit field of Hwidth(i+1) == 3**(i+1) columns.
    The recursion splits the line into three thirds of 3**i columns.
    """
    # test the i-th attribute bit of g
    g1 = g&(2**i)
    if i:
        n = Hwidth(i)
        i=i-1
        Hn = H(g,i)
        if g1:
            # attribute present: the sub-line repeats in all three thirds
            return Hn<<(2*n) | Hn<<n | Hn
        else:
            # attribute absent: all-ones third, then the L sub-line, then H
            return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
    else:
        if g1:
            return int('111',2)
        else:
            return int('101',2)
def UV_H(Hg,gw):
    """
    Constructs implications and intents based on H
    gw = g width
    Hg = H(g), g is the binary coding of the attribute set
    UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
    K = all closed sets

    Returns the pair (UV, K).
    """
    lefts = set()  # premise-only keys already seen, to deduplicate
    K = []         # closed sets: ternary strings without any '1'
    UV = []        # implications: ternary strings containing a '1'
    p = Hwidth(gw)
    pp = 2**p
    # scan the H bit field from the highest column down to column 0
    while p:
        pp = pp>>1
        p = p-1
        if Hg&pp:
            # ternary coding of column position p
            y = istr(p,3,gw)
            yy = y.replace('1','0')  # key with conclusion digits blanked
            if yy not in lefts:
                if y.find('1') == -1:#y∈{0,2}^n
                    K.append(y)
                else:
                    UV.append(y)
                lefts.add(yy)
    return (UV,K)
# an A line for n attributes is 2**n columns wide
Awidth = lambda n: 2**n
def A(g, i):
    """recursively constructs A line for g; i = len(g)-1

    Returns an int bit field of 2**(i+1) columns, built from two
    halves of 2**i columns each.
    """
    bit = (g >> i) & 1
    if i == 0:
        return 0b00 if bit else 0b10
    width = 2 ** i  # Awidth(i): column count of each half
    sub = A(g, i - 1)
    if bit:
        # attribute present: the sub-line repeats in both halves
        return (sub << width) | sub
    # attribute absent: the upper half is all ones
    return (((1 << width) - 1) << width) | sub
# width of a B line for n attributes
# NOTE(review): Bwidth(0) evaluates to 0*2**-1 == 0.0 (a float);
# callers here only use n >= 1 — confirm
Bwidth = lambda n:n*2**(n-1)
def B(g,i):
    """recursively constructs B line for g; i = len(g)-1

    Returns an int bit field of Bwidth(i+1) columns, composed of a
    B block, an A block and another B block of the next lower order.
    """
    # test the i-th attribute bit of g
    g1 = g&(2**i)
    if i:
        nA = Awidth(i)
        nB = Bwidth(i)
        i=i-1
        Bn = B(g,i)
        if g1:
            # attribute present: B sub-line | all-ones A block | B sub-line
            return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
        else:
            # attribute absent: all-ones B block | A sub-line | B sub-line
            return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
    else:
        if g1:
            return 1
        else:
            return 0
def A012(t, i):
    """Decode column position t of an A line into a 0/2 string.

    Emits one digit per attribute position from i down to 0: '2' when
    the column falls in the upper half of the current block (attribute
    in the premise), '0' otherwise.  Returns "" for i < 0.
    """
    out = []
    while i >= 0:
        half = 2 ** i  # Awidth(i)
        if t < half:
            out.append("0")
        else:
            out.append("2")
            t -= half
        i -= 1
    return "".join(out)
def B012(t,i):
    """
    Constructs ternary implication coding (0=not there, 2=U, 1=V)
    t is B column position
    i = |M|-1 to 0
    """
    if not i:
        # single attribute left: it is the conclusion
        return "1"
    nA = Awidth(i)
    nB = Bwidth(i)
    nBB = nB + nA
    if t < nB:
        # column in the low-order B block: attribute absent
        return "0"+B012(t,i-1)
    elif t < nBB:
        # column in the A block: attribute is the conclusion
        return "1"+A012(t-nB,i-1)
    else:
        # column in the high-order B block: attribute in the premise
        return "2"+B012(t-nBB,i-1)
def UV_B(Bg,gw):
    """
    returns the implications UV based on B
    Bg = B(g), g∈2^M
    gw = |M|, M is the set of all attributes

    Returns a list of ternary strings (1=V, 2=U), one per set bit.
    """
    UV = []
    p = Bwidth(gw)
    pp = 2**p
    # scan the B bit field from the highest column down to column 0
    while p:
        pp = pp>>1
        p = p-1
        if Bg&pp:
            # decode each set column into its ternary implication coding
            uv = B012(p,gw-1)
            UV.append(uv)
    return UV
def omega(imps):
    r"""Measure for the size of an implication basis: \sum |U||V|.

    Accepts:
    - str: one implication "U->V" (items comma- or whitespace-separated);
      returns |U|*|V|, or 0 if the string is not exactly one "U->V".
    - int: a premise bit mask; returns its popcount.
    - list: any mix of the above; returns the sum.
    - v_Us_dict: sums over its value lists (each conclusion has |V| = 1).
    Falls through (returns None) for unsupported types, as the
    original did.
    """
    # stdlib-representable types are checked first; the branches are
    # mutually exclusive by type, so the order change is behavior-neutral
    if isinstance(imps, str):
        try:
            U, V = imps.split("->")
        except ValueError:
            # not exactly one "->" separator: not an implication string
            return 0
        Us = U.split(",") if "," in U else U.split()
        Vs = V.split(",") if "," in V else V.split()
        return len(Us) * len(Vs)
    if isinstance(imps, int):
        # popcount of the bit mask ('0b' prefix contains no '1')
        return bin(imps).count('1')
    if isinstance(imps, list):
        return sum(omega(x) for x in imps)
    if isinstance(imps, v_Us_dict):
        return sum(omega(V) for U, V in imps.items())  # |V| = 1 per entry
class v_Us_dict(defaultdict):
    """
    In an implication U→u, u is the significant component.
    U is coded as int.
    u is the bit column of the implication's conclusion.
    {u:[U1,U2,...]}
    """
    def __init__(self,Bg,gw):
        """
        returns the implications {v:Us} based on B
        v is the significant component
        Bg = B(g), g∈2^M
        gw = |M|, M is the set of all attributes

        Bg may be an int (a B bit field to decode), a list of
        (v, Us-list) pairs, or any mapping accepted by defaultdict.
        """
        # number of attributes; used to interpret bit positions
        self.width = gw
        if isinstance(Bg,int):
            defaultdict.__init__(self,list)
            p = Bwidth(gw)
            pp = 2**p
            # scan the B bit field from the highest column down to 0
            while p:
                pp = pp>>1
                p = p-1
                if Bg&pp:
                    uv = B012(p,gw-1)
                    #let's find minima regarding product order
                    #{v:[Umin1,Umin2,...]}
                    v = uv.find('1')#v=significant
                    # premise mask: blank the conclusion digit, keep the '2's
                    u = uv[:v]+'0'+uv[v+1:]
                    u = int(u.replace('2','1'),2)
                    Umin_s = self[gw-v-1]#bit position from right
                    # indices of stored premises that are supersets of u
                    it = [i for i,U in enumerate(Umin_s) if U&u==u]
                    for i in reversed(it):
                        del Umin_s[i]
                    else:
                        # NOTE(review): this for-else always runs (the loop
                        # has no break), so u is appended unconditionally
                        # after deleting its supersets — confirm intended
                        Umin_s.append(u)
        elif isinstance(Bg,list):
            defaultdict.__init__(self,list)
            for k,v in Bg:
                assert isinstance(v,list)
                self[k] += v
        else:
            defaultdict.__init__(self,list,Bg)
    def __eq__(self, other):
        """Order-insensitive equality of the {v: Us} mappings."""
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable; nothing visible here hashes instances — confirm
        if len(self) != len(other):
            return False
        for v,U in self.items():
            if v not in other:
                return False
            Uo = other[v]
            if not set(Uo)==set(U):
                return False
        return True
    def Code012(self):
        """Yield each implication as a ternary string ('2'=U, '1'=v)."""
        for v,Us in self.items():
            # column of v counted from the left edge of the string
            vleft = self.width - v - 1
            for u in Us:
                b = bin(u)[2:]
                w0 = self.width-len(b)
                c01 = '0'*w0+b
                c01 = c01.replace('1','2')
                c01 = c01[:vleft]+'1'+c01[vleft+1:]
                yield c01
    def __str__(self):
        """Like defaultdict's repr, with the class name substituted."""
        return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
    def __len__(self):
        """Total number of implications (not the number of keys)."""
        return sum((len(x) for x in self.values()))
    def flatten(self):
        """Yield every implication as a (v, U) pair."""
        for v,Us in self.items():
            for u in Us:
                yield (v,u)
    def __add__(self, other):
        """Union of two implication sets; other may also be a (v, U) pair."""
        res = v_Us_dict([],self.width)
        if isinstance(other,tuple):
            other = {other[0]:[other[1]]}
        keys = set(self)|set(other)
        for v in keys:
            t = set()
            if v in self:
                t |= set(self[v])
            if v in other:
                t |= set(other[v])
            if t:
                res[v] = list(t)
        return res
    def __sub__(self, other):
        """Implications of self that are not in other."""
        res = v_Us_dict([],self.width)
        for v,U in self.items():
            r = list(set(U) - set(other[v]))
            if r:
                res[v] = r
        return res
    def __mul__(self, other):
        """
        This is the o operation in [1]_, that represents the 3rd Armstrong rule.
        It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
        """
        res = v_Us_dict([],self.width)
        if id(self)==id(other):
            # self-product: visit each unordered pair of distinct keys once
            s = iter(self.items())
            try:
                while True:
                    v1, us1 = next(s)
                    vv1 = 2**v1
                    s, ss = tee(s)#remember s and iterate with copy ss
                    try:
                        while True:
                            v2, us2 = next(ss)
                            vv2 = 2**v2
                            for u1 in us1:
                                for u2 in us2:
                                    if vv2&u1 and not vv1&u2:
                                        res[v1].append((u1|u2)&~vv2)
                                    elif vv1&u2 and not vv2&u1:
                                        res[v2].append((u1|u2)&~vv1)
                    except StopIteration:
                        pass
            except StopIteration:
                pass
        else:
            for v1,us1 in self.items():
                vv1 = 2**v1
                for v2,us2 in other.items():
                    vv2 = 2**v2
                    if v1 != v2:
                        for u1 in us1:
                            for u2 in us2:
                                if vv2&u1 and not vv1&u2:
                                    res[v1].append((u1|u2)&~vv2)
                                elif vv1&u2 and not vv2&u1:
                                    res[v2].append((u1|u2)&~vv1)
        for v,U in res.items():
            res[v] = list(set(U))#remove duplicates
        return res
    def __invert__(self):
        """
        U->v generated from L=∪ min L_i via the 3rd Armstrong rule
        Note, that this can become bigger than L.
        """
        Y = self
        Yn = Y*Y
        # fixed-point iteration: add newly generated implications
        # until nothing changes
        while True:
            YnplusY = Yn+Y
            Yg = Yn*YnplusY
            #YgenNotInL = Yg - L
            #YgenInL = Yg - YgenNotInL
            #Yn1 = Yn + YgenInL
            Yn1 = Yn + Yg
            if Yn1 == Yn:
                break
            Yn = Yn1
        return Yn
    def __pow__(self, other):
        """
        'other' is a (v,u) couple
        generates U->v involving 'other'
        #other = (0,64)
        """
        Y = self
        Z = v_Us_dict({other[0]:[other[1]]},self.width)
        Yn = Y*Z
        # fixed-point iteration, seeded with products against Z only
        while True:
            YnplusY = Yn+Y
            Yg = Z*YnplusY
            #this does not work for test_basis1
            #YnplusZ = Yn+Z
            #Yg = YnplusZ*YnplusY
            Yn1 = Yn + Yg
            if Yn1 == Yn:
                break
            Yn = Yn1
        return Yn
    def koenig(self):
        """
        This needs to be L = contextg.v_Us_B()

        Repeatedly picks an implication z of L not yet covered by
        Y + ~Y, removes what Y generates via z, and adds z itself,
        until L - (Y + ~Y) is empty; then returns Y.
        """
        L = self
        Y = L - (L*L)
        while True:
            Ybar = Y + ~Y
            take = L - Ybar
            if not len(take):
                return Y
            else:
                ZZ = list(set(take)-set(Y))#use significant which is not in Y
                if len(ZZ) > 0:
                    v = ZZ[0]
                    z=(v,take[v][0])
                else:
                    z = next(take.flatten())
                Yzgen = Y**z
                Y = (Y - Yzgen) + z #Yn+1
        #Lost = Ybar - (Y + ~Y)
        #assert len(Lost) == 0
class Context(list):
    """A formal context: a list of row ints, each row a bit field of
    self.width attribute columns."""
    def __init__(self, *args, **kwargs):
        """Context can be initialized with
        - a rectangular text block of 0s and 1s
        - a list of ints and a "width" keyword argument.
        A "mapping" keyword argument as list associates the bits with objects of any kind.
        """
        if isinstance(args[0],str):
            lines = [s.strip() for s in args[0].splitlines() if s.strip()]
            linelens = [len(tt) for tt in lines]
            self.width = linelens[0]
            samelen = linelens.count(linelens[0])==len(linelens)
            assert samelen, "Context needs all lines to be of same number of 0s and 1s"
            super().__init__([int(s,2) for s in lines])
        else:
            super().__init__(*args)
            self.width = kwargs['width']
        # NOTE(review): bare except — a missing 'mapping' key raises
        # KeyError; narrowing to KeyError would be safer
        try:
            self.mapping = kwargs['mapping']
        except:
            self.mapping = [i for i in range(self.width)]
    def __add__(self, other):
        """Stack two contexts vertically (rows of other appended)."""
        c = Context(list.__add__(self,other),width=self.width)
        return c
    def __sub__(self, other):
        """Concatenate two contexts horizontally (self left, other right)."""
        c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
        return c
    def column(self, i):
        """from right"""
        return ''.join([str(digitat2(r,i)) for r in self])
    def row(self, i):
        """Row i as a 0/1 string; all zeros for an out-of-range index."""
        try:
            r = istr(self[i],2,self.width)
        except IndexError:
            r = '0'*self.width
        return r
    def __getitem__(self,xy):
        """Cell bit for a (row, bit-position) tuple, else the row int."""
        if isinstance(xy,tuple):
            return digitat2(list.__getitem__(self,xy[0]),xy[1])
        else:
            return list.__getitem__(self,xy)
    def transpose(self):
        """Return the transposed context (rows and columns swapped)."""
        cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
        return Context(cs)
    def __str__(self):
        """Render the context as its rectangular 0/1 text block."""
        rs='\n'.join([self.row(i) for i in range(len(self))])
        return rs
    def size(self):
        """Return (width, height)."""
        return self.width, len(self)
    def UV_H(self):
        """
        UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
        K = all closed sets
        This is UV_H function, but the returned implications are respected by all attribute sets of this context.
        This corresponds to a multiplication or & operation of the Hg sets.
        """
        h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
        return UV_H(h, self.width)
    def UV_B(self):
        """
        returns UV = all respected U->Ux in ternary coding (1=V,2=U)
        """
        h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
        return UV_B(h, self.width)
    def v_Us_B(self):
        """
        returns the implications {v:Us} based on B
        This is L=∪ min L_i in [1]_
        """
        Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
        gw = self.width
        return v_Us_dict(Bg, gw)
    def respects(self, implications):
        """True iff every row respects every implication.

        NOTE(review): relies on a module-level respects(g, imp)
        helper — confirm it is in scope in this module.
        """
        if isinstance(implications,v_Us_dict):
            implications = implications.Code012()
        for g in self:
            for i in implications:
                if not respects(g,i):
                    return False
        return True
    def __call__(self, intOrCode012, right = None):
        """
        mapping from bits to attributes using mapping (which defaults to ints)
        - right, if available, is the conclusion of the implication; used if intOrCode012 is int
        """
        if isinstance(intOrCode012,v_Us_dict):
            return frozenset(self(x,right=i) for i,x in intOrCode012.items())
        if isinstance(intOrCode012,list):
            return frozenset(self(x,right=right) for x in intOrCode012)
        if isinstance(intOrCode012,int):
            # bit mask: collect mapped attributes, lowest bit first
            res = []
            pp = 1
            for pos in range(self.width):
                if intOrCode012&pp:
                    res.append(self.mapping[-pos-1])
                pp = pp*2
            if right != None:
                return (frozenset(res),frozenset([self.mapping[-right-1]]))
            else:
                return frozenset(res)
        if isinstance(intOrCode012,str):
            # ternary coding: '2' digits go to the premise, '1' to the conclusion
            left = []
            right = []
            for pos in range(self.width):
                if intOrCode012[pos] == '2':
                    left.append(self.mapping[pos])
                elif intOrCode012[pos] == '1':
                    right.append(self.mapping[pos])
            if left:
                if right:
                    return (frozenset(left),frozenset(right))
                else:
                    return frozenset(left)
            else:
                return frozenset(right)
# short alias for Context
C = Context
def C1(w,h):
    """Return an all-ones Context of width w and height h."""
    return Context('\n'.join(['1'*w]*h))
def C0(w,h):
    """Return an all-zeros Context of width w and height h."""
    return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
    """constructs the LL context

    Block layout ('-' concatenates horizontally, '+' stacks vertically):
    top row: all-ones block, LL(n-1), LL(n-1);
    bottom row: three LL(n-1) blocks.
    """
    if (n<=0):return Context('0')
    else:
        LL1=LL(n-1)
        r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
        r2 = LL1 - LL1 - LL1
        return r1 + r2
def HH(n):
    """constructs the HH context

    Block layout ('-' concatenates horizontally, '+' stacks vertically):
    top row: all-ones block, LL(n-1), HH(n-1);
    bottom row: three HH(n-1) blocks.
    """
    if (n<=0):return Context('1')
    else:
        LL1=LL(n-1)
        HH1=HH(n-1)
        r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
        r2 = HH1 - HH1 - HH1
        return r1 + r2
def AA(n):
    """Construct the AA context of order n.

    Base case is the fixed 2x2 context '10/00'; larger orders place an
    all-ones block above a copy of AA(n-1) on the left, and two AA(n-1)
    copies stacked on the right ('-' concatenates horizontally, '+'
    stacks vertically).
    """
    if n <= 1:
        return Context('10\n00')
    sub = AA(n - 1)
    size = 2 ** (n - 1)
    upper = C1(size, size) - sub
    lower = sub - sub
    return upper + lower
def BB(n):
    """constructs the BB context

    Block layout ('-' concatenates horizontally, '+' stacks vertically):
    top row: all-ones block, AA(n-1), BB(n-1);
    bottom row: BB(n-1), all-ones block, BB(n-1).
    """
    if (n<=1):return Context('0\n1')
    else:
        BB1=BB(n-1)
        AA1=AA(n-1)
        r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
        r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
        return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
LL
|
python
|
def LL(n):
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2
|
constructs the LL context
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L532-L539
|
[
"def C1(w,h):\n return Context('\\n'.join(['1'*w]*h))\n",
"def LL(n):\n \"\"\"constructs the LL context\"\"\"\n if (n<=0):return Context('0')\n else:\n LL1=LL(n-1)\n r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1\n r2 = LL1 - LL1 - LL1\n return r1 + r2\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i,b,w,c="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((w<=0 and i==0) and " ") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
vertically2 = vertically = lambda K1,K2: K1+K2
Lwidth = Hwidth = lambda n: 3**n
def L(g,i):
"""recursively constructs L line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Lwidth(i)
Ln = L(g,i-1)
if g1:
return Ln<<(2*n) | Ln<<n | Ln
else:
return int('1'*n,2)<<(2*n) | Ln<<n | Ln
else:
if g1:
return int('000',2)
else:
return int('100',2)
def H(g,i):
"""recursively constructs H line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Hwidth(i)
i=i-1
Hn = H(g,i)
if g1:
return Hn<<(2*n) | Hn<<n | Hn
else:
return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
else:
if g1:
return int('111',2)
else:
return int('101',2)
def UV_H(Hg,gw):
"""
Constructs implications and intents based on H
gw = g width
Hg = H(g), g is the binary coding of the attribute set
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
"""
lefts = set()
K = []
UV = []
p = Hwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Hg&pp:
y = istr(p,3,gw)
yy = y.replace('1','0')
if yy not in lefts:
if y.find('1') == -1:#y∈{0,2}^n
K.append(y)
else:
UV.append(y)
lefts.add(yy)
return (UV,K)
Awidth = lambda n: 2**n
def A(g,i):
"""recursively constructs A line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2)
Bwidth = lambda n:n*2**(n-1)
def B(g,i):
"""recursively constructs B line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
nA = Awidth(i)
nB = Bwidth(i)
i=i-1
Bn = B(g,i)
if g1:
return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
else:
return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
else:
if g1:
return 1
else:
return 0
def A012(t,i):
if i<0:
return ""
nA = Awidth(i)
if t < nA:
return "0"+A012(t,i-1)
else:
return "2"+A012(t-nA,i-1)
def B012(t,i):
"""
Constructs ternary implication coding (0=not there, 2=U, 1=V)
t is B column position
i = |M|-1 to 0
"""
if not i:
return "1"
nA = Awidth(i)
nB = Bwidth(i)
nBB = nB + nA
if t < nB:
return "0"+B012(t,i-1)
elif t < nBB:
return "1"+A012(t-nB,i-1)
else:
return "2"+B012(t-nBB,i-1)
def UV_B(Bg,gw):
"""
returns the implications UV based on B
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
UV = []
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
UV.append(uv)
return UV
def omega(imps):
"""
Calculates a measure for the size of the implication basis: \sum |U||V|
"""
if isinstance(imps,v_Us_dict):
return sum([omega(V) for U,V in imps.items()])#|V|=1
if isinstance(imps,list):
return sum([omega(x) for x in imps])
if isinstance(imps,str):
#imps = due[-1]
try:
U,V = imps.split("->")
Us = U.split(",") if "," in U else U.split()
Vs = V.split(",") if "," in V else V.split()
res = len(Us)*len(Vs)
return res
except:
return 0
if isinstance(imps,int):
b=bin(imps)[2:]
res = len([x for x in b if x=='1'])
return res
class v_Us_dict(defaultdict):
"""
In an implication U→u, u is the significant component.
U is coded as int.
u is the bit column of the implication's conclusion.
{u:[U1,U2,...]}
"""
def __init__(self,Bg,gw):
"""
returns the implications {v:Us} based on B
v is the significant component
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
self.width = gw
if isinstance(Bg,int):
defaultdict.__init__(self,list)
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
#let's find minima regarding product order
#{v:[Umin1,Umin2,...]}
v = uv.find('1')#v=significant
u = uv[:v]+'0'+uv[v+1:]
u = int(u.replace('2','1'),2)
Umin_s = self[gw-v-1]#bit position from right
it = [i for i,U in enumerate(Umin_s) if U&u==u]
for i in reversed(it):
del Umin_s[i]
else:
Umin_s.append(u)
elif isinstance(Bg,list):
defaultdict.__init__(self,list)
for k,v in Bg:
assert isinstance(v,list)
self[k] += v
else:
defaultdict.__init__(self,list,Bg)
def __eq__(self, other):
if len(self) != len(other):
return False
for v,U in self.items():
if v not in other:
return False
Uo = other[v]
if not set(Uo)==set(U):
return False
return True
def Code012(self):
for v,Us in self.items():
vleft = self.width - v - 1
for u in Us:
b = bin(u)[2:]
w0 = self.width-len(b)
c01 = '0'*w0+b
c01 = c01.replace('1','2')
c01 = c01[:vleft]+'1'+c01[vleft+1:]
yield c01
def __str__(self):
return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
def __len__(self):
return sum((len(x) for x in self.values()))
def flatten(self):
for v,Us in self.items():
for u in Us:
yield (v,u)
def __add__(self, other):
res = v_Us_dict([],self.width)
if isinstance(other,tuple):
other = {other[0]:[other[1]]}
keys = set(self)|set(other)
for v in keys:
t = set()
if v in self:
t |= set(self[v])
if v in other:
t |= set(other[v])
if t:
res[v] = list(t)
return res
def __sub__(self, other):
res = v_Us_dict([],self.width)
for v,U in self.items():
r = list(set(U) - set(other[v]))
if r:
res[v] = r
return res
def __mul__(self, other):
"""
This is the o operation in [1]_, that represents the 3rd Armstrong rule.
It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
"""
res = v_Us_dict([],self.width)
if id(self)==id(other):
s = iter(self.items())
try:
while True:
v1, us1 = next(s)
vv1 = 2**v1
s, ss = tee(s)#remember s and iterate with copy ss
try:
while True:
v2, us2 = next(ss)
vv2 = 2**v2
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
except StopIteration:
pass
except StopIteration:
pass
else:
for v1,us1 in self.items():
vv1 = 2**v1
for v2,us2 in other.items():
vv2 = 2**v2
if v1 != v2:
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
for v,U in res.items():
res[v] = list(set(U))#remove duplicates
return res
def __invert__(self):
"""
U->v generated from L=∪ min L_i via the 3rd Armstrong rule
Note, that this can become bigger than L.
"""
Y = self
Yn = Y*Y
while True:
YnplusY = Yn+Y
Yg = Yn*YnplusY
#YgenNotInL = Yg - L
#YgenInL = Yg - YgenNotInL
#Yn1 = Yn + YgenInL
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def __pow__(self, other):
"""
'other' is a (v,u) couple
generates U->v involving 'other'
#other = (0,64)
"""
Y = self
Z = v_Us_dict({other[0]:[other[1]]},self.width)
Yn = Y*Z
while True:
YnplusY = Yn+Y
Yg = Z*YnplusY
#this does not work for test_basis1
#YnplusZ = Yn+Z
#Yg = YnplusZ*YnplusY
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def koenig(self):
"""
This needs to be L = contextg.v_Us_B()
"""
L = self
Y = L - (L*L)
while True:
Ybar = Y + ~Y
take = L - Ybar
if not len(take):
return Y
else:
ZZ = list(set(take)-set(Y))#use significant which is not in Y
if len(ZZ) > 0:
v = ZZ[0]
z=(v,take[v][0])
else:
z = next(take.flatten())
Yzgen = Y**z
Y = (Y - Yzgen) + z #Yn+1
#Lost = Ybar - (Y + ~Y)
#assert len(Lost) == 0
def respects(g, imp):
    """Test whether attribute set g respects the implication imp.

    g: attribute set as an int bit mask (or a binary string).
    imp: implication in ternary coding, one digit per attribute:
         '1' = in V (conclusion), '2' = in U (premise), '0' = absent;
         given as a string, or as an int in base 3 (then g.bit_length()
         is taken as the width).
    Returns True unless the premise is contained in g while the
    conclusion is not.
    """
    if isinstance(g, str):
        g = int(g, 2)
    if isinstance(imp, int):
        imp = istr(imp, 3, g.bit_length())
    # conclusion mask: both '1' and '2' digits collapse to set bits
    conclusion = int(imp.replace('1', '2').replace('2', '1'), 2)
    # premise mask: only the '2' digits
    premise = int(imp.replace('1', '0').replace('2', '1'), 2)
    if premise & g == premise:
        return conclusion & g == conclusion
    return True
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
C = Context
def C1(w,h):
return Context('\n'.join(['1'*w]*h))
def C0(w,h):
return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def HH(n):
"""constructs the HH context"""
if (n<=0):return Context('1')
else:
LL1=LL(n-1)
HH1=HH(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
r2 = HH1 - HH1 - HH1
return r1 + r2
def AA(n):
"""constructs the AA context"""
if (n<=1):return Context('10\n00')
else:
AA1=AA(n-1)
r1 = C1(2**(n-1),2**(n-1)) - AA1
r2 = AA1 - AA1
return r1 + r2
def BB(n):
"""constructs the BB context"""
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
HH
|
python
|
def HH(n):
if (n<=0):return Context('1')
else:
LL1=LL(n-1)
HH1=HH(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
r2 = HH1 - HH1 - HH1
return r1 + r2
|
constructs the HH context
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L540-L548
|
[
"def C1(w,h):\n return Context('\\n'.join(['1'*w]*h))\n",
"def LL(n):\n \"\"\"constructs the LL context\"\"\"\n if (n<=0):return Context('0')\n else:\n LL1=LL(n-1)\n r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1\n r2 = LL1 - LL1 - LL1\n return r1 + r2\n",
"def HH(n):\n \"\"\"constructs the HH context\"\"\"\n if (n<=0):return Context('1')\n else:\n LL1=LL(n-1)\n HH1=HH(n-1)\n r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1\n r2 = HH1 - HH1 - HH1\n return r1 + r2\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i,b,w,c="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((w<=0 and i==0) and " ") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
vertically2 = vertically = lambda K1,K2: K1+K2
Lwidth = Hwidth = lambda n: 3**n
def L(g,i):
"""recursively constructs L line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Lwidth(i)
Ln = L(g,i-1)
if g1:
return Ln<<(2*n) | Ln<<n | Ln
else:
return int('1'*n,2)<<(2*n) | Ln<<n | Ln
else:
if g1:
return int('000',2)
else:
return int('100',2)
def H(g,i):
"""recursively constructs H line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Hwidth(i)
i=i-1
Hn = H(g,i)
if g1:
return Hn<<(2*n) | Hn<<n | Hn
else:
return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
else:
if g1:
return int('111',2)
else:
return int('101',2)
def UV_H(Hg,gw):
"""
Constructs implications and intents based on H
gw = g width
Hg = H(g), g is the binary coding of the attribute set
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
"""
lefts = set()
K = []
UV = []
p = Hwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Hg&pp:
y = istr(p,3,gw)
yy = y.replace('1','0')
if yy not in lefts:
if y.find('1') == -1:#y∈{0,2}^n
K.append(y)
else:
UV.append(y)
lefts.add(yy)
return (UV,K)
Awidth = lambda n: 2**n
def A(g,i):
"""recursively constructs A line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2)
Bwidth = lambda n:n*2**(n-1)
def B(g,i):
"""recursively constructs B line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
nA = Awidth(i)
nB = Bwidth(i)
i=i-1
Bn = B(g,i)
if g1:
return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
else:
return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
else:
if g1:
return 1
else:
return 0
def A012(t,i):
if i<0:
return ""
nA = Awidth(i)
if t < nA:
return "0"+A012(t,i-1)
else:
return "2"+A012(t-nA,i-1)
def B012(t,i):
"""
Constructs ternary implication coding (0=not there, 2=U, 1=V)
t is B column position
i = |M|-1 to 0
"""
if not i:
return "1"
nA = Awidth(i)
nB = Bwidth(i)
nBB = nB + nA
if t < nB:
return "0"+B012(t,i-1)
elif t < nBB:
return "1"+A012(t-nB,i-1)
else:
return "2"+B012(t-nBB,i-1)
def UV_B(Bg,gw):
"""
returns the implications UV based on B
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
UV = []
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
UV.append(uv)
return UV
def omega(imps):
"""
Calculates a measure for the size of the implication basis: \sum |U||V|
"""
if isinstance(imps,v_Us_dict):
return sum([omega(V) for U,V in imps.items()])#|V|=1
if isinstance(imps,list):
return sum([omega(x) for x in imps])
if isinstance(imps,str):
#imps = due[-1]
try:
U,V = imps.split("->")
Us = U.split(",") if "," in U else U.split()
Vs = V.split(",") if "," in V else V.split()
res = len(Us)*len(Vs)
return res
except:
return 0
if isinstance(imps,int):
b=bin(imps)[2:]
res = len([x for x in b if x=='1'])
return res
class v_Us_dict(defaultdict):
"""
In an implication U→u, u is the significant component.
U is coded as int.
u is the bit column of the implication's conclusion.
{u:[U1,U2,...]}
"""
def __init__(self,Bg,gw):
"""
returns the implications {v:Us} based on B
v is the significant component
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
self.width = gw
if isinstance(Bg,int):
defaultdict.__init__(self,list)
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
#let's find minima regarding product order
#{v:[Umin1,Umin2,...]}
v = uv.find('1')#v=significant
u = uv[:v]+'0'+uv[v+1:]
u = int(u.replace('2','1'),2)
Umin_s = self[gw-v-1]#bit position from right
it = [i for i,U in enumerate(Umin_s) if U&u==u]
for i in reversed(it):
del Umin_s[i]
else:
Umin_s.append(u)
elif isinstance(Bg,list):
defaultdict.__init__(self,list)
for k,v in Bg:
assert isinstance(v,list)
self[k] += v
else:
defaultdict.__init__(self,list,Bg)
def __eq__(self, other):
if len(self) != len(other):
return False
for v,U in self.items():
if v not in other:
return False
Uo = other[v]
if not set(Uo)==set(U):
return False
return True
def Code012(self):
for v,Us in self.items():
vleft = self.width - v - 1
for u in Us:
b = bin(u)[2:]
w0 = self.width-len(b)
c01 = '0'*w0+b
c01 = c01.replace('1','2')
c01 = c01[:vleft]+'1'+c01[vleft+1:]
yield c01
def __str__(self):
return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
def __len__(self):
return sum((len(x) for x in self.values()))
def flatten(self):
for v,Us in self.items():
for u in Us:
yield (v,u)
def __add__(self, other):
res = v_Us_dict([],self.width)
if isinstance(other,tuple):
other = {other[0]:[other[1]]}
keys = set(self)|set(other)
for v in keys:
t = set()
if v in self:
t |= set(self[v])
if v in other:
t |= set(other[v])
if t:
res[v] = list(t)
return res
def __sub__(self, other):
res = v_Us_dict([],self.width)
for v,U in self.items():
r = list(set(U) - set(other[v]))
if r:
res[v] = r
return res
def __mul__(self, other):
"""
This is the o operation in [1]_, that represents the 3rd Armstrong rule.
It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
"""
res = v_Us_dict([],self.width)
if id(self)==id(other):
s = iter(self.items())
try:
while True:
v1, us1 = next(s)
vv1 = 2**v1
s, ss = tee(s)#remember s and iterate with copy ss
try:
while True:
v2, us2 = next(ss)
vv2 = 2**v2
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
except StopIteration:
pass
except StopIteration:
pass
else:
for v1,us1 in self.items():
vv1 = 2**v1
for v2,us2 in other.items():
vv2 = 2**v2
if v1 != v2:
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
for v,U in res.items():
res[v] = list(set(U))#remove duplicates
return res
def __invert__(self):
"""
U->v generated from L=∪ min L_i via the 3rd Armstrong rule
Note, that this can become bigger than L.
"""
Y = self
Yn = Y*Y
while True:
YnplusY = Yn+Y
Yg = Yn*YnplusY
#YgenNotInL = Yg - L
#YgenInL = Yg - YgenNotInL
#Yn1 = Yn + YgenInL
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def __pow__(self, other):
"""
'other' is a (v,u) couple
generates U->v involving 'other'
#other = (0,64)
"""
Y = self
Z = v_Us_dict({other[0]:[other[1]]},self.width)
Yn = Y*Z
while True:
YnplusY = Yn+Y
Yg = Z*YnplusY
#this does not work for test_basis1
#YnplusZ = Yn+Z
#Yg = YnplusZ*YnplusY
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def koenig(self):
"""
This needs to be L = contextg.v_Us_B()
"""
L = self
Y = L - (L*L)
while True:
Ybar = Y + ~Y
take = L - Ybar
if not len(take):
return Y
else:
ZZ = list(set(take)-set(Y))#use significant which is not in Y
if len(ZZ) > 0:
v = ZZ[0]
z=(v,take[v][0])
else:
z = next(take.flatten())
Yzgen = Y**z
Y = (Y - Yzgen) + z #Yn+1
#Lost = Ybar - (Y + ~Y)
#assert len(Lost) == 0
def respects(g,imp):
"""
g is an int, where each bit is an attribute
implication UV is ternary coded 1 = ∈V, 2 = ∈U, 0 otherwise
g and UV have the same number of digits
"""
if isinstance(g,str):
g = int(g,2)
if isinstance(imp,int):
imp = istr(imp,3,g.bit_length())
V = int(imp.replace('1','2').replace('2','1'),2)
U = int(imp.replace('1','0').replace('2','1'),2)
ginU = U&g == U
ginV = V&g == V
return not ginU or ginV
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
C = Context
def C1(w,h):
return Context('\n'.join(['1'*w]*h))
def C0(w,h):
return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
"""constructs the LL context"""
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2
def AA(n):
"""constructs the AA context"""
if (n<=1):return Context('10\n00')
else:
AA1=AA(n-1)
r1 = C1(2**(n-1),2**(n-1)) - AA1
r2 = AA1 - AA1
return r1 + r2
def BB(n):
"""constructs the BB context"""
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
AA
|
python
|
def AA(n):
if (n<=1):return Context('10\n00')
else:
AA1=AA(n-1)
r1 = C1(2**(n-1),2**(n-1)) - AA1
r2 = AA1 - AA1
return r1 + r2
|
constructs the AA context
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L550-L557
|
[
"def C1(w,h):\n return Context('\\n'.join(['1'*w]*h))\n",
"def AA(n):\n \"\"\"constructs the AA context\"\"\"\n if (n<=1):return Context('10\\n00')\n else:\n AA1=AA(n-1)\n r1 = C1(2**(n-1),2**(n-1)) - AA1\n r2 = AA1 - AA1\n return r1 + r2\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i,b,w,c="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((w<=0 and i==0) and " ") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
vertically2 = vertically = lambda K1,K2: K1+K2
Lwidth = Hwidth = lambda n: 3**n
def L(g,i):
"""recursively constructs L line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Lwidth(i)
Ln = L(g,i-1)
if g1:
return Ln<<(2*n) | Ln<<n | Ln
else:
return int('1'*n,2)<<(2*n) | Ln<<n | Ln
else:
if g1:
return int('000',2)
else:
return int('100',2)
def H(g,i):
"""recursively constructs H line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Hwidth(i)
i=i-1
Hn = H(g,i)
if g1:
return Hn<<(2*n) | Hn<<n | Hn
else:
return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
else:
if g1:
return int('111',2)
else:
return int('101',2)
def UV_H(Hg,gw):
"""
Constructs implications and intents based on H
gw = g width
Hg = H(g), g is the binary coding of the attribute set
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
"""
lefts = set()
K = []
UV = []
p = Hwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Hg&pp:
y = istr(p,3,gw)
yy = y.replace('1','0')
if yy not in lefts:
if y.find('1') == -1:#y∈{0,2}^n
K.append(y)
else:
UV.append(y)
lefts.add(yy)
return (UV,K)
Awidth = lambda n: 2**n
def A(g,i):
"""recursively constructs A line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2)
Bwidth = lambda n:n*2**(n-1)
def B(g,i):
"""recursively constructs B line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
nA = Awidth(i)
nB = Bwidth(i)
i=i-1
Bn = B(g,i)
if g1:
return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
else:
return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
else:
if g1:
return 1
else:
return 0
def A012(t,i):
if i<0:
return ""
nA = Awidth(i)
if t < nA:
return "0"+A012(t,i-1)
else:
return "2"+A012(t-nA,i-1)
def B012(t,i):
"""
Constructs ternary implication coding (0=not there, 2=U, 1=V)
t is B column position
i = |M|-1 to 0
"""
if not i:
return "1"
nA = Awidth(i)
nB = Bwidth(i)
nBB = nB + nA
if t < nB:
return "0"+B012(t,i-1)
elif t < nBB:
return "1"+A012(t-nB,i-1)
else:
return "2"+B012(t-nBB,i-1)
def UV_B(Bg,gw):
"""
returns the implications UV based on B
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
UV = []
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
UV.append(uv)
return UV
def omega(imps):
"""
Calculates a measure for the size of the implication basis: \sum |U||V|
"""
if isinstance(imps,v_Us_dict):
return sum([omega(V) for U,V in imps.items()])#|V|=1
if isinstance(imps,list):
return sum([omega(x) for x in imps])
if isinstance(imps,str):
#imps = due[-1]
try:
U,V = imps.split("->")
Us = U.split(",") if "," in U else U.split()
Vs = V.split(",") if "," in V else V.split()
res = len(Us)*len(Vs)
return res
except:
return 0
if isinstance(imps,int):
b=bin(imps)[2:]
res = len([x for x in b if x=='1'])
return res
class v_Us_dict(defaultdict):
"""
In an implication U→u, u is the significant component.
U is coded as int.
u is the bit column of the implication's conclusion.
{u:[U1,U2,...]}
"""
def __init__(self,Bg,gw):
"""
returns the implications {v:Us} based on B
v is the significant component
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
self.width = gw
if isinstance(Bg,int):
defaultdict.__init__(self,list)
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
#let's find minima regarding product order
#{v:[Umin1,Umin2,...]}
v = uv.find('1')#v=significant
u = uv[:v]+'0'+uv[v+1:]
u = int(u.replace('2','1'),2)
Umin_s = self[gw-v-1]#bit position from right
it = [i for i,U in enumerate(Umin_s) if U&u==u]
for i in reversed(it):
del Umin_s[i]
else:
Umin_s.append(u)
elif isinstance(Bg,list):
defaultdict.__init__(self,list)
for k,v in Bg:
assert isinstance(v,list)
self[k] += v
else:
defaultdict.__init__(self,list,Bg)
def __eq__(self, other):
if len(self) != len(other):
return False
for v,U in self.items():
if v not in other:
return False
Uo = other[v]
if not set(Uo)==set(U):
return False
return True
def Code012(self):
for v,Us in self.items():
vleft = self.width - v - 1
for u in Us:
b = bin(u)[2:]
w0 = self.width-len(b)
c01 = '0'*w0+b
c01 = c01.replace('1','2')
c01 = c01[:vleft]+'1'+c01[vleft+1:]
yield c01
def __str__(self):
return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
def __len__(self):
return sum((len(x) for x in self.values()))
def flatten(self):
for v,Us in self.items():
for u in Us:
yield (v,u)
def __add__(self, other):
res = v_Us_dict([],self.width)
if isinstance(other,tuple):
other = {other[0]:[other[1]]}
keys = set(self)|set(other)
for v in keys:
t = set()
if v in self:
t |= set(self[v])
if v in other:
t |= set(other[v])
if t:
res[v] = list(t)
return res
def __sub__(self, other):
res = v_Us_dict([],self.width)
for v,U in self.items():
r = list(set(U) - set(other[v]))
if r:
res[v] = r
return res
def __mul__(self, other):
"""
This is the o operation in [1]_, that represents the 3rd Armstrong rule.
It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
"""
res = v_Us_dict([],self.width)
if id(self)==id(other):
s = iter(self.items())
try:
while True:
v1, us1 = next(s)
vv1 = 2**v1
s, ss = tee(s)#remember s and iterate with copy ss
try:
while True:
v2, us2 = next(ss)
vv2 = 2**v2
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
except StopIteration:
pass
except StopIteration:
pass
else:
for v1,us1 in self.items():
vv1 = 2**v1
for v2,us2 in other.items():
vv2 = 2**v2
if v1 != v2:
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
for v,U in res.items():
res[v] = list(set(U))#remove duplicates
return res
def __invert__(self):
"""
U->v generated from L=∪ min L_i via the 3rd Armstrong rule
Note, that this can become bigger than L.
"""
Y = self
Yn = Y*Y
while True:
YnplusY = Yn+Y
Yg = Yn*YnplusY
#YgenNotInL = Yg - L
#YgenInL = Yg - YgenNotInL
#Yn1 = Yn + YgenInL
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def __pow__(self, other):
"""
'other' is a (v,u) couple
generates U->v involving 'other'
#other = (0,64)
"""
Y = self
Z = v_Us_dict({other[0]:[other[1]]},self.width)
Yn = Y*Z
while True:
YnplusY = Yn+Y
Yg = Z*YnplusY
#this does not work for test_basis1
#YnplusZ = Yn+Z
#Yg = YnplusZ*YnplusY
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def koenig(self):
"""
This needs to be L = contextg.v_Us_B()
"""
L = self
Y = L - (L*L)
while True:
Ybar = Y + ~Y
take = L - Ybar
if not len(take):
return Y
else:
ZZ = list(set(take)-set(Y))#use significant which is not in Y
if len(ZZ) > 0:
v = ZZ[0]
z=(v,take[v][0])
else:
z = next(take.flatten())
Yzgen = Y**z
Y = (Y - Yzgen) + z #Yn+1
#Lost = Ybar - (Y + ~Y)
#assert len(Lost) == 0
def respects(g,imp):
"""
g is an int, where each bit is an attribute
implication UV is ternary coded 1 = ∈V, 2 = ∈U, 0 otherwise
g and UV have the same number of digits
"""
if isinstance(g,str):
g = int(g,2)
if isinstance(imp,int):
imp = istr(imp,3,g.bit_length())
V = int(imp.replace('1','2').replace('2','1'),2)
U = int(imp.replace('1','0').replace('2','1'),2)
ginU = U&g == U
ginV = V&g == V
return not ginU or ginV
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
C = Context
def C1(w,h):
return Context('\n'.join(['1'*w]*h))
def C0(w,h):
return Context('\n'.join(['0'*w]*h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
"""constructs the LL context"""
if (n<=0):return Context('0')
else:
LL1=LL(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - LL1
r2 = LL1 - LL1 - LL1
return r1 + r2
def HH(n):
"""constructs the HH context"""
if (n<=0):return Context('1')
else:
LL1=LL(n-1)
HH1=HH(n-1)
r1 = C1(3**(n-1),2**(n-1)) - LL1 - HH1
r2 = HH1 - HH1 - HH1
return r1 + r2
def BB(n):
"""constructs the BB context"""
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
BB
|
python
|
def BB(n):
if (n<=1):return Context('0\n1')
else:
BB1=BB(n-1)
AA1=AA(n-1)
r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1
r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;
return r1 + r2
|
constructs the BB context
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L558-L566
|
[
"def C1(w,h):\n return Context('\\n'.join(['1'*w]*h))\n",
"def AA(n):\n \"\"\"constructs the AA context\"\"\"\n if (n<=1):return Context('10\\n00')\n else:\n AA1=AA(n-1)\n r1 = C1(2**(n-1),2**(n-1)) - AA1\n r2 = AA1 - AA1\n return r1 + r2\n",
"def BB(n):\n \"\"\"constructs the BB context\"\"\"\n if (n<=1):return Context('0\\n1')\n else:\n BB1=BB(n-1)\n AA1=AA(n-1)\n r1 = C1((n-1)*2**(n-2),2**(n-1)) - AA1 - BB1\n r2 = BB1 - C1(2**(n-1),2**(n-1)) - BB1;\n return r1 + r2\n"
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
Implications
------------
This uses the python int as a bit field to store the FCA context.
See this `blog`_ for more.
.. _`blog`: http://rolandpuntaier.blogspot.com/2015/07/implications.html
"""
from math import trunc, log2
from functools import reduce
from itertools import tee
from collections import defaultdict
def istr(i,b,w,c="0123456789abcdefghijklmnopqrstuvwxyz"):
return ((w<=0 and i==0) and " ") or (istr(i//b, b, w-1, c).lstrip() + c[i%b])
digitat = lambda i,a,b: int(istr(i,b,a+1)[-a],b)
digitat2 = lambda i,a: (i>>a)&1
#concatenate...
horizontally = lambda K1,K2,b,w1,w2: [int(s,b) for s in [istr(k1,b,w1)+istr(k2,b,w2) for k1,k2 in zip(K1,K2)]]
horizontally2 = lambda K1,K2,w1,w2: [(k1<<w2)|k2 for k1,k2 in zip(K1,K2)]
vertically2 = vertically = lambda K1,K2: K1+K2
Lwidth = Hwidth = lambda n: 3**n
def L(g,i):
"""recursively constructs L line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Lwidth(i)
Ln = L(g,i-1)
if g1:
return Ln<<(2*n) | Ln<<n | Ln
else:
return int('1'*n,2)<<(2*n) | Ln<<n | Ln
else:
if g1:
return int('000',2)
else:
return int('100',2)
def H(g,i):
"""recursively constructs H line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Hwidth(i)
i=i-1
Hn = H(g,i)
if g1:
return Hn<<(2*n) | Hn<<n | Hn
else:
return int('1'*n,2)<<(2*n) | L(g,i)<<n | Hn
else:
if g1:
return int('111',2)
else:
return int('101',2)
def UV_H(Hg,gw):
"""
Constructs implications and intents based on H
gw = g width
Hg = H(g), g is the binary coding of the attribute set
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
"""
lefts = set()
K = []
UV = []
p = Hwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Hg&pp:
y = istr(p,3,gw)
yy = y.replace('1','0')
if yy not in lefts:
if y.find('1') == -1:#y∈{0,2}^n
K.append(y)
else:
UV.append(y)
lefts.add(yy)
return (UV,K)
Awidth = lambda n: 2**n
def A(g,i):
"""recursively constructs A line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
n = Awidth(i)
An = A(g,i-1)
if g1:
return An<<n | An
else:
return int('1'*n,2)<<n | An
else:
if g1:
return int('00',2)
else:
return int('10',2)
Bwidth = lambda n:n*2**(n-1)
def B(g,i):
"""recursively constructs B line for g; i = len(g)-1"""
g1 = g&(2**i)
if i:
nA = Awidth(i)
nB = Bwidth(i)
i=i-1
Bn = B(g,i)
if g1:
return Bn << (nA+nB) | int('1'*nA,2) << nB | Bn
else:
return int('1'*nB,2) << (nA+nB) | A(g,i) << nB | Bn
else:
if g1:
return 1
else:
return 0
def A012(t,i):
if i<0:
return ""
nA = Awidth(i)
if t < nA:
return "0"+A012(t,i-1)
else:
return "2"+A012(t-nA,i-1)
def B012(t,i):
"""
Constructs ternary implication coding (0=not there, 2=U, 1=V)
t is B column position
i = |M|-1 to 0
"""
if not i:
return "1"
nA = Awidth(i)
nB = Bwidth(i)
nBB = nB + nA
if t < nB:
return "0"+B012(t,i-1)
elif t < nBB:
return "1"+A012(t-nB,i-1)
else:
return "2"+B012(t-nBB,i-1)
def UV_B(Bg,gw):
"""
returns the implications UV based on B
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
UV = []
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
UV.append(uv)
return UV
def omega(imps):
"""
Calculates a measure for the size of the implication basis: \sum |U||V|
"""
if isinstance(imps,v_Us_dict):
return sum([omega(V) for U,V in imps.items()])#|V|=1
if isinstance(imps,list):
return sum([omega(x) for x in imps])
if isinstance(imps,str):
#imps = due[-1]
try:
U,V = imps.split("->")
Us = U.split(",") if "," in U else U.split()
Vs = V.split(",") if "," in V else V.split()
res = len(Us)*len(Vs)
return res
except:
return 0
if isinstance(imps,int):
b=bin(imps)[2:]
res = len([x for x in b if x=='1'])
return res
class v_Us_dict(defaultdict):
"""
In an implication U→u, u is the significant component.
U is coded as int.
u is the bit column of the implication's conclusion.
{u:[U1,U2,...]}
"""
def __init__(self,Bg,gw):
"""
returns the implications {v:Us} based on B
v is the significant component
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
"""
self.width = gw
if isinstance(Bg,int):
defaultdict.__init__(self,list)
p = Bwidth(gw)
pp = 2**p
while p:
pp = pp>>1
p = p-1
if Bg&pp:
uv = B012(p,gw-1)
#let's find minima regarding product order
#{v:[Umin1,Umin2,...]}
v = uv.find('1')#v=significant
u = uv[:v]+'0'+uv[v+1:]
u = int(u.replace('2','1'),2)
Umin_s = self[gw-v-1]#bit position from right
it = [i for i,U in enumerate(Umin_s) if U&u==u]
for i in reversed(it):
del Umin_s[i]
else:
Umin_s.append(u)
elif isinstance(Bg,list):
defaultdict.__init__(self,list)
for k,v in Bg:
assert isinstance(v,list)
self[k] += v
else:
defaultdict.__init__(self,list,Bg)
def __eq__(self, other):
if len(self) != len(other):
return False
for v,U in self.items():
if v not in other:
return False
Uo = other[v]
if not set(Uo)==set(U):
return False
return True
def Code012(self):
for v,Us in self.items():
vleft = self.width - v - 1
for u in Us:
b = bin(u)[2:]
w0 = self.width-len(b)
c01 = '0'*w0+b
c01 = c01.replace('1','2')
c01 = c01[:vleft]+'1'+c01[vleft+1:]
yield c01
def __str__(self):
return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
def __len__(self):
return sum((len(x) for x in self.values()))
def flatten(self):
for v,Us in self.items():
for u in Us:
yield (v,u)
def __add__(self, other):
res = v_Us_dict([],self.width)
if isinstance(other,tuple):
other = {other[0]:[other[1]]}
keys = set(self)|set(other)
for v in keys:
t = set()
if v in self:
t |= set(self[v])
if v in other:
t |= set(other[v])
if t:
res[v] = list(t)
return res
def __sub__(self, other):
res = v_Us_dict([],self.width)
for v,U in self.items():
r = list(set(U) - set(other[v]))
if r:
res[v] = r
return res
def __mul__(self, other):
"""
This is the o operation in [1]_, that represents the 3rd Armstrong rule.
It returns combinations for i‡j: (i,u1|u2) or (j,u1|u2),
"""
res = v_Us_dict([],self.width)
if id(self)==id(other):
s = iter(self.items())
try:
while True:
v1, us1 = next(s)
vv1 = 2**v1
s, ss = tee(s)#remember s and iterate with copy ss
try:
while True:
v2, us2 = next(ss)
vv2 = 2**v2
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
except StopIteration:
pass
except StopIteration:
pass
else:
for v1,us1 in self.items():
vv1 = 2**v1
for v2,us2 in other.items():
vv2 = 2**v2
if v1 != v2:
for u1 in us1:
for u2 in us2:
if vv2&u1 and not vv1&u2:
res[v1].append((u1|u2)&~vv2)
elif vv1&u2 and not vv2&u1:
res[v2].append((u1|u2)&~vv1)
for v,U in res.items():
res[v] = list(set(U))#remove duplicates
return res
def __invert__(self):
"""
U->v generated from L=∪ min L_i via the 3rd Armstrong rule
Note, that this can become bigger than L.
"""
Y = self
Yn = Y*Y
while True:
YnplusY = Yn+Y
Yg = Yn*YnplusY
#YgenNotInL = Yg - L
#YgenInL = Yg - YgenNotInL
#Yn1 = Yn + YgenInL
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
def __pow__(self, other):
"""
'other' is a (v,u) couple
generates U->v involving 'other'
#other = (0,64)
"""
Y = self
Z = v_Us_dict({other[0]:[other[1]]},self.width)
Yn = Y*Z
while True:
YnplusY = Yn+Y
Yg = Z*YnplusY
#this does not work for test_basis1
#YnplusZ = Yn+Z
#Yg = YnplusZ*YnplusY
Yn1 = Yn + Yg
if Yn1 == Yn:
break
Yn = Yn1
return Yn
    def koenig(self):
        """
        This needs to be L = contextg.v_Us_B()

        Iteratively shrink L toward a minimal implication basis (König's
        construction): start from the implications of L not derivable by
        the 3rd Armstrong rule (L - L*L), then repeatedly swap in one
        implication that the current candidate Y does not yet cover,
        dropping whatever Y-implications it generates, until Y together
        with its Armstrong closure (~Y) covers all of L.
        """
        L = self
        Y = L - (L*L)
        while True:
            # Ybar = candidate basis plus everything it generates (closure).
            Ybar = Y + ~Y
            # Implications of L not yet covered by the candidate.
            take = L - Ybar
            if not len(take):
                return Y
            else:
                ZZ = list(set(take)-set(Y))#use significant which is not in Y
                if len(ZZ) > 0:
                    v = ZZ[0]
                    z=(v,take[v][0])
                else:
                    # All significants of `take` already occur in Y: pick any
                    # single (v, U) pair from the uncovered implications.
                    z = next(take.flatten())
                # Remove from Y what z generates together with Y, then add z.
                Yzgen = Y**z
                Y = (Y - Yzgen) + z #Yn+1
                #Lost = Ybar - (Y + ~Y)
                #assert len(Lost) == 0
def respects(g,imp):
    """
    Return True when the attribute set respects the implication.

    g is an int (or a binary string) where each bit is an attribute.
    The implication U->V is ternary coded: digit 1 = ∈V, 2 = ∈U, 0 otherwise;
    g and the coded implication cover the same attribute positions.
    A set respects U->V iff it misses part of U or contains all of V.
    """
    if isinstance(g,str):
        g = int(g,2)
    if isinstance(imp,int):
        imp = istr(imp,3,g.bit_length())
    # Premise bits: keep only the 2-digits of the ternary code.
    premise = int(imp.replace('1','0').replace('2','1'),2)
    # Conclusion bits: the 1-digits (the double replace also folds in the 2s,
    # so the premise bits are part of the conclusion mask as well).
    conclusion = int(imp.replace('1','2').replace('2','1'),2)
    if premise & g != premise:
        # Premise not contained in g: the implication is vacuously respected.
        return True
    return conclusion & g == conclusion
class Context(list):
    # A formal context: a list of rows, each row an int whose bits are the
    # attributes of one object. `width` is the number of attribute columns.
    def __init__(self, *args, **kwargs):
        """Context can be initialized with
        - a rectangular text block of 0s and 1s
        - a list of ints and a "width" keyword argument.
        A "mapping" keyword argument as list associates the bits with objects of any kind.
        """
        if isinstance(args[0],str):
            # Text-block form: one binary string per non-empty line.
            lines = [s.strip() for s in args[0].splitlines() if s.strip()]
            linelens = [len(tt) for tt in lines]
            self.width = linelens[0]
            samelen = linelens.count(linelens[0])==len(linelens)
            assert samelen, "Context needs all lines to be of same number of 0s and 1s"
            super().__init__([int(s,2) for s in lines])
        else:
            # List-of-ints form: width must be given explicitly.
            super().__init__(*args)
            self.width = kwargs['width']
        try:
            self.mapping = kwargs['mapping']
        except:
            # Default: bit positions map to plain integer attribute names.
            self.mapping = [i for i in range(self.width)]
    def __add__(self, other):
        # Vertical concatenation: append the rows of `other` below self.
        c = Context(list.__add__(self,other),width=self.width)
        return c
    def __sub__(self, other):
        # NOTE: `-` is horizontal (side-by-side) concatenation via the
        # external helper horizontally2, not a set difference.
        c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
        return c
    def column(self, i):
        """from right"""
        return ''.join([str(digitat2(r,i)) for r in self])
    def row(self, i):
        # Row i as a zero-padded binary string; out-of-range rows read as all 0s.
        try:
            r = istr(self[i],2,self.width)
        except IndexError:
            r = '0'*self.width
        return r
    def __getitem__(self,xy):
        # ctx[row, bit] -> single digit; ctx[row] -> the row int.
        if isinstance(xy,tuple):
            return digitat2(list.__getitem__(self,xy[0]),xy[1])
        else:
            return list.__getitem__(self,xy)
    def transpose(self):
        # Swap objects and attributes by re-reading the columns as rows.
        cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
        return Context(cs)
    def __str__(self):
        rs='\n'.join([self.row(i) for i in range(len(self))])
        return rs
    def size(self):
        # (number of attribute columns, number of object rows)
        return self.width, len(self)
    def UV_H(self):
        """
        UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
        K = all closed sets
        This is UV_H function, but the returned implications are respected by all attribute sets of this context.
        This corresponds to a multiplication or & operation of the Hg sets.
        """
        h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
        return UV_H(h, self.width)
    def UV_B(self):
        """
        returns UV = all respected U->Ux in ternary coding (1=V,2=U)
        """
        h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
        return UV_B(h, self.width)
    def v_Us_B(self):
        """
        returns the implications {v:Us} based on B
        This is L=∪ min L_i in [1]_
        """
        # Intersect the per-row B sets, then decode into a v_Us_dict.
        Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
        gw = self.width
        return v_Us_dict(Bg, gw)
    def respects(self, implications):
        # True iff every row of the context respects every given implication.
        if isinstance(implications,v_Us_dict):
            implications = implications.Code012()
        for g in self:
            for i in implications:
                if not respects(g,i):
                    return False
        return True
    def __call__(self, intOrCode012, right = None):
        """
        mapping from bits to attributes using mapping (which defaults to ints)
        - right, if available, is the conclusion of the implication; used if intOrCode012 is int
        """
        if isinstance(intOrCode012,v_Us_dict):
            # Translate every (v, U) implication of the dict.
            return frozenset(self(x,right=i) for i,x in intOrCode012.items())
        if isinstance(intOrCode012,list):
            return frozenset(self(x,right=right) for x in intOrCode012)
        if isinstance(intOrCode012,int):
            # Bitmask form: collect mapped attributes for each set bit
            # (bit 0 is the rightmost attribute, hence the negative index).
            res = []
            pp = 1
            for pos in range(self.width):
                if intOrCode012&pp:
                    res.append(self.mapping[-pos-1])
                pp = pp*2
            if right != None:
                # Return an implication (premise set, {conclusion attribute}).
                return (frozenset(res),frozenset([self.mapping[-right-1]]))
            else:
                return frozenset(res)
        if isinstance(intOrCode012,str):
            # Ternary '012' code: 2-digits form the premise, 1-digits the conclusion.
            left = []
            right = []
            for pos in range(self.width):
                if intOrCode012[pos] == '2':
                    left.append(self.mapping[pos])
                elif intOrCode012[pos] == '1':
                    right.append(self.mapping[pos])
            if left:
                if right:
                    return (frozenset(left),frozenset(right))
                else:
                    return frozenset(left)
            else:
                return frozenset(right)
C = Context
def C1(w,h):
    """Return the all-ones context with h rows of width w."""
    ones_row = '1' * w
    return Context('\n'.join([ones_row] * h))
def C0(w,h):
    """Return the all-zeros context with h rows of width w."""
    zeros_row = '0' * w
    return Context('\n'.join([zeros_row] * h))
#HH, LL, BB, AA are `\mathbb{H}`, `\mathbb{L}`, `\mathbb{B}`, `\mathbb{A}` from [1]_.
#They are not needed to construct the implication basis.
def LL(n):
    """constructs the LL context recursively; base case is the 1x1 zero context"""
    if n <= 0:
        return Context('0')
    prev = LL(n - 1)
    # Upper band: an all-ones block joined horizontally with two copies of LL(n-1).
    upper = C1(3 ** (n - 1), 2 ** (n - 1)) - prev - prev
    # Lower band: three copies of LL(n-1) side by side.
    lower = prev - prev - prev
    return upper + lower
def HH(n):
    """constructs the HH context recursively; base case is the 1x1 one context"""
    if n <= 0:
        return Context('1')
    prev_ll = LL(n - 1)
    prev_hh = HH(n - 1)
    # Upper band: all-ones block, then LL(n-1) and HH(n-1) side by side.
    upper = C1(3 ** (n - 1), 2 ** (n - 1)) - prev_ll - prev_hh
    # Lower band: three copies of HH(n-1) side by side.
    lower = prev_hh - prev_hh - prev_hh
    return upper + lower
def AA(n):
    """constructs the AA context; base case is the 2x2 context with a single 1"""
    if n <= 1:
        return Context('10\n00')
    prev = AA(n - 1)
    # Upper band: all-ones block joined horizontally with AA(n-1).
    upper = C1(2 ** (n - 1), 2 ** (n - 1)) - prev
    # Lower band: two copies of AA(n-1) side by side.
    lower = prev - prev
    return upper + lower
#.. _[1]:
#
# `Endliche Hüllensysteme und ihre Implikationenbasen <http://www.emis.de/journals/SLC/wpapers/s49koenig.pdf>`_ by Roman König.
|
pyfca/pyfca
|
pyfca/implications.py
|
v_Us_dict.koenig
|
python
|
def koenig(self):
L = self
Y = L - (L*L)
while True:
Ybar = Y + ~Y
take = L - Ybar
if not len(take):
return Y
else:
ZZ = list(set(take)-set(Y))#use significant which is not in Y
if len(ZZ) > 0:
v = ZZ[0]
z=(v,take[v][0])
else:
z = next(take.flatten())
Yzgen = Y**z
Y = (Y - Yzgen) + z
|
This needs to be L = contextg.v_Us_B()
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L364-L383
| null |
class v_Us_dict(defaultdict):
    """
    In an implication U→u, u is the significant component.
    U is coded as int.
    u is the bit column of the implication's conclusion.
    {u:[U1,U2,...]}
    """
    # NOTE(review): defines __eq__ without __hash__, so instances become
    # unhashable — confirm that is intended.
    def __init__(self,Bg,gw):
        """
        returns the implications {v:Us} based on B
        v is the significant component
        Bg = B(g), g∈2^M
        gw = |M|, M is the set of all attributes
        """
        self.width = gw
        if isinstance(Bg,int):
            # Bg is a bitset over all ternary codes: decode each set bit
            # into an implication and keep only premise-minimal ones.
            defaultdict.__init__(self,list)
            p = Bwidth(gw)
            pp = 2**p
            while p:
                pp = pp>>1
                p = p-1
                if Bg&pp:
                    uv = B012(p,gw-1)
                    #let's find minima regarding product order
                    #{v:[Umin1,Umin2,...]}
                    v = uv.find('1')#v=significant
                    u = uv[:v]+'0'+uv[v+1:]
                    u = int(u.replace('2','1'),2)
                    Umin_s = self[gw-v-1]#bit position from right
                    # Drop stored premises that are supersets of the new u ...
                    it = [i for i,U in enumerate(Umin_s) if U&u==u]
                    for i in reversed(it):
                        del Umin_s[i]
                    else:
                        # ... then append u. The for/else always runs (no break),
                        # so u is appended unconditionally; presumably the bit
                        # enumeration order guarantees minimality — TODO confirm.
                        Umin_s.append(u)
        elif isinstance(Bg,list):
            # List of (v, Us) pairs.
            defaultdict.__init__(self,list)
            for k,v in Bg:
                assert isinstance(v,list)
                self[k] += v
        else:
            # Any other mapping-like initializer is passed straight through.
            defaultdict.__init__(self,list,Bg)
    def __eq__(self, other):
        # Equal iff same total implication count and, per significant v,
        # the same set of premises.
        if len(self) != len(other):
            return False
        for v,U in self.items():
            if v not in other:
                return False
            Uo = other[v]
            if not set(Uo)==set(U):
                return False
        return True
    def Code012(self):
        # Yield each implication as a ternary string: 2 = premise bit,
        # 1 = the significant (conclusion) bit, 0 otherwise.
        for v,Us in self.items():
            vleft = self.width - v - 1
            for u in Us:
                b = bin(u)[2:]
                w0 = self.width-len(b)
                c01 = '0'*w0+b
                c01 = c01.replace('1','2')
                c01 = c01[:vleft]+'1'+c01[vleft+1:]
                yield c01
    def __str__(self):
        return defaultdict.__str__(self).replace('defaultdict','v_Us_dict')
    def __len__(self):
        # Total number of implications (not number of significants).
        return sum((len(x) for x in self.values()))
    def flatten(self):
        # Yield every implication as a (v, U) pair.
        for v,Us in self.items():
            for u in Us:
                yield (v,u)
    def __add__(self, other):
        # Union of implication sets; `other` may be a single (v, U) tuple.
        res = v_Us_dict([],self.width)
        if isinstance(other,tuple):
            other = {other[0]:[other[1]]}
        keys = set(self)|set(other)
        for v in keys:
            t = set()
            if v in self:
                t |= set(self[v])
            if v in other:
                t |= set(other[v])
            if t:
                res[v] = list(t)
        return res
    def __sub__(self, other):
        # Per-significant set difference of premises.
        # NOTE: indexing `other[v]` on a defaultdict inserts missing keys
        # into `other` as a side effect.
        res = v_Us_dict([],self.width)
        for v,U in self.items():
            r = list(set(U) - set(other[v]))
            if r:
                res[v] = r
        return res
    def __mul__(self, other):
        """
        This is the o operation in [1]_, that represents the 3rd Armstrong rule.
        It returns combinations for i≠j: (i,u1|u2) or (j,u1|u2),
        """
        res = v_Us_dict([],self.width)
        if id(self)==id(other):
            # Self-product: iterate unordered pairs of distinct items only.
            s = iter(self.items())
            try:
                while True:
                    v1, us1 = next(s)
                    vv1 = 2**v1
                    s, ss = tee(s)#remember s and iterate with copy ss
                    try:
                        while True:
                            v2, us2 = next(ss)
                            vv2 = 2**v2
                            for u1 in us1:
                                for u2 in us2:
                                    # Combine when exactly one premise contains
                                    # the other's significant bit; the consumed
                                    # significant is masked out of the result.
                                    if vv2&u1 and not vv1&u2:
                                        res[v1].append((u1|u2)&~vv2)
                                    elif vv1&u2 and not vv2&u1:
                                        res[v2].append((u1|u2)&~vv1)
                    except StopIteration:
                        pass
            except StopIteration:
                pass
        else:
            # Cross-product over all ordered pairs with distinct significants.
            for v1,us1 in self.items():
                vv1 = 2**v1
                for v2,us2 in other.items():
                    vv2 = 2**v2
                    if v1 != v2:
                        for u1 in us1:
                            for u2 in us2:
                                if vv2&u1 and not vv1&u2:
                                    res[v1].append((u1|u2)&~vv2)
                                elif vv1&u2 and not vv2&u1:
                                    res[v2].append((u1|u2)&~vv1)
        for v,U in res.items():
            res[v] = list(set(U))#remove duplicates
        return res
    def __invert__(self):
        """
        U->v generated from L=∪ min L_i via the 3rd Armstrong rule
        Note, that this can become bigger than L.
        """
        # Fixpoint iteration: keep applying the o-operation until the
        # generated implication set stops growing.
        Y = self
        Yn = Y*Y
        while True:
            YnplusY = Yn+Y
            Yg = Yn*YnplusY
            #YgenNotInL = Yg - L
            #YgenInL = Yg - YgenNotInL
            #Yn1 = Yn + YgenInL
            Yn1 = Yn + Yg
            if Yn1 == Yn:
                break
            Yn = Yn1
        return Yn
    def __pow__(self, other):
        """
        'other' is a (v,u) couple
        generates U->v involving 'other'
        #other = (0,64)
        """
        # Fixpoint iteration seeded with the single implication `other`:
        # collects everything it generates together with self.
        Y = self
        Z = v_Us_dict({other[0]:[other[1]]},self.width)
        Yn = Y*Z
        while True:
            YnplusY = Yn+Y
            Yg = Z*YnplusY
            #this does not work for test_basis1
            #YnplusZ = Yn+Z
            #Yg = YnplusZ*YnplusY
            Yn1 = Yn + Yg
            if Yn1 == Yn:
                break
            Yn = Yn1
        return Yn
#Yn+1
|
pyfca/pyfca
|
pyfca/implications.py
|
Context.column
|
python
|
def column(self, i):
return ''.join([str(digitat2(r,i)) for r in self])
|
from right
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L432-L434
| null |
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
|
pyfca/pyfca
|
pyfca/implications.py
|
Context.UV_H
|
python
|
def UV_H(self):
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
|
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L454-L463
|
[
"def UV_H(Hg,gw):\n \"\"\"\n Constructs implications and intents based on H\n gw = g width\n Hg = H(g), g is the binary coding of the attribute set\n UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)\n K = all closed sets\n \"\"\"\n lefts = set()\n K = []\n UV = []\n p = Hwidth(gw)\n pp = 2**p\n while p:\n pp = pp>>1\n p = p-1\n if Hg&pp:\n y = istr(p,3,gw)\n yy = y.replace('1','0')\n if yy not in lefts: \n if y.find('1') == -1:#y∈{0,2}^n\n K.append(y)\n else:\n UV.append(y)\n lefts.add(yy)\n return (UV,K)\n"
] |
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
|
pyfca/pyfca
|
pyfca/implications.py
|
Context.UV_B
|
python
|
def UV_B(self):
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
|
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L464-L469
|
[
"def UV_B(Bg,gw):\n \"\"\"\n returns the implications UV based on B\n Bg = B(g), g∈2^M\n gw = |M|, M is the set of all attributes\n \"\"\"\n UV = []\n p = Bwidth(gw)\n pp = 2**p\n while p:\n pp = pp>>1\n p = p-1\n if Bg&pp:\n uv = B012(p,gw-1)\n UV.append(uv)\n return UV\n"
] |
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def v_Us_B(self):
"""
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
"""
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
|
pyfca/pyfca
|
pyfca/implications.py
|
Context.v_Us_B
|
python
|
def v_Us_B(self):
Bg = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
gw = self.width
return v_Us_dict(Bg, gw)
|
returns the implications {v:Us} based on B
This is L=∪ min L_i in [1]_
|
train
|
https://github.com/pyfca/pyfca/blob/cf8cea9e76076dbf4bb3f38996dcb5491b0eb0b0/pyfca/implications.py#L470-L477
| null |
class Context(list):
def __init__(self, *args, **kwargs):
"""Context can be initialized with
- a rectangular text block of 0s and 1s
- a list of ints and a "width" keyword argument.
A "mapping" keyword argument as list associates the bits with objects of any kind.
"""
if isinstance(args[0],str):
lines = [s.strip() for s in args[0].splitlines() if s.strip()]
linelens = [len(tt) for tt in lines]
self.width = linelens[0]
samelen = linelens.count(linelens[0])==len(linelens)
assert samelen, "Context needs all lines to be of same number of 0s and 1s"
super().__init__([int(s,2) for s in lines])
else:
super().__init__(*args)
self.width = kwargs['width']
try:
self.mapping = kwargs['mapping']
except:
self.mapping = [i for i in range(self.width)]
def __add__(self, other):
c = Context(list.__add__(self,other),width=self.width)
return c
def __sub__(self, other):
c = Context(horizontally2(self,other,self.width,other.width),width=self.width+other.width)
return c
def column(self, i):
"""from right"""
return ''.join([str(digitat2(r,i)) for r in self])
def row(self, i):
try:
r = istr(self[i],2,self.width)
except IndexError:
r = '0'*self.width
return r
def __getitem__(self,xy):
if isinstance(xy,tuple):
return digitat2(list.__getitem__(self,xy[0]),xy[1])
else:
return list.__getitem__(self,xy)
def transpose(self):
cs='\n'.join([self.column(i) for i in reversed(range(self.width))])
return Context(cs)
def __str__(self):
rs='\n'.join([self.row(i) for i in range(len(self))])
return rs
def size(self):
return self.width, len(self)
def UV_H(self):
"""
UV = all non-trivial (!V⊂U) implications U->V with UuV closed; in ternary coding (1=V,2=U)
K = all closed sets
This is UV_H function, but the returned implications are respected by all attribute sets of this context.
This corresponds to a multiplication or & operation of the Hg sets.
"""
h = reduce(lambda x,y:x&y,(H(g,self.width-1) for g in self))
return UV_H(h, self.width)
def UV_B(self):
"""
returns UV = all respected U->Ux in ternary coding (1=V,2=U)
"""
h = reduce(lambda x,y:x&y,(B(g,self.width-1) for g in self))
return UV_B(h, self.width)
def respects(self, implications):
if isinstance(implications,v_Us_dict):
implications = implications.Code012()
for g in self:
for i in implications:
if not respects(g,i):
return False
return True
def __call__(self, intOrCode012, right = None):
"""
mapping from bits to attributes using mapping (which defaults to ints)
- right, if available, is the conclusion of the implication; used if intOrCode012 is int
"""
if isinstance(intOrCode012,v_Us_dict):
return frozenset(self(x,right=i) for i,x in intOrCode012.items())
if isinstance(intOrCode012,list):
return frozenset(self(x,right=right) for x in intOrCode012)
if isinstance(intOrCode012,int):
res = []
pp = 1
for pos in range(self.width):
if intOrCode012&pp:
res.append(self.mapping[-pos-1])
pp = pp*2
if right != None:
return (frozenset(res),frozenset([self.mapping[-right-1]]))
else:
return frozenset(res)
if isinstance(intOrCode012,str):
left = []
right = []
for pos in range(self.width):
if intOrCode012[pos] == '2':
left.append(self.mapping[pos])
elif intOrCode012[pos] == '1':
right.append(self.mapping[pos])
if left:
if right:
return (frozenset(left),frozenset(right))
else:
return frozenset(left)
else:
return frozenset(right)
|
Metatab/geoid
|
geoid/core.py
|
parse_to_gvid
|
python
|
def parse_to_gvid(v):
    """Parse an ACS Geoid or a GVID to a GVID.

    First attempts a direct GVid parse; on failure, parses *v* as an ACS
    geoid and converts it. Raises ValueError carrying both parse error
    messages when neither interpretation succeeds.
    """
    from geoid.civick import GVid
    from geoid.acs import AcsGeoid

    gvid_error = ''
    try:
        return GVid.parse(v)
    except ValueError as e:
        # Remember the first failure; it is reported if the fallback also fails.
        gvid_error = str(e)
    try:
        return AcsGeoid.parse(v).convert(GVid)
    except ValueError as e:
        raise ValueError("Failed to parse to either ACS or GVid: {}; {}".format(gvid_error, str(e)))
|
Parse an ACS Geoid or a GVID to a GVID
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L342-L357
|
[
"def parse(cls, gvid, exception=True):\n \"\"\"\n Parse a string value into the geoid of this class.\n\n :param gvid: String value to parse.\n :param exception: If true ( default) raise an eception on parse erorrs. If False, return a\n 'null' geoid.\n :return:\n \"\"\"\n\n if gvid == 'invalid':\n return cls.get_class('null')(0)\n\n if not bool(gvid):\n return None\n\n if not isinstance(gvid, six.string_types):\n raise TypeError(\"Can't parse; not a string. Got a '{}' \".format(type(gvid)))\n\n try:\n if not cls.sl:\n # Civick and ACS include the SL, so can call from base type.\n if six.PY3:\n fn = cls.decode\n else:\n fn = cls.decode.__func__\n\n sl = fn(gvid[0:cls.sl_width])\n else:\n sl = cls.sl # Otherwise must use derived class.\n\n except ValueError as e:\n if exception:\n raise ValueError(\"Failed to parse gvid '{}': {}\".format(gvid, str(e)))\n else:\n return cls.get_class('null')(0)\n\n try:\n cls = cls.sl_map[sl]\n except KeyError:\n if exception:\n raise ValueError(\"Failed to parse gvid '{}': Unknown summary level '{}' \".format(gvid, sl))\n else:\n return cls.get_class('null')(0)\n\n m = cls.regex.match(gvid)\n\n if not m:\n raise ValueError(\"Failed to match '{}' to '{}' \".format(gvid, cls.regex_str))\n\n d = m.groupdict()\n\n if not d:\n return None\n\n if six.PY3:\n fn = cls.decode\n else:\n fn = cls.decode.__func__\n\n d = {k: fn(v) for k, v in d.items()}\n\n try:\n del d['sl']\n except KeyError:\n pass\n\n return cls(**d)\n"
] |
""" CLasses for working with census Geoids
"""
import inspect
import re
import sys
import six
# Map of summary-level name -> summary-level number. The reverse mapping (and
# the per-level component lists) live in `segments`.
names = {  # (summary level value, base 10 chars, Base 62 chars, prefix fields)
    'null': 1,
    'us': 10,
    'region': 20,
    'division': 30,
    'state': 40,
    'county': 50,
    'cosub': 60,
    'place': 160,
    'ua': 400,
    'tract': 140,
    'blockgroup': 150,
    'block': 101,
    'sdelm': 950,
    'sdsec': 960,
    'sduni': 970,
    'zcta': 860,
    'zip': 1200,
    'sldl': 620,
    'sldu': 610,
    'cdcurr': 500,

    # Other Levels that don't have proper names yet. For these .allval() and
    # simplify() don't work properly.
    'state_aianhh': 260,
    'necta_nectadiv_state_county_cousub': 358,
    'state_aianhh_place': 269,
    'aianhh_state_county': 270,
    'state_cbsa_metdiv': 323,
    'state_aianhh280': 280,
    'state_place_county': 155,
    'aianhh_aitsce_state': 290,
    'state_aianhh_aihhtli': 283,
    'state_cdcurr_aianhh': 550,
    'state_concit': 170,
    'state_concit_place': 172,
    'state_aianhh_aihhtli286': 286,
    'cbsa': 310,
    'cbsa_state': 311,
    'cbsa_state_place': 312,
    'cbsa_state_county': 313,
    'cbsa_metdiv': 314,
    'cbsa_metdiv_state': 315,
    'state_cbsa': 320,
    'state_cbsa_place': 321,
    'state_cbsa_county': 322,
    'state_county_cousub_submcd': 67,
    'state_cbsa_metdiv_county': 324,
    'state_county_cousub_place': 70,
    'necta_state_county': 353,
    'state_puma5': 795,
    'csa': 330,
    'csa_state': 331,
    'csa_cbsa': 332,
    'csa_cbsa_state': 333,
    'cnecta': 335,
    'state_county_cousub_place_tract': 80,
    'cnecta_necta': 337,
    'cnecta_necta_state': 338,
    'state_csa': 340,
    'state_csa_cbsa': 341,
    'state_cnecta': 345,
    'state_cnecta_necta': 346,
    'necta': 350,
    'necta_state': 351,
    'necta_state_place': 352,
    'cnecta_state': 336,
    'necta_state_county_cousub': 354,
    'necta_nectadiv': 355,
    'necta_nectadiv_state': 356,
    'state_anrc': 230,
    'necta_nectadiv_state_county': 357,
    'state_necta': 360,
    'cbsa_metdiv_state_county': 316,
    'state_necta_county': 362,
    'state_necta_county_cousub': 363,
    'state_necta_nectadiv': 364,
    'state_necta_nectadiv_county': 365,
    'state_necta_nectadiv_county_cousub': 366,
    'ua_state': 410,
    'ua_state_county': 430,
    'state_sldu_county': 612,
    'state_sldu': 610,
    'state_sldl_county': 622,
    'state_sldl': 620,
    'state_cdcurr_county': 510,
    'state_necta_place': 361,
    'aianhh': 250,
    'aianhh_aitsce': 251,
    'aianhh_aihhtli': 252,
    # NOTE: a second, identical 'state_sldl_county': 622 entry previously
    # appeared here; the redundant duplicate key was removed.
    'aianhh_aihhtli254': 254
}
# Number of characters each geoid component occupies. A *string* value (e.g.
# '1', '3') signals that the census encodes that component as a string rather
# than an integer; Geoid.make_format_string/make_regex test the value's type
# (isinstance(..., int)) to pick the format.
lengths = {
    'null': 1,
    'aianhh': 4,  # American Indian Area/Alaska Native Area/ Hawaiian Home Land (Census)
    'aihhtli': '1',  # American Indian Trust Land/ Hawaiian Home Land Indicator. A str b/c Census val is a str
    'aitsce': 3,  # American Indian Tribal Subdivision (Census)
    'anrc': 5,  # Alaska Native Regional Corporation (FIPS)
    'blkgrp': 1,  # Block Group
    'blockgroup': 1,  # Block Group
    'block': 4,  # Block
    'cbsa': 5,  # Metropolitan and Micropolitan Statistical Area
    'cdcurr': 2,  # Current Congressional District ***
    'cnecta': 3,  # New England City and Town Combined Statistical Area
    'concit': 5,  # Consolidated City
    'county': 3,  # County of current residence
    'cousub': 5,  # County Subdivision (FIPS)
    'cosub': 5,  # County Subdivision (FIPS)
    'csa': 3,  # Combined Statistical Area
    'division': 1,  # Census Division
    'metdiv': 5,  # Metropolitan Statistical Area- Metropolitan Division
    'necta': 5,  # New England City and Town Area
    'nectadiv': 5,  # New England City and Town Area Division
    'place': 5,  # Place (FIPS Code)
    'puma5': 5,  # Public Use Microdata Area 5% File
    'region': 1,  # Census Region
    'sdelm': 5,  # State-School District (Elementary)
    'sdsec': 5,  # State-School District (Secondary)
    'sduni': 5,  # State-School District (Unified)
    'sldl': '3',  # State Legislative District Lower. A String to signal that the census value is a string
    'sldu': '3',  # State Legislative District Upper. A String to signal that the census value is a string
    'state': 2,  # State (FIPS Code)
    'submcd': 5,  # Subminor Civil Division (FIPS)
    'tract': 6,  # Census Tract
    'ua': 5,  # Urban Area
    'ur': 1,  # Urban/Rural
    'us': 0,
    'zcta': 5,
    # Nonstandard
    'zip': 5,
}
# Map of summary-level number -> ordered list of component names making up a
# geoid at that level. The order determines both formatting and parsing
# (see Geoid.make_format_string and Geoid.make_regex).
segments = {
    1: ['null'],  # United States
    10: ['us'],  # United States
    20: ['region'],  # Region
    30: ['division'],  # Division
    40: ['state'],  # State
    50: ['state', 'county'],  # County
    60: ['state', 'county', 'cousub'],  # County Subdivision
    67: ['state', 'county', 'cousub', 'submcd'],  # State (Puerto Rico Only)-County-County Subdivision-Subbarrio
    70: ['state', 'county', 'cousub', 'place'],  # County Subdivision-Place/Remainder
    80: ['state', 'county', 'cousub', 'place', 'tract'],  # County Subdivision-Place/Remainder-Census Tract
    101: ['state', 'county', 'tract', 'block'],
    140: ['state', 'county', 'tract'],  # Census Tract
    150: ['state', 'county', 'tract', 'blockgroup'],  # Census Tract-Block Group
    155: ['state', 'place', 'county'],  # Place-County
    160: ['state', 'place'],  # Place
    170: ['state', 'concit'],  # Consolidated City
    172: ['state', 'concit', 'place'],  # Consolidated City-Place Within Consolidated City
    230: ['state', 'anrc'],  # State-Alaska Native Regional Corporation
    250: ['aianhh'],  # American Indian Area/Alaska Native Area/Hawaiian Home Land
    251: ['aianhh', 'aitsce'],  # American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder
    252: ['aianhh', 'aihhtli'],  # American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)4
    254: ['aianhh', 'aihhtli'],  # American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
    260: ['state', 'aianhh'],  # American Indian Area/Alaska Native Area/Hawaiian Home Land-State
    269: ['state', 'aianhh', 'place'],  # American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder
    270: ['aianhh', 'state', 'county'],  # American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County
    280: ['state', 'aianhh'],  # State-American Indian Area/Alaska Native Area/Hawaiian Home Land
    283: ['state', 'aianhh', 'aihhtli'],
    # State-American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)
    286: ['state', 'aianhh', 'aihhtli'],
    # State-American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
    290: ['aianhh', 'aitsce', 'state'],
    # American Indian Area/Alaska Native Area/Hawaiian Home Land-Tribal Subdivision/Remainder-State
    310: ['cbsa'],  # CBSA
    311: ['cbsa', 'state'],  # CBSA-State-County
    312: ['cbsa', 'state', 'place'],  # CBSA-State-Principal City
    313: ['cbsa', 'state', 'county'],  # CBSA-State-County
    314: ['cbsa', 'metdiv'],  # Metropolitan Statistical Area/Metropolitan Division
    315: ['cbsa', 'metdiv', 'state'],  # Metropolitan Statistical Area/Metropolitan Division-State
    316: ['cbsa', 'metdiv', 'state', 'county'],  # Metropolitan Statistical Area/Metropolitan Division-State-County
    320: ['state', 'cbsa'],  # State- CBSA
    321: ['state', 'cbsa', 'place'],  # State- CBSA -Principal City
    322: ['state', 'cbsa', 'county'],  # State- CBSA -County
    323: ['state', 'cbsa', 'metdiv'],  # State- Metropolitan Statistical Area/Metropolitan Division
    324: ['state', 'cbsa', 'metdiv', 'county'],  # State- Metropolitan Statistical Area/Metropolitan Division-County
    330: ['csa'],  # Combined Statistical Area
    331: ['csa', 'state'],  # Combined Statistical Area-State
    332: ['csa', 'cbsa'],  # Combined Statistical Area-CBSA
    333: ['csa', 'cbsa', 'state'],  # Combined Statistical Area-CBSA-State
    335: ['cnecta'],  # Combined New England City and Town Area
    336: ['cnecta', 'state'],  # Combined New England City and Town Area -State
    337: ['cnecta', 'necta'],  # Combined New England City and Town Area -New England City and Town Area
    338: ['cnecta', 'necta', 'state'],  # Combined New England City and Town Area -New England City and Town Area-State
    340: ['state', 'csa'],  # State-Combined Statistical Area
    341: ['state', 'csa', 'cbsa'],  # State-Combined Statistical Area-CBSA
    345: ['state', 'cnecta'],  # State-Combined New England City and Town Area
    346: ['state', 'cnecta', 'necta'],  # State-Combined New England City and Town Area-New England City and Town Area
    350: ['necta'],  # New England City and Town Area
    351: ['necta', 'state'],  # New England City and Town Area-State
    352: ['necta', 'state', 'place'],  # New England City and Town Area-State-Principal City
    353: ['necta', 'state', 'county'],  # New England City and Town Area-State-County
    354: ['necta', 'state', 'county', 'cousub'],  # New England City and Town Area-State-County-County Subdivision
    355: ['necta', 'nectadiv'],  # New England City and Town Area (NECTA)-NECTA Division
    356: ['necta', 'nectadiv', 'state'],  # New England City and Town Area (NECTA)-NECTA Division-State
    357: ['necta', 'nectadiv', 'state', 'county'],  # New England City and Town Area (NECTA)-NECTA Division-State-County
    358: ['necta', 'nectadiv', 'state', 'county', 'cousub'],
    # New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision
    360: ['state', 'necta'],  # State-New England City and Town Area
    361: ['state', 'necta', 'place'],  # State-New England City and Town Area-Principal City
    362: ['state', 'necta', 'county'],  # State-New England City and Town Area-County
    363: ['state', 'necta', 'county', 'cousub'],  # State-New England City and Town Area-County-County Subdivision
    364: ['state', 'necta', 'nectadiv'],  # State-New England City and Town Area (NECTA)-NECTA Division
    365: ['state', 'necta', 'nectadiv', 'county'],  # State-New England City and Town Area (NECTA)-NECTA Division-County
    366: ['state', 'necta', 'nectadiv', 'county', 'cousub'],
    # State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision
    400: ['ua'],  # Urban Area,
    410: ['ua', 'state'],  # Urban Area, State,
    430: ['ua','state','county'],  # Urban Area, State, County,
    500: ['state', 'cdcurr'],  # Congressional District
    510: ['state', 'cdcurr', 'county'],  #
    550: ['state', 'cdcurr', 'aianhh'],
    # Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land
    610: ['state', 'sldu'],  # State Senate District
    612: ['state', 'sldu', 'county'],  # State Senate District-County
    620: ['state', 'sldl'],  # State House District
    622: ['state', 'sldl', 'county'],  # State House District-County
    795: ['state', 'puma5'],  # State-Public Use MicroSample Area 5%
    860: ['zcta'],
    950: ['state', 'sdelm'],  # State-Elementary School District
    960: ['state', 'sdsec'],  # State-High School District
    970: ['state', 'sduni'],  # State-Unified School District
    # Nonstandard
    1200: ['zip']
}
# Human-readable description for each summary-level number in `segments`.
descriptions = {
    1: 'United States',
    10: 'United States',
    20: 'Region',
    30: 'Division',
    40: 'State',
    50: 'County',
    60: 'County Subdivision',
    67: 'State (Puerto Rico Only)-County-County Subdivision-Subbarrio',
    70: 'County Subdivision-Place/Remainder',
    80: 'County Subdivision-Place/Remainder-Census Tract',
    101: 'block',
    140: 'Census Tract',
    150: 'Census Tract-Block Group',
    155: 'Place-County',
    160: 'Place',
    170: 'Consolidated City',
    172: 'Consolidated City-Place Within Consolidated City',
    230: 'State-Alaska Native Regional Corporation',
    250: 'American Indian Area/Alaska Native Area/Hawaiian Home Land',
    251: 'American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder',
    252: 'American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)',
    254: 'American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land',
    260: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State',
    269: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder',
    270: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County',
    280: 'State-American Indian Area/Alaska Native Area/Hawaiian Home Land',
    # NOTE(review): the next three values look like placeholder component names
    # rather than real descriptions -- confirm against the Census summary-level
    # reference before relying on them.
    283: 'aihhtli',
    286: 'aihhtli',
    290: 'state',
    310: 'CBSA',
    311: 'CBSA-State-County',
    312: 'CBSA-State-Principal City',
    313: 'CBSA-State-County',
    314: 'Metropolitan Statistical Area/Metropolitan Division',
    315: 'Metropolitan Statistical Area/Metropolitan Division-State',
    316: 'Metropolitan Statistical Area/Metropolitan Division-State-County',
    320: 'State- CBSA',
    321: 'State- CBSA -Principal City',
    322: 'State- CBSA -County',
    323: 'State- Metropolitan Statistical Area/Metropolitan Division',
    324: 'State- Metropolitan Statistical Area/Metropolitan Division-County',
    330: 'Combined Statistical Area',
    331: 'Combined Statistical Area-State',
    332: 'Combined Statistical Area-CBSA',
    333: 'Combined Statistical Area-CBSA-State',
    335: 'Combined New England City and Town Area',
    336: 'Combined New England City and Town Area -State',
    337: 'Combined New England City and Town Area -New England City and Town Area',
    338: 'Combined New England City and Town Area -New England City and Town Area-State',
    340: 'State-Combined Statistical Area',
    341: 'State-Combined Statistical Area-CBSA',
    345: 'State-Combined New England City and Town Area',
    346: 'State-Combined New England City and Town Area-New England City and Town Area',
    350: 'New England City and Town Area',
    351: 'New England City and Town Area-State',
    352: 'New England City and Town Area-State-Principal City',
    353: 'New England City and Town Area-State-County',
    354: 'New England City and Town Area-State-County-County Subdivision',
    355: 'New England City and Town Area (NECTA)-NECTA Division',
    356: 'New England City and Town Area (NECTA)-NECTA Division-State',
    357: 'New England City and Town Area (NECTA)-NECTA Division-State-County',
    358: 'New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision',
    360: 'State-New England City and Town Area',
    361: 'State-New England City and Town Area-Principal City',
    362: 'State-New England City and Town Area-County',
    363: 'State-New England City and Town Area-County-County Subdivision',
    364: 'State-New England City and Town Area (NECTA)-NECTA Division',
    # NOTE(review): per `segments`, 365 is nectadiv-County; this description
    # reads like the (missing) 366 entry -- confirm which is intended.
    365: 'State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision',
    400: 'Urban Area,',
    410: 'Urban Area, State,',
    430: 'Urban Area, State, County,',
    500: 'Congressional District',
    510: 'Congressional District, County',
    550: 'Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land',
    610: 'State Senate District',
    612: 'State Senate District-County',
    620: 'State House District',
    622: 'State House District-County',
    795: 'State-Public Use MicroSample Area 5%',
    860: 'ZIP Code Tabulation Area',
    950: 'State-Elementary School District',
    960: 'State-High School District',
    970: 'State-Unified School District',
}
# Irregular plural forms for level names; Geoid.level_plural falls back to
# appending 's' for any level not listed here.
plurals = {
    'county': 'counties',
    'place': 'places',
    # NOTE(review): this entry looks wrong -- the key is capitalized (level
    # names are lowercase, so it can never match) and the value 'State '
    # appears truncated. Left unchanged pending confirmation.
    'Sdlu': 'State '
}
class NotASummaryName(Exception):
    """Raised when an argument is not one of the valid summary-level names or numbers."""
class ParseError(Exception):
    """Raised when a geoid string cannot be parsed."""
def base62_encode(num):
    """Encode a non-negative integer as a base-62 string.

    Uses the alphabet 0-9, a-z, A-Z (least to most significant).

    :param num: The non-negative integer to encode (coerced with ``int()``).
    :raises ValueError: If ``num`` is negative. (The previous implementation
        looped forever on negative input, since ``num // 62`` never reaches
        zero for negative ``num`` in Python.)

    Adapted from: http://stackoverflow.com/a/1119769/1144479
    """
    num = int(num)
    if num < 0:
        raise ValueError("Cannot base62-encode a negative number: {}".format(num))

    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if num == 0:
        return alphabet[0]

    base = len(alphabet)
    digits = []
    while num:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])

    # Digits were produced least-significant first.
    return ''.join(reversed(digits))
def base62_decode(string):
    """Decode a base-62 encoded string into an integer.

    Arguments:
    - `string`: The encoded string, over the alphabet 0-9, a-z, A-Z.

    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    base = len(alphabet)

    # Horner's method: fold each digit into the running total.
    total = 0
    for ch in string:
        total = total * base + alphabet.index(ch)

    return int(total)
def augment(module_name, base_class):
    """Call ``augment()`` on every subclass of *base_class* found in the named module
    (the base class itself is skipped)."""
    def _is_candidate(obj):
        return inspect.isclass(obj) and issubclass(obj, base_class)

    module = sys.modules[module_name]
    for _member_name, member in inspect.getmembers(module, _is_candidate):
        if member != base_class:
            member.augment()
def get_class(module, sl):
    """Return the class in *module* whose summary level matches *sl*.

    *sl* may be either a level number or a level name; the lookup walks the
    module-level ``names`` table in order.

    :raises NotASummaryName: If *sl* matches no known level.
    """
    match = next(
        (level for level, level_num in names.items() if level_num == sl or sl == level),
        None)

    if match is None:
        raise NotASummaryName("No class for summary_level {}".format(sl))

    return getattr(module, match.capitalize())
def make_classes(base_class, module):
    """Create one derived class per summary level and install each on *module*.

    Called at the end of each of the derived-class modules (acs, census,
    civick, tiger). Each generated class is augmented, then bound on the
    module under its capitalized level name. Also installs a module-level
    ``get_class`` partially applied to *module*.
    """
    from functools import partial

    for level_name in names:
        class_name = level_name.capitalize()
        derived = base_class.class_factory(class_name)
        derived.augment()
        setattr(module, class_name, derived)

    module.get_class = partial(get_class, module)
class CountyName(object):
    """A Census county name, with helpers for shortened forms and for the
    division designation (county, parish, borough, etc.)."""

    # Matches the trailing ', <state>' portion of a census name. This doesn't
    # work for some locations where the county is actually called a parish or
    # a bario.
    state_name_pattern = r', (.*)$'
    state_name_re = re.compile(state_name_pattern)

    type_names = (
        'County', 'Municipio', 'Parish', 'Census Area', 'Borough',
        'Municipality', 'city', 'City and Borough')
    type_name_pattern = '|'.join('({})'.format(e) for e in type_names)
    type_names_re = re.compile(type_name_pattern)

    county_name_pattern = r'(.+) {}, (.+)'.format(type_name_pattern)
    county_name_re = re.compile(county_name_pattern)

    def __init__(self, name):
        self.name = name

    def intuit_name(self, name):
        """Return a numeric value in the range [-1,1), indicating the likelyhood that the name is for a valuable of
        of this type. -1 indicates a strong non-match, 1 indicates a strong match, and 0 indicates uncertainty. """
        raise NotImplementedError

    @property
    def state(self):
        """The state portion of the name, or '' when the name has no comma."""
        try:
            _county_part, state_part = self.name.split(',')
        except ValueError:
            # e.g. 'District of Columbia' has nothing to split on.
            return ''
        return state_part

    @property
    def medium_name(self):
        """The census name with the trailing state stripped."""
        return self.state_name_re.sub('', self.name)

    @property
    def division_name(self):
        """The division designation ('County', 'Parish', 'Borough', ...), or ''."""
        match = self.type_names_re.search(self.name)
        try:
            return next(g for g in match.groups() if g is not None)
        except AttributeError:
            # search() returned None, e.g. for 'District of Columbia'.
            return ''

    @property
    def short_name(self):
        """The name with both the division designation and the state removed."""
        try:
            county_part, _state_part = self.name.split(',')
        except ValueError:
            return self.name  # 'District of Colombia'
        return self.type_names_re.sub('', county_part)

    def __str__(self):
        return self.name
class Geoid(object):
    """Base class for census geoids (hierarchical geographic identifiers).

    Concrete per-summary-level subclasses are generated by ``make_classes()``
    and completed by ``augment()``.

    NOTE(review): this base class depends on attributes it does not define --
    ``sl_map``, ``class_map``, ``sl_format``, ``elem_format``,
    ``elem_str_format``, ``sl_regex``, ``elem_regex``, ``sl_width``,
    ``part_width``, ``encode`` and ``decode`` -- presumably supplied by the
    derived-class modules (acs, census, civick, tiger); confirm there.
    """

    @classmethod
    def resolve_summary_level(cls, sl):
        """Return the geoid class registered for summary level *sl*, or None if unknown."""
        try:
            return cls.sl_map[sl]
        except KeyError:
            return None

    @classmethod
    def make_format_string(cls, level):
        """Build the format string used to render a geoid of the given level name."""
        sl_num = names[level]
        segs = segments[sl_num]

        formats = []
        formats.append(cls.sl_format)

        for seg in segs:
            # Lengths dict may have strings to indicate string format usage.
            if int(lengths[seg]) <= 0:
                continue

            if isinstance(lengths[seg], int):
                fmt = cls.elem_format
            else:
                fmt = cls.elem_str_format

            formats.append(fmt.format(seg, cls.part_width(lengths[seg])))

        return ''.join(formats)

    @classmethod
    def make_regex(cls, level):
        """Build the anchored regex string used to parse a geoid of the given level name."""
        sl_num = names[level]
        segs = segments[sl_num]

        # Lengths dict may have strings to indicate string format usage.
        regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
                                    for seg in segs if int(lengths[seg]) > 0]

        re_str = '^' + ''.join(regexes) + '$'

        return re_str

    @classmethod
    def augment(cls):
        """Augment the class with computed formats, regexes, and other things. This caches these values so
        they don't have to be created for every instance. """

        import re

        level_name = cls.__name__.lower()

        cls.sl = names[level_name]

        # Register the class in both lookup tables: by name and by level number.
        cls.class_map[cls.__name__.lower()] = cls
        cls.sl_map[cls.sl] = cls

        cls.fmt = cls.make_format_string(cls.__name__.lower())

        cls.regex_str = cls.make_regex(cls.__name__.lower())
        cls.regex = re.compile(cls.regex_str)

        # List of field names
        cls.level = level_name
        cls.fields = segments[cls.sl]

    @classmethod
    def get_class(cls, name_or_sl):
        """Return a derived class based on the class name or the summary_level"""
        try:
            return cls.sl_map[int(name_or_sl)]
        except TypeError as e:
            raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
        except ValueError:
            # Not a number; fall back to looking it up as a level name.
            try:
                return cls.class_map[name_or_sl.lower()]
            except (KeyError, ValueError):
                raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))

    def __init__(self, *args, **kwargs):
        """Initialize the geoid's component fields; unset fields default to 0."""
        # This is a bit unusual, because it means, that , unlike normal
        # python args, a kwarg can overwrite a position arg.
        d = dict(zip(self.fields, args + ((0,) * 10)))  # Add enough zeros to set all fields to zero
        d.update(kwargs)

        for k, v in d.items():
            if k in self.fields:
                try:
                    setattr(self, k, v)
                except TypeError as e:
                    raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
                                    .format(v, type(v), k, type(self), e))
                except ValueError as e:
                    raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
                                     .format(v, type(v), k, type(self), e))

    def __str__(self):
        """Render the geoid using this class's computed format string."""
        d = self.__dict__
        d['sl'] = self.sl

        # Hacks for special string cases
        if 'sldu' in d:
            d['sldu'] = str(d['sldu']).zfill(3)

        if 'sldl' in d:
            d['sldl'] = str(d['sldl']).zfill(3)

        try:
            fn = six.get_method_function(self.encode)
            kwargs = {k: fn(v) for k, v in d.items()}
            return self.fmt.format(**kwargs)
        except (ValueError, KeyError) as e:
            raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))

    @property
    def state_name(self):
        """The name of this geoid's state, from the generated names table."""
        from geoid.censusnames import geo_names
        return geo_names[(self.state, 0)]

    @property
    def stusab(self):
        """The two-letter state abbreviation, or 'US' for national-level geoids."""
        from geoid.censusnames import stusab

        try:
            return stusab[int(self.state)]
        except (AttributeError, ValueError):
            # Assume this is a Us object, or some other national object
            return 'US'

    @property
    def county_name(self):
        """A CountyName for this geoid's county, falling back to synthetic
        placeholder names when the county or state is not in the names table."""
        from geoid.censusnames import geo_names

        try:
            try:
                return CountyName(geo_names[(self.state, self.county)])
            except KeyError:
                try:
                    return CountyName("County #{}, {}".format(self.county,geo_names[(self.state, 0)]))
                except KeyError:
                    return CountyName("County #{}, State#{}".format(self.county, self.state))
        except Exception:
            return CountyName('')

    @property
    def geo_name(self):
        """
        Return a name of the state or county, or, for other lowever levels, the
        name of the level type in the county.

        :return:
        """
        if self.level == 'county':
            return str(self.county_name)
        elif self.level == 'state':
            return self.state_name
        else:
            if hasattr(self, 'county'):
                return "{} in {}".format(self.level,str(self.county_name))
            elif hasattr(self, 'state'):
                return "{} in {}".format(self.level, self.state_name)
            else:
                return "a {}".format(self.level)

    # Identity, equality and ordering are all defined on the rendered string
    # form, so geoids compare consistently wherever the rendering agrees.
    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        return str(self) == str(other)

    def __ne__(self, other):
        return str(self) != str(other)

    def __lt__(self, other):
        return str(self) < str(other)

    def __le__(self, other):
        return str(self) <= str(other)

    def __gt__(self, other):
        return str(self) > str(other)

    def __ge__(self, other):
        return str(self) >= str(other)

    @classmethod
    def parse(cls, gvid, exception=True):
        """
        Parse a string value into the geoid of this class.

        :param gvid: String value to parse.
        :param exception: If true ( default) raise an eception on parse erorrs. If False, return a
        'null' geoid.
        :return:
        """

        if gvid == 'invalid':
            return cls.get_class('null')(0)

        if not bool(gvid):
            return None

        if not isinstance(gvid, six.string_types):
            raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))

        try:
            if not cls.sl:
                # Civick and ACS include the SL, so can call from base type.
                if six.PY3:
                    fn = cls.decode
                else:
                    fn = cls.decode.__func__

                sl = fn(gvid[0:cls.sl_width])
            else:
                sl = cls.sl  # Otherwise must use derived class.

        except ValueError as e:
            if exception:
                raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
            else:
                return cls.get_class('null')(0)

        try:
            cls = cls.sl_map[sl]
        except KeyError:
            if exception:
                raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
            else:
                return cls.get_class('null')(0)

        m = cls.regex.match(gvid)

        if not m:
            raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))

        d = m.groupdict()

        if not d:
            return None

        if six.PY3:
            fn = cls.decode
        else:
            fn = cls.decode.__func__

        d = {k: fn(v) for k, v in d.items()}

        try:
            del d['sl']
        except KeyError:
            pass

        return cls(**d)

    def convert(self, root_cls):
        """Convert to another derived class. cls is the base class for the derived type,
        ie AcsGeoid, TigerGeoid, etc. """
        d = self.__dict__
        d['sl'] = self.sl

        try:
            cls = root_cls.get_class(root_cls.sl)
        except (AttributeError, TypeError):
            # Hopefully because root_cls is a module
            cls = root_cls.get_class(self.sl)

        return cls(**d)

    def as_census(self):
        """This geoid converted to the Census form."""
        from geoid.census import CensusGeoid
        return self.convert(CensusGeoid)

    def as_acs(self):
        """This geoid converted to the ACS form."""
        from geoid.acs import AcsGeoid
        return self.convert(AcsGeoid)

    def as_tiger(self):
        """This geoid converted to the TIGER form."""
        from geoid.tiger import TigerGeoid
        return self.convert(TigerGeoid)

    def promote(self, level=None):
        """Convert to the next higher level summary level"""
        if level is None:
            if len(self.fields) < 2:
                if self.level in ('region', 'division', 'state', 'ua'):
                    cls = self.get_class('us')
                else:
                    return None
            else:
                cls = self.get_class(self.fields[-2])
        else:
            cls = self.get_class(level)

        d = dict(self.__dict__.items())
        d['sl'] = self.sl

        return cls(**d)

    def summarize(self):
        """Convert all of the values to their max values. This form is used to represent the summary level"""
        raise NotImplementedError

    def allval(self):
        """Convert the last value to zero. This form represents the entire higher summary level at the granularity
        of the lower summary level. For example, for a county, it means 'All counties in the state' """
        d = dict(self.__dict__.items())
        d['sl'] = self.sl
        d[self.level] = 0

        cls = self.get_class(self.sl)

        return cls(**d)

    @classmethod
    def nullval(cls):
        """Create a new instance where all of the values are 0"""
        d = dict(cls.__dict__.items())

        for k in d:
            d[k] = 0

        d['sl'] = cls.sl
        d[cls.level] = 0

        return cls(**d)

    @property
    def tuples(self):
        """Return tuples of field, value, in the order of the levels as they are defined """
        return [(field, getattr(self, field, None)) for field in self.fields]

    @property
    def is_summary(self):
        """Return True if this geoid is an summary -- all of the fields are 0"""
        return str(self) == str(self.summarize())

    @property
    def is_allval(self):
        """Return True if this geoid is an allval -- the last field is zero, but the first is not"""
        tups = self.tuples
        return tups[-1][1] == 0 and tups[0][1] != 0

    @property
    def level_plural(self):
        """Return the name of the level as a plural"""
        return plurals.get(self.level, self.level + "s")
def generate_all(sumlevel, d):
    """Generate a dict that includes all of the available geoid values, with keys
    for the most common names for those values.

    :param sumlevel: Summary level number (or numeric string).
    :param d: Mapping of geoid component names to values. Common variant key
        names (``cousub``, ``blkgrp``, ``zcta5``) are normalized to the
        canonical names; the caller's dict is not mutated.
    :return: Dict with ``gvid``, ``geoid`` and ``geoidt`` string forms, or an
        empty dict when no GVid class exists for the summary level.
    """
    from geoid.civick import GVid
    from geoid.tiger import TigerGeoid
    from geoid.acs import AcsGeoid

    sumlevel = int(sumlevel)
    d = dict(d.items())  # Copy so the caller's dict is not modified.

    # Map common name variants to the canonical component names.
    if 'cousub' in d:
        d['cosub'] = d['cousub']
        del d['cousub']

    if 'blkgrp' in d:
        d['blockgroup'] = d['blkgrp']
        del d['blkgrp']

    if 'zcta5' in d:
        d['zcta'] = d['zcta5']
        del d['zcta5']

    gvid_class = GVid.resolve_summary_level(sumlevel)

    if not gvid_class:
        return {}

    geoidt_class = TigerGeoid.resolve_summary_level(sumlevel)
    geoid_class = AcsGeoid.resolve_summary_level(sumlevel)

    # A former bare ``try: ... except: raise`` wrapper here was a no-op and
    # has been removed; errors propagate exactly as before.
    return dict(
        gvid=str(gvid_class(**d)),
        geoid=str(geoid_class(**d)),
        geoidt=str(geoidt_class(**d))
    )
def _generate_names():
    """ Code to generate the state and county names

    >>> python -c 'import geoid; geoid._generate_names()'

    """
    # ``pprint`` is not imported at module level (only inspect/re/sys/six
    # are), so without this local import the final dump raised NameError.
    import pprint

    from ambry import get_library

    l = get_library()

    counties = l.partition('census.gov-acs-geofile-2009-geofile50-20095-50')
    states = l.partition('census.gov-acs-geofile-2009-geofile40-20095-40')

    names = {}

    for row in counties.remote_datafile.reader:
        names[(row.state, row.county)] = row.name

    for row in states.remote_datafile.reader:
        if row.component == '00':
            names[(row.state, 0)] = row.name

    pprint.pprint(names)
|
Metatab/geoid
|
geoid/core.py
|
base62_decode
|
python
|
def base62_decode(string):
alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
base = len(alphabet)
strlen = len(string)
num = 0
idx = 0
for char in string:
power = (strlen - (idx + 1))
num += alphabet.index(char) * (base ** power)
idx += 1
return int(num)
|
Decode a Base X encoded string into the number
Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding
Stolen from: http://stackoverflow.com/a/1119769/1144479
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L384-L405
| null |
""" CLasses for working with census Geoids
"""
import inspect
import re
import sys
import six
names = { # (summary level value, base 10 chars, Base 62 chars, prefix fields)
'null': 1,
'us': 10,
'region': 20,
'division': 30,
'state': 40,
'county': 50,
'cosub': 60,
'place': 160,
'ua': 400,
'tract': 140,
'blockgroup': 150,
'block': 101,
'sdelm': 950,
'sdsec': 960,
'sduni': 970,
'zcta': 860,
'zip': 1200,
'sldl': 620,
'sldu': 610,
'cdcurr': 500,
# Other Levels that don't have proper names yet. For these .allval() and
# simplify() don't work properly.
'state_aianhh': 260,
'necta_nectadiv_state_county_cousub': 358,
'state_aianhh_place': 269,
'aianhh_state_county': 270,
'state_cbsa_metdiv': 323,
'state_aianhh280': 280,
'state_place_county': 155,
'aianhh_aitsce_state': 290,
'state_aianhh_aihhtli': 283,
'state_cdcurr_aianhh': 550,
'state_concit': 170,
'state_concit_place': 172,
'state_aianhh_aihhtli286': 286,
'cbsa': 310,
'cbsa_state': 311,
'cbsa_state_place': 312,
'cbsa_state_county': 313,
'cbsa_metdiv': 314,
'cbsa_metdiv_state': 315,
'state_cbsa': 320,
'state_cbsa_place': 321,
'state_cbsa_county': 322,
'state_county_cousub_submcd': 67,
'state_cbsa_metdiv_county': 324,
'state_county_cousub_place': 70,
'necta_state_county': 353,
'state_puma5': 795,
'csa': 330,
'csa_state': 331,
'csa_cbsa': 332,
'csa_cbsa_state': 333,
'cnecta': 335,
'state_county_cousub_place_tract': 80,
'cnecta_necta': 337,
'cnecta_necta_state': 338,
'state_csa': 340,
'state_csa_cbsa': 341,
'state_cnecta': 345,
'state_cnecta_necta': 346,
'necta': 350,
'necta_state': 351,
'necta_state_place': 352,
'cnecta_state': 336,
'necta_state_county_cousub': 354,
'necta_nectadiv': 355,
'necta_nectadiv_state': 356,
'state_anrc': 230,
'necta_nectadiv_state_county': 357,
'state_necta': 360,
'cbsa_metdiv_state_county': 316,
'state_necta_county': 362,
'state_necta_county_cousub': 363,
'state_necta_nectadiv': 364,
'state_necta_nectadiv_county': 365,
'state_necta_nectadiv_county_cousub': 366,
'ua_state': 410,
'ua_state_county': 430,
'state_sldu_county': 612,
'state_sldu': 610,
'state_sldl_county': 622,
'state_sldl': 620,
'state_cdcurr_county': 510,
'state_necta_place': 361,
'aianhh': 250,
'aianhh_aitsce': 251,
'aianhh_aihhtli': 252,
'state_sldl_county': 622,
'aianhh_aihhtli254': 254
}
lengths = {
'null': 1,
'aianhh': 4, # American Indian Area/Alaska Native Area/ Hawaiian Home Land (Census)
'aihhtli': '1', # American Indian Trust Land/ Hawaiian Home Land Indicator. A str b/c Census val is a str
'aitsce': 3, # American Indian Tribal Subdivision (Census)
'anrc': 5, # Alaska Native Regional Corporation (FIPS)
'blkgrp': 1, # Block Group
'blockgroup': 1, # Block Group
'block': 4, # Block
'cbsa': 5, # Metropolitan and Micropolitan Statistical Area
'cdcurr': 2, # Current Congressional District ***
'cnecta': 3, # New England City and Town Combined Statistical Area
'concit': 5, # Consolidated City
'county': 3, # County of current residence
'cousub': 5, # County Subdivision (FIPS)
'cosub': 5, # County Subdivision (FIPS)
'csa': 3, # Combined Statistical Area
'division': 1, # Census Division
'metdiv': 5, # Metropolitan Statistical Area- Metropolitan Division
'necta': 5, # New England City and Town Area
'nectadiv': 5, # New England City and Town Area Division
'place': 5, # Place (FIPS Code)
'puma5': 5, # Public Use Microdata Area 5% File
'region': 1, # Census Region
'sdelm': 5, # State-School District (Elementary)
'sdsec': 5, # State-School District (Secondary)
'sduni': 5, # State-School District (Unified)
'sldl': '3', # State Legislative District Lower. A String to signal that the census value is a string
'sldu': '3', # State Legislative District Upper. A String to signal that the census value is a string
'state': 2, # State (FIPS Code)
'submcd': 5, # Subminor Civil Division (FIPS)
'tract': 6, # Census Tract
'ua': 5, # Urban Area
'ur': 1, # Urban/Rural
'us': 0,
'zcta': 5,
# Nonstandard
'zip': 5,
}
segments = {
1: ['null'], # United States
10: ['us'], # United States
20: ['region'], # Region
30: ['division'], # Division
40: ['state'], # State
50: ['state', 'county'], # County
60: ['state', 'county', 'cousub'], # County Subdivision
67: ['state', 'county', 'cousub', 'submcd'], # State (Puerto Rico Only)-County-County Subdivision-Subbarrio
70: ['state', 'county', 'cousub', 'place'], # County Subdivision-Place/Remainder
80: ['state', 'county', 'cousub', 'place', 'tract'], # County Subdivision-Place/Remainder-Census Tract
101: ['state', 'county', 'tract', 'block'],
140: ['state', 'county', 'tract'], # Census Tract
150: ['state', 'county', 'tract', 'blockgroup'], # Census Tract-Block Group
155: ['state', 'place', 'county'], # Place-County
160: ['state', 'place'], # Place
170: ['state', 'concit'], # Consolidated City
172: ['state', 'concit', 'place'], # Consolidated City-Place Within Consolidated City
230: ['state', 'anrc'], # State-Alaska Native Regional Corporation
250: ['aianhh'], # American Indian Area/Alaska Native Area/Hawaiian Home Land
251: ['aianhh', 'aitsce'], # American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder
252: ['aianhh', 'aihhtli'], # American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)4
254: ['aianhh', 'aihhtli'], # American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
260: ['state', 'aianhh'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-State
269: ['state', 'aianhh', 'place'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder
270: ['aianhh', 'state', 'county'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County
280: ['state', 'aianhh'], # State-American Indian Area/Alaska Native Area/Hawaiian Home Land
283: ['state', 'aianhh', 'aihhtli'],
# State-American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)
286: ['state', 'aianhh', 'aihhtli'],
# State-American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
290: ['aianhh', 'aitsce', 'state'],
# American Indian Area/Alaska Native Area/Hawaiian Home Land-Tribal Subdivision/Remainder-State
310: ['cbsa'], # CBSA
311: ['cbsa', 'state'], # CBSA-State-County
312: ['cbsa', 'state', 'place'], # CBSA-State-Principal City
313: ['cbsa', 'state', 'county'], # CBSA-State-County
314: ['cbsa', 'metdiv'], # Metropolitan Statistical Area/Metropolitan Division
315: ['cbsa', 'metdiv', 'state'], # Metropolitan Statistical Area/Metropolitan Division-State
316: ['cbsa', 'metdiv', 'state', 'county'], # Metropolitan Statistical Area/Metropolitan Division-State-County
320: ['state', 'cbsa'], # State- CBSA
321: ['state', 'cbsa', 'place'], # State- CBSA -Principal City
322: ['state', 'cbsa', 'county'], # State- CBSA -County
323: ['state', 'cbsa', 'metdiv'], # State- Metropolitan Statistical Area/Metropolitan Division
324: ['state', 'cbsa', 'metdiv', 'county'], # State- Metropolitan Statistical Area/Metropolitan Division-County
330: ['csa'], # Combined Statistical Area
331: ['csa', 'state'], # Combined Statistical Area-State
332: ['csa', 'cbsa'], # Combined Statistical Area-CBSA
333: ['csa', 'cbsa', 'state'], # Combined Statistical Area-CBSA-State
335: ['cnecta'], # Combined New England City and Town Area
336: ['cnecta', 'state'], # Combined New England City and Town Area -State
337: ['cnecta', 'necta'], # Combined New England City and Town Area -New England City and Town Area
338: ['cnecta', 'necta', 'state'], # Combined New England City and Town Area -New England City and Town Area-State
340: ['state', 'csa'], # State-Combined Statistical Area
341: ['state', 'csa', 'cbsa'], # State-Combined Statistical Area-CBSA
345: ['state', 'cnecta'], # State-Combined New England City and Town Area
346: ['state', 'cnecta', 'necta'], # State-Combined New England City and Town Area-New England City and Town Area
350: ['necta'], # New England City and Town Area
351: ['necta', 'state'], # New England City and Town Area-State
352: ['necta', 'state', 'place'], # New England City and Town Area-State-Principal City
353: ['necta', 'state', 'county'], # New England City and Town Area-State-County
354: ['necta', 'state', 'county', 'cousub'], # New England City and Town Area-State-County-County Subdivision
355: ['necta', 'nectadiv'], # New England City and Town Area (NECTA)-NECTA Division
356: ['necta', 'nectadiv', 'state'], # New England City and Town Area (NECTA)-NECTA Division-State
357: ['necta', 'nectadiv', 'state', 'county'], # New England City and Town Area (NECTA)-NECTA Division-State-County
358: ['necta', 'nectadiv', 'state', 'county', 'cousub'],
# New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision
360: ['state', 'necta'], # State-New England City and Town Area
361: ['state', 'necta', 'place'], # State-New England City and Town Area-Principal City
362: ['state', 'necta', 'county'], # State-New England City and Town Area-County
363: ['state', 'necta', 'county', 'cousub'], # State-New England City and Town Area-County-County Subdivision
364: ['state', 'necta', 'nectadiv'], # State-New England City and Town Area (NECTA)-NECTA Division
365: ['state', 'necta', 'nectadiv', 'county'], # State-New England City and Town Area (NECTA)-NECTA Division-County
366: ['state', 'necta', 'nectadiv', 'county', 'cousub'],
# State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision
400: ['ua'], # Urban Area,
410: ['ua', 'state'], # Urban Area, State,
430: ['ua','state','county'], # Urban Area, State, County,
500: ['state', 'cdcurr'], # Congressional District
510: ['state', 'cdcurr', 'county'], #
550: ['state', 'cdcurr', 'aianhh'],
# Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land
610: ['state', 'sldu'], # State Senate District
612: ['state', 'sldu', 'county'], # State Senate District-County
620: ['state', 'sldl'], # State House District
622: ['state', 'sldl', 'county'], # State House District-County
795: ['state', 'puma5'], # State-Public Use MicroSample Area 5%
860: ['zcta'],
950: ['state', 'sdelm'], # State-Elementary School District
960: ['state', 'sdsec'], # State-High School District
970: ['state', 'sduni'], # State-Unified School District
# Nonstandard
1200: ['zip']
}
descriptions = {
1: 'United States',
10: 'United States',
20: 'Region',
30: 'Division',
40: 'State',
50: 'County',
60: 'County Subdivision',
67: 'State (Puerto Rico Only)-County-County Subdivision-Subbarrio',
70: 'County Subdivision-Place/Remainder',
80: 'County Subdivision-Place/Remainder-Census Tract',
101: 'block',
140: 'Census Tract',
150: 'Census Tract-Block Group',
155: 'Place-County',
160: 'Place',
170: 'Consolidated City',
172: 'Consolidated City-Place Within Consolidated City',
230: 'State-Alaska Native Regional Corporation',
250: 'American Indian Area/Alaska Native Area/Hawaiian Home Land',
251: 'American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder',
252: 'American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)',
254: 'American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land',
260: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State',
269: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder',
270: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County',
280: 'State-American Indian Area/Alaska Native Area/Hawaiian Home Land',
283: 'aihhtli',
286: 'aihhtli',
290: 'state',
310: 'CBSA',
311: 'CBSA-State-County',
312: 'CBSA-State-Principal City',
313: 'CBSA-State-County',
314: 'Metropolitan Statistical Area/Metropolitan Division',
315: 'Metropolitan Statistical Area/Metropolitan Division-State',
316: 'Metropolitan Statistical Area/Metropolitan Division-State-County',
320: 'State- CBSA',
321: 'State- CBSA -Principal City',
322: 'State- CBSA -County',
323: 'State- Metropolitan Statistical Area/Metropolitan Division',
324: 'State- Metropolitan Statistical Area/Metropolitan Division-County',
330: 'Combined Statistical Area',
331: 'Combined Statistical Area-State',
332: 'Combined Statistical Area-CBSA',
333: 'Combined Statistical Area-CBSA-State',
335: 'Combined New England City and Town Area',
336: 'Combined New England City and Town Area -State',
337: 'Combined New England City and Town Area -New England City and Town Area',
338: 'Combined New England City and Town Area -New England City and Town Area-State',
340: 'State-Combined Statistical Area',
341: 'State-Combined Statistical Area-CBSA',
345: 'State-Combined New England City and Town Area',
346: 'State-Combined New England City and Town Area-New England City and Town Area',
350: 'New England City and Town Area',
351: 'New England City and Town Area-State',
352: 'New England City and Town Area-State-Principal City',
353: 'New England City and Town Area-State-County',
354: 'New England City and Town Area-State-County-County Subdivision',
355: 'New England City and Town Area (NECTA)-NECTA Division',
356: 'New England City and Town Area (NECTA)-NECTA Division-State',
357: 'New England City and Town Area (NECTA)-NECTA Division-State-County',
358: 'New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision',
360: 'State-New England City and Town Area',
361: 'State-New England City and Town Area-Principal City',
362: 'State-New England City and Town Area-County',
363: 'State-New England City and Town Area-County-County Subdivision',
364: 'State-New England City and Town Area (NECTA)-NECTA Division',
365: 'State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision',
400: 'Urban Area,',
410: 'Urban Area, State,',
430: 'Urban Area, State, County,',
500: 'Congressional District',
510: 'Congressional District, County',
550: 'Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land',
610: 'State Senate District',
612: 'State Senate District-County',
620: 'State House District',
622: 'State House District-County',
795: 'State-Public Use MicroSample Area 5%',
860: 'ZIP Code Tabulation Area',
950: 'State-Elementary School District',
960: 'State-High School District',
970: 'State-Unified School District',
}
plurals = {
'county': 'counties',
'place': 'places',
'Sdlu': 'State '
}
class NotASummaryName(Exception):
"""An argument was not one of the valid summary names"""
class ParseError(Exception):
"""Error parsing a geoid"""
def parse_to_gvid(v):
"""Parse an ACS Geoid or a GVID to a GVID"""
from geoid.civick import GVid
from geoid.acs import AcsGeoid
m1 = ''
try:
return GVid.parse(v)
except ValueError as e:
m1 = str(e)
try:
return AcsGeoid.parse(v).convert(GVid)
except ValueError as e:
raise ValueError("Failed to parse to either ACS or GVid: {}; {}".format(m1, str(e)))
def base62_encode(num):
"""Encode a number in Base X. WIth the built-in alphabet, its base 62
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
Stolen from: http://stackoverflow.com/a/1119769/1144479
"""
num = int(num)
alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def augment(module_name, base_class):
"""Call the augment() method for all of the derived classes in the module """
for name, cls in inspect.getmembers(sys.modules[module_name],
lambda x : inspect.isclass(x) and issubclass(x, base_class) ):
if cls == base_class:
continue
cls.augment()
def get_class(module, sl):
for name, named_sl in names.items():
if named_sl == sl or sl == name:
return getattr(module, name.capitalize())
raise NotASummaryName("No class for summary_level {}".format(sl))
def make_classes(base_class, module):
"""Create derived classes and put them into the same module as the base class.
This function is called at the end of each of the derived class modules, acs, census, civik and tiger.
It will create a set of new derived class in the module, one for each of the enries in the `summary_levels`
dict.
"""
from functools import partial
for k in names:
cls = base_class.class_factory(k.capitalize())
cls.augment()
setattr(module, k.capitalize(), cls)
setattr(module, 'get_class', partial(get_class, module))
class CountyName(object):
"""A Census county name, with methods to create shorter versions and return the types of division,
which may be county, parish, borough, etc. """
# Strip the county and state name. THis doesn't work for some locations
# where the county is actually called a parish or a bario.
state_name_pattern = r', (.*)$'
state_name_re = re.compile(state_name_pattern)
def __init__(self, name):
self.name = name
def intuit_name(self, name):
"""Return a numeric value in the range [-1,1), indicating the likelyhood that the name is for a valuable of
of this type. -1 indicates a strong non-match, 1 indicates a strong match, and 0 indicates uncertainty. """
raise NotImplementedError
@property
def state(self):
try:
county, state = self.name.split(',')
return state
except ValueError:
# The search will fail for 'District of Columbia'
return ''
@property
def medium_name(self):
"""The census name without the state"""
return self.state_name_re.sub('', self.name)
type_names = (
'County', 'Municipio', 'Parish', 'Census Area', 'Borough',
'Municipality', 'city', 'City and Borough')
type_name_pattern = '|'.join('({})'.format(e) for e in type_names)
type_names_re = re.compile(type_name_pattern)
@property
def division_name(self):
"""The type designation for the county or county equivalent, such as 'County','Parish' or 'Borough'"""
try:
return next(e for e in self.type_names_re.search(self.name).groups() if e is not None)
except AttributeError:
# The search will fail for 'District of Columbia'
return ''
county_name_pattern = r'(.+) {}, (.+)'.format(type_name_pattern)
county_name_re = re.compile(county_name_pattern)
@property
def short_name(self):
try:
county, state = self.name.split(',')
except ValueError:
return self.name # 'District of Colombia'
return self.type_names_re.sub('', county)
def __str__(self):
return self.name
class Geoid(object):
@classmethod
def resolve_summary_level(cls, sl):
try:
return cls.sl_map[sl]
except KeyError:
return None
@classmethod
def make_format_string(cls, level):
sl_num = names[level]
segs = segments[sl_num]
formats = []
formats.append(cls.sl_format)
for seg in segs:
# Lengths dict may have strings to indicate string format usage.
if int(lengths[seg]) <= 0:
continue
if isinstance(lengths[seg], int):
fmt = cls.elem_format
else:
fmt = cls.elem_str_format
formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
return ''.join(formats)
@classmethod
def make_regex(cls, level):
sl_num = names[level]
segs = segments[sl_num]
# Lengths dict may have strings to indicate string format usage.
regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
for seg in segs if int(lengths[seg]) > 0]
re_str = '^' + ''.join(regexes) + '$'
return re_str
@classmethod
def augment(cls):
"""Augment the class with computed formats, regexes, and other things. This caches these values so
they don't have to be created for every instance. """
import re
level_name = cls.__name__.lower()
cls.sl = names[level_name]
cls.class_map[cls.__name__.lower()] = cls
cls.sl_map[cls.sl] = cls
cls.fmt = cls.make_format_string(cls.__name__.lower())
cls.regex_str = cls.make_regex(cls.__name__.lower())
cls.regex = re.compile(cls.regex_str)
# List of field names
cls.level = level_name
cls.fields = segments[cls.sl]
@classmethod
def get_class(cls, name_or_sl):
"""Return a derived class based on the class name or the summary_level"""
try:
return cls.sl_map[int(name_or_sl)]
except TypeError as e:
raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
except ValueError:
try:
return cls.class_map[name_or_sl.lower()]
except (KeyError, ValueError):
raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
def __init__(self, *args, **kwargs):
# This is a bit unusual, because it means, that , unlike normal
# python args, a kwarg can overwrite a position arg.
d = dict(zip(self.fields, args + ((0,) * 10))) # Add enough zeros to set all fields to zero
d.update(kwargs)
for k, v in d.items():
if k in self.fields:
try:
setattr(self, k, v)
except TypeError as e:
raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
except ValueError as e:
raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
def __str__(self):
d = self.__dict__
d['sl'] = self.sl
# Hacks for special string cases
if 'sldu' in d:
d['sldu'] = str(d['sldu']).zfill(3)
if 'sldl' in d:
d['sldl'] = str(d['sldl']).zfill(3)
try:
fn = six.get_method_function(self.encode)
kwargs = {k: fn(v) for k, v in d.items()}
return self.fmt.format(**kwargs)
except (ValueError, KeyError) as e:
raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))
@property
def state_name(self):
from geoid.censusnames import geo_names
return geo_names[(self.state, 0)]
@property
def stusab(self):
from geoid.censusnames import stusab
try:
return stusab[int(self.state)]
except (AttributeError, ValueError):
# Assume this is a Us object, or some other national object
return 'US'
@property
def county_name(self):
from geoid.censusnames import geo_names
try:
try:
return CountyName(geo_names[(self.state, self.county)])
except KeyError:
try:
return CountyName("County #{}, {}".format(self.county,geo_names[(self.state, 0)]))
except KeyError:
return CountyName("County #{}, State#{}".format(self.county, self.state))
except Exception:
return CountyName('')
@property
def geo_name(self):
"""
Return a name of the state or county, or, for other lowever levels, the
name of the level type in the county.
:return:
"""
if self.level == 'county':
return str(self.county_name)
elif self.level == 'state':
return self.state_name
else:
if hasattr(self, 'county'):
return "{} in {}".format(self.level,str(self.county_name))
elif hasattr(self, 'state'):
return "{} in {}".format(self.level, self.state_name)
else:
return "a {}".format(self.level)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
@classmethod
def parse(cls, gvid, exception=True):
"""
Parse a string value into the geoid of this class.
:param gvid: String value to parse.
:param exception: If true ( default) raise an eception on parse erorrs. If False, return a
'null' geoid.
:return:
"""
if gvid == 'invalid':
return cls.get_class('null')(0)
if not bool(gvid):
return None
if not isinstance(gvid, six.string_types):
raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
try:
if not cls.sl:
# Civick and ACS include the SL, so can call from base type.
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
sl = fn(gvid[0:cls.sl_width])
else:
sl = cls.sl # Otherwise must use derived class.
except ValueError as e:
if exception:
raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
else:
return cls.get_class('null')(0)
try:
cls = cls.sl_map[sl]
except KeyError:
if exception:
raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
else:
return cls.get_class('null')(0)
m = cls.regex.match(gvid)
if not m:
raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
d = m.groupdict()
if not d:
return None
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
d = {k: fn(v) for k, v in d.items()}
try:
del d['sl']
except KeyError:
pass
return cls(**d)
def convert(self, root_cls):
"""Convert to another derived class. cls is the base class for the derived type,
ie AcsGeoid, TigerGeoid, etc. """
d = self.__dict__
d['sl'] = self.sl
try:
cls = root_cls.get_class(root_cls.sl)
except (AttributeError, TypeError):
# Hopefully because root_cls is a module
cls = root_cls.get_class(self.sl)
return cls(**d)
def as_census(self):
from geoid.census import CensusGeoid
return self.convert(CensusGeoid)
def as_acs(self):
from geoid.acs import AcsGeoid
return self.convert(AcsGeoid)
def as_tiger(self):
from geoid.tiger import TigerGeoid
return self.convert(TigerGeoid)
def promote(self, level=None):
"""Convert to the next higher level summary level"""
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d)
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
raise NotImplementedError
def allval(self):
"""Convert the last value to zero. This form represents the entire higher summary level at the granularity
of the lower summary level. For example, for a county, it means 'All counties in the state' """
d = dict(self.__dict__.items())
d['sl'] = self.sl
d[self.level] = 0
cls = self.get_class(self.sl)
return cls(**d)
@classmethod
def nullval(cls):
"""Create a new instance where all of the values are 0"""
d = dict(cls.__dict__.items())
for k in d:
d[k] = 0
d['sl'] = cls.sl
d[cls.level] = 0
return cls(**d)
@property
def tuples(self):
"""Return tuples of field, value, in the order of the levels as they are defined """
return [(field, getattr(self, field, None)) for field in self.fields]
@property
def is_summary(self):
"""Return True if this geoid is an summary -- all of the fields are 0"""
return str(self) == str(self.summarize())
@property
def is_allval(self):
"""Return True if this geoid is an allval -- the last field is zero, but the first is not"""
tups = self.tuples
return tups[-1][1] == 0 and tups[0][1] != 0
@property
def level_plural(self):
"""Return the name of the level as a plural"""
return plurals.get(self.level, self.level + "s")
def generate_all(sumlevel, d):
"""Generate a dict that includes all of the available geoid values, with keys
for the most common names for those values. """
from geoid.civick import GVid
from geoid.tiger import TigerGeoid
from geoid.acs import AcsGeoid
sumlevel = int(sumlevel)
d = dict(d.items())
# Map common name variants
if 'cousub' in d:
d['cosub'] = d['cousub']
del d['cousub']
if 'blkgrp' in d:
d['blockgroup'] = d['blkgrp']
del d['blkgrp']
if 'zcta5' in d:
d['zcta'] = d['zcta5']
del d['zcta5']
gvid_class = GVid.resolve_summary_level(sumlevel)
if not gvid_class:
return {}
geoidt_class = TigerGeoid.resolve_summary_level(sumlevel)
geoid_class = AcsGeoid.resolve_summary_level(sumlevel)
try:
return dict(
gvid=str(gvid_class(**d)),
geoid=str(geoid_class(**d)),
geoidt=str(geoidt_class(**d))
)
except:
raise
def _generate_names():
""" Code to generate the state and county names
>>> python -c 'import geoid; geoid._generate_names()'
"""
from ambry import get_library
l = get_library()
counties = l.partition('census.gov-acs-geofile-2009-geofile50-20095-50')
states = l.partition('census.gov-acs-geofile-2009-geofile40-20095-40')
names = {}
for row in counties.remote_datafile.reader:
names[(row.state, row.county)] = row.name
for row in states.remote_datafile.reader:
if row.component == '00':
names[(row.state, 0)] = row.name
pprint.pprint(names)
|
Metatab/geoid
|
geoid/core.py
|
augment
|
python
|
def augment(module_name, base_class):
for name, cls in inspect.getmembers(sys.modules[module_name],
lambda x : inspect.isclass(x) and issubclass(x, base_class) ):
if cls == base_class:
continue
cls.augment()
|
Call the augment() method for all of the derived classes in the module
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L408-L416
| null |
""" CLasses for working with census Geoids
"""
import inspect
import re
import sys
import six
names = { # (summary level value, base 10 chars, Base 62 chars, prefix fields)
'null': 1,
'us': 10,
'region': 20,
'division': 30,
'state': 40,
'county': 50,
'cosub': 60,
'place': 160,
'ua': 400,
'tract': 140,
'blockgroup': 150,
'block': 101,
'sdelm': 950,
'sdsec': 960,
'sduni': 970,
'zcta': 860,
'zip': 1200,
'sldl': 620,
'sldu': 610,
'cdcurr': 500,
# Other Levels that don't have proper names yet. For these .allval() and
# simplify() don't work properly.
'state_aianhh': 260,
'necta_nectadiv_state_county_cousub': 358,
'state_aianhh_place': 269,
'aianhh_state_county': 270,
'state_cbsa_metdiv': 323,
'state_aianhh280': 280,
'state_place_county': 155,
'aianhh_aitsce_state': 290,
'state_aianhh_aihhtli': 283,
'state_cdcurr_aianhh': 550,
'state_concit': 170,
'state_concit_place': 172,
'state_aianhh_aihhtli286': 286,
'cbsa': 310,
'cbsa_state': 311,
'cbsa_state_place': 312,
'cbsa_state_county': 313,
'cbsa_metdiv': 314,
'cbsa_metdiv_state': 315,
'state_cbsa': 320,
'state_cbsa_place': 321,
'state_cbsa_county': 322,
'state_county_cousub_submcd': 67,
'state_cbsa_metdiv_county': 324,
'state_county_cousub_place': 70,
'necta_state_county': 353,
'state_puma5': 795,
'csa': 330,
'csa_state': 331,
'csa_cbsa': 332,
'csa_cbsa_state': 333,
'cnecta': 335,
'state_county_cousub_place_tract': 80,
'cnecta_necta': 337,
'cnecta_necta_state': 338,
'state_csa': 340,
'state_csa_cbsa': 341,
'state_cnecta': 345,
'state_cnecta_necta': 346,
'necta': 350,
'necta_state': 351,
'necta_state_place': 352,
'cnecta_state': 336,
'necta_state_county_cousub': 354,
'necta_nectadiv': 355,
'necta_nectadiv_state': 356,
'state_anrc': 230,
'necta_nectadiv_state_county': 357,
'state_necta': 360,
'cbsa_metdiv_state_county': 316,
'state_necta_county': 362,
'state_necta_county_cousub': 363,
'state_necta_nectadiv': 364,
'state_necta_nectadiv_county': 365,
'state_necta_nectadiv_county_cousub': 366,
'ua_state': 410,
'ua_state_county': 430,
'state_sldu_county': 612,
'state_sldu': 610,
'state_sldl_county': 622,
'state_sldl': 620,
'state_cdcurr_county': 510,
'state_necta_place': 361,
'aianhh': 250,
'aianhh_aitsce': 251,
'aianhh_aihhtli': 252,
'state_sldl_county': 622,
'aianhh_aihhtli254': 254
}
lengths = {
'null': 1,
'aianhh': 4, # American Indian Area/Alaska Native Area/ Hawaiian Home Land (Census)
'aihhtli': '1', # American Indian Trust Land/ Hawaiian Home Land Indicator. A str b/c Census val is a str
'aitsce': 3, # American Indian Tribal Subdivision (Census)
'anrc': 5, # Alaska Native Regional Corporation (FIPS)
'blkgrp': 1, # Block Group
'blockgroup': 1, # Block Group
'block': 4, # Block
'cbsa': 5, # Metropolitan and Micropolitan Statistical Area
'cdcurr': 2, # Current Congressional District ***
'cnecta': 3, # New England City and Town Combined Statistical Area
'concit': 5, # Consolidated City
'county': 3, # County of current residence
'cousub': 5, # County Subdivision (FIPS)
'cosub': 5, # County Subdivision (FIPS)
'csa': 3, # Combined Statistical Area
'division': 1, # Census Division
'metdiv': 5, # Metropolitan Statistical Area- Metropolitan Division
'necta': 5, # New England City and Town Area
'nectadiv': 5, # New England City and Town Area Division
'place': 5, # Place (FIPS Code)
'puma5': 5, # Public Use Microdata Area 5% File
'region': 1, # Census Region
'sdelm': 5, # State-School District (Elementary)
'sdsec': 5, # State-School District (Secondary)
'sduni': 5, # State-School District (Unified)
'sldl': '3', # State Legislative District Lower. A String to signal that the census value is a string
'sldu': '3', # State Legislative District Upper. A String to signal that the census value is a string
'state': 2, # State (FIPS Code)
'submcd': 5, # Subminor Civil Division (FIPS)
'tract': 6, # Census Tract
'ua': 5, # Urban Area
'ur': 1, # Urban/Rural
'us': 0,
'zcta': 5,
# Nonstandard
'zip': 5,
}
segments = {
1: ['null'], # United States
10: ['us'], # United States
20: ['region'], # Region
30: ['division'], # Division
40: ['state'], # State
50: ['state', 'county'], # County
60: ['state', 'county', 'cousub'], # County Subdivision
67: ['state', 'county', 'cousub', 'submcd'], # State (Puerto Rico Only)-County-County Subdivision-Subbarrio
70: ['state', 'county', 'cousub', 'place'], # County Subdivision-Place/Remainder
80: ['state', 'county', 'cousub', 'place', 'tract'], # County Subdivision-Place/Remainder-Census Tract
101: ['state', 'county', 'tract', 'block'],
140: ['state', 'county', 'tract'], # Census Tract
150: ['state', 'county', 'tract', 'blockgroup'], # Census Tract-Block Group
155: ['state', 'place', 'county'], # Place-County
160: ['state', 'place'], # Place
170: ['state', 'concit'], # Consolidated City
172: ['state', 'concit', 'place'], # Consolidated City-Place Within Consolidated City
230: ['state', 'anrc'], # State-Alaska Native Regional Corporation
250: ['aianhh'], # American Indian Area/Alaska Native Area/Hawaiian Home Land
251: ['aianhh', 'aitsce'], # American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder
252: ['aianhh', 'aihhtli'], # American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)4
254: ['aianhh', 'aihhtli'], # American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
260: ['state', 'aianhh'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-State
269: ['state', 'aianhh', 'place'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder
270: ['aianhh', 'state', 'county'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County
280: ['state', 'aianhh'], # State-American Indian Area/Alaska Native Area/Hawaiian Home Land
283: ['state', 'aianhh', 'aihhtli'],
# State-American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)
286: ['state', 'aianhh', 'aihhtli'],
# State-American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
290: ['aianhh', 'aitsce', 'state'],
# American Indian Area/Alaska Native Area/Hawaiian Home Land-Tribal Subdivision/Remainder-State
310: ['cbsa'], # CBSA
311: ['cbsa', 'state'], # CBSA-State-County
312: ['cbsa', 'state', 'place'], # CBSA-State-Principal City
313: ['cbsa', 'state', 'county'], # CBSA-State-County
314: ['cbsa', 'metdiv'], # Metropolitan Statistical Area/Metropolitan Division
315: ['cbsa', 'metdiv', 'state'], # Metropolitan Statistical Area/Metropolitan Division-State
316: ['cbsa', 'metdiv', 'state', 'county'], # Metropolitan Statistical Area/Metropolitan Division-State-County
320: ['state', 'cbsa'], # State- CBSA
321: ['state', 'cbsa', 'place'], # State- CBSA -Principal City
322: ['state', 'cbsa', 'county'], # State- CBSA -County
323: ['state', 'cbsa', 'metdiv'], # State- Metropolitan Statistical Area/Metropolitan Division
324: ['state', 'cbsa', 'metdiv', 'county'], # State- Metropolitan Statistical Area/Metropolitan Division-County
330: ['csa'], # Combined Statistical Area
331: ['csa', 'state'], # Combined Statistical Area-State
332: ['csa', 'cbsa'], # Combined Statistical Area-CBSA
333: ['csa', 'cbsa', 'state'], # Combined Statistical Area-CBSA-State
335: ['cnecta'], # Combined New England City and Town Area
336: ['cnecta', 'state'], # Combined New England City and Town Area -State
337: ['cnecta', 'necta'], # Combined New England City and Town Area -New England City and Town Area
338: ['cnecta', 'necta', 'state'], # Combined New England City and Town Area -New England City and Town Area-State
340: ['state', 'csa'], # State-Combined Statistical Area
341: ['state', 'csa', 'cbsa'], # State-Combined Statistical Area-CBSA
345: ['state', 'cnecta'], # State-Combined New England City and Town Area
346: ['state', 'cnecta', 'necta'], # State-Combined New England City and Town Area-New England City and Town Area
350: ['necta'], # New England City and Town Area
351: ['necta', 'state'], # New England City and Town Area-State
352: ['necta', 'state', 'place'], # New England City and Town Area-State-Principal City
353: ['necta', 'state', 'county'], # New England City and Town Area-State-County
354: ['necta', 'state', 'county', 'cousub'], # New England City and Town Area-State-County-County Subdivision
355: ['necta', 'nectadiv'], # New England City and Town Area (NECTA)-NECTA Division
356: ['necta', 'nectadiv', 'state'], # New England City and Town Area (NECTA)-NECTA Division-State
357: ['necta', 'nectadiv', 'state', 'county'], # New England City and Town Area (NECTA)-NECTA Division-State-County
358: ['necta', 'nectadiv', 'state', 'county', 'cousub'],
# New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision
360: ['state', 'necta'], # State-New England City and Town Area
361: ['state', 'necta', 'place'], # State-New England City and Town Area-Principal City
362: ['state', 'necta', 'county'], # State-New England City and Town Area-County
363: ['state', 'necta', 'county', 'cousub'], # State-New England City and Town Area-County-County Subdivision
364: ['state', 'necta', 'nectadiv'], # State-New England City and Town Area (NECTA)-NECTA Division
365: ['state', 'necta', 'nectadiv', 'county'], # State-New England City and Town Area (NECTA)-NECTA Division-County
366: ['state', 'necta', 'nectadiv', 'county', 'cousub'],
# State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision
400: ['ua'], # Urban Area,
410: ['ua', 'state'], # Urban Area, State,
430: ['ua','state','county'], # Urban Area, State, County,
500: ['state', 'cdcurr'], # Congressional District
510: ['state', 'cdcurr', 'county'], #
550: ['state', 'cdcurr', 'aianhh'],
# Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land
610: ['state', 'sldu'], # State Senate District
612: ['state', 'sldu', 'county'], # State Senate District-County
620: ['state', 'sldl'], # State House District
622: ['state', 'sldl', 'county'], # State House District-County
795: ['state', 'puma5'], # State-Public Use MicroSample Area 5%
860: ['zcta'],
950: ['state', 'sdelm'], # State-Elementary School District
960: ['state', 'sdsec'], # State-High School District
970: ['state', 'sduni'], # State-Unified School District
# Nonstandard
1200: ['zip']
}
descriptions = {
1: 'United States',
10: 'United States',
20: 'Region',
30: 'Division',
40: 'State',
50: 'County',
60: 'County Subdivision',
67: 'State (Puerto Rico Only)-County-County Subdivision-Subbarrio',
70: 'County Subdivision-Place/Remainder',
80: 'County Subdivision-Place/Remainder-Census Tract',
101: 'block',
140: 'Census Tract',
150: 'Census Tract-Block Group',
155: 'Place-County',
160: 'Place',
170: 'Consolidated City',
172: 'Consolidated City-Place Within Consolidated City',
230: 'State-Alaska Native Regional Corporation',
250: 'American Indian Area/Alaska Native Area/Hawaiian Home Land',
251: 'American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder',
252: 'American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)',
254: 'American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land',
260: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State',
269: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder',
270: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County',
280: 'State-American Indian Area/Alaska Native Area/Hawaiian Home Land',
283: 'aihhtli',
286: 'aihhtli',
290: 'state',
310: 'CBSA',
311: 'CBSA-State-County',
312: 'CBSA-State-Principal City',
313: 'CBSA-State-County',
314: 'Metropolitan Statistical Area/Metropolitan Division',
315: 'Metropolitan Statistical Area/Metropolitan Division-State',
316: 'Metropolitan Statistical Area/Metropolitan Division-State-County',
320: 'State- CBSA',
321: 'State- CBSA -Principal City',
322: 'State- CBSA -County',
323: 'State- Metropolitan Statistical Area/Metropolitan Division',
324: 'State- Metropolitan Statistical Area/Metropolitan Division-County',
330: 'Combined Statistical Area',
331: 'Combined Statistical Area-State',
332: 'Combined Statistical Area-CBSA',
333: 'Combined Statistical Area-CBSA-State',
335: 'Combined New England City and Town Area',
336: 'Combined New England City and Town Area -State',
337: 'Combined New England City and Town Area -New England City and Town Area',
338: 'Combined New England City and Town Area -New England City and Town Area-State',
340: 'State-Combined Statistical Area',
341: 'State-Combined Statistical Area-CBSA',
345: 'State-Combined New England City and Town Area',
346: 'State-Combined New England City and Town Area-New England City and Town Area',
350: 'New England City and Town Area',
351: 'New England City and Town Area-State',
352: 'New England City and Town Area-State-Principal City',
353: 'New England City and Town Area-State-County',
354: 'New England City and Town Area-State-County-County Subdivision',
355: 'New England City and Town Area (NECTA)-NECTA Division',
356: 'New England City and Town Area (NECTA)-NECTA Division-State',
357: 'New England City and Town Area (NECTA)-NECTA Division-State-County',
358: 'New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision',
360: 'State-New England City and Town Area',
361: 'State-New England City and Town Area-Principal City',
362: 'State-New England City and Town Area-County',
363: 'State-New England City and Town Area-County-County Subdivision',
364: 'State-New England City and Town Area (NECTA)-NECTA Division',
365: 'State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision',
400: 'Urban Area,',
410: 'Urban Area, State,',
430: 'Urban Area, State, County,',
500: 'Congressional District',
510: 'Congressional District, County',
550: 'Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land',
610: 'State Senate District',
612: 'State Senate District-County',
620: 'State House District',
622: 'State House District-County',
795: 'State-Public Use MicroSample Area 5%',
860: 'ZIP Code Tabulation Area',
950: 'State-Elementary School District',
960: 'State-High School District',
970: 'State-Unified School District',
}
# Irregular plural forms for level names; Geoid.level_plural falls back to
# appending "s" for levels not listed here.
plurals = {
    'county': 'counties',
    'place': 'places',
    # NOTE(review): this entry looks truncated/miscased -- the lookup keys are
    # lowercase level names, so 'Sdlu' can never match. Confirm intent.
    'Sdlu': 'State '
}
class NotASummaryName(Exception):
    """An argument was not one of the valid summary names (see the ``names`` table)."""
class ParseError(Exception):
    """Error parsing a geoid string."""
def parse_to_gvid(v):
    """Parse an ACS Geoid or a GVID to a GVID"""
    from geoid.civick import GVid
    from geoid.acs import AcsGeoid

    gvid_error = ''

    # First attempt: the value may already be a GVid string.
    try:
        return GVid.parse(v)
    except ValueError as exc:
        gvid_error = str(exc)

    # Second attempt: parse as an ACS geoid, then convert to GVid.
    try:
        return AcsGeoid.parse(v).convert(GVid)
    except ValueError as exc:
        raise ValueError("Failed to parse to either ACS or GVid: {}; {}".format(gvid_error, str(exc)))
def base62_encode(num):
    """Encode a non-negative number as a base-62 string.

    Digits are 0-9, a-z, A-Z in increasing order of value.

    :param num: the number to encode (anything ``int()`` accepts).
    :return: the base-62 representation as a string.

    Adapted from: http://stackoverflow.com/a/1119769/1144479
    """
    num = int(num)
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'

    if num == 0:
        return alphabet[0]

    base = len(alphabet)
    digits = []
    while num:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])

    # Digits were produced least-significant first.
    return ''.join(reversed(digits))
def base62_decode(string):
    """Decode a base-62 encoded string into the number it represents.

    :param string: the encoded string (digits 0-9, a-z, A-Z).
    :return: the decoded value as an int.
    :raises ValueError: if the string contains a character outside the alphabet.

    Adapted from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    base = len(alphabet)

    # Horner's method: fold each digit into the accumulator left to right.
    value = 0
    for char in string:
        value = value * base + alphabet.index(char)

    return int(value)
def get_class(module, sl):
    """Return the geoid class in *module* for summary level *sl*.

    *sl* may be either a summary level number or a level name; the first
    matching entry in the ``names`` table wins.

    :raises NotASummaryName: if *sl* matches no known summary level.
    """
    match = next(
        (level for level, level_sl in names.items() if level_sl == sl or sl == level),
        None)

    if match is None:
        raise NotASummaryName("No class for summary_level {}".format(sl))

    return getattr(module, match.capitalize())
def make_classes(base_class, module):
    """Create derived classes and put them into the same module as the base class.

    This function is called at the end of each of the derived class modules: acs, census, civick and tiger.
    It will create a set of new derived classes in the module, one for each of the entries in the
    ``names`` dict.

    :param base_class: the per-format base class (e.g. AcsGeoid) to derive from.
    :param module: the module object to install the generated classes into.
    """
    from functools import partial

    for k in names:
        cls = base_class.class_factory(k.capitalize())
        cls.augment()
        setattr(module, k.capitalize(), cls)

    # Also expose a module-level get_class() bound to this module.
    setattr(module, 'get_class', partial(get_class, module))
class CountyName(object):
    """A Census county name, with methods to create shorter versions and return the types of division,
    which may be county, parish, borough, etc. """

    # Strip the county and state name. This doesn't work for some locations
    # where the county is actually called a parish or a barrio.
    state_name_pattern = r', (.*)$'
    state_name_re = re.compile(state_name_pattern)

    def __init__(self, name):
        # Full census name, e.g. "King County, Washington".
        self.name = name

    def intuit_name(self, name):
        """Return a numeric value in the range [-1,1), indicating the likelihood that the name is for a value
        of this type. -1 indicates a strong non-match, 1 indicates a strong match, and 0 indicates uncertainty.

        NOTE(review): not implemented on this class.
        """
        raise NotImplementedError

    @property
    def state(self):
        """The state portion of the name, or '' when the name has no comma."""
        try:
            county, state = self.name.split(',')
            return state
        except ValueError:
            # The split will fail for names with no comma, e.g. 'District of Columbia'
            return ''

    @property
    def medium_name(self):
        """The census name without the state"""
        return self.state_name_re.sub('', self.name)

    # Division designations that may appear in a county-equivalent name.
    type_names = (
        'County', 'Municipio', 'Parish', 'Census Area', 'Borough',
        'Municipality', 'city', 'City and Borough')
    type_name_pattern = '|'.join('({})'.format(e) for e in type_names)
    type_names_re = re.compile(type_name_pattern)

    @property
    def division_name(self):
        """The type designation for the county or county equivalent, such as 'County', 'Parish' or 'Borough'"""
        try:
            return next(e for e in self.type_names_re.search(self.name).groups() if e is not None)
        except AttributeError:
            # .search() returns None for names with no designation, e.g. 'District of Columbia'
            return ''

    county_name_pattern = r'(.+) {}, (.+)'.format(type_name_pattern)
    county_name_re = re.compile(county_name_pattern)

    @property
    def short_name(self):
        """The name with both the division designation and the state removed."""
        try:
            county, state = self.name.split(',')
        except ValueError:
            return self.name  # 'District of Colombia'
        return self.type_names_re.sub('', county)

    def __str__(self):
        return self.name
class Geoid(object):
    """Base class for geoids across the naming systems (ACS, Census, Tiger, Civick).

    Concrete per-summary-level subclasses are produced by ``make_classes`` and
    finished by ``augment()``, which installs ``sl``, ``fmt``, ``regex``,
    ``regex_str``, ``level`` and ``fields`` on each subclass and registers it
    in the shared ``sl_map`` / ``class_map`` registries.

    NOTE(review): ``sl_map``, ``class_map``, ``encode``/``decode``,
    ``sl_format``, ``elem_format``, ``elem_str_format``, ``sl_regex``,
    ``elem_regex``, ``part_width`` and ``sl_width`` are not defined in this
    block; they are expected from the per-format base subclasses
    (acs/census/civick/tiger) -- verify against those modules.
    """

    @classmethod
    def resolve_summary_level(cls, sl):
        """Return the class registered for summary level number ``sl``, or None."""
        try:
            return cls.sl_map[sl]
        except KeyError:
            return None

    @classmethod
    def make_format_string(cls, level):
        """Build the ``str.format`` template used to render a geoid of ``level``."""
        sl_num = names[level]
        segs = segments[sl_num]

        formats = []
        formats.append(cls.sl_format)

        for seg in segs:
            # Lengths dict may have strings to indicate string format usage.
            if int(lengths[seg]) <= 0:
                continue
            if isinstance(lengths[seg], int):
                fmt = cls.elem_format
            else:
                fmt = cls.elem_str_format
            formats.append(fmt.format(seg, cls.part_width(lengths[seg])))

        return ''.join(formats)

    @classmethod
    def make_regex(cls, level):
        """Build the anchored regex string that parses geoids of ``level``."""
        sl_num = names[level]
        segs = segments[sl_num]

        # Lengths dict may have strings to indicate string format usage.
        regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
                                    for seg in segs if int(lengths[seg]) > 0]

        re_str = '^' + ''.join(regexes) + '$'

        return re_str

    @classmethod
    def augment(cls):
        """Augment the class with computed formats, regexes, and other things. This caches these values so
        they don't have to be created for every instance. """
        import re

        level_name = cls.__name__.lower()

        cls.sl = names[level_name]

        # Register in the shared lookup tables.
        cls.class_map[cls.__name__.lower()] = cls
        cls.sl_map[cls.sl] = cls

        cls.fmt = cls.make_format_string(cls.__name__.lower())
        cls.regex_str = cls.make_regex(cls.__name__.lower())
        cls.regex = re.compile(cls.regex_str)

        # List of field names
        cls.level = level_name
        cls.fields = segments[cls.sl]

    @classmethod
    def get_class(cls, name_or_sl):
        """Return a derived class based on the class name or the summary_level"""
        try:
            # int() raises TypeError for non-number/non-string input and
            # ValueError for a name string; names fall through to class_map.
            return cls.sl_map[int(name_or_sl)]
        except TypeError as e:
            raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
        except ValueError:
            try:
                return cls.class_map[name_or_sl.lower()]
            except (KeyError, ValueError):
                raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))

    def __init__(self, *args, **kwargs):
        # This is a bit unusual, because it means that, unlike normal
        # python args, a kwarg can overwrite a positional arg.
        d = dict(zip(self.fields, args + ((0,) * 10)))  # Add enough zeros to set all fields to zero
        d.update(kwargs)

        # Only keys that are actual fields for this summary level are kept.
        for k, v in d.items():
            if k in self.fields:
                try:
                    setattr(self, k, v)
                except TypeError as e:
                    raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
                                    .format(v, type(v), k, type(self), e))
                except ValueError as e:
                    raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
                                    .format(v, type(v), k, type(self), e))

    def __str__(self):
        d = self.__dict__
        d['sl'] = self.sl

        # Hacks for special string cases: sldu/sldl are zero-padded strings.
        if 'sldu' in d:
            d['sldu'] = str(d['sldu']).zfill(3)
        if 'sldl' in d:
            d['sldl'] = str(d['sldl']).zfill(3)

        try:
            # Encode every component with the subclass's encode function.
            fn = six.get_method_function(self.encode)
            kwargs = {k: fn(v) for k, v in d.items()}
            return self.fmt.format(**kwargs)
        except (ValueError, KeyError) as e:
            raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))

    @property
    def state_name(self):
        # Full state name, looked up from the generated census names table.
        from geoid.censusnames import geo_names
        return geo_names[(self.state, 0)]

    @property
    def stusab(self):
        """State USPS abbreviation; 'US' for national-level geoids."""
        from geoid.censusnames import stusab
        try:
            return stusab[int(self.state)]
        except (AttributeError, ValueError):
            # Assume this is a Us object, or some other national object
            return 'US'

    @property
    def county_name(self):
        """CountyName for this geoid, with fallbacks when lookups fail."""
        from geoid.censusnames import geo_names
        try:
            try:
                return CountyName(geo_names[(self.state, self.county)])
            except KeyError:
                # County not in table; synthesize a placeholder name.
                try:
                    return CountyName("County #{}, {}".format(self.county, geo_names[(self.state, 0)]))
                except KeyError:
                    return CountyName("County #{}, State#{}".format(self.county, self.state))
        except Exception:
            # e.g. geoid types with no state/county attributes
            return CountyName('')

    @property
    def geo_name(self):
        """
        Return a name of the state or county, or, for other lower levels, the
        name of the level type in the county.
        :return:
        """
        if self.level == 'county':
            return str(self.county_name)
        elif self.level == 'state':
            return self.state_name
        else:
            if hasattr(self, 'county'):
                return "{} in {}".format(self.level, str(self.county_name))
            elif hasattr(self, 'state'):
                return "{} in {}".format(self.level, self.state_name)
            else:
                return "a {}".format(self.level)

    # Hashing, equality and ordering all delegate to the canonical string form.
    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        return str(self) == str(other)

    def __ne__(self, other):
        return str(self) != str(other)

    def __lt__(self, other):
        return str(self) < str(other)

    def __le__(self, other):
        return str(self) <= str(other)

    def __gt__(self, other):
        return str(self) > str(other)

    def __ge__(self, other):
        return str(self) >= str(other)

    @classmethod
    def parse(cls, gvid, exception=True):
        """
        Parse a string value into the geoid of this class.

        :param gvid: String value to parse.
        :param exception: If true (default) raise an exception on parse errors. If False, return a
            'null' geoid.
        :return:
        """
        if gvid == 'invalid':
            return cls.get_class('null')(0)

        if not bool(gvid):
            return None

        if not isinstance(gvid, six.string_types):
            raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))

        try:
            if not cls.sl:
                # Civick and ACS include the SL, so can call from base type.
                if six.PY3:
                    fn = cls.decode
                else:
                    fn = cls.decode.__func__
                sl = fn(gvid[0:cls.sl_width])
            else:
                sl = cls.sl  # Otherwise must use derived class.
        except ValueError as e:
            if exception:
                raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
            else:
                return cls.get_class('null')(0)

        try:
            # Rebind cls to the concrete class for the parsed summary level.
            cls = cls.sl_map[sl]
        except KeyError:
            if exception:
                raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
            else:
                return cls.get_class('null')(0)

        m = cls.regex.match(gvid)

        if not m:
            raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))

        d = m.groupdict()

        if not d:
            return None

        if six.PY3:
            fn = cls.decode
        else:
            fn = cls.decode.__func__

        # Decode each captured component to its numeric value.
        d = {k: fn(v) for k, v in d.items()}

        try:
            del d['sl']
        except KeyError:
            pass

        return cls(**d)

    def convert(self, root_cls):
        """Convert to another derived class. cls is the base class for the derived type,
        ie AcsGeoid, TigerGeoid, etc. """
        d = self.__dict__
        d['sl'] = self.sl

        try:
            cls = root_cls.get_class(root_cls.sl)
        except (AttributeError, TypeError):
            # Hopefully because root_cls is a module
            cls = root_cls.get_class(self.sl)

        return cls(**d)

    def as_census(self):
        """This geoid converted to Census form."""
        from geoid.census import CensusGeoid
        return self.convert(CensusGeoid)

    def as_acs(self):
        """This geoid converted to ACS form."""
        from geoid.acs import AcsGeoid
        return self.convert(AcsGeoid)

    def as_tiger(self):
        """This geoid converted to Tiger form."""
        from geoid.tiger import TigerGeoid
        return self.convert(TigerGeoid)

    def promote(self, level=None):
        """Convert to the next higher level summary level"""
        if level is None:
            if len(self.fields) < 2:
                # Single-field levels can only promote to the national level.
                if self.level in ('region', 'division', 'state', 'ua'):
                    cls = self.get_class('us')
                else:
                    return None
            else:
                cls = self.get_class(self.fields[-2])
        else:
            cls = self.get_class(level)

        d = dict(self.__dict__.items())
        d['sl'] = self.sl

        return cls(**d)

    def summarize(self):
        """Convert all of the values to their max values. This form is used to represent the summary level"""
        raise NotImplementedError

    def allval(self):
        """Convert the last value to zero. This form represents the entire higher summary level at the granularity
        of the lower summary level. For example, for a county, it means 'All counties in the state' """
        d = dict(self.__dict__.items())
        d['sl'] = self.sl
        d[self.level] = 0

        cls = self.get_class(self.sl)

        return cls(**d)

    @classmethod
    def nullval(cls):
        """Create a new instance where all of the values are 0"""
        # NOTE(review): this iterates cls.__dict__, which also contains methods
        # and class attributes; __init__ only keeps keys found in `fields`, so
        # the extra zeroed keys are ignored. Verify this is the intended path.
        d = dict(cls.__dict__.items())

        for k in d:
            d[k] = 0

        d['sl'] = cls.sl
        d[cls.level] = 0

        return cls(**d)

    @property
    def tuples(self):
        """Return tuples of field, value, in the order of the levels as they are defined """
        return [(field, getattr(self, field, None)) for field in self.fields]

    @property
    def is_summary(self):
        """Return True if this geoid is an summary -- all of the fields are 0"""
        return str(self) == str(self.summarize())

    @property
    def is_allval(self):
        """Return True if this geoid is an allval -- the last field is zero, but the first is not"""
        tups = self.tuples
        return tups[-1][1] == 0 and tups[0][1] != 0

    @property
    def level_plural(self):
        """Return the name of the level as a plural"""
        return plurals.get(self.level, self.level + "s")
def generate_all(sumlevel, d):
    """Generate a dict that includes all of the available geoid values, with keys
    for the most common names for those values.

    :param sumlevel: summary level, as an int or int-like string.
    :param d: mapping of geoid component names to values; common variant key
        names ('cousub', 'blkgrp', 'zcta5') are normalized first.
    :return: dict with 'gvid', 'geoid' and 'geoidt' string values, or {} when
        the summary level has no GVid class.
    """
    from geoid.civick import GVid
    from geoid.tiger import TigerGeoid
    from geoid.acs import AcsGeoid

    sumlevel = int(sumlevel)
    d = dict(d.items())  # work on a copy; the caller's dict is not mutated

    # Map common name variants to the canonical component names.
    if 'cousub' in d:
        d['cosub'] = d['cousub']
        del d['cousub']

    if 'blkgrp' in d:
        d['blockgroup'] = d['blkgrp']
        del d['blkgrp']

    if 'zcta5' in d:
        d['zcta'] = d['zcta5']
        del d['zcta5']

    gvid_class = GVid.resolve_summary_level(sumlevel)

    if not gvid_class:
        return {}

    geoidt_class = TigerGeoid.resolve_summary_level(sumlevel)
    geoid_class = AcsGeoid.resolve_summary_level(sumlevel)

    # The original wrapped the return in a bare `try: ... except: raise`,
    # which is a no-op (and a bare-except anti-pattern); removed.
    return dict(
        gvid=str(gvid_class(**d)),
        geoid=str(geoid_class(**d)),
        geoidt=str(geoidt_class(**d))
    )
def _generate_names():
    """Code to generate the state and county names.

    Developer utility; run as:

        python -c 'import geoid; geoid._generate_names()'

    Requires the third-party `ambry` library and access to its census
    partitions; the result dict is pretty-printed to stdout.
    """
    import pprint  # fix: the module never imports pprint at top level

    from ambry import get_library

    l = get_library()

    counties = l.partition('census.gov-acs-geofile-2009-geofile50-20095-50')
    states = l.partition('census.gov-acs-geofile-2009-geofile40-20095-40')

    names = {}

    for row in counties.remote_datafile.reader:
        names[(row.state, row.county)] = row.name

    for row in states.remote_datafile.reader:
        if row.component == '00':
            names[(row.state, 0)] = row.name

    pprint.pprint(names)
|
Metatab/geoid
|
geoid/core.py
|
make_classes
|
python
|
def make_classes(base_class, module):
from functools import partial
for k in names:
cls = base_class.class_factory(k.capitalize())
cls.augment()
setattr(module, k.capitalize(), cls)
setattr(module, 'get_class', partial(get_class, module))
|
Create derived classes and put them into the same module as the base class.
This function is called at the end of each of the derived class modules, acs, census, civik and tiger.
It will create a set of new derived classes in the module, one for each of the entries in the `summary_levels`
dict.
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L428-L446
|
[
"def class_factory(cls, name):\n def __init__(self, *args, **kwargs):\n cls.__init__(self, *args, **kwargs)\n\n return type(name, (cls,), {\"__init__\": __init__})\n",
"def class_factory(cls, name):\n def __init__(self, *args, **kwargs):\n cls.__init__(self, *args, **kwargs)\n\n return type(name, (cls,), {\"__init__\": __init__})\n",
"def class_factory(cls, name):\n\n def __init__(self, *args, **kwargs):\n cls.__init__(self, *args, **kwargs)\n\n return type(name, (cls,), {\"__init__\": __init__})\n",
"def class_factory(cls, name):\n\n def __init__(self, *args, **kwargs):\n cls.__init__(self, *args, **kwargs)\n\n return type(name, (cls,), {\"__init__\": __init__})\n"
] |
""" CLasses for working with census Geoids
"""
import inspect
import re
import sys
import six
# Map from summary-level name to Census summary-level number.
# NOTE(review): the original trailing comment claimed values were tuples of
# (summary level value, base 10 chars, Base 62 chars, prefix fields); they are
# plain summary-level integers.
names = {
    'null': 1,
    'us': 10,
    'region': 20,
    'division': 30,
    'state': 40,
    'county': 50,
    'cosub': 60,
    'place': 160,
    'ua': 400,
    'tract': 140,
    'blockgroup': 150,
    'block': 101,
    'sdelm': 950,
    'sdsec': 960,
    'sduni': 970,
    'zcta': 860,
    'zip': 1200,
    'sldl': 620,
    'sldu': 610,
    'cdcurr': 500,

    # Other Levels that don't have proper names yet. For these .allval() and
    # simplify() don't work properly.
    'state_aianhh': 260,
    'necta_nectadiv_state_county_cousub': 358,
    'state_aianhh_place': 269,
    'aianhh_state_county': 270,
    'state_cbsa_metdiv': 323,
    'state_aianhh280': 280,
    'state_place_county': 155,
    'aianhh_aitsce_state': 290,
    'state_aianhh_aihhtli': 283,
    'state_cdcurr_aianhh': 550,
    'state_concit': 170,
    'state_concit_place': 172,
    'state_aianhh_aihhtli286': 286,
    'cbsa': 310,
    'cbsa_state': 311,
    'cbsa_state_place': 312,
    'cbsa_state_county': 313,
    'cbsa_metdiv': 314,
    'cbsa_metdiv_state': 315,
    'state_cbsa': 320,
    'state_cbsa_place': 321,
    'state_cbsa_county': 322,
    'state_county_cousub_submcd': 67,
    'state_cbsa_metdiv_county': 324,
    'state_county_cousub_place': 70,
    'necta_state_county': 353,
    'state_puma5': 795,
    'csa': 330,
    'csa_state': 331,
    'csa_cbsa': 332,
    'csa_cbsa_state': 333,
    'cnecta': 335,
    'state_county_cousub_place_tract': 80,
    'cnecta_necta': 337,
    'cnecta_necta_state': 338,
    'state_csa': 340,
    'state_csa_cbsa': 341,
    'state_cnecta': 345,
    'state_cnecta_necta': 346,
    'necta': 350,
    'necta_state': 351,
    'necta_state_place': 352,
    'cnecta_state': 336,
    'necta_state_county_cousub': 354,
    'necta_nectadiv': 355,
    'necta_nectadiv_state': 356,
    'state_anrc': 230,
    'necta_nectadiv_state_county': 357,
    'state_necta': 360,
    'cbsa_metdiv_state_county': 316,
    'state_necta_county': 362,
    'state_necta_county_cousub': 363,
    'state_necta_nectadiv': 364,
    'state_necta_nectadiv_county': 365,
    'state_necta_nectadiv_county_cousub': 366,
    'ua_state': 410,
    'ua_state_county': 430,
    'state_sldu_county': 612,
    'state_sldu': 610,
    'state_sldl_county': 622,
    'state_sldl': 620,
    'state_cdcurr_county': 510,
    'state_necta_place': 361,
    'aianhh': 250,
    'aianhh_aitsce': 251,
    'aianhh_aihhtli': 252,
    # A second 'state_sldl_county': 622 entry was removed here -- it duplicated
    # the one above with the same value, a latent maintenance hazard.
    'aianhh_aihhtli254': 254
}
# Field width (number of characters) of each geography component within a
# census geoid. A width given as a *string* (e.g. '1', '3') flags components
# whose census value is itself a string; Geoid.make_format_string() keys off
# the value's type to select the string format.
lengths = {
    'null': 1,
    'aianhh': 4,  # American Indian Area/Alaska Native Area/ Hawaiian Home Land (Census)
    'aihhtli': '1',  # American Indian Trust Land/ Hawaiian Home Land Indicator. A str b/c Census val is a str
    'aitsce': 3,  # American Indian Tribal Subdivision (Census)
    'anrc': 5,  # Alaska Native Regional Corporation (FIPS)
    'blkgrp': 1,  # Block Group
    'blockgroup': 1,  # Block Group
    'block': 4,  # Block
    'cbsa': 5,  # Metropolitan and Micropolitan Statistical Area
    'cdcurr': 2,  # Current Congressional District ***
    'cnecta': 3,  # New England City and Town Combined Statistical Area
    'concit': 5,  # Consolidated City
    'county': 3,  # County of current residence
    'cousub': 5,  # County Subdivision (FIPS)
    'cosub': 5,  # County Subdivision (FIPS)
    'csa': 3,  # Combined Statistical Area
    'division': 1,  # Census Division
    'metdiv': 5,  # Metropolitan Statistical Area- Metropolitan Division
    'necta': 5,  # New England City and Town Area
    'nectadiv': 5,  # New England City and Town Area Division
    'place': 5,  # Place (FIPS Code)
    'puma5': 5,  # Public Use Microdata Area 5% File
    'region': 1,  # Census Region
    'sdelm': 5,  # State-School District (Elementary)
    'sdsec': 5,  # State-School District (Secondary)
    'sduni': 5,  # State-School District (Unified)
    'sldl': '3',  # State Legislative District Lower. A String to signal that the census value is a string
    'sldu': '3',  # State Legislative District Upper. A String to signal that the census value is a string
    'state': 2,  # State (FIPS Code)
    'submcd': 5,  # Subminor Civil Division (FIPS)
    'tract': 6,  # Census Tract
    'ua': 5,  # Urban Area
    'ur': 1,  # Urban/Rural
    'us': 0,  # Zero width: skipped when building format strings and regexes
    'zcta': 5,
    # Nonstandard
    'zip': 5,
}
# Map from summary-level number to the ordered list of component fields that
# make up a geoid at that level. The order defines both the render format and
# the parse regex (see Geoid.make_format_string / Geoid.make_regex).
segments = {
    1: ['null'],  # United States
    10: ['us'],  # United States
    20: ['region'],  # Region
    30: ['division'],  # Division
    40: ['state'],  # State
    50: ['state', 'county'],  # County
    60: ['state', 'county', 'cousub'],  # County Subdivision
    67: ['state', 'county', 'cousub', 'submcd'],  # State (Puerto Rico Only)-County-County Subdivision-Subbarrio
    70: ['state', 'county', 'cousub', 'place'],  # County Subdivision-Place/Remainder
    80: ['state', 'county', 'cousub', 'place', 'tract'],  # County Subdivision-Place/Remainder-Census Tract
    101: ['state', 'county', 'tract', 'block'],
    140: ['state', 'county', 'tract'],  # Census Tract
    150: ['state', 'county', 'tract', 'blockgroup'],  # Census Tract-Block Group
    155: ['state', 'place', 'county'],  # Place-County
    160: ['state', 'place'],  # Place
    170: ['state', 'concit'],  # Consolidated City
    172: ['state', 'concit', 'place'],  # Consolidated City-Place Within Consolidated City
    230: ['state', 'anrc'],  # State-Alaska Native Regional Corporation
    250: ['aianhh'],  # American Indian Area/Alaska Native Area/Hawaiian Home Land
    251: ['aianhh', 'aitsce'],  # American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder
    252: ['aianhh', 'aihhtli'],  # American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)4
    254: ['aianhh', 'aihhtli'],  # American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
    260: ['state', 'aianhh'],  # American Indian Area/Alaska Native Area/Hawaiian Home Land-State
    269: ['state', 'aianhh', 'place'],  # American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder
    270: ['aianhh', 'state', 'county'],  # American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County
    280: ['state', 'aianhh'],  # State-American Indian Area/Alaska Native Area/Hawaiian Home Land
    283: ['state', 'aianhh', 'aihhtli'],
    # State-American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)
    286: ['state', 'aianhh', 'aihhtli'],
    # State-American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
    290: ['aianhh', 'aitsce', 'state'],
    # American Indian Area/Alaska Native Area/Hawaiian Home Land-Tribal Subdivision/Remainder-State
    310: ['cbsa'],  # CBSA
    311: ['cbsa', 'state'],  # CBSA-State-County
    312: ['cbsa', 'state', 'place'],  # CBSA-State-Principal City
    313: ['cbsa', 'state', 'county'],  # CBSA-State-County
    314: ['cbsa', 'metdiv'],  # Metropolitan Statistical Area/Metropolitan Division
    315: ['cbsa', 'metdiv', 'state'],  # Metropolitan Statistical Area/Metropolitan Division-State
    316: ['cbsa', 'metdiv', 'state', 'county'],  # Metropolitan Statistical Area/Metropolitan Division-State-County
    320: ['state', 'cbsa'],  # State- CBSA
    321: ['state', 'cbsa', 'place'],  # State- CBSA -Principal City
    322: ['state', 'cbsa', 'county'],  # State- CBSA -County
    323: ['state', 'cbsa', 'metdiv'],  # State- Metropolitan Statistical Area/Metropolitan Division
    324: ['state', 'cbsa', 'metdiv', 'county'],  # State- Metropolitan Statistical Area/Metropolitan Division-County
    330: ['csa'],  # Combined Statistical Area
    331: ['csa', 'state'],  # Combined Statistical Area-State
    332: ['csa', 'cbsa'],  # Combined Statistical Area-CBSA
    333: ['csa', 'cbsa', 'state'],  # Combined Statistical Area-CBSA-State
    335: ['cnecta'],  # Combined New England City and Town Area
    336: ['cnecta', 'state'],  # Combined New England City and Town Area -State
    337: ['cnecta', 'necta'],  # Combined New England City and Town Area -New England City and Town Area
    338: ['cnecta', 'necta', 'state'],  # Combined New England City and Town Area -New England City and Town Area-State
    340: ['state', 'csa'],  # State-Combined Statistical Area
    341: ['state', 'csa', 'cbsa'],  # State-Combined Statistical Area-CBSA
    345: ['state', 'cnecta'],  # State-Combined New England City and Town Area
    346: ['state', 'cnecta', 'necta'],  # State-Combined New England City and Town Area-New England City and Town Area
    350: ['necta'],  # New England City and Town Area
    351: ['necta', 'state'],  # New England City and Town Area-State
    352: ['necta', 'state', 'place'],  # New England City and Town Area-State-Principal City
    353: ['necta', 'state', 'county'],  # New England City and Town Area-State-County
    354: ['necta', 'state', 'county', 'cousub'],  # New England City and Town Area-State-County-County Subdivision
    355: ['necta', 'nectadiv'],  # New England City and Town Area (NECTA)-NECTA Division
    356: ['necta', 'nectadiv', 'state'],  # New England City and Town Area (NECTA)-NECTA Division-State
    357: ['necta', 'nectadiv', 'state', 'county'],  # New England City and Town Area (NECTA)-NECTA Division-State-County
    358: ['necta', 'nectadiv', 'state', 'county', 'cousub'],
    # New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision
    360: ['state', 'necta'],  # State-New England City and Town Area
    361: ['state', 'necta', 'place'],  # State-New England City and Town Area-Principal City
    362: ['state', 'necta', 'county'],  # State-New England City and Town Area-County
    363: ['state', 'necta', 'county', 'cousub'],  # State-New England City and Town Area-County-County Subdivision
    364: ['state', 'necta', 'nectadiv'],  # State-New England City and Town Area (NECTA)-NECTA Division
    365: ['state', 'necta', 'nectadiv', 'county'],  # State-New England City and Town Area (NECTA)-NECTA Division-County
    366: ['state', 'necta', 'nectadiv', 'county', 'cousub'],
    # State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision
    400: ['ua'],  # Urban Area,
    410: ['ua', 'state'],  # Urban Area, State,
    430: ['ua', 'state', 'county'],  # Urban Area, State, County,
    500: ['state', 'cdcurr'],  # Congressional District
    510: ['state', 'cdcurr', 'county'],  #
    550: ['state', 'cdcurr', 'aianhh'],
    # Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land
    610: ['state', 'sldu'],  # State Senate District
    612: ['state', 'sldu', 'county'],  # State Senate District-County
    620: ['state', 'sldl'],  # State House District
    622: ['state', 'sldl', 'county'],  # State House District-County
    795: ['state', 'puma5'],  # State-Public Use MicroSample Area 5%
    860: ['zcta'],
    950: ['state', 'sdelm'],  # State-Elementary School District
    960: ['state', 'sdsec'],  # State-High School District
    970: ['state', 'sduni'],  # State-Unified School District
    # Nonstandard
    1200: ['zip']
}
# Human-readable description for each summary-level number.
# NOTE(review): several entries look like placeholders rather than real
# descriptions -- 283 and 286 hold 'aihhtli', 290 holds 'state', 101 is the
# bare word 'block', the text at 365 appears to describe level 366's segment
# list, and 366 itself has no entry. Confirm against the Census summary-level
# reference before relying on these values.
descriptions = {
    1: 'United States',
    10: 'United States',
    20: 'Region',
    30: 'Division',
    40: 'State',
    50: 'County',
    60: 'County Subdivision',
    67: 'State (Puerto Rico Only)-County-County Subdivision-Subbarrio',
    70: 'County Subdivision-Place/Remainder',
    80: 'County Subdivision-Place/Remainder-Census Tract',
    101: 'block',
    140: 'Census Tract',
    150: 'Census Tract-Block Group',
    155: 'Place-County',
    160: 'Place',
    170: 'Consolidated City',
    172: 'Consolidated City-Place Within Consolidated City',
    230: 'State-Alaska Native Regional Corporation',
    250: 'American Indian Area/Alaska Native Area/Hawaiian Home Land',
    251: 'American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder',
    252: 'American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)',
    254: 'American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land',
    260: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State',
    269: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder',
    270: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County',
    280: 'State-American Indian Area/Alaska Native Area/Hawaiian Home Land',
    283: 'aihhtli',
    286: 'aihhtli',
    290: 'state',
    310: 'CBSA',
    311: 'CBSA-State-County',
    312: 'CBSA-State-Principal City',
    313: 'CBSA-State-County',
    314: 'Metropolitan Statistical Area/Metropolitan Division',
    315: 'Metropolitan Statistical Area/Metropolitan Division-State',
    316: 'Metropolitan Statistical Area/Metropolitan Division-State-County',
    320: 'State- CBSA',
    321: 'State- CBSA -Principal City',
    322: 'State- CBSA -County',
    323: 'State- Metropolitan Statistical Area/Metropolitan Division',
    324: 'State- Metropolitan Statistical Area/Metropolitan Division-County',
    330: 'Combined Statistical Area',
    331: 'Combined Statistical Area-State',
    332: 'Combined Statistical Area-CBSA',
    333: 'Combined Statistical Area-CBSA-State',
    335: 'Combined New England City and Town Area',
    336: 'Combined New England City and Town Area -State',
    337: 'Combined New England City and Town Area -New England City and Town Area',
    338: 'Combined New England City and Town Area -New England City and Town Area-State',
    340: 'State-Combined Statistical Area',
    341: 'State-Combined Statistical Area-CBSA',
    345: 'State-Combined New England City and Town Area',
    346: 'State-Combined New England City and Town Area-New England City and Town Area',
    350: 'New England City and Town Area',
    351: 'New England City and Town Area-State',
    352: 'New England City and Town Area-State-Principal City',
    353: 'New England City and Town Area-State-County',
    354: 'New England City and Town Area-State-County-County Subdivision',
    355: 'New England City and Town Area (NECTA)-NECTA Division',
    356: 'New England City and Town Area (NECTA)-NECTA Division-State',
    357: 'New England City and Town Area (NECTA)-NECTA Division-State-County',
    358: 'New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision',
    360: 'State-New England City and Town Area',
    361: 'State-New England City and Town Area-Principal City',
    362: 'State-New England City and Town Area-County',
    363: 'State-New England City and Town Area-County-County Subdivision',
    364: 'State-New England City and Town Area (NECTA)-NECTA Division',
    365: 'State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision',
    400: 'Urban Area,',
    410: 'Urban Area, State,',
    430: 'Urban Area, State, County,',
    500: 'Congressional District',
    510: 'Congressional District, County',
    550: 'Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land',
    610: 'State Senate District',
    612: 'State Senate District-County',
    620: 'State House District',
    622: 'State House District-County',
    795: 'State-Public Use MicroSample Area 5%',
    860: 'ZIP Code Tabulation Area',
    950: 'State-Elementary School District',
    960: 'State-High School District',
    970: 'State-Unified School District',
}
# Irregular plural names for summary levels; Geoid.level_plural falls back to
# appending 's' for levels not listed here. The original also carried a
# garbled, unreachable entry ('Sdlu': 'State ') -- level names are always
# lower-case, so it could never match and has been dropped.
plurals = {
    'county': 'counties',
    'place': 'places',
}
class NotASummaryName(Exception):
    """Raised when an argument is not one of the valid summary-level names or numbers."""
class ParseError(Exception):
    """Raised on an error parsing a geoid string."""
def parse_to_gvid(v):
    """Parse a string that is either a GVID or an ACS geoid into a GVid.

    The GVid form is tried first; on failure the ACS form is parsed and
    converted. Raises ValueError when neither parser accepts the string,
    with both parsers' messages in the error text.
    """
    from geoid.civick import GVid
    from geoid.acs import AcsGeoid

    try:
        return GVid.parse(v)
    except ValueError as gvid_error:
        first_message = str(gvid_error)

    try:
        return AcsGeoid.parse(v).convert(GVid)
    except ValueError as acs_error:
        raise ValueError(
            "Failed to parse to either ACS or GVid: {}; {}".format(first_message, str(acs_error)))
def base62_encode(num):
    """Encode a non-negative integer as a base-62 string.

    The alphabet is 0-9, a-z, A-Z, most significant digit first. Adapted
    from http://stackoverflow.com/a/1119769/1144479

    Args:
        num: the value to encode; anything accepted by int().

    Returns:
        The base-62 string representation of *num*.

    Raises:
        ValueError: if *num* is negative (the original implementation
            looped forever on negative input).
    """
    num = int(num)
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'

    if num < 0:
        raise ValueError("base62_encode requires a non-negative number")

    if num == 0:
        return alphabet[0]

    arr = []
    base = len(alphabet)
    while num:
        num, rem = divmod(num, base)
        arr.append(alphabet[rem])
    arr.reverse()
    return ''.join(arr)
def base62_decode(string):
    """Decode a base-62 string (digits 0-9, a-z, A-Z) back into an integer.

    Inverse of base62_encode; characters outside the alphabet raise
    ValueError (from str.index). Adapted from
    http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    base = len(alphabet)

    # Horner's method: equivalent to summing digit * base**position.
    value = 0
    for char in string:
        value = value * base + alphabet.index(char)
    return value
def augment(module_name, base_class):
    """Call augment() on every subclass of *base_class* defined in the module
    named *module_name*, skipping *base_class* itself."""
    def _is_candidate(obj):
        return inspect.isclass(obj) and issubclass(obj, base_class)

    for _name, member in inspect.getmembers(sys.modules[module_name], _is_candidate):
        if member == base_class:
            continue
        member.augment()
def get_class(module, sl):
    """Return the geoid class on *module* for *sl*, which may be either a
    summary-level number or a summary-level name.

    Raises NotASummaryName when *sl* matches nothing in ``names``.
    """
    match = next(
        (level for level, level_sl in names.items() if level_sl == sl or sl == level),
        None)

    if match is None:
        raise NotASummaryName("No class for summary_level {}".format(sl))

    return getattr(module, match.capitalize())
class CountyName(object):
    """A Census county name, with methods to create shorter versions and to return the type of
    division, which may be county, parish, borough, etc."""

    # Matches the trailing ", <state name>" portion of a census name.
    # NOTE(review): this does not work for names without a comma, such as
    # 'District of Columbia'.
    state_name_pattern = r', (.*)$'
    state_name_re = re.compile(state_name_pattern)

    def __init__(self, name):
        # Full census name, e.g. "King County, Washington".
        self.name = name

    def intuit_name(self, name):
        """Return a numeric value in the range [-1, 1) indicating the likelihood that *name* is a
        value of this type: -1 a strong non-match, 1 a strong match, 0 uncertainty.

        Not implemented.
        """
        raise NotImplementedError

    @property
    def state(self):
        """The text following the comma (including its leading space), or '' if there is no comma."""
        try:
            county, state = self.name.split(',')
            return state
        except ValueError:
            # The split fails for names without a comma, e.g. 'District of Columbia'.
            return ''

    @property
    def medium_name(self):
        """The census name without the state."""
        return self.state_name_re.sub('', self.name)

    # Division designations that can appear in county-equivalent names.
    type_names = (
        'County', 'Municipio', 'Parish', 'Census Area', 'Borough',
        'Municipality', 'city', 'City and Borough')

    type_name_pattern = '|'.join('({})'.format(e) for e in type_names)
    type_names_re = re.compile(type_name_pattern)

    @property
    def division_name(self):
        """The type designation for the county or county equivalent, such as 'County', 'Parish'
        or 'Borough'; '' when no designation is found."""
        try:
            return next(e for e in self.type_names_re.search(self.name).groups() if e is not None)
        except AttributeError:
            # search() returned None -- no designation present, e.g. 'District of Columbia'.
            return ''

    county_name_pattern = r'(.+) {}, (.+)'.format(type_name_pattern)
    county_name_re = re.compile(county_name_pattern)

    @property
    def short_name(self):
        """The county portion of the name with the division designation removed."""
        try:
            county, state = self.name.split(',')
        except ValueError:
            return self.name  # No comma, e.g. 'District of Columbia'
        return self.type_names_re.sub('', county)

    def __str__(self):
        return self.name
class Geoid(object):
    """Base class for geoid objects.

    One derived class exists per summary level (created by ``make_classes``);
    ``augment()`` attaches the computed format string, parse regex and field
    list to each derived class so instances can be rendered, parsed and
    compared via their canonical string form.
    """

    @classmethod
    def resolve_summary_level(cls, sl):
        """Return the derived class for summary-level number *sl*, or None if unknown."""
        try:
            return cls.sl_map[sl]
        except KeyError:
            return None

    @classmethod
    def make_format_string(cls, level):
        """Build the str.format() template that renders a geoid of the named *level*."""
        sl_num = names[level]
        segs = segments[sl_num]

        formats = []

        formats.append(cls.sl_format)

        for seg in segs:
            # Lengths dict may have strings to indicate string format usage.
            # Zero-width components (e.g. 'us') are skipped entirely.
            if int(lengths[seg]) <= 0:
                continue

            if isinstance(lengths[seg], int):
                fmt = cls.elem_format
            else:
                fmt = cls.elem_str_format

            formats.append(fmt.format(seg, cls.part_width(lengths[seg])))

        return ''.join(formats)

    @classmethod
    def make_regex(cls, level):
        """Build the anchored regex string that parses a geoid of the named *level*."""
        sl_num = names[level]
        segs = segments[sl_num]

        # Lengths dict may have strings to indicate string format usage.
        regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
                                    for seg in segs if int(lengths[seg]) > 0]

        re_str = '^' + ''.join(regexes) + '$'

        return re_str

    @classmethod
    def augment(cls):
        """Augment the class with computed formats, regexes, and other things. This caches these values so
        they don't have to be created for every instance. """

        import re

        level_name = cls.__name__.lower()

        cls.sl = names[level_name]

        cls.class_map[cls.__name__.lower()] = cls

        cls.sl_map[cls.sl] = cls

        cls.fmt = cls.make_format_string(cls.__name__.lower())

        cls.regex_str = cls.make_regex(cls.__name__.lower())

        cls.regex = re.compile(cls.regex_str)

        # List of field names
        cls.level = level_name
        cls.fields = segments[cls.sl]

    @classmethod
    def get_class(cls, name_or_sl):
        """Return a derived class based on the class name or the summary_level.

        Raises TypeError for unusable arguments and NotASummaryName for
        unrecognized names.
        """

        try:
            return cls.sl_map[int(name_or_sl)]

        except TypeError as e:
            raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))

        except ValueError:
            # Not a number, so treat it as a level name.
            try:
                return cls.class_map[name_or_sl.lower()]
            except (KeyError, ValueError):
                raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))

    def __init__(self, *args, **kwargs):
        """Initialize from positional values in field order and/or keyword values.

        This is a bit unusual, because it means that, unlike normal python
        args, a kwarg can overwrite a positional arg. Keys not in
        ``self.fields`` are silently ignored.
        """

        # Add enough zeros to set all fields to zero
        d = dict(zip(self.fields, args + ((0,) * 10)))
        d.update(kwargs)

        for k, v in d.items():
            if k in self.fields:
                try:
                    setattr(self, k, v)
                except TypeError as e:
                    raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
                                    .format(v, type(v), k, type(self), e))
                except ValueError as e:
                    raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
                                     .format(v, type(v), k, type(self), e))

    def __str__(self):
        # NOTE(review): d aliases self.__dict__ (not a copy), so the 'sl' key
        # and the zero-filled sldu/sldl values are written back onto the
        # instance -- confirm this mutation is intended.
        d = self.__dict__
        d['sl'] = self.sl

        # Hacks for special string cases
        if 'sldu' in d:
            d['sldu'] = str(d['sldu']).zfill(3)

        if 'sldl' in d:
            d['sldl'] = str(d['sldl']).zfill(3)

        try:
            fn = six.get_method_function(self.encode)
            kwargs = {k: fn(v) for k, v in d.items()}
            return self.fmt.format(**kwargs)
        except (ValueError, KeyError) as e:
            raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))

    @property
    def state_name(self):
        """The full name of this geoid's state, from geoid.censusnames."""
        from geoid.censusnames import geo_names

        return geo_names[(self.state, 0)]

    @property
    def stusab(self):
        """The state's two-letter USPS abbreviation, or 'US' for national objects."""
        from geoid.censusnames import stusab

        try:
            return stusab[int(self.state)]
        except (AttributeError, ValueError):
            # Assume this is a Us object, or some other national object
            return 'US'

    @property
    def county_name(self):
        """A CountyName for this geoid's county, falling back to synthesized
        placeholder names when the lookup tables lack an entry."""
        from geoid.censusnames import geo_names

        try:
            try:
                return CountyName(geo_names[(self.state, self.county)])
            except KeyError:
                try:
                    return CountyName("County #{}, {}".format(self.county, geo_names[(self.state, 0)]))
                except KeyError:
                    return CountyName("County #{}, State#{}".format(self.county, self.state))
        except Exception:
            return CountyName('')

    @property
    def geo_name(self):
        """
        Return a name of the state or county, or, for other lower levels, the
        name of the level type in the county.

        :return:
        """
        if self.level == 'county':
            return str(self.county_name)
        elif self.level == 'state':
            return self.state_name
        else:
            if hasattr(self, 'county'):
                return "{} in {}".format(self.level, str(self.county_name))
            elif hasattr(self, 'state'):
                return "{} in {}".format(self.level, self.state_name)
            else:
                return "a {}".format(self.level)

    # Hashing and all comparisons delegate to the canonical string form.
    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        return str(self) == str(other)

    def __ne__(self, other):
        return str(self) != str(other)

    def __lt__(self, other):
        return str(self) < str(other)

    def __le__(self, other):
        return str(self) <= str(other)

    def __gt__(self, other):
        return str(self) > str(other)

    def __ge__(self, other):
        return str(self) >= str(other)

    @classmethod
    def parse(cls, gvid, exception=True):
        """
        Parse a string value into the geoid of this class.

        :param gvid: String value to parse.
        :param exception: If True (default), raise an exception on parse errors. If False, return a
            'null' geoid.
        :return:
        """

        if gvid == 'invalid':
            return cls.get_class('null')(0)

        if not bool(gvid):
            return None

        if not isinstance(gvid, six.string_types):
            raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))

        try:
            if not cls.sl:
                # Civick and ACS include the SL, so can call from base type.
                if six.PY3:
                    fn = cls.decode
                else:
                    fn = cls.decode.__func__
                sl = fn(gvid[0:cls.sl_width])
            else:
                sl = cls.sl  # Otherwise must use derived class.
        except ValueError as e:
            if exception:
                raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
            else:
                return cls.get_class('null')(0)

        try:
            cls = cls.sl_map[sl]
        except KeyError:
            if exception:
                raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
            else:
                return cls.get_class('null')(0)

        m = cls.regex.match(gvid)

        if not m:
            raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))

        d = m.groupdict()

        if not d:
            return None

        if six.PY3:
            fn = cls.decode
        else:
            fn = cls.decode.__func__

        d = {k: fn(v) for k, v in d.items()}

        # 'sl' is redundant with the class itself; drop it before constructing.
        try:
            del d['sl']
        except KeyError:
            pass

        return cls(**d)

    def convert(self, root_cls):
        """Convert to another derived class. cls is the base class for the derived type,
        ie AcsGeoid, TigerGeoid, etc. """

        d = self.__dict__
        d['sl'] = self.sl

        try:
            cls = root_cls.get_class(root_cls.sl)
        except (AttributeError, TypeError):
            # Hopefully because root_cls is a module
            cls = root_cls.get_class(self.sl)

        return cls(**d)

    def as_census(self):
        """This geoid converted to the Census form."""
        from geoid.census import CensusGeoid
        return self.convert(CensusGeoid)

    def as_acs(self):
        """This geoid converted to the ACS form."""
        from geoid.acs import AcsGeoid
        return self.convert(AcsGeoid)

    def as_tiger(self):
        """This geoid converted to the TIGER form."""
        from geoid.tiger import TigerGeoid
        return self.convert(TigerGeoid)

    def promote(self, level=None):
        """Convert to the next higher level summary level, or to an explicit
        *level*. Returns None when there is no higher level to promote to."""

        if level is None:
            if len(self.fields) < 2:
                if self.level in ('region', 'division', 'state', 'ua'):
                    cls = self.get_class('us')
                else:
                    return None
            else:
                cls = self.get_class(self.fields[-2])
        else:
            cls = self.get_class(level)

        d = dict(self.__dict__.items())
        d['sl'] = self.sl

        return cls(**d)

    def summarize(self):
        """Convert all of the values to their max values. This form is used to represent the summary level"""
        raise NotImplementedError

    def allval(self):
        """Convert the last value to zero. This form represents the entire higher summary level at the granularity
        of the lower summary level. For example, for a county, it means 'All counties in the state' """

        d = dict(self.__dict__.items())
        d['sl'] = self.sl
        d[self.level] = 0

        cls = self.get_class(self.sl)

        return cls(**d)

    @classmethod
    def nullval(cls):
        """Create a new instance where all of the values are 0.

        NOTE(review): this copies the *class* __dict__ (methods, descriptors)
        and zeroes every entry; the constructor then keeps only keys found in
        ``cls.fields``. Confirm this indirect mechanism is intended.
        """

        d = dict(cls.__dict__.items())

        for k in d:
            d[k] = 0

        d['sl'] = cls.sl
        d[cls.level] = 0

        return cls(**d)

    @property
    def tuples(self):
        """Return tuples of field, value, in the order of the levels as they are defined """
        return [(field, getattr(self, field, None)) for field in self.fields]

    @property
    def is_summary(self):
        """Return True if this geoid is a summary -- all of the fields are 0"""
        return str(self) == str(self.summarize())

    @property
    def is_allval(self):
        """Return True if this geoid is an allval -- the last field is zero, but the first is not"""

        tups = self.tuples

        return tups[-1][1] == 0 and tups[0][1] != 0

    @property
    def level_plural(self):
        """Return the name of the level as a plural"""
        return plurals.get(self.level, self.level + "s")
def generate_all(sumlevel, d):
    """Generate a dict that includes all of the available geoid values, with keys
    for the most common names for those values.

    :param sumlevel: summary-level number (anything accepted by int()).
    :param d: mapping of component field names to values; common name
        variants (cousub, blkgrp, zcta5) are normalized.
    :return: dict with 'gvid', 'geoid' and 'geoidt' string keys, or {} when
        the summary level has no GVid class.
    """
    from geoid.civick import GVid
    from geoid.tiger import TigerGeoid
    from geoid.acs import AcsGeoid

    sumlevel = int(sumlevel)
    d = dict(d.items())  # Copy: the variant-name normalization below mutates it

    # Map common name variants to the canonical field names.
    if 'cousub' in d:
        d['cosub'] = d['cousub']
        del d['cousub']

    if 'blkgrp' in d:
        d['blockgroup'] = d['blkgrp']
        del d['blkgrp']

    if 'zcta5' in d:
        d['zcta'] = d['zcta5']
        del d['zcta5']

    gvid_class = GVid.resolve_summary_level(sumlevel)

    if not gvid_class:
        return {}

    geoidt_class = TigerGeoid.resolve_summary_level(sumlevel)
    geoid_class = AcsGeoid.resolve_summary_level(sumlevel)

    # The original wrapped this return in a bare ``try: ... except: raise``,
    # which re-raised everything unchanged -- a no-op, removed.
    return dict(
        gvid=str(gvid_class(**d)),
        geoid=str(geoid_class(**d)),
        geoidt=str(geoidt_class(**d)),
    )
def _generate_names():
    """Regenerate the state and county name tables used by geoid.censusnames.

    Developer utility, not called at runtime:

    >>> python -c 'import geoid; geoid._generate_names()'
    """
    # Local imports: pprint was missing entirely in the original (NameError at
    # the final call), and ambry is a dev-only dependency.
    import pprint
    from ambry import get_library

    l = get_library()

    counties = l.partition('census.gov-acs-geofile-2009-geofile50-20095-50')
    states = l.partition('census.gov-acs-geofile-2009-geofile40-20095-40')

    names = {}

    for row in counties.remote_datafile.reader:
        names[(row.state, row.county)] = row.name

    for row in states.remote_datafile.reader:
        if row.component == '00':  # '00' is the whole-state component record
            names[(row.state, 0)] = row.name

    pprint.pprint(names)
|
Metatab/geoid
|
geoid/core.py
|
generate_all
|
python
|
def generate_all(sumlevel, d):
from geoid.civick import GVid
from geoid.tiger import TigerGeoid
from geoid.acs import AcsGeoid
sumlevel = int(sumlevel)
d = dict(d.items())
# Map common name variants
if 'cousub' in d:
d['cosub'] = d['cousub']
del d['cousub']
if 'blkgrp' in d:
d['blockgroup'] = d['blkgrp']
del d['blkgrp']
if 'zcta5' in d:
d['zcta'] = d['zcta5']
del d['zcta5']
gvid_class = GVid.resolve_summary_level(sumlevel)
if not gvid_class:
return {}
geoidt_class = TigerGeoid.resolve_summary_level(sumlevel)
geoid_class = AcsGeoid.resolve_summary_level(sumlevel)
try:
return dict(
gvid=str(gvid_class(**d)),
geoid=str(geoid_class(**d)),
geoidt=str(geoidt_class(**d))
)
except:
raise
|
Generate a dict that includes all of the available geoid values, with keys
for the most common names for those values.
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L887-L927
|
[
"def resolve_summary_level(cls, sl):\n try:\n return cls.sl_map[sl]\n except KeyError:\n return None\n"
] |
""" CLasses for working with census Geoids
"""
import inspect
import re
import sys
import six
# Summary-level number for each named geoid level. Duplicate literal key
# 'state_sldl_county' removed (it appeared twice with the same value 622,
# which is harmless at runtime but a lint defect in the source).
names = {  # (summary level value, base 10 chars, Base 62 chars, prefix fields)
    'null': 1,
    'us': 10,
    'region': 20,
    'division': 30,
    'state': 40,
    'county': 50,
    'cosub': 60,
    'place': 160,
    'ua': 400,
    'tract': 140,
    'blockgroup': 150,
    'block': 101,
    'sdelm': 950,
    'sdsec': 960,
    'sduni': 970,
    'zcta': 860,
    'zip': 1200,
    'sldl': 620,
    'sldu': 610,
    'cdcurr': 500,
    # Other Levels that don't have proper names yet. For these .allval() and
    # simplify() don't work properly.
    'state_aianhh': 260,
    'necta_nectadiv_state_county_cousub': 358,
    'state_aianhh_place': 269,
    'aianhh_state_county': 270,
    'state_cbsa_metdiv': 323,
    'state_aianhh280': 280,
    'state_place_county': 155,
    'aianhh_aitsce_state': 290,
    'state_aianhh_aihhtli': 283,
    'state_cdcurr_aianhh': 550,
    'state_concit': 170,
    'state_concit_place': 172,
    'state_aianhh_aihhtli286': 286,
    'cbsa': 310,
    'cbsa_state': 311,
    'cbsa_state_place': 312,
    'cbsa_state_county': 313,
    'cbsa_metdiv': 314,
    'cbsa_metdiv_state': 315,
    'state_cbsa': 320,
    'state_cbsa_place': 321,
    'state_cbsa_county': 322,
    'state_county_cousub_submcd': 67,
    'state_cbsa_metdiv_county': 324,
    'state_county_cousub_place': 70,
    'necta_state_county': 353,
    'state_puma5': 795,
    'csa': 330,
    'csa_state': 331,
    'csa_cbsa': 332,
    'csa_cbsa_state': 333,
    'cnecta': 335,
    'state_county_cousub_place_tract': 80,
    'cnecta_necta': 337,
    'cnecta_necta_state': 338,
    'state_csa': 340,
    'state_csa_cbsa': 341,
    'state_cnecta': 345,
    'state_cnecta_necta': 346,
    'necta': 350,
    'necta_state': 351,
    'necta_state_place': 352,
    'cnecta_state': 336,
    'necta_state_county_cousub': 354,
    'necta_nectadiv': 355,
    'necta_nectadiv_state': 356,
    'state_anrc': 230,
    'necta_nectadiv_state_county': 357,
    'state_necta': 360,
    'cbsa_metdiv_state_county': 316,
    'state_necta_county': 362,
    'state_necta_county_cousub': 363,
    'state_necta_nectadiv': 364,
    'state_necta_nectadiv_county': 365,
    'state_necta_nectadiv_county_cousub': 366,
    'ua_state': 410,
    'ua_state_county': 430,
    'state_sldu_county': 612,
    'state_sldu': 610,
    'state_sldl_county': 622,
    'state_sldl': 620,
    'state_cdcurr_county': 510,
    'state_necta_place': 361,
    'aianhh': 250,
    'aianhh_aitsce': 251,
    'aianhh_aihhtli': 252,
    'aianhh_aihhtli254': 254
}
# Character width of each geoid field. A width given as a *string* (e.g.
# '3' for sldl/sldu, '1' for aihhtli) signals that the Census encodes the
# field as a string rather than an integer; make_format_string() and
# make_regex() branch on that distinction.
lengths = {
    'null': 1,
    'aianhh': 4,  # American Indian Area/Alaska Native Area/ Hawaiian Home Land (Census)
    'aihhtli': '1',  # American Indian Trust Land/ Hawaiian Home Land Indicator. A str b/c Census val is a str
    'aitsce': 3,  # American Indian Tribal Subdivision (Census)
    'anrc': 5,  # Alaska Native Regional Corporation (FIPS)
    'blkgrp': 1,  # Block Group
    'blockgroup': 1,  # Block Group
    'block': 4,  # Block
    'cbsa': 5,  # Metropolitan and Micropolitan Statistical Area
    'cdcurr': 2,  # Current Congressional District ***
    'cnecta': 3,  # New England City and Town Combined Statistical Area
    'concit': 5,  # Consolidated City
    'county': 3,  # County of current residence
    'cousub': 5,  # County Subdivision (FIPS)
    'cosub': 5,  # County Subdivision (FIPS)
    'csa': 3,  # Combined Statistical Area
    'division': 1,  # Census Division
    'metdiv': 5,  # Metropolitan Statistical Area- Metropolitan Division
    'necta': 5,  # New England City and Town Area
    'nectadiv': 5,  # New England City and Town Area Division
    'place': 5,  # Place (FIPS Code)
    'puma5': 5,  # Public Use Microdata Area 5% File
    'region': 1,  # Census Region
    'sdelm': 5,  # State-School District (Elementary)
    'sdsec': 5,  # State-School District (Secondary)
    'sduni': 5,  # State-School District (Unified)
    'sldl': '3',  # State Legislative District Lower. A String to signal that the census value is a string
    'sldu': '3',  # State Legislative District Upper. A String to signal that the census value is a string
    'state': 2,  # State (FIPS Code)
    'submcd': 5,  # Subminor Civil Division (FIPS)
    'tract': 6,  # Census Tract
    'ua': 5,  # Urban Area
    'ur': 1,  # Urban/Rural
    'us': 0,
    'zcta': 5,
    # Nonstandard
    'zip': 5,
}
segments = {
1: ['null'], # United States
10: ['us'], # United States
20: ['region'], # Region
30: ['division'], # Division
40: ['state'], # State
50: ['state', 'county'], # County
60: ['state', 'county', 'cousub'], # County Subdivision
67: ['state', 'county', 'cousub', 'submcd'], # State (Puerto Rico Only)-County-County Subdivision-Subbarrio
70: ['state', 'county', 'cousub', 'place'], # County Subdivision-Place/Remainder
80: ['state', 'county', 'cousub', 'place', 'tract'], # County Subdivision-Place/Remainder-Census Tract
101: ['state', 'county', 'tract', 'block'],
140: ['state', 'county', 'tract'], # Census Tract
150: ['state', 'county', 'tract', 'blockgroup'], # Census Tract-Block Group
155: ['state', 'place', 'county'], # Place-County
160: ['state', 'place'], # Place
170: ['state', 'concit'], # Consolidated City
172: ['state', 'concit', 'place'], # Consolidated City-Place Within Consolidated City
230: ['state', 'anrc'], # State-Alaska Native Regional Corporation
250: ['aianhh'], # American Indian Area/Alaska Native Area/Hawaiian Home Land
251: ['aianhh', 'aitsce'], # American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder
252: ['aianhh', 'aihhtli'], # American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)4
254: ['aianhh', 'aihhtli'], # American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
260: ['state', 'aianhh'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-State
269: ['state', 'aianhh', 'place'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder
270: ['aianhh', 'state', 'county'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County
280: ['state', 'aianhh'], # State-American Indian Area/Alaska Native Area/Hawaiian Home Land
283: ['state', 'aianhh', 'aihhtli'],
# State-American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)
286: ['state', 'aianhh', 'aihhtli'],
# State-American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
290: ['aianhh', 'aitsce', 'state'],
# American Indian Area/Alaska Native Area/Hawaiian Home Land-Tribal Subdivision/Remainder-State
310: ['cbsa'], # CBSA
311: ['cbsa', 'state'], # CBSA-State-County
312: ['cbsa', 'state', 'place'], # CBSA-State-Principal City
313: ['cbsa', 'state', 'county'], # CBSA-State-County
314: ['cbsa', 'metdiv'], # Metropolitan Statistical Area/Metropolitan Division
315: ['cbsa', 'metdiv', 'state'], # Metropolitan Statistical Area/Metropolitan Division-State
316: ['cbsa', 'metdiv', 'state', 'county'], # Metropolitan Statistical Area/Metropolitan Division-State-County
320: ['state', 'cbsa'], # State- CBSA
321: ['state', 'cbsa', 'place'], # State- CBSA -Principal City
322: ['state', 'cbsa', 'county'], # State- CBSA -County
323: ['state', 'cbsa', 'metdiv'], # State- Metropolitan Statistical Area/Metropolitan Division
324: ['state', 'cbsa', 'metdiv', 'county'], # State- Metropolitan Statistical Area/Metropolitan Division-County
330: ['csa'], # Combined Statistical Area
331: ['csa', 'state'], # Combined Statistical Area-State
332: ['csa', 'cbsa'], # Combined Statistical Area-CBSA
333: ['csa', 'cbsa', 'state'], # Combined Statistical Area-CBSA-State
335: ['cnecta'], # Combined New England City and Town Area
336: ['cnecta', 'state'], # Combined New England City and Town Area -State
337: ['cnecta', 'necta'], # Combined New England City and Town Area -New England City and Town Area
338: ['cnecta', 'necta', 'state'], # Combined New England City and Town Area -New England City and Town Area-State
340: ['state', 'csa'], # State-Combined Statistical Area
341: ['state', 'csa', 'cbsa'], # State-Combined Statistical Area-CBSA
345: ['state', 'cnecta'], # State-Combined New England City and Town Area
346: ['state', 'cnecta', 'necta'], # State-Combined New England City and Town Area-New England City and Town Area
350: ['necta'], # New England City and Town Area
351: ['necta', 'state'], # New England City and Town Area-State
352: ['necta', 'state', 'place'], # New England City and Town Area-State-Principal City
353: ['necta', 'state', 'county'], # New England City and Town Area-State-County
354: ['necta', 'state', 'county', 'cousub'], # New England City and Town Area-State-County-County Subdivision
355: ['necta', 'nectadiv'], # New England City and Town Area (NECTA)-NECTA Division
356: ['necta', 'nectadiv', 'state'], # New England City and Town Area (NECTA)-NECTA Division-State
357: ['necta', 'nectadiv', 'state', 'county'], # New England City and Town Area (NECTA)-NECTA Division-State-County
358: ['necta', 'nectadiv', 'state', 'county', 'cousub'],
# New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision
360: ['state', 'necta'], # State-New England City and Town Area
361: ['state', 'necta', 'place'], # State-New England City and Town Area-Principal City
362: ['state', 'necta', 'county'], # State-New England City and Town Area-County
363: ['state', 'necta', 'county', 'cousub'], # State-New England City and Town Area-County-County Subdivision
364: ['state', 'necta', 'nectadiv'], # State-New England City and Town Area (NECTA)-NECTA Division
365: ['state', 'necta', 'nectadiv', 'county'], # State-New England City and Town Area (NECTA)-NECTA Division-County
366: ['state', 'necta', 'nectadiv', 'county', 'cousub'],
# State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision
400: ['ua'], # Urban Area,
410: ['ua', 'state'], # Urban Area, State,
430: ['ua','state','county'], # Urban Area, State, County,
500: ['state', 'cdcurr'], # Congressional District
510: ['state', 'cdcurr', 'county'], #
550: ['state', 'cdcurr', 'aianhh'],
# Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land
610: ['state', 'sldu'], # State Senate District
612: ['state', 'sldu', 'county'], # State Senate District-County
620: ['state', 'sldl'], # State House District
622: ['state', 'sldl', 'county'], # State House District-County
795: ['state', 'puma5'], # State-Public Use MicroSample Area 5%
860: ['zcta'],
950: ['state', 'sdelm'], # State-Elementary School District
960: ['state', 'sdsec'], # State-High School District
970: ['state', 'sduni'], # State-Unified School District
# Nonstandard
1200: ['zip']
}
descriptions = {
1: 'United States',
10: 'United States',
20: 'Region',
30: 'Division',
40: 'State',
50: 'County',
60: 'County Subdivision',
67: 'State (Puerto Rico Only)-County-County Subdivision-Subbarrio',
70: 'County Subdivision-Place/Remainder',
80: 'County Subdivision-Place/Remainder-Census Tract',
101: 'block',
140: 'Census Tract',
150: 'Census Tract-Block Group',
155: 'Place-County',
160: 'Place',
170: 'Consolidated City',
172: 'Consolidated City-Place Within Consolidated City',
230: 'State-Alaska Native Regional Corporation',
250: 'American Indian Area/Alaska Native Area/Hawaiian Home Land',
251: 'American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder',
252: 'American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)',
254: 'American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land',
260: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State',
269: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder',
270: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County',
280: 'State-American Indian Area/Alaska Native Area/Hawaiian Home Land',
283: 'aihhtli',
286: 'aihhtli',
290: 'state',
310: 'CBSA',
311: 'CBSA-State-County',
312: 'CBSA-State-Principal City',
313: 'CBSA-State-County',
314: 'Metropolitan Statistical Area/Metropolitan Division',
315: 'Metropolitan Statistical Area/Metropolitan Division-State',
316: 'Metropolitan Statistical Area/Metropolitan Division-State-County',
320: 'State- CBSA',
321: 'State- CBSA -Principal City',
322: 'State- CBSA -County',
323: 'State- Metropolitan Statistical Area/Metropolitan Division',
324: 'State- Metropolitan Statistical Area/Metropolitan Division-County',
330: 'Combined Statistical Area',
331: 'Combined Statistical Area-State',
332: 'Combined Statistical Area-CBSA',
333: 'Combined Statistical Area-CBSA-State',
335: 'Combined New England City and Town Area',
336: 'Combined New England City and Town Area -State',
337: 'Combined New England City and Town Area -New England City and Town Area',
338: 'Combined New England City and Town Area -New England City and Town Area-State',
340: 'State-Combined Statistical Area',
341: 'State-Combined Statistical Area-CBSA',
345: 'State-Combined New England City and Town Area',
346: 'State-Combined New England City and Town Area-New England City and Town Area',
350: 'New England City and Town Area',
351: 'New England City and Town Area-State',
352: 'New England City and Town Area-State-Principal City',
353: 'New England City and Town Area-State-County',
354: 'New England City and Town Area-State-County-County Subdivision',
355: 'New England City and Town Area (NECTA)-NECTA Division',
356: 'New England City and Town Area (NECTA)-NECTA Division-State',
357: 'New England City and Town Area (NECTA)-NECTA Division-State-County',
358: 'New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision',
360: 'State-New England City and Town Area',
361: 'State-New England City and Town Area-Principal City',
362: 'State-New England City and Town Area-County',
363: 'State-New England City and Town Area-County-County Subdivision',
364: 'State-New England City and Town Area (NECTA)-NECTA Division',
365: 'State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision',
400: 'Urban Area,',
410: 'Urban Area, State,',
430: 'Urban Area, State, County,',
500: 'Congressional District',
510: 'Congressional District, County',
550: 'Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land',
610: 'State Senate District',
612: 'State Senate District-County',
620: 'State House District',
622: 'State House District-County',
795: 'State-Public Use MicroSample Area 5%',
860: 'ZIP Code Tabulation Area',
950: 'State-Elementary School District',
960: 'State-High School District',
970: 'State-Unified School District',
}
# Irregular plural forms for level names; Geoid.level_plural falls back to
# appending "s" for any level not listed here.
plurals = {
    'county': 'counties',
    'place': 'places',
    'Sdlu': 'State '  # NOTE(review): key casing ('Sdlu' vs 'sldu') and the truncated value look wrong — confirm before relying on this entry
}
class NotASummaryName(Exception):
    """Raised when an argument is not one of the valid summary-level names or numbers."""
class ParseError(Exception):
    """Raised when a geoid string cannot be parsed."""
def parse_to_gvid(v):
    """Parse either a GVid string or an ACS geoid string into a GVid."""
    from geoid.civick import GVid
    from geoid.acs import AcsGeoid

    # First attempt: the string is already in civick (GVid) form.
    try:
        return GVid.parse(v)
    except ValueError as gvid_err:
        first_error = str(gvid_err)

    # Second attempt: an ACS geoid, converted to the civick encoding.
    try:
        return AcsGeoid.parse(v).convert(GVid)
    except ValueError as acs_err:
        raise ValueError(
            "Failed to parse to either ACS or GVid: {}; {}".format(first_error, str(acs_err)))
def base62_encode(num):
    """Encode a non-negative integer as a base-62 string.

    Digits are 0-9, a-z, A-Z, most significant first.
    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    num = int(num)
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if num == 0:
        return alphabet[0]
    # Collect digits least-significant first, then reverse.
    digits = []
    while num:
        num, rem = divmod(num, len(alphabet))
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))
def base62_decode(string):
    """Decode a base-62 string (digits 0-9, a-z, A-Z) into an integer.

    Arguments:
    - `string`: The encoded string

    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    radix = len(alphabet)
    value = 0
    # Horner's rule: fold the digits in left to right.
    for ch in string:
        value = value * radix + alphabet.index(ch)
    return value
def augment(module_name, base_class):
    """Invoke ``augment()`` on every strict subclass of *base_class* defined in the named module."""
    module = sys.modules[module_name]

    def _is_candidate(obj):
        return inspect.isclass(obj) and issubclass(obj, base_class)

    for _name, klass in inspect.getmembers(module, _is_candidate):
        if klass is base_class:
            continue
        klass.augment()
def get_class(module, sl):
    """Return the geoid class in *module* for a summary level given either
    its name or its numeric summary-level value; raise NotASummaryName otherwise."""
    for level_name, level_number in names.items():
        if sl == level_number or sl == level_name:
            return getattr(module, level_name.capitalize())
    raise NotASummaryName("No class for summary_level {}".format(sl))
def make_classes(base_class, module):
    """Create one derived geoid class per summary level and install each on *module*.

    Called at the end of each derived-class module (acs, census, civick,
    tiger): builds a class for every entry in ``names`` and also binds a
    module-specific ``get_class`` helper onto the module.
    """
    from functools import partial

    for level in names:
        class_name = level.capitalize()
        derived = base_class.class_factory(class_name)
        derived.augment()
        setattr(module, class_name, derived)

    # Pre-bind get_class to this module so callers can omit the module arg.
    setattr(module, 'get_class', partial(get_class, module))
class CountyName(object):
    """A Census county name, with methods to create shorter versions and return the type of division,
    which may be county, parish, borough, etc. """

    # Matches everything from the first comma on (the state portion of a
    # name like 'Adams County, Colorado'). This doesn't work for some
    # locations where the county is actually called a parish or a barrio.
    state_name_pattern = r', (.*)$'
    state_name_re = re.compile(state_name_pattern)

    def __init__(self, name):
        # name: the full census name, e.g. 'Adams County, Colorado'
        self.name = name

    def intuit_name(self, name):
        """Return a numeric value in the range [-1,1), indicating the likelihood that the name is a value
        of this type. -1 indicates a strong non-match, 1 indicates a strong match, and 0 indicates uncertainty. """
        raise NotImplementedError

    @property
    def state(self):
        # The text after the comma; note the leading space is preserved.
        try:
            county, state = self.name.split(',')
            return state
        except ValueError:
            # The split fails for names without a comma, e.g. 'District of Columbia'
            return ''

    @property
    def medium_name(self):
        """The census name without the state"""
        return self.state_name_re.sub('', self.name)

    # Division designations that may appear in a county-equivalent name.
    type_names = (
        'County', 'Municipio', 'Parish', 'Census Area', 'Borough',
        'Municipality', 'city', 'City and Borough')
    type_name_pattern = '|'.join('({})'.format(e) for e in type_names)
    type_names_re = re.compile(type_name_pattern)

    @property
    def division_name(self):
        """The type designation for the county or county equivalent, such as 'County','Parish' or 'Borough'"""
        try:
            return next(e for e in self.type_names_re.search(self.name).groups() if e is not None)
        except AttributeError:
            # The search will fail for 'District of Columbia'
            return ''

    county_name_pattern = r'(.+) {}, (.+)'.format(type_name_pattern)
    county_name_re = re.compile(county_name_pattern)

    @property
    def short_name(self):
        # The name with both the state and the division designation removed.
        try:
            county, state = self.name.split(',')
        except ValueError:
            return self.name  # no comma, e.g. 'District of Columbia'
        return self.type_names_re.sub('', county)

    def __str__(self):
        return self.name
class Geoid(object):
    """Base class for the geoid encodings (ACS, Census, civick, TIGER).

    NOTE(review): subclass modules are expected to supply ``sl_map``,
    ``class_map``, the ``sl_format`` / ``elem_format`` / ``elem_str_format`` /
    ``sl_regex`` / ``elem_regex`` templates and the ``part_width`` /
    ``encode`` / ``decode`` / ``class_factory`` helpers referenced below;
    none of them are defined in this class — confirm against the derived
    modules before changing this code.
    """

    @classmethod
    def resolve_summary_level(cls, sl):
        # Return the derived class for a numeric summary level, or None.
        try:
            return cls.sl_map[sl]
        except KeyError:
            return None

    @classmethod
    def make_format_string(cls, level):
        # Build the str.format template used to render a geoid of this level.
        sl_num = names[level]
        segs = segments[sl_num]
        formats = []
        formats.append(cls.sl_format)
        for seg in segs:
            # Lengths dict may have strings to indicate string format usage.
            if int(lengths[seg]) <= 0:
                continue
            if isinstance(lengths[seg], int):
                fmt = cls.elem_format
            else:
                fmt = cls.elem_str_format
            formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
        return ''.join(formats)

    @classmethod
    def make_regex(cls, level):
        # Build the anchored regex string that parses a geoid of this level.
        sl_num = names[level]
        segs = segments[sl_num]
        # Lengths dict may have strings to indicate string format usage.
        regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
                                    for seg in segs if int(lengths[seg]) > 0]
        re_str = '^' + ''.join(regexes) + '$'
        return re_str

    @classmethod
    def augment(cls):
        """Augment the class with computed formats, regexes, and other things. This caches these values so
        they don't have to be created for every instance. """
        import re
        level_name = cls.__name__.lower()
        cls.sl = names[level_name]
        cls.class_map[cls.__name__.lower()] = cls
        cls.sl_map[cls.sl] = cls
        cls.fmt = cls.make_format_string(cls.__name__.lower())
        cls.regex_str = cls.make_regex(cls.__name__.lower())
        cls.regex = re.compile(cls.regex_str)
        # List of field names
        cls.level = level_name
        cls.fields = segments[cls.sl]

    @classmethod
    def get_class(cls, name_or_sl):
        """Return a derived class based on the class name or the summary_level"""
        try:
            return cls.sl_map[int(name_or_sl)]
        except TypeError as e:
            raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
        except ValueError:
            # Not a number: treat the argument as a level name.
            try:
                return cls.class_map[name_or_sl.lower()]
            except (KeyError, ValueError):
                raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))

    def __init__(self, *args, **kwargs):
        # This is a bit unusual, because it means that, unlike normal
        # python args, a kwarg can overwrite a positional arg.
        d = dict(zip(self.fields, args + ((0,) * 10)))  # Add enough zeros to set all fields to zero
        d.update(kwargs)
        for k, v in d.items():
            if k in self.fields:
                try:
                    setattr(self, k, v)
                except TypeError as e:
                    raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
                                    .format(v, type(v), k, type(self), e))
                except ValueError as e:
                    raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
                                     .format(v, type(v), k, type(self), e))

    def __str__(self):
        # NOTE(review): this mutates self.__dict__ in place (adds 'sl',
        # re-formats sldu/sldl) as a side effect of formatting — confirm
        # callers don't depend on the original attribute values afterwards.
        d = self.__dict__
        d['sl'] = self.sl
        # Hacks for special string cases
        if 'sldu' in d:
            d['sldu'] = str(d['sldu']).zfill(3)
        if 'sldl' in d:
            d['sldl'] = str(d['sldl']).zfill(3)
        try:
            fn = six.get_method_function(self.encode)
            kwargs = {k: fn(v) for k, v in d.items()}
            return self.fmt.format(**kwargs)
        except (ValueError, KeyError) as e:
            raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))

    @property
    def state_name(self):
        # Full state name, looked up by (state FIPS, 0).
        from geoid.censusnames import geo_names
        return geo_names[(self.state, 0)]

    @property
    def stusab(self):
        # Two-letter state abbreviation; 'US' for national-level geoids.
        from geoid.censusnames import stusab
        try:
            return stusab[int(self.state)]
        except (AttributeError, ValueError):
            # Assume this is a Us object, or some other national object
            return 'US'

    @property
    def county_name(self):
        # CountyName for this geoid, degrading gracefully to placeholder
        # names when the (state, county) pair is not in the lookup table.
        from geoid.censusnames import geo_names
        try:
            try:
                return CountyName(geo_names[(self.state, self.county)])
            except KeyError:
                try:
                    return CountyName("County #{}, {}".format(self.county, geo_names[(self.state, 0)]))
                except KeyError:
                    return CountyName("County #{}, State#{}".format(self.county, self.state))
        except Exception:
            return CountyName('')

    @property
    def geo_name(self):
        """
        Return a name of the state or county, or, for other lower levels, the
        name of the level type in the county.
        :return:
        """
        if self.level == 'county':
            return str(self.county_name)
        elif self.level == 'state':
            return self.state_name
        else:
            if hasattr(self, 'county'):
                return "{} in {}".format(self.level, str(self.county_name))
            elif hasattr(self, 'state'):
                return "{} in {}".format(self.level, self.state_name)
            else:
                return "a {}".format(self.level)

    # Equality, ordering and hashing are all delegated to the string form.
    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        return str(self) == str(other)

    def __ne__(self, other):
        return str(self) != str(other)

    def __lt__(self, other):
        return str(self) < str(other)

    def __le__(self, other):
        return str(self) <= str(other)

    def __gt__(self, other):
        return str(self) > str(other)

    def __ge__(self, other):
        return str(self) >= str(other)

    @classmethod
    def parse(cls, gvid, exception=True):
        """
        Parse a string value into the geoid of this class.

        :param gvid: String value to parse.
        :param exception: If true (default) raise an exception on parse errors. If False, return a
        'null' geoid.
        :return:
        """
        if gvid == 'invalid':
            return cls.get_class('null')(0)
        if not bool(gvid):
            return None
        if not isinstance(gvid, six.string_types):
            raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
        try:
            if not cls.sl:
                # Civick and ACS include the SL, so can call from base type.
                if six.PY3:
                    fn = cls.decode
                else:
                    fn = cls.decode.__func__
                sl = fn(gvid[0:cls.sl_width])
            else:
                sl = cls.sl  # Otherwise must use derived class.
        except ValueError as e:
            if exception:
                raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
            else:
                return cls.get_class('null')(0)
        try:
            # Rebind cls to the derived class for this summary level.
            cls = cls.sl_map[sl]
        except KeyError:
            if exception:
                raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
            else:
                return cls.get_class('null')(0)
        m = cls.regex.match(gvid)
        if not m:
            raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
        d = m.groupdict()
        if not d:
            return None
        if six.PY3:
            fn = cls.decode
        else:
            fn = cls.decode.__func__
        d = {k: fn(v) for k, v in d.items()}
        try:
            # 'sl' is implied by the class; don't pass it to the constructor.
            del d['sl']
        except KeyError:
            pass
        return cls(**d)

    def convert(self, root_cls):
        """Convert to another derived class. cls is the base class for the derived type,
        ie AcsGeoid, TigerGeoid, etc. """
        d = self.__dict__
        d['sl'] = self.sl
        try:
            cls = root_cls.get_class(root_cls.sl)
        except (AttributeError, TypeError):
            # Hopefully because root_cls is a module
            cls = root_cls.get_class(self.sl)
        return cls(**d)

    def as_census(self):
        # Shorthand for convert() to the Census encoding.
        from geoid.census import CensusGeoid
        return self.convert(CensusGeoid)

    def as_acs(self):
        # Shorthand for convert() to the ACS encoding.
        from geoid.acs import AcsGeoid
        return self.convert(AcsGeoid)

    def as_tiger(self):
        # Shorthand for convert() to the TIGER encoding.
        from geoid.tiger import TigerGeoid
        return self.convert(TigerGeoid)

    def promote(self, level=None):
        """Convert to the next higher level summary level"""
        if level is None:
            if len(self.fields) < 2:
                # Single-field levels promote to 'us', except those with no parent.
                if self.level in ('region', 'division', 'state', 'ua'):
                    cls = self.get_class('us')
                else:
                    return None
            else:
                cls = self.get_class(self.fields[-2])
        else:
            cls = self.get_class(level)
        d = dict(self.__dict__.items())
        d['sl'] = self.sl
        return cls(**d)

    def summarize(self):
        """Convert all of the values to their max values. This form is used to represent the summary level"""
        raise NotImplementedError

    def allval(self):
        """Convert the last value to zero. This form represents the entire higher summary level at the granularity
        of the lower summary level. For example, for a county, it means 'All counties in the state' """
        d = dict(self.__dict__.items())
        d['sl'] = self.sl
        d[self.level] = 0
        cls = self.get_class(self.sl)
        return cls(**d)

    @classmethod
    def nullval(cls):
        """Create a new instance where all of the values are 0"""
        # NOTE(review): this zeroes every entry of cls.__dict__ (including
        # methods) and relies on __init__ ignoring non-field kwargs — confirm
        # this is intentional before modifying.
        d = dict(cls.__dict__.items())
        for k in d:
            d[k] = 0
        d['sl'] = cls.sl
        d[cls.level] = 0
        return cls(**d)

    @property
    def tuples(self):
        """Return tuples of field, value, in the order of the levels as they are defined """
        return [(field, getattr(self, field, None)) for field in self.fields]

    @property
    def is_summary(self):
        """Return True if this geoid is a summary -- all of the fields are 0"""
        return str(self) == str(self.summarize())

    @property
    def is_allval(self):
        """Return True if this geoid is an allval -- the last field is zero, but the first is not"""
        tups = self.tuples
        return tups[-1][1] == 0 and tups[0][1] != 0

    @property
    def level_plural(self):
        """Return the name of the level as a plural"""
        return plurals.get(self.level, self.level + "s")
def _generate_names():
""" Code to generate the state and county names
>>> python -c 'import geoid; geoid._generate_names()'
"""
from ambry import get_library
l = get_library()
counties = l.partition('census.gov-acs-geofile-2009-geofile50-20095-50')
states = l.partition('census.gov-acs-geofile-2009-geofile40-20095-40')
names = {}
for row in counties.remote_datafile.reader:
names[(row.state, row.county)] = row.name
for row in states.remote_datafile.reader:
if row.component == '00':
names[(row.state, 0)] = row.name
pprint.pprint(names)
|
Metatab/geoid
|
geoid/core.py
|
_generate_names
|
python
|
def _generate_names():
from ambry import get_library
l = get_library()
counties = l.partition('census.gov-acs-geofile-2009-geofile50-20095-50')
states = l.partition('census.gov-acs-geofile-2009-geofile40-20095-40')
names = {}
for row in counties.remote_datafile.reader:
names[(row.state, row.county)] = row.name
for row in states.remote_datafile.reader:
if row.component == '00':
names[(row.state, 0)] = row.name
pprint.pprint(names)
|
Code to generate the state and county names
>>> python -c 'import geoid; geoid._generate_names()'
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L930-L952
| null |
""" CLasses for working with census Geoids
"""
import inspect
import re
import sys
import six
names = { # (summary level value, base 10 chars, Base 62 chars, prefix fields)
'null': 1,
'us': 10,
'region': 20,
'division': 30,
'state': 40,
'county': 50,
'cosub': 60,
'place': 160,
'ua': 400,
'tract': 140,
'blockgroup': 150,
'block': 101,
'sdelm': 950,
'sdsec': 960,
'sduni': 970,
'zcta': 860,
'zip': 1200,
'sldl': 620,
'sldu': 610,
'cdcurr': 500,
# Other Levels that don't have proper names yet. For these .allval() and
# simplify() don't work properly.
'state_aianhh': 260,
'necta_nectadiv_state_county_cousub': 358,
'state_aianhh_place': 269,
'aianhh_state_county': 270,
'state_cbsa_metdiv': 323,
'state_aianhh280': 280,
'state_place_county': 155,
'aianhh_aitsce_state': 290,
'state_aianhh_aihhtli': 283,
'state_cdcurr_aianhh': 550,
'state_concit': 170,
'state_concit_place': 172,
'state_aianhh_aihhtli286': 286,
'cbsa': 310,
'cbsa_state': 311,
'cbsa_state_place': 312,
'cbsa_state_county': 313,
'cbsa_metdiv': 314,
'cbsa_metdiv_state': 315,
'state_cbsa': 320,
'state_cbsa_place': 321,
'state_cbsa_county': 322,
'state_county_cousub_submcd': 67,
'state_cbsa_metdiv_county': 324,
'state_county_cousub_place': 70,
'necta_state_county': 353,
'state_puma5': 795,
'csa': 330,
'csa_state': 331,
'csa_cbsa': 332,
'csa_cbsa_state': 333,
'cnecta': 335,
'state_county_cousub_place_tract': 80,
'cnecta_necta': 337,
'cnecta_necta_state': 338,
'state_csa': 340,
'state_csa_cbsa': 341,
'state_cnecta': 345,
'state_cnecta_necta': 346,
'necta': 350,
'necta_state': 351,
'necta_state_place': 352,
'cnecta_state': 336,
'necta_state_county_cousub': 354,
'necta_nectadiv': 355,
'necta_nectadiv_state': 356,
'state_anrc': 230,
'necta_nectadiv_state_county': 357,
'state_necta': 360,
'cbsa_metdiv_state_county': 316,
'state_necta_county': 362,
'state_necta_county_cousub': 363,
'state_necta_nectadiv': 364,
'state_necta_nectadiv_county': 365,
'state_necta_nectadiv_county_cousub': 366,
'ua_state': 410,
'ua_state_county': 430,
'state_sldu_county': 612,
'state_sldu': 610,
'state_sldl_county': 622,
'state_sldl': 620,
'state_cdcurr_county': 510,
'state_necta_place': 361,
'aianhh': 250,
'aianhh_aitsce': 251,
'aianhh_aihhtli': 252,
'state_sldl_county': 622,
'aianhh_aihhtli254': 254
}
lengths = {
'null': 1,
'aianhh': 4, # American Indian Area/Alaska Native Area/ Hawaiian Home Land (Census)
'aihhtli': '1', # American Indian Trust Land/ Hawaiian Home Land Indicator. A str b/c Census val is a str
'aitsce': 3, # American Indian Tribal Subdivision (Census)
'anrc': 5, # Alaska Native Regional Corporation (FIPS)
'blkgrp': 1, # Block Group
'blockgroup': 1, # Block Group
'block': 4, # Block
'cbsa': 5, # Metropolitan and Micropolitan Statistical Area
'cdcurr': 2, # Current Congressional District ***
'cnecta': 3, # New England City and Town Combined Statistical Area
'concit': 5, # Consolidated City
'county': 3, # County of current residence
'cousub': 5, # County Subdivision (FIPS)
'cosub': 5, # County Subdivision (FIPS)
'csa': 3, # Combined Statistical Area
'division': 1, # Census Division
'metdiv': 5, # Metropolitan Statistical Area- Metropolitan Division
'necta': 5, # New England City and Town Area
'nectadiv': 5, # New England City and Town Area Division
'place': 5, # Place (FIPS Code)
'puma5': 5, # Public Use Microdata Area 5% File
'region': 1, # Census Region
'sdelm': 5, # State-School District (Elementary)
'sdsec': 5, # State-School District (Secondary)
'sduni': 5, # State-School District (Unified)
'sldl': '3', # State Legislative District Lower. A String to signal that the census value is a string
'sldu': '3', # State Legislative District Upper. A String to signal that the census value is a string
'state': 2, # State (FIPS Code)
'submcd': 5, # Subminor Civil Division (FIPS)
'tract': 6, # Census Tract
'ua': 5, # Urban Area
'ur': 1, # Urban/Rural
'us': 0,
'zcta': 5,
# Nonstandard
'zip': 5,
}
segments = {
1: ['null'], # United States
10: ['us'], # United States
20: ['region'], # Region
30: ['division'], # Division
40: ['state'], # State
50: ['state', 'county'], # County
60: ['state', 'county', 'cousub'], # County Subdivision
67: ['state', 'county', 'cousub', 'submcd'], # State (Puerto Rico Only)-County-County Subdivision-Subbarrio
70: ['state', 'county', 'cousub', 'place'], # County Subdivision-Place/Remainder
80: ['state', 'county', 'cousub', 'place', 'tract'], # County Subdivision-Place/Remainder-Census Tract
101: ['state', 'county', 'tract', 'block'],
140: ['state', 'county', 'tract'], # Census Tract
150: ['state', 'county', 'tract', 'blockgroup'], # Census Tract-Block Group
155: ['state', 'place', 'county'], # Place-County
160: ['state', 'place'], # Place
170: ['state', 'concit'], # Consolidated City
172: ['state', 'concit', 'place'], # Consolidated City-Place Within Consolidated City
230: ['state', 'anrc'], # State-Alaska Native Regional Corporation
250: ['aianhh'], # American Indian Area/Alaska Native Area/Hawaiian Home Land
251: ['aianhh', 'aitsce'], # American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder
252: ['aianhh', 'aihhtli'], # American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)4
254: ['aianhh', 'aihhtli'], # American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
260: ['state', 'aianhh'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-State
269: ['state', 'aianhh', 'place'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder
270: ['aianhh', 'state', 'county'], # American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County
280: ['state', 'aianhh'], # State-American Indian Area/Alaska Native Area/Hawaiian Home Land
283: ['state', 'aianhh', 'aihhtli'],
# State-American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)
286: ['state', 'aianhh', 'aihhtli'],
# State-American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land
290: ['aianhh', 'aitsce', 'state'],
# American Indian Area/Alaska Native Area/Hawaiian Home Land-Tribal Subdivision/Remainder-State
310: ['cbsa'], # CBSA
311: ['cbsa', 'state'], # CBSA-State-County
312: ['cbsa', 'state', 'place'], # CBSA-State-Principal City
313: ['cbsa', 'state', 'county'], # CBSA-State-County
314: ['cbsa', 'metdiv'], # Metropolitan Statistical Area/Metropolitan Division
315: ['cbsa', 'metdiv', 'state'], # Metropolitan Statistical Area/Metropolitan Division-State
316: ['cbsa', 'metdiv', 'state', 'county'], # Metropolitan Statistical Area/Metropolitan Division-State-County
320: ['state', 'cbsa'], # State- CBSA
321: ['state', 'cbsa', 'place'], # State- CBSA -Principal City
322: ['state', 'cbsa', 'county'], # State- CBSA -County
323: ['state', 'cbsa', 'metdiv'], # State- Metropolitan Statistical Area/Metropolitan Division
324: ['state', 'cbsa', 'metdiv', 'county'], # State- Metropolitan Statistical Area/Metropolitan Division-County
330: ['csa'], # Combined Statistical Area
331: ['csa', 'state'], # Combined Statistical Area-State
332: ['csa', 'cbsa'], # Combined Statistical Area-CBSA
333: ['csa', 'cbsa', 'state'], # Combined Statistical Area-CBSA-State
335: ['cnecta'], # Combined New England City and Town Area
336: ['cnecta', 'state'], # Combined New England City and Town Area -State
337: ['cnecta', 'necta'], # Combined New England City and Town Area -New England City and Town Area
338: ['cnecta', 'necta', 'state'], # Combined New England City and Town Area -New England City and Town Area-State
340: ['state', 'csa'], # State-Combined Statistical Area
341: ['state', 'csa', 'cbsa'], # State-Combined Statistical Area-CBSA
345: ['state', 'cnecta'], # State-Combined New England City and Town Area
346: ['state', 'cnecta', 'necta'], # State-Combined New England City and Town Area-New England City and Town Area
350: ['necta'], # New England City and Town Area
351: ['necta', 'state'], # New England City and Town Area-State
352: ['necta', 'state', 'place'], # New England City and Town Area-State-Principal City
353: ['necta', 'state', 'county'], # New England City and Town Area-State-County
354: ['necta', 'state', 'county', 'cousub'], # New England City and Town Area-State-County-County Subdivision
355: ['necta', 'nectadiv'], # New England City and Town Area (NECTA)-NECTA Division
356: ['necta', 'nectadiv', 'state'], # New England City and Town Area (NECTA)-NECTA Division-State
357: ['necta', 'nectadiv', 'state', 'county'], # New England City and Town Area (NECTA)-NECTA Division-State-County
358: ['necta', 'nectadiv', 'state', 'county', 'cousub'],
# New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision
360: ['state', 'necta'], # State-New England City and Town Area
361: ['state', 'necta', 'place'], # State-New England City and Town Area-Principal City
362: ['state', 'necta', 'county'], # State-New England City and Town Area-County
363: ['state', 'necta', 'county', 'cousub'], # State-New England City and Town Area-County-County Subdivision
364: ['state', 'necta', 'nectadiv'], # State-New England City and Town Area (NECTA)-NECTA Division
365: ['state', 'necta', 'nectadiv', 'county'], # State-New England City and Town Area (NECTA)-NECTA Division-County
366: ['state', 'necta', 'nectadiv', 'county', 'cousub'],
# State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision
400: ['ua'], # Urban Area,
410: ['ua', 'state'], # Urban Area, State,
430: ['ua','state','county'], # Urban Area, State, County,
500: ['state', 'cdcurr'], # Congressional District
510: ['state', 'cdcurr', 'county'], #
550: ['state', 'cdcurr', 'aianhh'],
# Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land
610: ['state', 'sldu'], # State Senate District
612: ['state', 'sldu', 'county'], # State Senate District-County
620: ['state', 'sldl'], # State House District
622: ['state', 'sldl', 'county'], # State House District-County
795: ['state', 'puma5'], # State-Public Use MicroSample Area 5%
860: ['zcta'],
950: ['state', 'sdelm'], # State-Elementary School District
960: ['state', 'sdsec'], # State-High School District
970: ['state', 'sduni'], # State-Unified School District
# Nonstandard
1200: ['zip']
}
descriptions = {
1: 'United States',
10: 'United States',
20: 'Region',
30: 'Division',
40: 'State',
50: 'County',
60: 'County Subdivision',
67: 'State (Puerto Rico Only)-County-County Subdivision-Subbarrio',
70: 'County Subdivision-Place/Remainder',
80: 'County Subdivision-Place/Remainder-Census Tract',
101: 'block',
140: 'Census Tract',
150: 'Census Tract-Block Group',
155: 'Place-County',
160: 'Place',
170: 'Consolidated City',
172: 'Consolidated City-Place Within Consolidated City',
230: 'State-Alaska Native Regional Corporation',
250: 'American Indian Area/Alaska Native Area/Hawaiian Home Land',
251: 'American Indian Area/Alaska NativeArea/HawaiianHomeLand-Tribal Subdivision/Remainder',
252: 'American Indian Area/Alaska Native Area (Reservation or Statistical Entity Only)',
254: 'American Indian Area (Off-Reservation Trust Land Only)/Hawaiian Home Land',
260: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State',
269: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-Place-Remainder',
270: 'American Indian Area/Alaska Native Area/Hawaiian Home Land-State-County',
280: 'State-American Indian Area/Alaska Native Area/Hawaiian Home Land',
283: 'aihhtli',
286: 'aihhtli',
290: 'state',
310: 'CBSA',
311: 'CBSA-State-County',
312: 'CBSA-State-Principal City',
313: 'CBSA-State-County',
314: 'Metropolitan Statistical Area/Metropolitan Division',
315: 'Metropolitan Statistical Area/Metropolitan Division-State',
316: 'Metropolitan Statistical Area/Metropolitan Division-State-County',
320: 'State- CBSA',
321: 'State- CBSA -Principal City',
322: 'State- CBSA -County',
323: 'State- Metropolitan Statistical Area/Metropolitan Division',
324: 'State- Metropolitan Statistical Area/Metropolitan Division-County',
330: 'Combined Statistical Area',
331: 'Combined Statistical Area-State',
332: 'Combined Statistical Area-CBSA',
333: 'Combined Statistical Area-CBSA-State',
335: 'Combined New England City and Town Area',
336: 'Combined New England City and Town Area -State',
337: 'Combined New England City and Town Area -New England City and Town Area',
338: 'Combined New England City and Town Area -New England City and Town Area-State',
340: 'State-Combined Statistical Area',
341: 'State-Combined Statistical Area-CBSA',
345: 'State-Combined New England City and Town Area',
346: 'State-Combined New England City and Town Area-New England City and Town Area',
350: 'New England City and Town Area',
351: 'New England City and Town Area-State',
352: 'New England City and Town Area-State-Principal City',
353: 'New England City and Town Area-State-County',
354: 'New England City and Town Area-State-County-County Subdivision',
355: 'New England City and Town Area (NECTA)-NECTA Division',
356: 'New England City and Town Area (NECTA)-NECTA Division-State',
357: 'New England City and Town Area (NECTA)-NECTA Division-State-County',
358: 'New England City and Town Area (NECTA)-NECTA Division-State-County-County Subdivision',
360: 'State-New England City and Town Area',
361: 'State-New England City and Town Area-Principal City',
362: 'State-New England City and Town Area-County',
363: 'State-New England City and Town Area-County-County Subdivision',
364: 'State-New England City and Town Area (NECTA)-NECTA Division',
365: 'State-New England City and Town Area (NECTA)-NECTA Division-County-County Subdivision',
400: 'Urban Area,',
410: 'Urban Area, State,',
430: 'Urban Area, State, County,',
500: 'Congressional District',
510: 'Congressional District, County',
550: 'Congressional District-American IndianArea/Alaska NativeArea/Hawaiian Home Land',
610: 'State Senate District',
612: 'State Senate District-County',
620: 'State House District',
622: 'State House District-County',
795: 'State-Public Use MicroSample Area 5%',
860: 'ZIP Code Tabulation Area',
950: 'State-Elementary School District',
960: 'State-High School District',
970: 'State-Unified School District',
}
# Irregular plural forms for summary-level names.  Geoid.level_plural looks a
# level up here and falls back to appending 's' for any level not listed.
plurals = {
    'county': 'counties',
    'place': 'places',
    # NOTE(review): this entry looks wrong -- level names are lowercase (e.g.
    # 'sldu'), so the capitalized key 'Sdlu' can never match, and the value
    # 'State ' appears truncated.  Left as-is; confirm the intended mapping.
    'Sdlu': 'State '
}
class NotASummaryName(Exception):
    """Raised when a value does not name any known summary level."""
class ParseError(Exception):
    """Raised when a geoid string cannot be parsed."""
def parse_to_gvid(v):
    """Parse a string as either a GVID or an ACS geoid, returning a GVid.

    Tries the GVID form first; on failure, falls back to parsing as an ACS
    geoid and converting the result.  Raises ValueError, combining both
    error messages, when neither form parses.
    """
    from geoid.acs import AcsGeoid
    from geoid.civick import GVid

    try:
        return GVid.parse(v)
    except ValueError as gvid_error:
        # Remember the first failure so both messages can be reported.
        first_message = str(gvid_error)

    try:
        return AcsGeoid.parse(v).convert(GVid)
    except ValueError as acs_error:
        raise ValueError(
            "Failed to parse to either ACS or GVid: {}; {}".format(first_message, str(acs_error)))
def base62_encode(num, alphabet='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    """Encode a non-negative number as a base-N string.

    With the default alphabet the encoding is base 62.  The old docstring
    advertised an ``alphabet`` argument that did not exist; it is now a real
    keyword parameter with the original value as default (backward
    compatible).

    :param num: The number to encode; coerced with ``int()``.
    :param alphabet: The ordered digit set to use for encoding.
    :return: The encoded string; ``alphabet[0]`` for zero.

    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    num = int(num)
    if num == 0:
        return alphabet[0]

    base = len(alphabet)
    digits = []
    while num:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])

    # Digits were produced least-significant first.
    digits.reverse()
    return ''.join(digits)
def base62_decode(string, alphabet='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    """Decode a base-N encoded string into a number.

    With the default alphabet the encoding is base 62.  The old docstring
    advertised an ``alphabet`` argument that did not exist; it is now a real
    keyword parameter with the original value as default (backward
    compatible).

    :param string: The encoded string; '' decodes to 0.
    :param alphabet: The ordered digit set used for encoding.
    :return: The decoded int.
    :raises ValueError: If the string has a character not in the alphabet.

    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """
    base = len(alphabet)

    # Horner's rule: single pass, no explicit power computation.
    num = 0
    for char in string:
        num = num * base + alphabet.index(char)
    return num
def augment(module_name, base_class):
    """Run ``augment()`` on every subclass of *base_class* in the named module.

    The base class itself is skipped; only the derived classes found as
    members of ``sys.modules[module_name]`` are augmented.
    """
    def _is_candidate(obj):
        # Only classes derived from (or equal to) the base are of interest.
        return inspect.isclass(obj) and issubclass(obj, base_class)

    module = sys.modules[module_name]
    for _, member in inspect.getmembers(module, _is_candidate):
        if member == base_class:
            continue
        member.augment()
def get_class(module, sl):
    """Return the geoid class in *module* for a summary level.

    *sl* may be either the numeric summary-level value or the level name;
    the class is looked up by the capitalized level name.

    :raises NotASummaryName: If no level matches *sl*.
    """
    for level_name, level_number in names.items():
        if level_number == sl or sl == level_name:
            return getattr(module, level_name.capitalize())

    raise NotASummaryName("No class for summary_level {}".format(sl))
def make_classes(base_class, module):
    """Create derived classes and install them in the given module.

    Called at the end of each derived-class module (acs, census, civick,
    tiger).  For every entry in the module-level ``names`` dict, a new class
    is produced via ``base_class.class_factory``, augmented, and bound in
    *module* under the capitalized level name.  A module-specific
    ``get_class`` helper is installed as well.
    """
    from functools import partial

    for level_name in names:
        class_name = level_name.capitalize()
        derived = base_class.class_factory(class_name)
        derived.augment()
        setattr(module, class_name, derived)

    # Bind get_class to this module so callers can resolve levels locally.
    setattr(module, 'get_class', partial(get_class, module))
class CountyName(object):
    """A Census county name.

    Provides shortened variants of the name and the type of the
    county-equivalent division (county, parish, borough, etc.).
    """

    # Matches the trailing ", <state>" portion of a census name.  This does
    # not work for some locations where the county is actually called a
    # parish or a bario.
    state_name_pattern = r', (.*)$'
    state_name_re = re.compile(state_name_pattern)

    # Known designations for county-equivalent divisions.
    type_names = (
        'County', 'Municipio', 'Parish', 'Census Area', 'Borough',
        'Municipality', 'city', 'City and Borough')
    type_name_pattern = '|'.join('({})'.format(e) for e in type_names)
    type_names_re = re.compile(type_name_pattern)

    county_name_pattern = r'(.+) {}, (.+)'.format(type_name_pattern)
    county_name_re = re.compile(county_name_pattern)

    def __init__(self, name):
        self.name = name

    def intuit_name(self, name):
        """Return a score in [-1, 1) for how likely *name* is a value of this
        type: -1 strong non-match, 1 strong match, 0 uncertain.  Not
        implemented."""
        raise NotImplementedError

    @property
    def state(self):
        """The state portion of the name, or '' when there is no single comma."""
        try:
            _, state_part = self.name.split(',')
        except ValueError:
            # e.g. 'District of Columbia' has no comma to split on.
            return ''
        return state_part

    @property
    def medium_name(self):
        """The census name without the state."""
        return self.state_name_re.sub('', self.name)

    @property
    def division_name(self):
        """The type designation for the county or county equivalent, such as
        'County', 'Parish' or 'Borough'; '' when none is present."""
        match = self.type_names_re.search(self.name)
        if match is None:
            # e.g. 'District of Columbia' carries no division designation.
            return ''
        return next(g for g in match.groups() if g is not None)

    @property
    def short_name(self):
        """The county portion of the name with the division type removed."""
        try:
            county_part, _ = self.name.split(',')
        except ValueError:
            return self.name  # 'District of Colombia'
        return self.type_names_re.sub('', county_part)

    def __str__(self):
        return self.name
class Geoid(object):
    """Base class for census geoids.

    NOTE(review): this class relies on attributes supplied elsewhere (on the
    derived base classes or via augment()/make_classes): ``sl_map``,
    ``class_map``, ``sl_format``, ``elem_format``, ``elem_str_format``,
    ``sl_regex``, ``elem_regex``, ``part_width``, ``encode``, ``decode`` and
    ``sl_width`` are referenced below but not defined here -- confirm against
    the derived modules (acs, census, civick, tiger).
    """

    @classmethod
    def resolve_summary_level(cls, sl):
        """Return the derived class for summary level *sl*, or None if unknown."""
        try:
            return cls.sl_map[sl]
        except KeyError:
            return None

    @classmethod
    def make_format_string(cls, level):
        """Build the str.format() template used to render geoids of *level*."""
        sl_num = names[level]
        segs = segments[sl_num]
        formats = []
        formats.append(cls.sl_format)
        for seg in segs:
            # Lengths dict may have strings to indicate string format usage.
            if int(lengths[seg]) <= 0:
                continue
            if isinstance(lengths[seg], int):
                fmt = cls.elem_format
            else:
                fmt = cls.elem_str_format
            formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
        return ''.join(formats)

    @classmethod
    def make_regex(cls, level):
        """Build the anchored regex string used to parse geoids of *level*."""
        sl_num = names[level]
        segs = segments[sl_num]
        # Lengths dict may have strings to indicate string format usage.
        regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
                                    for seg in segs if int(lengths[seg]) > 0]
        re_str = '^' + ''.join(regexes) + '$'
        return re_str

    @classmethod
    def augment(cls):
        """Augment the class with computed formats, regexes, and other things. This caches these values so
        they don't have to be created for every instance. """
        import re
        level_name = cls.__name__.lower()
        cls.sl = names[level_name]
        cls.class_map[cls.__name__.lower()] = cls
        cls.sl_map[cls.sl] = cls
        cls.fmt = cls.make_format_string(cls.__name__.lower())
        cls.regex_str = cls.make_regex(cls.__name__.lower())
        cls.regex = re.compile(cls.regex_str)
        # List of field names
        cls.level = level_name
        cls.fields = segments[cls.sl]

    @classmethod
    def get_class(cls, name_or_sl):
        """Return a derived class based on the class name or the summary_level.

        :raises TypeError: If *name_or_sl* is not int()-able or string-like.
        :raises NotASummaryName: If the name is not a known level.
        """
        try:
            return cls.sl_map[int(name_or_sl)]
        except TypeError as e:
            raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
        except ValueError:
            # Not a number, so treat it as a level name.
            try:
                return cls.class_map[name_or_sl.lower()]
            except (KeyError, ValueError):
                raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))

    def __init__(self, *args, **kwargs):
        # This is a bit unusual, because it means, that , unlike normal
        # python args, a kwarg can overwrite a position arg.
        d = dict(zip(self.fields, args + ((0,) * 10)))  # Add enough zeros to set all fields to zero
        d.update(kwargs)
        # Only keys that are actual fields are assigned; extra keys (such as
        # 'sl' passed around by convert()/promote()) are ignored.
        for k, v in d.items():
            if k in self.fields:
                try:
                    setattr(self, k, v)
                except TypeError as e:
                    raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
                                    .format(v, type(v), k, type(self), e))
                except ValueError as e:
                    raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
                                    .format(v, type(v), k, type(self), e))

    def __str__(self):
        # NOTE: d aliases the instance __dict__, so setting d['sl'] below
        # also writes an 'sl' entry onto the instance.
        d = self.__dict__
        d['sl'] = self.sl
        # Hacks for special string cases
        if 'sldu' in d:
            d['sldu'] = str(d['sldu']).zfill(3)
        if 'sldl' in d:
            d['sldl'] = str(d['sldl']).zfill(3)
        try:
            # Encode every field value with the class's encode function.
            fn = six.get_method_function(self.encode)
            kwargs = {k: fn(v) for k, v in d.items()}
            return self.fmt.format(**kwargs)
        except (ValueError, KeyError) as e:
            raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))

    @property
    def state_name(self):
        """The name of this geoid's state, looked up in geoid.censusnames."""
        from geoid.censusnames import geo_names
        return geo_names[(self.state, 0)]

    @property
    def stusab(self):
        """The state's USPS abbreviation, or 'US' for national-level geoids."""
        from geoid.censusnames import stusab
        try:
            return stusab[int(self.state)]
        except (AttributeError, ValueError):
            # Assume this is a Us object, or some other national object
            return 'US'

    @property
    def county_name(self):
        """A CountyName for this geoid, with placeholder fallbacks when the
        county or state is not in the lookup tables."""
        from geoid.censusnames import geo_names
        try:
            try:
                return CountyName(geo_names[(self.state, self.county)])
            except KeyError:
                try:
                    return CountyName("County #{}, {}".format(self.county, geo_names[(self.state, 0)]))
                except KeyError:
                    return CountyName("County #{}, State#{}".format(self.county, self.state))
        except Exception:
            # Best-effort: any other failure yields an empty name.
            return CountyName('')

    @property
    def geo_name(self):
        """
        Return a name of the state or county, or, for other lower levels, the
        name of the level type in the county.
        :return: A human-readable description string.
        """
        if self.level == 'county':
            return str(self.county_name)
        elif self.level == 'state':
            return self.state_name
        else:
            if hasattr(self, 'county'):
                return "{} in {}".format(self.level, str(self.county_name))
            elif hasattr(self, 'state'):
                return "{} in {}".format(self.level, self.state_name)
            else:
                return "a {}".format(self.level)

    # Hashing, equality and ordering all delegate to the string form.
    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        return str(self) == str(other)

    def __ne__(self, other):
        return str(self) != str(other)

    def __lt__(self, other):
        return str(self) < str(other)

    def __le__(self, other):
        return str(self) <= str(other)

    def __gt__(self, other):
        return str(self) > str(other)

    def __ge__(self, other):
        return str(self) >= str(other)

    @classmethod
    def parse(cls, gvid, exception=True):
        """
        Parse a string value into the geoid of this class.
        :param gvid: String value to parse.
        :param exception: If true (default) raise an exception on parse errors. If False, return a
        'null' geoid.
        :return: A geoid instance, or None for an empty input.
        """
        if gvid == 'invalid':
            return cls.get_class('null')(0)
        if not bool(gvid):
            return None
        if not isinstance(gvid, six.string_types):
            raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
        try:
            if not cls.sl:
                # Civick and ACS include the SL, so can call from base type.
                if six.PY3:
                    fn = cls.decode
                else:
                    # Python 2: unwrap the unbound method to call it directly.
                    fn = cls.decode.__func__
                sl = fn(gvid[0:cls.sl_width])
            else:
                sl = cls.sl  # Otherwise must use derived class.
        except ValueError as e:
            if exception:
                raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
            else:
                return cls.get_class('null')(0)
        try:
            # Rebind cls to the concrete class for this summary level.
            cls = cls.sl_map[sl]
        except KeyError:
            if exception:
                raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
            else:
                return cls.get_class('null')(0)
        m = cls.regex.match(gvid)
        if not m:
            # NOTE(review): this raises even when exception=False -- confirm
            # whether a 'null' geoid was intended here as well.
            raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
        d = m.groupdict()
        if not d:
            return None
        if six.PY3:
            fn = cls.decode
        else:
            fn = cls.decode.__func__
        d = {k: fn(v) for k, v in d.items()}
        try:
            # The 'sl' capture group duplicates the class; drop it.
            del d['sl']
        except KeyError:
            pass
        return cls(**d)

    def convert(self, root_cls):
        """Convert to another derived class. root_cls is the base class for the derived type,
        ie AcsGeoid, TigerGeoid, etc. """
        d = self.__dict__
        d['sl'] = self.sl
        try:
            cls = root_cls.get_class(root_cls.sl)
        except (AttributeError, TypeError):
            # Hopefully because root_cls is a module
            cls = root_cls.get_class(self.sl)
        return cls(**d)

    def as_census(self):
        """Return this geoid converted to the census form."""
        from geoid.census import CensusGeoid
        return self.convert(CensusGeoid)

    def as_acs(self):
        """Return this geoid converted to the ACS form."""
        from geoid.acs import AcsGeoid
        return self.convert(AcsGeoid)

    def as_tiger(self):
        """Return this geoid converted to the TIGER form."""
        from geoid.tiger import TigerGeoid
        return self.convert(TigerGeoid)

    def promote(self, level=None):
        """Convert to the next higher summary level, or to *level* if given.

        Returns None when there is no higher level to promote to.
        """
        if level is None:
            if len(self.fields) < 2:
                # Single-field levels can only promote to the national level.
                if self.level in ('region', 'division', 'state', 'ua'):
                    cls = self.get_class('us')
                else:
                    return None
            else:
                # Drop the last field: promote to the second-to-last level.
                cls = self.get_class(self.fields[-2])
        else:
            cls = self.get_class(level)
        d = dict(self.__dict__.items())
        d['sl'] = self.sl
        return cls(**d)

    def summarize(self):
        """Convert all of the values to their max values. This form is used to represent the summary level"""
        raise NotImplementedError

    def allval(self):
        """Convert the last value to zero. This form represents the entire higher summary level at the granularity
        of the lower summary level. For example, for a county, it means 'All counties in the state' """
        d = dict(self.__dict__.items())
        d['sl'] = self.sl
        d[self.level] = 0
        cls = self.get_class(self.sl)
        return cls(**d)

    @classmethod
    def nullval(cls):
        """Create a new instance where all of the values are 0"""
        # NOTE(review): this copies the entire class __dict__ (methods
        # included) and zeroes every entry; __init__ ignores non-field keys,
        # so only the field zeros take effect -- confirm this is intentional.
        d = dict(cls.__dict__.items())
        for k in d:
            d[k] = 0
        d['sl'] = cls.sl
        d[cls.level] = 0
        return cls(**d)

    @property
    def tuples(self):
        """Return tuples of field, value, in the order of the levels as they are defined """
        return [(field, getattr(self, field, None)) for field in self.fields]

    @property
    def is_summary(self):
        """Return True if this geoid is a summary -- all of the fields are 0"""
        return str(self) == str(self.summarize())

    @property
    def is_allval(self):
        """Return True if this geoid is an allval -- the last field is zero, but the first is not"""
        tups = self.tuples
        return tups[-1][1] == 0 and tups[0][1] != 0

    @property
    def level_plural(self):
        """Return the name of the level as a plural"""
        return plurals.get(self.level, self.level + "s")
def generate_all(sumlevel, d):
    """Generate a dict that includes all of the available geoid values, with
    keys for the most common names for those values.

    :param sumlevel: Summary level, as an int or anything ``int()`` accepts.
    :param d: Mapping of geoid field names to values; not mutated.
    :return: dict with 'gvid', 'geoid' and 'geoidt' string values, or {} when
        no GVid class exists for the summary level.
    """
    from geoid.civick import GVid
    from geoid.tiger import TigerGeoid
    from geoid.acs import AcsGeoid

    sumlevel = int(sumlevel)

    # Work on a copy so the caller's mapping is untouched.
    d = dict(d)

    # Map common name variants onto the canonical field names.
    renames = (('cousub', 'cosub'), ('blkgrp', 'blockgroup'), ('zcta5', 'zcta'))
    for old_key, new_key in renames:
        if old_key in d:
            d[new_key] = d.pop(old_key)

    gvid_class = GVid.resolve_summary_level(sumlevel)
    if not gvid_class:
        return {}

    geoidt_class = TigerGeoid.resolve_summary_level(sumlevel)
    geoid_class = AcsGeoid.resolve_summary_level(sumlevel)

    # The original wrapped this in a no-op ``try: ... except: raise``; it has
    # been removed, so construction errors propagate unchanged.
    return dict(
        gvid=str(gvid_class(**d)),
        geoid=str(geoid_class(**d)),
        geoidt=str(geoidt_class(**d))
    )
|
Metatab/geoid
|
geoid/core.py
|
CountyName.division_name
|
python
|
def division_name(self):
"""The type designation for the county or county equivalent, such as 'County','Parish' or 'Borough'"""
try:
return next(e for e in self.type_names_re.search(self.name).groups() if e is not None)
except AttributeError:
# The search will fail for 'District of Columbia'
return ''
|
The type designation for the county or county equivalent, such as 'County','Parish' or 'Borough
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L489-L495
| null |
class CountyName(object):
"""A Census county name, with methods to create shorter versions and return the types of division,
which may be county, parish, borough, etc. """
# Strip the county and state name. THis doesn't work for some locations
# where the county is actually called a parish or a bario.
state_name_pattern = r', (.*)$'
state_name_re = re.compile(state_name_pattern)
def __init__(self, name):
self.name = name
def intuit_name(self, name):
"""Return a numeric value in the range [-1,1), indicating the likelyhood that the name is for a valuable of
of this type. -1 indicates a strong non-match, 1 indicates a strong match, and 0 indicates uncertainty. """
raise NotImplementedError
@property
def state(self):
try:
county, state = self.name.split(',')
return state
except ValueError:
# The search will fail for 'District of Columbia'
return ''
@property
def medium_name(self):
"""The census name without the state"""
return self.state_name_re.sub('', self.name)
type_names = (
'County', 'Municipio', 'Parish', 'Census Area', 'Borough',
'Municipality', 'city', 'City and Borough')
type_name_pattern = '|'.join('({})'.format(e) for e in type_names)
type_names_re = re.compile(type_name_pattern)
@property
county_name_pattern = r'(.+) {}, (.+)'.format(type_name_pattern)
county_name_re = re.compile(county_name_pattern)
@property
def short_name(self):
try:
county, state = self.name.split(',')
except ValueError:
return self.name # 'District of Colombia'
return self.type_names_re.sub('', county)
def __str__(self):
return self.name
|
Metatab/geoid
|
geoid/core.py
|
Geoid.augment
|
python
|
def augment(cls):
import re
level_name = cls.__name__.lower()
cls.sl = names[level_name]
cls.class_map[cls.__name__.lower()] = cls
cls.sl_map[cls.sl] = cls
cls.fmt = cls.make_format_string(cls.__name__.lower())
cls.regex_str = cls.make_regex(cls.__name__.lower())
cls.regex = re.compile(cls.regex_str)
# List of field names
cls.level = level_name
cls.fields = segments[cls.sl]
|
Augment the class with computed formats, regexes, and other things. This caches these values so
they don't have to be created for every instance.
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L562-L583
| null |
class Geoid(object):
@classmethod
def resolve_summary_level(cls, sl):
try:
return cls.sl_map[sl]
except KeyError:
return None
@classmethod
def make_format_string(cls, level):
sl_num = names[level]
segs = segments[sl_num]
formats = []
formats.append(cls.sl_format)
for seg in segs:
# Lengths dict may have strings to indicate string format usage.
if int(lengths[seg]) <= 0:
continue
if isinstance(lengths[seg], int):
fmt = cls.elem_format
else:
fmt = cls.elem_str_format
formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
return ''.join(formats)
@classmethod
def make_regex(cls, level):
sl_num = names[level]
segs = segments[sl_num]
# Lengths dict may have strings to indicate string format usage.
regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
for seg in segs if int(lengths[seg]) > 0]
re_str = '^' + ''.join(regexes) + '$'
return re_str
@classmethod
@classmethod
def get_class(cls, name_or_sl):
"""Return a derived class based on the class name or the summary_level"""
try:
return cls.sl_map[int(name_or_sl)]
except TypeError as e:
raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
except ValueError:
try:
return cls.class_map[name_or_sl.lower()]
except (KeyError, ValueError):
raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
def __init__(self, *args, **kwargs):
# This is a bit unusual, because it means, that , unlike normal
# python args, a kwarg can overwrite a position arg.
d = dict(zip(self.fields, args + ((0,) * 10))) # Add enough zeros to set all fields to zero
d.update(kwargs)
for k, v in d.items():
if k in self.fields:
try:
setattr(self, k, v)
except TypeError as e:
raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
except ValueError as e:
raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
def __str__(self):
d = self.__dict__
d['sl'] = self.sl
# Hacks for special string cases
if 'sldu' in d:
d['sldu'] = str(d['sldu']).zfill(3)
if 'sldl' in d:
d['sldl'] = str(d['sldl']).zfill(3)
try:
fn = six.get_method_function(self.encode)
kwargs = {k: fn(v) for k, v in d.items()}
return self.fmt.format(**kwargs)
except (ValueError, KeyError) as e:
raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))
@property
def state_name(self):
from geoid.censusnames import geo_names
return geo_names[(self.state, 0)]
@property
def stusab(self):
from geoid.censusnames import stusab
try:
return stusab[int(self.state)]
except (AttributeError, ValueError):
# Assume this is a Us object, or some other national object
return 'US'
@property
def county_name(self):
from geoid.censusnames import geo_names
try:
try:
return CountyName(geo_names[(self.state, self.county)])
except KeyError:
try:
return CountyName("County #{}, {}".format(self.county,geo_names[(self.state, 0)]))
except KeyError:
return CountyName("County #{}, State#{}".format(self.county, self.state))
except Exception:
return CountyName('')
@property
def geo_name(self):
"""
Return a name of the state or county, or, for other lowever levels, the
name of the level type in the county.
:return:
"""
if self.level == 'county':
return str(self.county_name)
elif self.level == 'state':
return self.state_name
else:
if hasattr(self, 'county'):
return "{} in {}".format(self.level,str(self.county_name))
elif hasattr(self, 'state'):
return "{} in {}".format(self.level, self.state_name)
else:
return "a {}".format(self.level)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
@classmethod
def parse(cls, gvid, exception=True):
"""
Parse a string value into the geoid of this class.
:param gvid: String value to parse.
:param exception: If true ( default) raise an eception on parse erorrs. If False, return a
'null' geoid.
:return:
"""
if gvid == 'invalid':
return cls.get_class('null')(0)
if not bool(gvid):
return None
if not isinstance(gvid, six.string_types):
raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
try:
if not cls.sl:
# Civick and ACS include the SL, so can call from base type.
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
sl = fn(gvid[0:cls.sl_width])
else:
sl = cls.sl # Otherwise must use derived class.
except ValueError as e:
if exception:
raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
else:
return cls.get_class('null')(0)
try:
cls = cls.sl_map[sl]
except KeyError:
if exception:
raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
else:
return cls.get_class('null')(0)
m = cls.regex.match(gvid)
if not m:
raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
d = m.groupdict()
if not d:
return None
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
d = {k: fn(v) for k, v in d.items()}
try:
del d['sl']
except KeyError:
pass
return cls(**d)
def convert(self, root_cls):
"""Convert to another derived class. cls is the base class for the derived type,
ie AcsGeoid, TigerGeoid, etc. """
d = self.__dict__
d['sl'] = self.sl
try:
cls = root_cls.get_class(root_cls.sl)
except (AttributeError, TypeError):
# Hopefully because root_cls is a module
cls = root_cls.get_class(self.sl)
return cls(**d)
def as_census(self):
from geoid.census import CensusGeoid
return self.convert(CensusGeoid)
def as_acs(self):
from geoid.acs import AcsGeoid
return self.convert(AcsGeoid)
def as_tiger(self):
from geoid.tiger import TigerGeoid
return self.convert(TigerGeoid)
def promote(self, level=None):
"""Convert to the next higher level summary level"""
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d)
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
raise NotImplementedError
def allval(self):
"""Convert the last value to zero. This form represents the entire higher summary level at the granularity
of the lower summary level. For example, for a county, it means 'All counties in the state' """
d = dict(self.__dict__.items())
d['sl'] = self.sl
d[self.level] = 0
cls = self.get_class(self.sl)
return cls(**d)
@classmethod
def nullval(cls):
"""Create a new instance where all of the values are 0"""
d = dict(cls.__dict__.items())
for k in d:
d[k] = 0
d['sl'] = cls.sl
d[cls.level] = 0
return cls(**d)
@property
def tuples(self):
"""Return tuples of field, value, in the order of the levels as they are defined """
return [(field, getattr(self, field, None)) for field in self.fields]
@property
def is_summary(self):
"""Return True if this geoid is an summary -- all of the fields are 0"""
return str(self) == str(self.summarize())
@property
def is_allval(self):
"""Return True if this geoid is an allval -- the last field is zero, but the first is not"""
tups = self.tuples
return tups[-1][1] == 0 and tups[0][1] != 0
@property
def level_plural(self):
"""Return the name of the level as a plural"""
return plurals.get(self.level, self.level + "s")
|
Metatab/geoid
|
geoid/core.py
|
Geoid.get_class
|
python
|
def get_class(cls, name_or_sl):
try:
return cls.sl_map[int(name_or_sl)]
except TypeError as e:
raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
except ValueError:
try:
return cls.class_map[name_or_sl.lower()]
except (KeyError, ValueError):
raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
|
Return a derived class based on the class name or the summary_level
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L587-L598
| null |
class Geoid(object):
@classmethod
def resolve_summary_level(cls, sl):
try:
return cls.sl_map[sl]
except KeyError:
return None
@classmethod
def make_format_string(cls, level):
sl_num = names[level]
segs = segments[sl_num]
formats = []
formats.append(cls.sl_format)
for seg in segs:
# Lengths dict may have strings to indicate string format usage.
if int(lengths[seg]) <= 0:
continue
if isinstance(lengths[seg], int):
fmt = cls.elem_format
else:
fmt = cls.elem_str_format
formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
return ''.join(formats)
@classmethod
def make_regex(cls, level):
sl_num = names[level]
segs = segments[sl_num]
# Lengths dict may have strings to indicate string format usage.
regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
for seg in segs if int(lengths[seg]) > 0]
re_str = '^' + ''.join(regexes) + '$'
return re_str
@classmethod
def augment(cls):
"""Augment the class with computed formats, regexes, and other things. This caches these values so
they don't have to be created for every instance. """
import re
level_name = cls.__name__.lower()
cls.sl = names[level_name]
cls.class_map[cls.__name__.lower()] = cls
cls.sl_map[cls.sl] = cls
cls.fmt = cls.make_format_string(cls.__name__.lower())
cls.regex_str = cls.make_regex(cls.__name__.lower())
cls.regex = re.compile(cls.regex_str)
# List of field names
cls.level = level_name
cls.fields = segments[cls.sl]
@classmethod
def __init__(self, *args, **kwargs):
# This is a bit unusual, because it means, that , unlike normal
# python args, a kwarg can overwrite a position arg.
d = dict(zip(self.fields, args + ((0,) * 10))) # Add enough zeros to set all fields to zero
d.update(kwargs)
for k, v in d.items():
if k in self.fields:
try:
setattr(self, k, v)
except TypeError as e:
raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
except ValueError as e:
raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
def __str__(self):
d = self.__dict__
d['sl'] = self.sl
# Hacks for special string cases
if 'sldu' in d:
d['sldu'] = str(d['sldu']).zfill(3)
if 'sldl' in d:
d['sldl'] = str(d['sldl']).zfill(3)
try:
fn = six.get_method_function(self.encode)
kwargs = {k: fn(v) for k, v in d.items()}
return self.fmt.format(**kwargs)
except (ValueError, KeyError) as e:
raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))
@property
def state_name(self):
from geoid.censusnames import geo_names
return geo_names[(self.state, 0)]
@property
def stusab(self):
from geoid.censusnames import stusab
try:
return stusab[int(self.state)]
except (AttributeError, ValueError):
# Assume this is a Us object, or some other national object
return 'US'
@property
def county_name(self):
from geoid.censusnames import geo_names
try:
try:
return CountyName(geo_names[(self.state, self.county)])
except KeyError:
try:
return CountyName("County #{}, {}".format(self.county,geo_names[(self.state, 0)]))
except KeyError:
return CountyName("County #{}, State#{}".format(self.county, self.state))
except Exception:
return CountyName('')
@property
def geo_name(self):
"""
Return a name of the state or county, or, for other lowever levels, the
name of the level type in the county.
:return:
"""
if self.level == 'county':
return str(self.county_name)
elif self.level == 'state':
return self.state_name
else:
if hasattr(self, 'county'):
return "{} in {}".format(self.level,str(self.county_name))
elif hasattr(self, 'state'):
return "{} in {}".format(self.level, self.state_name)
else:
return "a {}".format(self.level)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
@classmethod
def parse(cls, gvid, exception=True):
"""
Parse a string value into the geoid of this class.
:param gvid: String value to parse.
:param exception: If true ( default) raise an eception on parse erorrs. If False, return a
'null' geoid.
:return:
"""
if gvid == 'invalid':
return cls.get_class('null')(0)
if not bool(gvid):
return None
if not isinstance(gvid, six.string_types):
raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
try:
if not cls.sl:
# Civick and ACS include the SL, so can call from base type.
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
sl = fn(gvid[0:cls.sl_width])
else:
sl = cls.sl # Otherwise must use derived class.
except ValueError as e:
if exception:
raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
else:
return cls.get_class('null')(0)
try:
cls = cls.sl_map[sl]
except KeyError:
if exception:
raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
else:
return cls.get_class('null')(0)
m = cls.regex.match(gvid)
if not m:
raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
d = m.groupdict()
if not d:
return None
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
d = {k: fn(v) for k, v in d.items()}
try:
del d['sl']
except KeyError:
pass
return cls(**d)
def convert(self, root_cls):
"""Convert to another derived class. cls is the base class for the derived type,
ie AcsGeoid, TigerGeoid, etc. """
d = self.__dict__
d['sl'] = self.sl
try:
cls = root_cls.get_class(root_cls.sl)
except (AttributeError, TypeError):
# Hopefully because root_cls is a module
cls = root_cls.get_class(self.sl)
return cls(**d)
def as_census(self):
from geoid.census import CensusGeoid
return self.convert(CensusGeoid)
def as_acs(self):
from geoid.acs import AcsGeoid
return self.convert(AcsGeoid)
def as_tiger(self):
from geoid.tiger import TigerGeoid
return self.convert(TigerGeoid)
def promote(self, level=None):
"""Convert to the next higher level summary level"""
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d)
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
raise NotImplementedError
def allval(self):
"""Convert the last value to zero. This form represents the entire higher summary level at the granularity
of the lower summary level. For example, for a county, it means 'All counties in the state' """
d = dict(self.__dict__.items())
d['sl'] = self.sl
d[self.level] = 0
cls = self.get_class(self.sl)
return cls(**d)
@classmethod
def nullval(cls):
"""Create a new instance where all of the values are 0"""
d = dict(cls.__dict__.items())
for k in d:
d[k] = 0
d['sl'] = cls.sl
d[cls.level] = 0
return cls(**d)
@property
def tuples(self):
"""Return tuples of field, value, in the order of the levels as they are defined """
return [(field, getattr(self, field, None)) for field in self.fields]
@property
def is_summary(self):
"""Return True if this geoid is an summary -- all of the fields are 0"""
return str(self) == str(self.summarize())
@property
def is_allval(self):
"""Return True if this geoid is an allval -- the last field is zero, but the first is not"""
tups = self.tuples
return tups[-1][1] == 0 and tups[0][1] != 0
@property
def level_plural(self):
"""Return the name of the level as a plural"""
return plurals.get(self.level, self.level + "s")
|
Metatab/geoid
|
geoid/core.py
|
Geoid.geo_name
|
python
|
def geo_name(self):
if self.level == 'county':
return str(self.county_name)
elif self.level == 'state':
return self.state_name
else:
if hasattr(self, 'county'):
return "{} in {}".format(self.level,str(self.county_name))
elif hasattr(self, 'state'):
return "{} in {}".format(self.level, self.state_name)
else:
return "a {}".format(self.level)
|
Return a name of the state or county, or, for other lowever levels, the
name of the level type in the county.
:return:
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L668-L689
| null |
class Geoid(object):
@classmethod
def resolve_summary_level(cls, sl):
try:
return cls.sl_map[sl]
except KeyError:
return None
@classmethod
def make_format_string(cls, level):
sl_num = names[level]
segs = segments[sl_num]
formats = []
formats.append(cls.sl_format)
for seg in segs:
# Lengths dict may have strings to indicate string format usage.
if int(lengths[seg]) <= 0:
continue
if isinstance(lengths[seg], int):
fmt = cls.elem_format
else:
fmt = cls.elem_str_format
formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
return ''.join(formats)
@classmethod
def make_regex(cls, level):
sl_num = names[level]
segs = segments[sl_num]
# Lengths dict may have strings to indicate string format usage.
regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
for seg in segs if int(lengths[seg]) > 0]
re_str = '^' + ''.join(regexes) + '$'
return re_str
@classmethod
def augment(cls):
"""Augment the class with computed formats, regexes, and other things. This caches these values so
they don't have to be created for every instance. """
import re
level_name = cls.__name__.lower()
cls.sl = names[level_name]
cls.class_map[cls.__name__.lower()] = cls
cls.sl_map[cls.sl] = cls
cls.fmt = cls.make_format_string(cls.__name__.lower())
cls.regex_str = cls.make_regex(cls.__name__.lower())
cls.regex = re.compile(cls.regex_str)
# List of field names
cls.level = level_name
cls.fields = segments[cls.sl]
@classmethod
def get_class(cls, name_or_sl):
"""Return a derived class based on the class name or the summary_level"""
try:
return cls.sl_map[int(name_or_sl)]
except TypeError as e:
raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
except ValueError:
try:
return cls.class_map[name_or_sl.lower()]
except (KeyError, ValueError):
raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
def __init__(self, *args, **kwargs):
# This is a bit unusual, because it means, that , unlike normal
# python args, a kwarg can overwrite a position arg.
d = dict(zip(self.fields, args + ((0,) * 10))) # Add enough zeros to set all fields to zero
d.update(kwargs)
for k, v in d.items():
if k in self.fields:
try:
setattr(self, k, v)
except TypeError as e:
raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
except ValueError as e:
raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
def __str__(self):
d = self.__dict__
d['sl'] = self.sl
# Hacks for special string cases
if 'sldu' in d:
d['sldu'] = str(d['sldu']).zfill(3)
if 'sldl' in d:
d['sldl'] = str(d['sldl']).zfill(3)
try:
fn = six.get_method_function(self.encode)
kwargs = {k: fn(v) for k, v in d.items()}
return self.fmt.format(**kwargs)
except (ValueError, KeyError) as e:
raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))
@property
def state_name(self):
from geoid.censusnames import geo_names
return geo_names[(self.state, 0)]
@property
def stusab(self):
from geoid.censusnames import stusab
try:
return stusab[int(self.state)]
except (AttributeError, ValueError):
# Assume this is a Us object, or some other national object
return 'US'
@property
def county_name(self):
from geoid.censusnames import geo_names
try:
try:
return CountyName(geo_names[(self.state, self.county)])
except KeyError:
try:
return CountyName("County #{}, {}".format(self.county,geo_names[(self.state, 0)]))
except KeyError:
return CountyName("County #{}, State#{}".format(self.county, self.state))
except Exception:
return CountyName('')
@property
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
@classmethod
def parse(cls, gvid, exception=True):
"""
Parse a string value into the geoid of this class.
:param gvid: String value to parse.
:param exception: If true ( default) raise an eception on parse erorrs. If False, return a
'null' geoid.
:return:
"""
if gvid == 'invalid':
return cls.get_class('null')(0)
if not bool(gvid):
return None
if not isinstance(gvid, six.string_types):
raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
try:
if not cls.sl:
# Civick and ACS include the SL, so can call from base type.
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
sl = fn(gvid[0:cls.sl_width])
else:
sl = cls.sl # Otherwise must use derived class.
except ValueError as e:
if exception:
raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
else:
return cls.get_class('null')(0)
try:
cls = cls.sl_map[sl]
except KeyError:
if exception:
raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
else:
return cls.get_class('null')(0)
m = cls.regex.match(gvid)
if not m:
raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
d = m.groupdict()
if not d:
return None
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
d = {k: fn(v) for k, v in d.items()}
try:
del d['sl']
except KeyError:
pass
return cls(**d)
def convert(self, root_cls):
"""Convert to another derived class. cls is the base class for the derived type,
ie AcsGeoid, TigerGeoid, etc. """
d = self.__dict__
d['sl'] = self.sl
try:
cls = root_cls.get_class(root_cls.sl)
except (AttributeError, TypeError):
# Hopefully because root_cls is a module
cls = root_cls.get_class(self.sl)
return cls(**d)
def as_census(self):
from geoid.census import CensusGeoid
return self.convert(CensusGeoid)
def as_acs(self):
from geoid.acs import AcsGeoid
return self.convert(AcsGeoid)
def as_tiger(self):
from geoid.tiger import TigerGeoid
return self.convert(TigerGeoid)
def promote(self, level=None):
"""Convert to the next higher level summary level"""
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d)
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
raise NotImplementedError
def allval(self):
"""Convert the last value to zero. This form represents the entire higher summary level at the granularity
of the lower summary level. For example, for a county, it means 'All counties in the state' """
d = dict(self.__dict__.items())
d['sl'] = self.sl
d[self.level] = 0
cls = self.get_class(self.sl)
return cls(**d)
@classmethod
def nullval(cls):
"""Create a new instance where all of the values are 0"""
d = dict(cls.__dict__.items())
for k in d:
d[k] = 0
d['sl'] = cls.sl
d[cls.level] = 0
return cls(**d)
@property
def tuples(self):
"""Return tuples of field, value, in the order of the levels as they are defined """
return [(field, getattr(self, field, None)) for field in self.fields]
@property
def is_summary(self):
"""Return True if this geoid is an summary -- all of the fields are 0"""
return str(self) == str(self.summarize())
@property
def is_allval(self):
"""Return True if this geoid is an allval -- the last field is zero, but the first is not"""
tups = self.tuples
return tups[-1][1] == 0 and tups[0][1] != 0
@property
def level_plural(self):
"""Return the name of the level as a plural"""
return plurals.get(self.level, self.level + "s")
|
Metatab/geoid
|
geoid/core.py
|
Geoid.parse
|
python
|
def parse(cls, gvid, exception=True):
if gvid == 'invalid':
return cls.get_class('null')(0)
if not bool(gvid):
return None
if not isinstance(gvid, six.string_types):
raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
try:
if not cls.sl:
# Civick and ACS include the SL, so can call from base type.
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
sl = fn(gvid[0:cls.sl_width])
else:
sl = cls.sl # Otherwise must use derived class.
except ValueError as e:
if exception:
raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
else:
return cls.get_class('null')(0)
try:
cls = cls.sl_map[sl]
except KeyError:
if exception:
raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
else:
return cls.get_class('null')(0)
m = cls.regex.match(gvid)
if not m:
raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
d = m.groupdict()
if not d:
return None
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
d = {k: fn(v) for k, v in d.items()}
try:
del d['sl']
except KeyError:
pass
return cls(**d)
|
Parse a string value into the geoid of this class.
:param gvid: String value to parse.
:param exception: If true ( default) raise an eception on parse erorrs. If False, return a
'null' geoid.
:return:
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L714-L781
|
[
"def get_class(cls, name_or_sl):\n \"\"\"Return a derived class based on the class name or the summary_level\"\"\"\n try:\n return cls.sl_map[int(name_or_sl)]\n\n except TypeError as e:\n raise TypeError(\"Bad name or sl: {} : {}\".format(name_or_sl, e))\n except ValueError:\n try:\n return cls.class_map[name_or_sl.lower()]\n except (KeyError, ValueError):\n raise NotASummaryName(\"Value '{}' is not a valid summary level\".format(name_or_sl))\n"
] |
class Geoid(object):
@classmethod
def resolve_summary_level(cls, sl):
try:
return cls.sl_map[sl]
except KeyError:
return None
@classmethod
def make_format_string(cls, level):
sl_num = names[level]
segs = segments[sl_num]
formats = []
formats.append(cls.sl_format)
for seg in segs:
# Lengths dict may have strings to indicate string format usage.
if int(lengths[seg]) <= 0:
continue
if isinstance(lengths[seg], int):
fmt = cls.elem_format
else:
fmt = cls.elem_str_format
formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
return ''.join(formats)
@classmethod
def make_regex(cls, level):
sl_num = names[level]
segs = segments[sl_num]
# Lengths dict may have strings to indicate string format usage.
regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
for seg in segs if int(lengths[seg]) > 0]
re_str = '^' + ''.join(regexes) + '$'
return re_str
@classmethod
def augment(cls):
"""Augment the class with computed formats, regexes, and other things. This caches these values so
they don't have to be created for every instance. """
import re
level_name = cls.__name__.lower()
cls.sl = names[level_name]
cls.class_map[cls.__name__.lower()] = cls
cls.sl_map[cls.sl] = cls
cls.fmt = cls.make_format_string(cls.__name__.lower())
cls.regex_str = cls.make_regex(cls.__name__.lower())
cls.regex = re.compile(cls.regex_str)
# List of field names
cls.level = level_name
cls.fields = segments[cls.sl]
@classmethod
def get_class(cls, name_or_sl):
"""Return a derived class based on the class name or the summary_level"""
try:
return cls.sl_map[int(name_or_sl)]
except TypeError as e:
raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
except ValueError:
try:
return cls.class_map[name_or_sl.lower()]
except (KeyError, ValueError):
raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
def __init__(self, *args, **kwargs):
# This is a bit unusual, because it means, that , unlike normal
# python args, a kwarg can overwrite a position arg.
d = dict(zip(self.fields, args + ((0,) * 10))) # Add enough zeros to set all fields to zero
d.update(kwargs)
for k, v in d.items():
if k in self.fields:
try:
setattr(self, k, v)
except TypeError as e:
raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
except ValueError as e:
raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
def __str__(self):
d = self.__dict__
d['sl'] = self.sl
# Hacks for special string cases
if 'sldu' in d:
d['sldu'] = str(d['sldu']).zfill(3)
if 'sldl' in d:
d['sldl'] = str(d['sldl']).zfill(3)
try:
fn = six.get_method_function(self.encode)
kwargs = {k: fn(v) for k, v in d.items()}
return self.fmt.format(**kwargs)
except (ValueError, KeyError) as e:
raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))
@property
def state_name(self):
from geoid.censusnames import geo_names
return geo_names[(self.state, 0)]
@property
def stusab(self):
from geoid.censusnames import stusab
try:
return stusab[int(self.state)]
except (AttributeError, ValueError):
# Assume this is a Us object, or some other national object
return 'US'
@property
def county_name(self):
from geoid.censusnames import geo_names
try:
try:
return CountyName(geo_names[(self.state, self.county)])
except KeyError:
try:
return CountyName("County #{}, {}".format(self.county,geo_names[(self.state, 0)]))
except KeyError:
return CountyName("County #{}, State#{}".format(self.county, self.state))
except Exception:
return CountyName('')
@property
def geo_name(self):
"""
Return a name of the state or county, or, for other lowever levels, the
name of the level type in the county.
:return:
"""
if self.level == 'county':
return str(self.county_name)
elif self.level == 'state':
return self.state_name
else:
if hasattr(self, 'county'):
return "{} in {}".format(self.level,str(self.county_name))
elif hasattr(self, 'state'):
return "{} in {}".format(self.level, self.state_name)
else:
return "a {}".format(self.level)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
@classmethod
def convert(self, root_cls):
"""Convert to another derived class. cls is the base class for the derived type,
ie AcsGeoid, TigerGeoid, etc. """
d = self.__dict__
d['sl'] = self.sl
try:
cls = root_cls.get_class(root_cls.sl)
except (AttributeError, TypeError):
# Hopefully because root_cls is a module
cls = root_cls.get_class(self.sl)
return cls(**d)
def as_census(self):
from geoid.census import CensusGeoid
return self.convert(CensusGeoid)
def as_acs(self):
from geoid.acs import AcsGeoid
return self.convert(AcsGeoid)
def as_tiger(self):
from geoid.tiger import TigerGeoid
return self.convert(TigerGeoid)
def promote(self, level=None):
"""Convert to the next higher level summary level"""
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d)
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
raise NotImplementedError
def allval(self):
"""Convert the last value to zero. This form represents the entire higher summary level at the granularity
of the lower summary level. For example, for a county, it means 'All counties in the state' """
d = dict(self.__dict__.items())
d['sl'] = self.sl
d[self.level] = 0
cls = self.get_class(self.sl)
return cls(**d)
@classmethod
def nullval(cls):
"""Create a new instance where all of the values are 0"""
d = dict(cls.__dict__.items())
for k in d:
d[k] = 0
d['sl'] = cls.sl
d[cls.level] = 0
return cls(**d)
@property
def tuples(self):
"""Return tuples of field, value, in the order of the levels as they are defined """
return [(field, getattr(self, field, None)) for field in self.fields]
@property
def is_summary(self):
"""Return True if this geoid is an summary -- all of the fields are 0"""
return str(self) == str(self.summarize())
@property
def is_allval(self):
"""Return True if this geoid is an allval -- the last field is zero, but the first is not"""
tups = self.tuples
return tups[-1][1] == 0 and tups[0][1] != 0
@property
def level_plural(self):
"""Return the name of the level as a plural"""
return plurals.get(self.level, self.level + "s")
|
Metatab/geoid
|
geoid/core.py
|
Geoid.convert
|
python
|
def convert(self, root_cls):
d = self.__dict__
d['sl'] = self.sl
try:
cls = root_cls.get_class(root_cls.sl)
except (AttributeError, TypeError):
# Hopefully because root_cls is a module
cls = root_cls.get_class(self.sl)
return cls(**d)
|
Convert to another derived class. cls is the base class for the derived type,
ie AcsGeoid, TigerGeoid, etc.
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L783-L796
|
[
"def get_class(cls, name_or_sl):\n \"\"\"Return a derived class based on the class name or the summary_level\"\"\"\n try:\n return cls.sl_map[int(name_or_sl)]\n\n except TypeError as e:\n raise TypeError(\"Bad name or sl: {} : {}\".format(name_or_sl, e))\n except ValueError:\n try:\n return cls.class_map[name_or_sl.lower()]\n except (KeyError, ValueError):\n raise NotASummaryName(\"Value '{}' is not a valid summary level\".format(name_or_sl))\n"
] |
class Geoid(object):
@classmethod
def resolve_summary_level(cls, sl):
try:
return cls.sl_map[sl]
except KeyError:
return None
@classmethod
def make_format_string(cls, level):
sl_num = names[level]
segs = segments[sl_num]
formats = []
formats.append(cls.sl_format)
for seg in segs:
# Lengths dict may have strings to indicate string format usage.
if int(lengths[seg]) <= 0:
continue
if isinstance(lengths[seg], int):
fmt = cls.elem_format
else:
fmt = cls.elem_str_format
formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
return ''.join(formats)
@classmethod
def make_regex(cls, level):
sl_num = names[level]
segs = segments[sl_num]
# Lengths dict may have strings to indicate string format usage.
regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
for seg in segs if int(lengths[seg]) > 0]
re_str = '^' + ''.join(regexes) + '$'
return re_str
@classmethod
def augment(cls):
"""Augment the class with computed formats, regexes, and other things. This caches these values so
they don't have to be created for every instance. """
import re
level_name = cls.__name__.lower()
cls.sl = names[level_name]
cls.class_map[cls.__name__.lower()] = cls
cls.sl_map[cls.sl] = cls
cls.fmt = cls.make_format_string(cls.__name__.lower())
cls.regex_str = cls.make_regex(cls.__name__.lower())
cls.regex = re.compile(cls.regex_str)
# List of field names
cls.level = level_name
cls.fields = segments[cls.sl]
@classmethod
def get_class(cls, name_or_sl):
"""Return a derived class based on the class name or the summary_level"""
try:
return cls.sl_map[int(name_or_sl)]
except TypeError as e:
raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
except ValueError:
try:
return cls.class_map[name_or_sl.lower()]
except (KeyError, ValueError):
raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
def __init__(self, *args, **kwargs):
# This is a bit unusual, because it means, that , unlike normal
# python args, a kwarg can overwrite a position arg.
d = dict(zip(self.fields, args + ((0,) * 10))) # Add enough zeros to set all fields to zero
d.update(kwargs)
for k, v in d.items():
if k in self.fields:
try:
setattr(self, k, v)
except TypeError as e:
raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
except ValueError as e:
raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
def __str__(self):
d = self.__dict__
d['sl'] = self.sl
# Hacks for special string cases
if 'sldu' in d:
d['sldu'] = str(d['sldu']).zfill(3)
if 'sldl' in d:
d['sldl'] = str(d['sldl']).zfill(3)
try:
fn = six.get_method_function(self.encode)
kwargs = {k: fn(v) for k, v in d.items()}
return self.fmt.format(**kwargs)
except (ValueError, KeyError) as e:
raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))
@property
def state_name(self):
from geoid.censusnames import geo_names
return geo_names[(self.state, 0)]
@property
def stusab(self):
from geoid.censusnames import stusab
try:
return stusab[int(self.state)]
except (AttributeError, ValueError):
# Assume this is a Us object, or some other national object
return 'US'
@property
def county_name(self):
from geoid.censusnames import geo_names
try:
try:
return CountyName(geo_names[(self.state, self.county)])
except KeyError:
try:
return CountyName("County #{}, {}".format(self.county,geo_names[(self.state, 0)]))
except KeyError:
return CountyName("County #{}, State#{}".format(self.county, self.state))
except Exception:
return CountyName('')
@property
def geo_name(self):
"""
Return a name of the state or county, or, for other lowever levels, the
name of the level type in the county.
:return:
"""
if self.level == 'county':
return str(self.county_name)
elif self.level == 'state':
return self.state_name
else:
if hasattr(self, 'county'):
return "{} in {}".format(self.level,str(self.county_name))
elif hasattr(self, 'state'):
return "{} in {}".format(self.level, self.state_name)
else:
return "a {}".format(self.level)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
@classmethod
def parse(cls, gvid, exception=True):
"""
Parse a string value into the geoid of this class.
:param gvid: String value to parse.
:param exception: If true ( default) raise an eception on parse erorrs. If False, return a
'null' geoid.
:return:
"""
if gvid == 'invalid':
return cls.get_class('null')(0)
if not bool(gvid):
return None
if not isinstance(gvid, six.string_types):
raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
try:
if not cls.sl:
# Civick and ACS include the SL, so can call from base type.
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
sl = fn(gvid[0:cls.sl_width])
else:
sl = cls.sl # Otherwise must use derived class.
except ValueError as e:
if exception:
raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
else:
return cls.get_class('null')(0)
try:
cls = cls.sl_map[sl]
except KeyError:
if exception:
raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
else:
return cls.get_class('null')(0)
m = cls.regex.match(gvid)
if not m:
raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
d = m.groupdict()
if not d:
return None
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
d = {k: fn(v) for k, v in d.items()}
try:
del d['sl']
except KeyError:
pass
return cls(**d)
def as_census(self):
from geoid.census import CensusGeoid
return self.convert(CensusGeoid)
def as_acs(self):
from geoid.acs import AcsGeoid
return self.convert(AcsGeoid)
def as_tiger(self):
from geoid.tiger import TigerGeoid
return self.convert(TigerGeoid)
def promote(self, level=None):
"""Convert to the next higher level summary level"""
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d)
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
raise NotImplementedError
def allval(self):
"""Convert the last value to zero. This form represents the entire higher summary level at the granularity
of the lower summary level. For example, for a county, it means 'All counties in the state' """
d = dict(self.__dict__.items())
d['sl'] = self.sl
d[self.level] = 0
cls = self.get_class(self.sl)
return cls(**d)
@classmethod
def nullval(cls):
"""Create a new instance where all of the values are 0"""
d = dict(cls.__dict__.items())
for k in d:
d[k] = 0
d['sl'] = cls.sl
d[cls.level] = 0
return cls(**d)
@property
def tuples(self):
"""Return tuples of field, value, in the order of the levels as they are defined """
return [(field, getattr(self, field, None)) for field in self.fields]
@property
def is_summary(self):
"""Return True if this geoid is an summary -- all of the fields are 0"""
return str(self) == str(self.summarize())
@property
def is_allval(self):
"""Return True if this geoid is an allval -- the last field is zero, but the first is not"""
tups = self.tuples
return tups[-1][1] == 0 and tups[0][1] != 0
@property
def level_plural(self):
"""Return the name of the level as a plural"""
return plurals.get(self.level, self.level + "s")
|
Metatab/geoid
|
geoid/core.py
|
Geoid.promote
|
python
|
def promote(self, level=None):
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d)
|
Convert to the next higher level summary level
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L810-L828
|
[
"def get_class(cls, name_or_sl):\n \"\"\"Return a derived class based on the class name or the summary_level\"\"\"\n try:\n return cls.sl_map[int(name_or_sl)]\n\n except TypeError as e:\n raise TypeError(\"Bad name or sl: {} : {}\".format(name_or_sl, e))\n except ValueError:\n try:\n return cls.class_map[name_or_sl.lower()]\n except (KeyError, ValueError):\n raise NotASummaryName(\"Value '{}' is not a valid summary level\".format(name_or_sl))\n"
] |
class Geoid(object):
@classmethod
def resolve_summary_level(cls, sl):
try:
return cls.sl_map[sl]
except KeyError:
return None
@classmethod
def make_format_string(cls, level):
sl_num = names[level]
segs = segments[sl_num]
formats = []
formats.append(cls.sl_format)
for seg in segs:
# Lengths dict may have strings to indicate string format usage.
if int(lengths[seg]) <= 0:
continue
if isinstance(lengths[seg], int):
fmt = cls.elem_format
else:
fmt = cls.elem_str_format
formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
return ''.join(formats)
@classmethod
def make_regex(cls, level):
sl_num = names[level]
segs = segments[sl_num]
# Lengths dict may have strings to indicate string format usage.
regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
for seg in segs if int(lengths[seg]) > 0]
re_str = '^' + ''.join(regexes) + '$'
return re_str
@classmethod
def augment(cls):
"""Augment the class with computed formats, regexes, and other things. This caches these values so
they don't have to be created for every instance. """
import re
level_name = cls.__name__.lower()
cls.sl = names[level_name]
cls.class_map[cls.__name__.lower()] = cls
cls.sl_map[cls.sl] = cls
cls.fmt = cls.make_format_string(cls.__name__.lower())
cls.regex_str = cls.make_regex(cls.__name__.lower())
cls.regex = re.compile(cls.regex_str)
# List of field names
cls.level = level_name
cls.fields = segments[cls.sl]
@classmethod
def get_class(cls, name_or_sl):
"""Return a derived class based on the class name or the summary_level"""
try:
return cls.sl_map[int(name_or_sl)]
except TypeError as e:
raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
except ValueError:
try:
return cls.class_map[name_or_sl.lower()]
except (KeyError, ValueError):
raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
def __init__(self, *args, **kwargs):
# This is a bit unusual, because it means, that , unlike normal
# python args, a kwarg can overwrite a position arg.
d = dict(zip(self.fields, args + ((0,) * 10))) # Add enough zeros to set all fields to zero
d.update(kwargs)
for k, v in d.items():
if k in self.fields:
try:
setattr(self, k, v)
except TypeError as e:
raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
except ValueError as e:
raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
def __str__(self):
d = self.__dict__
d['sl'] = self.sl
# Hacks for special string cases
if 'sldu' in d:
d['sldu'] = str(d['sldu']).zfill(3)
if 'sldl' in d:
d['sldl'] = str(d['sldl']).zfill(3)
try:
fn = six.get_method_function(self.encode)
kwargs = {k: fn(v) for k, v in d.items()}
return self.fmt.format(**kwargs)
except (ValueError, KeyError) as e:
raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))
@property
def state_name(self):
from geoid.censusnames import geo_names
return geo_names[(self.state, 0)]
@property
def stusab(self):
from geoid.censusnames import stusab
try:
return stusab[int(self.state)]
except (AttributeError, ValueError):
# Assume this is a Us object, or some other national object
return 'US'
@property
def county_name(self):
from geoid.censusnames import geo_names
try:
try:
return CountyName(geo_names[(self.state, self.county)])
except KeyError:
try:
return CountyName("County #{}, {}".format(self.county,geo_names[(self.state, 0)]))
except KeyError:
return CountyName("County #{}, State#{}".format(self.county, self.state))
except Exception:
return CountyName('')
@property
def geo_name(self):
"""
Return a name of the state or county, or, for other lowever levels, the
name of the level type in the county.
:return:
"""
if self.level == 'county':
return str(self.county_name)
elif self.level == 'state':
return self.state_name
else:
if hasattr(self, 'county'):
return "{} in {}".format(self.level,str(self.county_name))
elif hasattr(self, 'state'):
return "{} in {}".format(self.level, self.state_name)
else:
return "a {}".format(self.level)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
@classmethod
def parse(cls, gvid, exception=True):
"""
Parse a string value into the geoid of this class.
:param gvid: String value to parse.
:param exception: If true ( default) raise an eception on parse erorrs. If False, return a
'null' geoid.
:return:
"""
if gvid == 'invalid':
return cls.get_class('null')(0)
if not bool(gvid):
return None
if not isinstance(gvid, six.string_types):
raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
try:
if not cls.sl:
# Civick and ACS include the SL, so can call from base type.
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
sl = fn(gvid[0:cls.sl_width])
else:
sl = cls.sl # Otherwise must use derived class.
except ValueError as e:
if exception:
raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
else:
return cls.get_class('null')(0)
try:
cls = cls.sl_map[sl]
except KeyError:
if exception:
raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
else:
return cls.get_class('null')(0)
m = cls.regex.match(gvid)
if not m:
raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
d = m.groupdict()
if not d:
return None
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
d = {k: fn(v) for k, v in d.items()}
try:
del d['sl']
except KeyError:
pass
return cls(**d)
def convert(self, root_cls):
"""Convert to another derived class. cls is the base class for the derived type,
ie AcsGeoid, TigerGeoid, etc. """
d = self.__dict__
d['sl'] = self.sl
try:
cls = root_cls.get_class(root_cls.sl)
except (AttributeError, TypeError):
# Hopefully because root_cls is a module
cls = root_cls.get_class(self.sl)
return cls(**d)
def as_census(self):
from geoid.census import CensusGeoid
return self.convert(CensusGeoid)
def as_acs(self):
from geoid.acs import AcsGeoid
return self.convert(AcsGeoid)
def as_tiger(self):
from geoid.tiger import TigerGeoid
return self.convert(TigerGeoid)
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
raise NotImplementedError
def allval(self):
"""Convert the last value to zero. This form represents the entire higher summary level at the granularity
of the lower summary level. For example, for a county, it means 'All counties in the state' """
d = dict(self.__dict__.items())
d['sl'] = self.sl
d[self.level] = 0
cls = self.get_class(self.sl)
return cls(**d)
@classmethod
def nullval(cls):
"""Create a new instance where all of the values are 0"""
d = dict(cls.__dict__.items())
for k in d:
d[k] = 0
d['sl'] = cls.sl
d[cls.level] = 0
return cls(**d)
@property
def tuples(self):
"""Return tuples of field, value, in the order of the levels as they are defined """
return [(field, getattr(self, field, None)) for field in self.fields]
@property
def is_summary(self):
"""Return True if this geoid is an summary -- all of the fields are 0"""
return str(self) == str(self.summarize())
@property
def is_allval(self):
"""Return True if this geoid is an allval -- the last field is zero, but the first is not"""
tups = self.tuples
return tups[-1][1] == 0 and tups[0][1] != 0
@property
def level_plural(self):
"""Return the name of the level as a plural"""
return plurals.get(self.level, self.level + "s")
|
Metatab/geoid
|
geoid/core.py
|
Geoid.allval
|
python
|
def allval(self):
d = dict(self.__dict__.items())
d['sl'] = self.sl
d[self.level] = 0
cls = self.get_class(self.sl)
return cls(**d)
|
Convert the last value to zero. This form represents the entire higher summary level at the granularity
of the lower summary level. For example, for a county, it means 'All counties in the state'
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L835-L846
|
[
"def get_class(cls, name_or_sl):\n \"\"\"Return a derived class based on the class name or the summary_level\"\"\"\n try:\n return cls.sl_map[int(name_or_sl)]\n\n except TypeError as e:\n raise TypeError(\"Bad name or sl: {} : {}\".format(name_or_sl, e))\n except ValueError:\n try:\n return cls.class_map[name_or_sl.lower()]\n except (KeyError, ValueError):\n raise NotASummaryName(\"Value '{}' is not a valid summary level\".format(name_or_sl))\n"
] |
class Geoid(object):
@classmethod
def resolve_summary_level(cls, sl):
try:
return cls.sl_map[sl]
except KeyError:
return None
@classmethod
def make_format_string(cls, level):
sl_num = names[level]
segs = segments[sl_num]
formats = []
formats.append(cls.sl_format)
for seg in segs:
# Lengths dict may have strings to indicate string format usage.
if int(lengths[seg]) <= 0:
continue
if isinstance(lengths[seg], int):
fmt = cls.elem_format
else:
fmt = cls.elem_str_format
formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
return ''.join(formats)
@classmethod
def make_regex(cls, level):
sl_num = names[level]
segs = segments[sl_num]
# Lengths dict may have strings to indicate string format usage.
regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
for seg in segs if int(lengths[seg]) > 0]
re_str = '^' + ''.join(regexes) + '$'
return re_str
@classmethod
def augment(cls):
"""Augment the class with computed formats, regexes, and other things. This caches these values so
they don't have to be created for every instance. """
import re
level_name = cls.__name__.lower()
cls.sl = names[level_name]
cls.class_map[cls.__name__.lower()] = cls
cls.sl_map[cls.sl] = cls
cls.fmt = cls.make_format_string(cls.__name__.lower())
cls.regex_str = cls.make_regex(cls.__name__.lower())
cls.regex = re.compile(cls.regex_str)
# List of field names
cls.level = level_name
cls.fields = segments[cls.sl]
@classmethod
def get_class(cls, name_or_sl):
"""Return a derived class based on the class name or the summary_level"""
try:
return cls.sl_map[int(name_or_sl)]
except TypeError as e:
raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
except ValueError:
try:
return cls.class_map[name_or_sl.lower()]
except (KeyError, ValueError):
raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
def __init__(self, *args, **kwargs):
# This is a bit unusual, because it means, that , unlike normal
# python args, a kwarg can overwrite a position arg.
d = dict(zip(self.fields, args + ((0,) * 10))) # Add enough zeros to set all fields to zero
d.update(kwargs)
for k, v in d.items():
if k in self.fields:
try:
setattr(self, k, v)
except TypeError as e:
raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
except ValueError as e:
raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
def __str__(self):
d = self.__dict__
d['sl'] = self.sl
# Hacks for special string cases
if 'sldu' in d:
d['sldu'] = str(d['sldu']).zfill(3)
if 'sldl' in d:
d['sldl'] = str(d['sldl']).zfill(3)
try:
fn = six.get_method_function(self.encode)
kwargs = {k: fn(v) for k, v in d.items()}
return self.fmt.format(**kwargs)
except (ValueError, KeyError) as e:
raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))
@property
def state_name(self):
from geoid.censusnames import geo_names
return geo_names[(self.state, 0)]
@property
def stusab(self):
from geoid.censusnames import stusab
try:
return stusab[int(self.state)]
except (AttributeError, ValueError):
# Assume this is a Us object, or some other national object
return 'US'
@property
def county_name(self):
from geoid.censusnames import geo_names
try:
try:
return CountyName(geo_names[(self.state, self.county)])
except KeyError:
try:
return CountyName("County #{}, {}".format(self.county,geo_names[(self.state, 0)]))
except KeyError:
return CountyName("County #{}, State#{}".format(self.county, self.state))
except Exception:
return CountyName('')
@property
def geo_name(self):
"""
Return a name of the state or county, or, for other lowever levels, the
name of the level type in the county.
:return:
"""
if self.level == 'county':
return str(self.county_name)
elif self.level == 'state':
return self.state_name
else:
if hasattr(self, 'county'):
return "{} in {}".format(self.level,str(self.county_name))
elif hasattr(self, 'state'):
return "{} in {}".format(self.level, self.state_name)
else:
return "a {}".format(self.level)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
@classmethod
def parse(cls, gvid, exception=True):
"""
Parse a string value into the geoid of this class.
:param gvid: String value to parse.
:param exception: If true ( default) raise an eception on parse erorrs. If False, return a
'null' geoid.
:return:
"""
if gvid == 'invalid':
return cls.get_class('null')(0)
if not bool(gvid):
return None
if not isinstance(gvid, six.string_types):
raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
try:
if not cls.sl:
# Civick and ACS include the SL, so can call from base type.
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
sl = fn(gvid[0:cls.sl_width])
else:
sl = cls.sl # Otherwise must use derived class.
except ValueError as e:
if exception:
raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
else:
return cls.get_class('null')(0)
try:
cls = cls.sl_map[sl]
except KeyError:
if exception:
raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
else:
return cls.get_class('null')(0)
m = cls.regex.match(gvid)
if not m:
raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
d = m.groupdict()
if not d:
return None
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
d = {k: fn(v) for k, v in d.items()}
try:
del d['sl']
except KeyError:
pass
return cls(**d)
def convert(self, root_cls):
"""Convert to another derived class. cls is the base class for the derived type,
ie AcsGeoid, TigerGeoid, etc. """
d = self.__dict__
d['sl'] = self.sl
try:
cls = root_cls.get_class(root_cls.sl)
except (AttributeError, TypeError):
# Hopefully because root_cls is a module
cls = root_cls.get_class(self.sl)
return cls(**d)
def as_census(self):
from geoid.census import CensusGeoid
return self.convert(CensusGeoid)
def as_acs(self):
from geoid.acs import AcsGeoid
return self.convert(AcsGeoid)
def as_tiger(self):
from geoid.tiger import TigerGeoid
return self.convert(TigerGeoid)
def promote(self, level=None):
"""Convert to the next higher level summary level"""
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d)
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
raise NotImplementedError
@classmethod
def nullval(cls):
"""Create a new instance where all of the values are 0"""
d = dict(cls.__dict__.items())
for k in d:
d[k] = 0
d['sl'] = cls.sl
d[cls.level] = 0
return cls(**d)
@property
def tuples(self):
"""Return tuples of field, value, in the order of the levels as they are defined """
return [(field, getattr(self, field, None)) for field in self.fields]
@property
def is_summary(self):
"""Return True if this geoid is an summary -- all of the fields are 0"""
return str(self) == str(self.summarize())
@property
def is_allval(self):
"""Return True if this geoid is an allval -- the last field is zero, but the first is not"""
tups = self.tuples
return tups[-1][1] == 0 and tups[0][1] != 0
@property
def level_plural(self):
"""Return the name of the level as a plural"""
return plurals.get(self.level, self.level + "s")
|
Metatab/geoid
|
geoid/core.py
|
Geoid.nullval
|
python
|
def nullval(cls):
d = dict(cls.__dict__.items())
for k in d:
d[k] = 0
d['sl'] = cls.sl
d[cls.level] = 0
return cls(**d)
|
Create a new instance where all of the values are 0
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L849-L860
| null |
class Geoid(object):
@classmethod
def resolve_summary_level(cls, sl):
try:
return cls.sl_map[sl]
except KeyError:
return None
@classmethod
def make_format_string(cls, level):
sl_num = names[level]
segs = segments[sl_num]
formats = []
formats.append(cls.sl_format)
for seg in segs:
# Lengths dict may have strings to indicate string format usage.
if int(lengths[seg]) <= 0:
continue
if isinstance(lengths[seg], int):
fmt = cls.elem_format
else:
fmt = cls.elem_str_format
formats.append(fmt.format(seg, cls.part_width(lengths[seg])))
return ''.join(formats)
@classmethod
def make_regex(cls, level):
sl_num = names[level]
segs = segments[sl_num]
# Lengths dict may have strings to indicate string format usage.
regexes = [cls.sl_regex] + [cls.elem_regex.format(seg, cls.part_width(lengths[seg]))
for seg in segs if int(lengths[seg]) > 0]
re_str = '^' + ''.join(regexes) + '$'
return re_str
@classmethod
def augment(cls):
"""Augment the class with computed formats, regexes, and other things. This caches these values so
they don't have to be created for every instance. """
import re
level_name = cls.__name__.lower()
cls.sl = names[level_name]
cls.class_map[cls.__name__.lower()] = cls
cls.sl_map[cls.sl] = cls
cls.fmt = cls.make_format_string(cls.__name__.lower())
cls.regex_str = cls.make_regex(cls.__name__.lower())
cls.regex = re.compile(cls.regex_str)
# List of field names
cls.level = level_name
cls.fields = segments[cls.sl]
@classmethod
def get_class(cls, name_or_sl):
"""Return a derived class based on the class name or the summary_level"""
try:
return cls.sl_map[int(name_or_sl)]
except TypeError as e:
raise TypeError("Bad name or sl: {} : {}".format(name_or_sl, e))
except ValueError:
try:
return cls.class_map[name_or_sl.lower()]
except (KeyError, ValueError):
raise NotASummaryName("Value '{}' is not a valid summary level".format(name_or_sl))
def __init__(self, *args, **kwargs):
# This is a bit unusual, because it means, that , unlike normal
# python args, a kwarg can overwrite a position arg.
d = dict(zip(self.fields, args + ((0,) * 10))) # Add enough zeros to set all fields to zero
d.update(kwargs)
for k, v in d.items():
if k in self.fields:
try:
setattr(self, k, v)
except TypeError as e:
raise TypeError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
except ValueError as e:
raise ValueError("Failed to convert '{}' ({}) for field '{}' in {}: {}"
.format(v, type(v), k, type(self), e))
def __str__(self):
d = self.__dict__
d['sl'] = self.sl
# Hacks for special string cases
if 'sldu' in d:
d['sldu'] = str(d['sldu']).zfill(3)
if 'sldl' in d:
d['sldl'] = str(d['sldl']).zfill(3)
try:
fn = six.get_method_function(self.encode)
kwargs = {k: fn(v) for k, v in d.items()}
return self.fmt.format(**kwargs)
except (ValueError, KeyError) as e:
raise ValueError("Bad value in {}, data {} for format {}: {}".format(type(self), d, self.fmt, e))
@property
def state_name(self):
from geoid.censusnames import geo_names
return geo_names[(self.state, 0)]
@property
def stusab(self):
from geoid.censusnames import stusab
try:
return stusab[int(self.state)]
except (AttributeError, ValueError):
# Assume this is a Us object, or some other national object
return 'US'
@property
def county_name(self):
from geoid.censusnames import geo_names
try:
try:
return CountyName(geo_names[(self.state, self.county)])
except KeyError:
try:
return CountyName("County #{}, {}".format(self.county,geo_names[(self.state, 0)]))
except KeyError:
return CountyName("County #{}, State#{}".format(self.county, self.state))
except Exception:
return CountyName('')
@property
def geo_name(self):
"""
Return a name of the state or county, or, for other lowever levels, the
name of the level type in the county.
:return:
"""
if self.level == 'county':
return str(self.county_name)
elif self.level == 'state':
return self.state_name
else:
if hasattr(self, 'county'):
return "{} in {}".format(self.level,str(self.county_name))
elif hasattr(self, 'state'):
return "{} in {}".format(self.level, self.state_name)
else:
return "a {}".format(self.level)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
@classmethod
def parse(cls, gvid, exception=True):
"""
Parse a string value into the geoid of this class.
:param gvid: String value to parse.
:param exception: If true ( default) raise an eception on parse erorrs. If False, return a
'null' geoid.
:return:
"""
if gvid == 'invalid':
return cls.get_class('null')(0)
if not bool(gvid):
return None
if not isinstance(gvid, six.string_types):
raise TypeError("Can't parse; not a string. Got a '{}' ".format(type(gvid)))
try:
if not cls.sl:
# Civick and ACS include the SL, so can call from base type.
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
sl = fn(gvid[0:cls.sl_width])
else:
sl = cls.sl # Otherwise must use derived class.
except ValueError as e:
if exception:
raise ValueError("Failed to parse gvid '{}': {}".format(gvid, str(e)))
else:
return cls.get_class('null')(0)
try:
cls = cls.sl_map[sl]
except KeyError:
if exception:
raise ValueError("Failed to parse gvid '{}': Unknown summary level '{}' ".format(gvid, sl))
else:
return cls.get_class('null')(0)
m = cls.regex.match(gvid)
if not m:
raise ValueError("Failed to match '{}' to '{}' ".format(gvid, cls.regex_str))
d = m.groupdict()
if not d:
return None
if six.PY3:
fn = cls.decode
else:
fn = cls.decode.__func__
d = {k: fn(v) for k, v in d.items()}
try:
del d['sl']
except KeyError:
pass
return cls(**d)
def convert(self, root_cls):
"""Convert to another derived class. cls is the base class for the derived type,
ie AcsGeoid, TigerGeoid, etc. """
d = self.__dict__
d['sl'] = self.sl
try:
cls = root_cls.get_class(root_cls.sl)
except (AttributeError, TypeError):
# Hopefully because root_cls is a module
cls = root_cls.get_class(self.sl)
return cls(**d)
def as_census(self):
from geoid.census import CensusGeoid
return self.convert(CensusGeoid)
def as_acs(self):
from geoid.acs import AcsGeoid
return self.convert(AcsGeoid)
def as_tiger(self):
from geoid.tiger import TigerGeoid
return self.convert(TigerGeoid)
def promote(self, level=None):
"""Convert to the next higher level summary level"""
if level is None:
if len(self.fields) < 2:
if self.level in ('region', 'division', 'state', 'ua'):
cls = self.get_class('us')
else:
return None
else:
cls = self.get_class(self.fields[-2])
else:
cls = self.get_class(level)
d = dict(self.__dict__.items())
d['sl'] = self.sl
return cls(**d)
def summarize(self):
"""Convert all of the values to their max values. This form is used to represent the summary level"""
raise NotImplementedError
def allval(self):
"""Convert the last value to zero. This form represents the entire higher summary level at the granularity
of the lower summary level. For example, for a county, it means 'All counties in the state' """
d = dict(self.__dict__.items())
d['sl'] = self.sl
d[self.level] = 0
cls = self.get_class(self.sl)
return cls(**d)
@classmethod
@property
def tuples(self):
"""Return tuples of field, value, in the order of the levels as they are defined """
return [(field, getattr(self, field, None)) for field in self.fields]
@property
def is_summary(self):
"""Return True if this geoid is an summary -- all of the fields are 0"""
return str(self) == str(self.summarize())
@property
def is_allval(self):
"""Return True if this geoid is an allval -- the last field is zero, but the first is not"""
tups = self.tuples
return tups[-1][1] == 0 and tups[0][1] != 0
@property
def level_plural(self):
"""Return the name of the level as a plural"""
return plurals.get(self.level, self.level + "s")
|
Metatab/geoid
|
geoid/civick.py
|
GVid.summarize
|
python
|
def summarize(self):
s = str(self.allval())
return self.parse(s[:2]+ ''.join(['Z']*len(s[2:])))
|
Convert all of the values to their max values. This form is used to represent the summary level
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/civick.py#L41-L46
|
[
"def parse(cls, gvid, exception=True):\n \"\"\"\n Parse a string value into the geoid of this class.\n\n :param gvid: String value to parse.\n :param exception: If true ( default) raise an eception on parse erorrs. If False, return a\n 'null' geoid.\n :return:\n \"\"\"\n\n if gvid == 'invalid':\n return cls.get_class('null')(0)\n\n if not bool(gvid):\n return None\n\n if not isinstance(gvid, six.string_types):\n raise TypeError(\"Can't parse; not a string. Got a '{}' \".format(type(gvid)))\n\n try:\n if not cls.sl:\n # Civick and ACS include the SL, so can call from base type.\n if six.PY3:\n fn = cls.decode\n else:\n fn = cls.decode.__func__\n\n sl = fn(gvid[0:cls.sl_width])\n else:\n sl = cls.sl # Otherwise must use derived class.\n\n except ValueError as e:\n if exception:\n raise ValueError(\"Failed to parse gvid '{}': {}\".format(gvid, str(e)))\n else:\n return cls.get_class('null')(0)\n\n try:\n cls = cls.sl_map[sl]\n except KeyError:\n if exception:\n raise ValueError(\"Failed to parse gvid '{}': Unknown summary level '{}' \".format(gvid, sl))\n else:\n return cls.get_class('null')(0)\n\n m = cls.regex.match(gvid)\n\n if not m:\n raise ValueError(\"Failed to match '{}' to '{}' \".format(gvid, cls.regex_str))\n\n d = m.groupdict()\n\n if not d:\n return None\n\n if six.PY3:\n fn = cls.decode\n else:\n fn = cls.decode.__func__\n\n d = {k: fn(v) for k, v in d.items()}\n\n try:\n del d['sl']\n except KeyError:\n pass\n\n return cls(**d)\n",
"def allval(self):\n \"\"\"Convert the last value to zero. This form represents the entire higher summary level at the granularity\n of the lower summary level. For example, for a county, it means 'All counties in the state' \"\"\"\n\n d = dict(self.__dict__.items())\n d['sl'] = self.sl\n\n d[self.level] = 0\n\n cls = self.get_class(self.sl)\n\n return cls(**d)\n"
] |
class GVid(Geoid):
sl = None
fmt = None
class_map = {}
sl_map = {}
sl_width = 2
sl_format = '{sl:0>2s}'
elem_format = '{{{}:0>{}s}}'
elem_str_format = '{{{}:0>{}s}}'
sl_regex = '(?P<sl>.{2})'
elem_regex = '(?P<{}>.{{{}}})'
encode = base62_encode
decode = base62_decode
@classmethod
def part_width(cls, dec_width):
# Convert a decimal number of digits to a base 62 number of digits, via strings.
# Maybe would be faster to use log()?
return len(base62_encode(int('9'*int(dec_width))))
@classmethod
def class_factory(cls, name):
def __init__(self, *args, **kwargs):
cls.__init__(self, *args, **kwargs)
return type(name, (cls,), {"__init__": __init__})
def __str__(self):
try:
r = super(GVid, self).__str__()
assert r != '0' and r != 0
return r
except ValueError:
# There are a few types of geoids that can have strings in their values instead of numbers:
# aihhtli and sdlu
# FIXME: Do more analysis to determine if these values can be converted to numbers.
return 'invalid'
|
Metatab/geoid
|
geoid/util.py
|
simplify
|
python
|
def simplify(geoids):
from collections import defaultdict
aggregated = defaultdict(set)
d = {}
for g in geoids:
if not bool(g):
continue
av = g.allval()
d[av] = None
aggregated[av].add(g)
compiled = set()
for k, v in aggregated.items():
if len(v) >= 5:
compiled.add(k)
compiled.add(k.promote())
else:
compiled |= v
return compiled
|
Given a list of geoids, reduce it to a simpler set. If there are five or more geoids at one summary level
convert them to a single geoid at the higher level.
:param geoids:
:return:
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/util.py#L3-L38
| null |
"""Utilities"""
def isimplify(geoids):
"""Iteratively simplify until the set stops getting smaller. """
s0 = list(geoids)
for i in range(10):
s1 = simplify(s0)
if len(s1) == len(s0):
return s1
s0 = s1
def iallval(t):
"""Recursively promote and compute allvals """
if t:
return [t.allval()] + iallval(t.promote())
else:
return []
|
Metatab/geoid
|
geoid/util.py
|
isimplify
|
python
|
def isimplify(geoids):
s0 = list(geoids)
for i in range(10):
s1 = simplify(s0)
if len(s1) == len(s0):
return s1
s0 = s1
|
Iteratively simplify until the set stops getting smaller.
|
train
|
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/util.py#L40-L51
|
[
"def simplify(geoids):\n \"\"\"\n Given a list of geoids, reduce it to a simpler set. If there are five or more geoids at one summary level\n convert them to a single geoid at the higher level.\n\n :param geoids:\n :return:\n \"\"\"\n\n from collections import defaultdict\n\n aggregated = defaultdict(set)\n\n d = {}\n\n for g in geoids:\n\n if not bool(g):\n continue\n\n av = g.allval()\n\n d[av] = None\n\n aggregated[av].add(g)\n\n compiled = set()\n\n for k, v in aggregated.items():\n if len(v) >= 5:\n compiled.add(k)\n compiled.add(k.promote())\n else:\n compiled |= v\n\n return compiled\n"
] |
"""Utilities"""
def simplify(geoids):
"""
Given a list of geoids, reduce it to a simpler set. If there are five or more geoids at one summary level
convert them to a single geoid at the higher level.
:param geoids:
:return:
"""
from collections import defaultdict
aggregated = defaultdict(set)
d = {}
for g in geoids:
if not bool(g):
continue
av = g.allval()
d[av] = None
aggregated[av].add(g)
compiled = set()
for k, v in aggregated.items():
if len(v) >= 5:
compiled.add(k)
compiled.add(k.promote())
else:
compiled |= v
return compiled
def iallval(t):
"""Recursively promote and compute allvals """
if t:
return [t.allval()] + iallval(t.promote())
else:
return []
|
jpscaletti/authcode
|
authcode/auth.py
|
Auth.set_hasher
|
python
|
def set_hasher(self, hash, rounds=None):
hash = hash.replace('-', '_')
if hash not in VALID_HASHERS:
raise WrongHashAlgorithm(WRONG_HASH_MESSAGE)
hasher = getattr(ph, hash)
utils.test_hasher(hasher)
default_rounds = getattr(hasher, 'default_rounds', 1)
min_rounds = getattr(hasher, 'min_rounds', 1)
max_rounds = getattr(hasher, 'max_rounds', float("inf"))
rounds = min(max(rounds or default_rounds, min_rounds), max_rounds)
op = {
'schemes': VALID_HASHERS + DEPRECATED_HASHERS,
'deprecated': DEPRECATED_HASHERS,
'default': hash,
hash + '__default_rounds': rounds
}
self.hasher = CryptContext(**op)
self.hash = hash.replace('_', '-') # For testing
self.rounds = rounds
|
Updates the has algorithm and, optionally, the number of rounds
to use.
Raises:
`~WrongHashAlgorithm` if new algorithm isn't one of the three
recomended options.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/auth.py#L140-L167
|
[
"def test_hasher(hasher):\n hasher.encrypt('test', rounds=hasher.min_rounds)\n"
] |
class Auth(AuthenticationMixin, AuthorizationMixin, ViewsMixin):
default_settings = {
'session_key': '_uhmac',
'user_name': 'user',
'csrf_key': '_csrf_token',
'csrf_header': 'X-CSRFToken',
'csrf_header_alt': 'X-CSRF-TOKEN',
'redirect_key': 'next',
'sign_in_redirect': '/',
'sign_out_redirect': '/',
'views': 'sign_in sign_out reset_password change_password'.split(' '),
'url_sign_in': '/sign-in/',
'url_sign_out': '/sign-out/',
'url_reset_password': '/reset-password/',
'url_change_password': '/change-password/',
'template_sign_in': None,
'template_sign_out': None,
'template_reset': None,
'template_reset_email': None,
'template_change_password': None,
'reset_email_subject': u'Reset your password',
# Should logins be case insensitive?
'case_insensitive': True,
# Prevent session fixation attacks, but
# block having multiple logins at the same time.
# If you set this to False, make sure to delete on logout all user's
# information stored in the session.
'clear_session_on_logout': True,
# Trying to save any password shorter than this will raise a ValueError.
'password_minlen': 5,
# To help preventing denial-of-service via large passwords
# See: https://www.djangoproject.com/weblog/2013/sep/15/security/
# Authenticaction with any password longer than this will automatically fail.
# Trying to save any password longer than this will raise a ValueError.
'password_maxlen': 1024,
'token_life': 3 * 60, # minutes
'update_hash': True,
'wsgi': wsgi.werkzeug,
'pepper': u'', # considering deprecating it
}
def __init__(self, secret_key, db=None, hash=DEFAULT_HASHER, rounds=None,
UserMixin=None, RoleMixin=None, roles=False,
prefix=None, views_prefix=None,
users_model_name=None, roles_model_name=None,
**settings):
self.secret_key = str(secret_key)
assert len(self.secret_key) >= MIN_SECRET_LENGTH, \
"`secret_key` must be at least {} chars long".format(MIN_SECRET_LENGTH)
self.set_hasher(hash, rounds)
if prefix:
prefix = prefix.lower().replace(' ', '')
if not users_model_name:
users_model_name = '{}User'.format(prefix.title())
if not roles_model_name:
roles_model_name = '{}Role'.format(prefix.title())
if not views_prefix:
views_prefix = '{}_'.format(prefix)
settings.setdefault(
'url_sign_in',
'/{prefix}{url}'.format(
prefix=prefix,
url=self.default_settings['url_sign_in']
)
)
settings.setdefault(
'url_sign_out',
'/{prefix}{url}'.format(
prefix=prefix,
url=self.default_settings['url_sign_out']
)
)
settings.setdefault(
'url_reset_password',
'/{prefix}{url}'.format(
prefix=prefix,
url=self.default_settings['url_reset_password']
)
)
settings.setdefault(
'url_change_password',
'/{prefix}{url}'.format(
prefix=prefix,
url=self.default_settings['url_change_password']
)
)
self.db = db
if db:
self.users_model_name = users_model_name or 'User'
roles = roles or RoleMixin
self.User = extend_user_model(self, UserMixin, roles=roles)
if roles:
self.roles_model_name = roles_model_name or 'Role'
self.Role = extend_role_model(self, self.User, RoleMixin)
self.backends = [
self.auth_password,
self.auth_token,
]
self.session = {}
self.views_prefix = views_prefix or u''
for name in self.default_settings:
setattr(self, name, settings.get(name, self.default_settings[name]))
|
jpscaletti/authcode
|
authcode/auth_views_mixin.py
|
ViewsMixin.render_template
|
python
|
def render_template(self, name, **kwargs):
custom_template = getattr(self, 'template_' + name)
if custom_template:
return self.render(custom_template, **kwargs)
template = TEMPLATES.get(name)
return self.default_render(template, **kwargs)
|
Search for a setting named ``template_<name>`` and renders it.
If one is not defined it uses the default template of the library
at ``autchode/templates/<name>,html``.
To render the template uses the ``render`` function, a property that
has been probably overwritten in a ``auth.setup_for_something``
function (eg. ``setup_for_flask``).
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/auth_views_mixin.py#L44-L57
|
[
"def default_render(self, template, **kwargs):\n tmpl = def_env.get_template(template)\n return tmpl.render(kwargs)\n",
"def render(self, template, **kwargs):\n \"\"\"Should be overwritten in the setup\"\"\"\n return self.default_render(template, **kwargs) # pragma: no cover\n"
] |
class ViewsMixin(object):
ERROR_BAD_CSRF = 'BAD CSRF TOKEN'
ERROR_SUSPENDED = 'ACCOUNT SUSPENDED'
ERROR_CREDENTIALS = 'BAD CREDENTIALS'
ERROR_BAD_TOKEN = 'WRONG TOKEN'
ERROR_WRONG_TOKEN_USER = 'WRONG USER'
ERROR_PASSW_TOO_SHORT = 'TOO SHORT'
ERROR_PASSW_TOO_LONG = 'TOO LONG'
ERROR_PASSW_MISMATCH = 'MISMATCH'
ERROR_PASSW_CURRENT = 'FAIL'
def auth_sign_in(self, *args, **kwargs):
request = self.request or kwargs.get('request') or args and args[0]
return views.sign_in(self, request, self.session, *args, **kwargs)
def auth_sign_out(self, *args, **kwargs):
request = self.request or kwargs.get('request') or args and args[0]
return views.sign_out(self, request, **kwargs)
def auth_reset_password(self, *args, **kwargs):
request = self.request or kwargs.get('request') or args and args[0]
return views.reset_password(self, request, **kwargs)
def auth_change_password(self, *args, **kwargs):
request = self.request or kwargs.get('request') or args and args[0]
return views.change_password(self, request, **kwargs)
def default_render(self, template, **kwargs):
tmpl = def_env.get_template(template)
return tmpl.render(kwargs)
def render(self, template, **kwargs):
"""Should be overwritten in the setup"""
return self.default_render(template, **kwargs) # pragma: no cover
def send_email(self, user, subject, msg):
"""Should be overwritten in the setup"""
print('To:', user)
print('Subject:', subject)
print(msg)
|
jpscaletti/authcode
|
authcode/auth_views_mixin.py
|
ViewsMixin.send_email
|
python
|
def send_email(self, user, subject, msg):
print('To:', user)
print('Subject:', subject)
print(msg)
|
Should be overwritten in the setup
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/auth_views_mixin.py#L67-L71
| null |
class ViewsMixin(object):
ERROR_BAD_CSRF = 'BAD CSRF TOKEN'
ERROR_SUSPENDED = 'ACCOUNT SUSPENDED'
ERROR_CREDENTIALS = 'BAD CREDENTIALS'
ERROR_BAD_TOKEN = 'WRONG TOKEN'
ERROR_WRONG_TOKEN_USER = 'WRONG USER'
ERROR_PASSW_TOO_SHORT = 'TOO SHORT'
ERROR_PASSW_TOO_LONG = 'TOO LONG'
ERROR_PASSW_MISMATCH = 'MISMATCH'
ERROR_PASSW_CURRENT = 'FAIL'
def auth_sign_in(self, *args, **kwargs):
request = self.request or kwargs.get('request') or args and args[0]
return views.sign_in(self, request, self.session, *args, **kwargs)
def auth_sign_out(self, *args, **kwargs):
request = self.request or kwargs.get('request') or args and args[0]
return views.sign_out(self, request, **kwargs)
def auth_reset_password(self, *args, **kwargs):
request = self.request or kwargs.get('request') or args and args[0]
return views.reset_password(self, request, **kwargs)
def auth_change_password(self, *args, **kwargs):
request = self.request or kwargs.get('request') or args and args[0]
return views.change_password(self, request, **kwargs)
def render_template(self, name, **kwargs):
"""Search for a setting named ``template_<name>`` and renders it.
If one is not defined it uses the default template of the library
at ``autchode/templates/<name>,html``.
To render the template uses the ``render`` function, a property that
has been probably overwritten in a ``auth.setup_for_something``
function (eg. ``setup_for_flask``).
"""
custom_template = getattr(self, 'template_' + name)
if custom_template:
return self.render(custom_template, **kwargs)
template = TEMPLATES.get(name)
return self.default_render(template, **kwargs)
def default_render(self, template, **kwargs):
tmpl = def_env.get_template(template)
return tmpl.render(kwargs)
def render(self, template, **kwargs):
"""Should be overwritten in the setup"""
return self.default_render(template, **kwargs) # pragma: no cover
|
jpscaletti/authcode
|
authcode/setups/setup_for_bottle.py
|
setup_for_bottle
|
python
|
def setup_for_bottle(
auth, app, send_email=None, render=None,
session=None, request=None, urloptions=None):
import bottle
auth.request = request or bottle.request
if session is not None:
auth.session = session
if send_email:
auth.send_email = send_email
auth.render = render or bottle.template
bottle.BaseTemplate.defaults['csrf_token'] = auth.get_csrf_token
bottle.BaseTemplate.defaults['auth'] = auth
@bottle.hook('before_request')
def after_request():
auth.session = session or getattr(bottle.request, 'session') \
or bottle.request.environ.get('beaker.session')
assert auth.session, 'Session not found'
# By doing this, ``bottle.request`` now has a ``user`` attribute
# that it's replaced by the real user object the first time is used.
LazyUser(auth, bottle.request, user_name=auth.user_name)
if auth.views:
assert auth.render
setup_for_bottle_views(auth, app, urloptions)
|
Set the session **before** calling ``setup_for_bottle`` like this:
@hook('before_request')
def setup_request():
request.session = request.environ['beaker.session']
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/setups/setup_for_bottle.py#L5-L40
|
[
"def setup_for_bottle_views(auth, app, urloptions):\n urloptions = urloptions or {}\n\n if 'sign_in' in auth.views:\n url_sign_in = eval_url(auth.url_sign_in)\n app.route(\n url_sign_in,\n method=['GET', 'POST'],\n name='{prefix}{name}'.format(\n prefix=auth.views_prefix,\n name='auth_sign_in'\n ),\n callback=auth.auth_sign_in,\n **urloptions\n )\n\n if 'sign_out' in auth.views:\n url_sign_out = eval_url(auth.url_sign_out)\n app.route(\n url_sign_out,\n method=['GET', 'POST'],\n name='{prefix}{name}'.format(\n prefix=auth.views_prefix,\n name='auth_sign_out'\n ),\n callback=auth.auth_sign_out,\n **urloptions\n )\n\n if 'change_password' in auth.views:\n url_change_password = eval_url(auth.url_change_password)\n app.route(\n url_change_password,\n method=['GET', 'POST'],\n name='{prefix}{name}'.format(\n prefix=auth.views_prefix,\n name='auth_change_password'\n ),\n callback=auth.auth_change_password,\n **urloptions\n )\n\n if 'reset_password' in auth.views:\n url_reset_password = eval_url(auth.url_reset_password)\n app.route(\n url_reset_password,\n method=['GET', 'POST'],\n name='{prefix}{name}'.format(\n prefix=auth.views_prefix,\n name='auth_reset_password'\n ),\n callback=auth.auth_reset_password,\n **urloptions\n )\n app.route(\n url_reset_password.rstrip('/') + '/<token>/',\n method=['GET', 'POST'],\n name='{prefix}{name}'.format(\n prefix=auth.views_prefix,\n name='auth_reset_password'\n ),\n callback=auth.auth_reset_password,\n **urloptions\n )\n"
] |
# coding=utf-8
from ..utils import LazyUser, eval_url
def setup_for_bottle_views(auth, app, urloptions):
urloptions = urloptions or {}
if 'sign_in' in auth.views:
url_sign_in = eval_url(auth.url_sign_in)
app.route(
url_sign_in,
method=['GET', 'POST'],
name='{prefix}{name}'.format(
prefix=auth.views_prefix,
name='auth_sign_in'
),
callback=auth.auth_sign_in,
**urloptions
)
if 'sign_out' in auth.views:
url_sign_out = eval_url(auth.url_sign_out)
app.route(
url_sign_out,
method=['GET', 'POST'],
name='{prefix}{name}'.format(
prefix=auth.views_prefix,
name='auth_sign_out'
),
callback=auth.auth_sign_out,
**urloptions
)
if 'change_password' in auth.views:
url_change_password = eval_url(auth.url_change_password)
app.route(
url_change_password,
method=['GET', 'POST'],
name='{prefix}{name}'.format(
prefix=auth.views_prefix,
name='auth_change_password'
),
callback=auth.auth_change_password,
**urloptions
)
if 'reset_password' in auth.views:
url_reset_password = eval_url(auth.url_reset_password)
app.route(
url_reset_password,
method=['GET', 'POST'],
name='{prefix}{name}'.format(
prefix=auth.views_prefix,
name='auth_reset_password'
),
callback=auth.auth_reset_password,
**urloptions
)
app.route(
url_reset_password.rstrip('/') + '/<token>/',
method=['GET', 'POST'],
name='{prefix}{name}'.format(
prefix=auth.views_prefix,
name='auth_reset_password'
),
callback=auth.auth_reset_password,
**urloptions
)
|
jpscaletti/authcode
|
authcode/wsgi/bottle.py
|
get_site_name
|
python
|
def get_site_name(request):
urlparts = request.urlparts
return ':'.join([urlparts.hostname, str(urlparts.port)])
|
Return the domain:port part of the URL without scheme.
Eg: facebook.com, 127.0.0.1:8080, etc.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/bottle.py#L10-L15
| null |
# coding=utf-8
from __future__ import absolute_import
from .._compat import to_native
HTTP_FORBIDDEN = 403
def get_full_path(request):
"""Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
"""
path = request.fullpath
query_string = request.environ.get('QUERY_STRING')
if query_string:
path += '?' + to_native(query_string)
return path
def make_full_url(request, url):
"""Get a relative URL and returns the absolute version.
Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open”
"""
urlparts = request.urlparts
return '{scheme}://{site}/{url}'.format(
scheme=urlparts.scheme,
site=get_site_name(request),
url=url.lstrip('/'),
)
def is_post(request):
"""Return ``True`` if the method of the request is ``POST``.
"""
return request.method.upper() == 'POST'
def is_idempotent(request):
"""Return ``True`` if the method of the request is ``GET`` or ``HEAD``.
"""
return request.method.upper() in ('GET', 'HEAD')
def redirect(url):
"""Return an HTTP 303 See Other response for this url, in the
idiom of the framework.
"""
from bottle import redirect
redirect(url)
def raise_forbidden(msg='You are not allowed to access this.'):
"""Return an HTTP 403 Forbidden response (with the passed message), in the
idiom of the framework.
"""
from bottle import abort
abort(HTTP_FORBIDDEN, msg)
def get_from_params(request, key):
"""Try to read a value named ``key`` from the GET parameters.
"""
return request.query.get(key)
def get_from_headers(request, key):
"""Try to read a value named ``key`` from the headers.
"""
return request.headers.get(key)
def get_post_data(request):
"""Return all the POST data from the request.
"""
return request.forms
def make_response(body, mimetype='text/html'):
"""Build a framework specific HTPP response, containing ``body`` and
marked as the type ``mimetype``.
"""
from bottle import response
response.content_type = mimetype
return body or u''
|
jpscaletti/authcode
|
authcode/wsgi/bottle.py
|
get_full_path
|
python
|
def get_full_path(request):
path = request.fullpath
query_string = request.environ.get('QUERY_STRING')
if query_string:
path += '?' + to_native(query_string)
return path
|
Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/bottle.py#L18-L26
|
[
"def to_native(x, charset='utf8', errors='ignore'):\n bb = to_bytes(x, charset=charset, errors=errors)\n if not bb:\n return bb\n return bb.decode('utf8')\n"
] |
# coding=utf-8
from __future__ import absolute_import
from .._compat import to_native
HTTP_FORBIDDEN = 403
def get_site_name(request):
"""Return the domain:port part of the URL without scheme.
Eg: facebook.com, 127.0.0.1:8080, etc.
"""
urlparts = request.urlparts
return ':'.join([urlparts.hostname, str(urlparts.port)])
def make_full_url(request, url):
"""Get a relative URL and returns the absolute version.
Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open”
"""
urlparts = request.urlparts
return '{scheme}://{site}/{url}'.format(
scheme=urlparts.scheme,
site=get_site_name(request),
url=url.lstrip('/'),
)
def is_post(request):
"""Return ``True`` if the method of the request is ``POST``.
"""
return request.method.upper() == 'POST'
def is_idempotent(request):
"""Return ``True`` if the method of the request is ``GET`` or ``HEAD``.
"""
return request.method.upper() in ('GET', 'HEAD')
def redirect(url):
"""Return an HTTP 303 See Other response for this url, in the
idiom of the framework.
"""
from bottle import redirect
redirect(url)
def raise_forbidden(msg='You are not allowed to access this.'):
"""Return an HTTP 403 Forbidden response (with the passed message), in the
idiom of the framework.
"""
from bottle import abort
abort(HTTP_FORBIDDEN, msg)
def get_from_params(request, key):
"""Try to read a value named ``key`` from the GET parameters.
"""
return request.query.get(key)
def get_from_headers(request, key):
"""Try to read a value named ``key`` from the headers.
"""
return request.headers.get(key)
def get_post_data(request):
"""Return all the POST data from the request.
"""
return request.forms
def make_response(body, mimetype='text/html'):
"""Build a framework specific HTPP response, containing ``body`` and
marked as the type ``mimetype``.
"""
from bottle import response
response.content_type = mimetype
return body or u''
|
jpscaletti/authcode
|
authcode/wsgi/bottle.py
|
make_full_url
|
python
|
def make_full_url(request, url):
urlparts = request.urlparts
return '{scheme}://{site}/{url}'.format(
scheme=urlparts.scheme,
site=get_site_name(request),
url=url.lstrip('/'),
)
|
Get a relative URL and returns the absolute version.
Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open”
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/bottle.py#L29-L38
|
[
"def get_site_name(request):\n \"\"\"Return the domain:port part of the URL without scheme.\n Eg: facebook.com, 127.0.0.1:8080, etc.\n \"\"\"\n urlparts = request.urlparts\n return ':'.join([urlparts.hostname, str(urlparts.port)])\n"
] |
# coding=utf-8
from __future__ import absolute_import
from .._compat import to_native
HTTP_FORBIDDEN = 403
def get_site_name(request):
"""Return the domain:port part of the URL without scheme.
Eg: facebook.com, 127.0.0.1:8080, etc.
"""
urlparts = request.urlparts
return ':'.join([urlparts.hostname, str(urlparts.port)])
def get_full_path(request):
"""Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
"""
path = request.fullpath
query_string = request.environ.get('QUERY_STRING')
if query_string:
path += '?' + to_native(query_string)
return path
def is_post(request):
"""Return ``True`` if the method of the request is ``POST``.
"""
return request.method.upper() == 'POST'
def is_idempotent(request):
"""Return ``True`` if the method of the request is ``GET`` or ``HEAD``.
"""
return request.method.upper() in ('GET', 'HEAD')
def redirect(url):
"""Return an HTTP 303 See Other response for this url, in the
idiom of the framework.
"""
from bottle import redirect
redirect(url)
def raise_forbidden(msg='You are not allowed to access this.'):
"""Return an HTTP 403 Forbidden response (with the passed message), in the
idiom of the framework.
"""
from bottle import abort
abort(HTTP_FORBIDDEN, msg)
def get_from_params(request, key):
"""Try to read a value named ``key`` from the GET parameters.
"""
return request.query.get(key)
def get_from_headers(request, key):
"""Try to read a value named ``key`` from the headers.
"""
return request.headers.get(key)
def get_post_data(request):
"""Return all the POST data from the request.
"""
return request.forms
def make_response(body, mimetype='text/html'):
"""Build a framework specific HTPP response, containing ``body`` and
marked as the type ``mimetype``.
"""
from bottle import response
response.content_type = mimetype
return body or u''
|
jpscaletti/authcode
|
authcode/wsgi/bottle.py
|
make_response
|
python
|
def make_response(body, mimetype='text/html'):
from bottle import response
response.content_type = mimetype
return body or u''
|
Build a framework specific HTPP response, containing ``body`` and
marked as the type ``mimetype``.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/bottle.py#L87-L93
| null |
# coding=utf-8
from __future__ import absolute_import
from .._compat import to_native
HTTP_FORBIDDEN = 403
def get_site_name(request):
"""Return the domain:port part of the URL without scheme.
Eg: facebook.com, 127.0.0.1:8080, etc.
"""
urlparts = request.urlparts
return ':'.join([urlparts.hostname, str(urlparts.port)])
def get_full_path(request):
"""Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
"""
path = request.fullpath
query_string = request.environ.get('QUERY_STRING')
if query_string:
path += '?' + to_native(query_string)
return path
def make_full_url(request, url):
"""Get a relative URL and returns the absolute version.
Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open”
"""
urlparts = request.urlparts
return '{scheme}://{site}/{url}'.format(
scheme=urlparts.scheme,
site=get_site_name(request),
url=url.lstrip('/'),
)
def is_post(request):
"""Return ``True`` if the method of the request is ``POST``.
"""
return request.method.upper() == 'POST'
def is_idempotent(request):
"""Return ``True`` if the method of the request is ``GET`` or ``HEAD``.
"""
return request.method.upper() in ('GET', 'HEAD')
def redirect(url):
"""Return an HTTP 303 See Other response for this url, in the
idiom of the framework.
"""
from bottle import redirect
redirect(url)
def raise_forbidden(msg='You are not allowed to access this.'):
"""Return an HTTP 403 Forbidden response (with the passed message), in the
idiom of the framework.
"""
from bottle import abort
abort(HTTP_FORBIDDEN, msg)
def get_from_params(request, key):
"""Try to read a value named ``key`` from the GET parameters.
"""
return request.query.get(key)
def get_from_headers(request, key):
"""Try to read a value named ``key`` from the headers.
"""
return request.headers.get(key)
def get_post_data(request):
"""Return all the POST data from the request.
"""
return request.forms
|
jpscaletti/authcode
|
authcode/auth_authentication_mixin.py
|
AuthenticationMixin.login
|
python
|
def login(self, user, remember=True, session=None):
logger = logging.getLogger(__name__)
logger.debug(u'User `{0}` logged in'.format(user.login))
if session is None:
session = self.session
session['permanent'] = remember
session[self.session_key] = user.get_uhmac()
if callable(getattr(session, 'save', None)):
session.save()
|
Sets the current user UID in the session.
Instead of just storing the user's id, it generates a hash from the
password *salt*. That way, an admin or the user herself can invalidate
the login in other computers just by changing (or re-saving)
her password.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/auth_authentication_mixin.py#L130-L146
| null |
class AuthenticationMixin(object):
    """Password/token authentication behavior mixed into the Auth object.

    NOTE(review): relies on attributes set elsewhere (``pepper``,
    ``hasher``, ``User``, ``session``, ``session_key``, ``backends``,
    ``password_minlen``/``maxlen``, ``update_hash``, ``token_life``,
    ``clear_session_on_logout``) — confirm against the Auth constructor.
    """

    def prepare_password(self, secret):
        # Prepend the application-wide pepper so stored hashes are tied to
        # this deployment's secret pepper value.
        return u'{pepper}{secret}'.format(
            pepper=to_unicode(self.pepper),
            secret=to_unicode(secret)
        )

    def hash_password(self, secret):
        """Length-check ``secret`` and return its peppered hash.

        Returns ``None`` for a ``None`` secret. Raises ``ValueError``
        when the secret is shorter than ``password_minlen`` or longer
        than ``password_maxlen``.
        """
        if secret is None:
            return None
        len_secret = len(secret)
        if len_secret < self.password_minlen:
            raise ValueError(
                'Password is too short. Must have at least {} chars long'.format(
                    self.password_minlen))
        if len_secret > self.password_maxlen:
            raise ValueError(
                'Password is too long. Must have at most {} chars long'.format(
                    self.password_maxlen))
        secret = self.prepare_password(secret)
        hashed = self.hasher.encrypt(secret)
        return hashed

    def password_is_valid(self, secret, hashed):
        """Return ``True`` iff ``secret`` matches the stored ``hashed``.

        ``None`` on either side, or an over-long secret, is rejected
        without invoking the hasher.
        """
        if secret is None or hashed is None:
            return False
        # To help preventing denial-of-service via large passwords
        # See: https://www.djangoproject.com/weblog/2013/sep/15/security/
        if len(secret) > self.password_maxlen:
            return False
        secret = self.prepare_password(secret)
        try:
            return self.hasher.verify(secret, hashed)
        except ValueError:
            # The hasher raises ValueError for malformed hashes; treat
            # that as a simple mismatch.
            return False

    def authenticate(self, credentials):
        """Try ``credentials`` against each backend in order; return the
        first user found, or ``None``."""
        for backend in self.backends:
            user = backend(credentials)
            if user:
                return user
        return None

    def auth_password(self, credentials):
        """Password backend: look the user up by ``login`` and verify
        ``password``. Failures are only logged at debug level so no
        detail about *why* authentication failed leaks to callers.
        """
        logger = logging.getLogger(__name__)
        login = credentials.get('login')
        secret = credentials.get('password')
        if login is None or secret is None:
            return None
        user = self.User.by_login(login)
        if not user:
            logger.debug(u'User `{0}` not found'.format(login))
            return None
        if not user.password:
            logger.debug(u'User `{0}` has no password'.format(login))
            return None
        if not self.password_is_valid(secret, user.password):
            logger.debug(u'Invalid password for user `{0}`'.format(login))
            return None
        # Opportunistically re-hash with the current hasher settings.
        self._update_password_hash(secret, user)
        return user

    def _update_password_hash(self, secret, user):
        # Re-hash the (just verified) password when hashing settings
        # changed, so stored hashes keep up. No-op unless ``update_hash``.
        if not self.update_hash:
            return
        new_hash = self.hash_password(secret)
        # Same scheme/rounds prefix ("$scheme$rounds$...") -> settings
        # unchanged, keep the stored hash.
        if new_hash.split('$')[:3] == user.password.split('$')[:3]:
            return
        user.set_raw_password(new_hash)

    def auth_token(self, credentials, token_life=None):
        """Token backend: validate a timestamped token from ``credentials``.

        Returns the user when the token matches and has not outlived
        ``token_life`` (default ``self.token_life``) seconds, else ``None``.
        """
        logger = logging.getLogger(__name__)
        token = credentials.get('token')
        if token is None:
            return None
        try:
            timestamp, uid = utils.split_token(str(token))
        except ValueError:
            logger.info(u'Invalid auth token format')
            return None
        token_life = token_life or self.token_life
        user = self.User.by_id(uid)
        if not user:
            logger.info(u'Tampered auth token? uid `{0} not found'.format(uid[:20]))
            return None
        # Recompute the expected token for that timestamp and compare.
        valid = user.get_token(timestamp) == token
        not_expired = timestamp + token_life >= int(time())
        if valid and not_expired:
            return user
        logger.info(u'Invalid auth token')
        return None

    def get_user(self, session=None):
        """Return the currently logged-in user (resolved from the session
        UHMAC) or ``None``. A tampered/stale UHMAC triggers a logout.
        """
        if session is None:
            session = self.session
        user = None
        uhmac = session.get(self.session_key)
        if uhmac:
            try:
                uid = utils.split_uhmac(uhmac)
                user = self.User.by_id(uid)
                # The recomputed UHMAC must match: it changes when the
                # user's password (salt) changes or the user disappears.
                if not user or uhmac != user.get_uhmac() or not user.login:
                    raise ValueError
            except ValueError:
                logger = logging.getLogger(__name__)
                logger.warn(u'Tampered uhmac?')
                user = None
                self.logout(session)
        return user

    def logout(self, session=None):
        """Drop the session's UHMAC (and, optionally, the whole session),
        then persist the session if it supports ``save()``."""
        if session is None:
            session = self.session
        if self.session_key in session:
            del session[self.session_key]
        if self.clear_session_on_logout:
            session.clear()
        if callable(getattr(session, 'save', None)):
            session.save()
|
jpscaletti/authcode
|
authcode/auth_authorization_mixin.py
|
AuthorizationMixin.protected
|
python
|
def protected(self, *tests, **kwargs):
    """Factory of decorators that limit access to views.

    Each positional ``test`` is called as ``test(user, *view_args,
    **view_kwargs)`` and must return a truthy value. Keyword arguments
    left over after the options below name user methods, called the same
    way. Options popped from ``kwargs``: ``role``/``roles`` (required
    role names), ``csrf`` (None: check the token on non-idempotent
    requests; True: always; False: never), ``url_sign_in`` (redirect
    target, may be a callable) and ``request`` (override, for testing).
    """
    _role = kwargs.pop('role', None)
    _roles = kwargs.pop('roles', None) or []
    _csrf = kwargs.pop('csrf', None)
    _url_sign_in = kwargs.pop('url_sign_in', None)
    _request = kwargs.pop('request', None)
    if _role:
        _roles.append(_role)
    _roles = [to_unicode(r) for r in _roles]
    _tests = tests
    # Whatever remains in kwargs is treated as user-method tests.
    _user_tests = kwargs

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            logger = logging.getLogger(__name__)
            # Fall back to the first view argument as the request object
            # when no global/override request is available.
            request = _request or self.request or args and args[0]
            url_sign_in = self._get_url_sign_in(request, _url_sign_in)
            user = self.get_user()
            if not user:
                return self._login_required(request, url_sign_in)
            if hasattr(user, 'has_role') and _roles:
                if not user.has_role(*_roles):
                    logger.debug(u'User `{0}`: has_role fail'.format(user.login))
                    logger.debug(u'User roles: {0}'.format([r.name for r in user.roles]))
                    return self.wsgi.raise_forbidden()
            for test in _tests:
                test_pass = test(user, *args, **kwargs)
                if not test_pass:
                    logger.debug(u'User `{0}`: test fail'.format(user.login))
                    return self.wsgi.raise_forbidden()
            for name, value in _user_tests.items():
                user_test = getattr(user, name)
                test_pass = user_test(value, *args, **kwargs)
                if not test_pass:
                    logger.debug(u'User `{0}`: test fail'.format(user.login))
                    return self.wsgi.raise_forbidden()
            disable_csrf = _csrf == False  # noqa
            # Idempotent (GET/HEAD) requests skip the CSRF check unless
            # explicitly forced with csrf=True.
            if (not self.wsgi.is_idempotent(request) and not disable_csrf) or _csrf:
                if not self.csrf_token_is_valid(request):
                    logger.debug(u'User `{0}`: invalid CSFR token'.format(user.login))
                    return self.wsgi.raise_forbidden("CSFR token isn't valid")
            return f(*args, **kwargs)
        return wrapper
    return decorator
|
Factory of decorators for limit the access to views.
:tests: *function, optional
One or more functions that takes the args and kwargs of the
view and returns either `True` or `False`.
All test must return True to show the view.
Options:
:role: str, optional
Test for the user having a role with this name.
:roles: list, optional
Test for the user having **any** role in this list of names.
:csrf: bool, None, optional
If ``None`` (the default), the decorator will check the value
of the CSRF token for POST, PUT or DELETE requests.
If ``True`` it will do the same for all requests.
If ``False``, the value of the CSRF token will not be checked.
:url_sign_in: str, function, optional
If any required condition fail, redirect to this place.
Override the default URL. This can also be a callable.
:request: obj, optional
Overwrite the request for testing.
The rest of the ``key=value`` pairs in ``kwargs`` are interpreted as tests.
The user must have a property `key` with a value equals to `value`.
If the user has a method named `key`, that method is called with
`value` as a single argument and must return True to show the view.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/auth_authorization_mixin.py#L31-L117
| null |
class AuthorizationMixin(object):
    """View-protection and CSRF-token behavior mixed into the Auth object.

    NOTE(review): relies on attributes set elsewhere (``session``,
    ``csrf_key``, ``csrf_header``/``csrf_header_alt``, ``redirect_key``,
    ``url_sign_in``, ``wsgi``) — confirm against the Auth constructor.
    """

    # Useful for setting a cookie only if the CSRF token has changed.
    csrf_token_has_changed = False

    def get_csrf_token(self, session=None):
        """Return the session's CSRF token, generating (and persisting)
        a fresh one when the session has none yet."""
        logger = logging.getLogger(__name__)
        if session is None:
            session = self.session
        csrf_token = session.get(self.csrf_key)
        if not csrf_token:
            logger.debug(u'New CSFR token')
            csrf_token = self.make_csrf_token()
            session[self.csrf_key] = csrf_token
            if callable(getattr(session, 'save', None)):
                session.save()
        return csrf_token

    def make_csrf_token(self):
        # Random UUID4 without dashes; flag the change so callers can
        # refresh the cookie.
        self.csrf_token_has_changed = True
        return str(uuid4()).replace('-', '')

    def replace_flask_route(self, bp, *args, **kwargs):
        """Replace the Flask `app.route` or `blueprint.route` with a version
        that first apply the protected decorator to the view, so all views
        are automatically protected."""
        protected = self.protected

        def protected_route(rule, **options):
            """Like :meth:`Flask.route` but for a blueprint. The endpoint for the
            :func:`url_for` function is prefixed with the name of the blueprint.
            """
            def decorator(f):
                endpoint = options.pop("endpoint", f.__name__)
                protected_f = protected(*args, **kwargs)(f)
                bp.add_url_rule(rule, endpoint, protected_f, **options)
                return f
            return decorator

        bp.route = protected_route

    def csrf_token_is_valid(self, request, session=None):
        """True when the request carries the session's current CSRF token."""
        token = self._get_csrf_token_from_request(request)
        return token and self._csrf_token_is_valid(token, session)

    def _csrf_token_is_valid(self, token, session=None):
        new_token = self.get_csrf_token(session=session)
        return new_token == token

    def _login_required(self, request, url_sign_in):
        # Remember where the user was heading so sign-in can bounce back.
        self.session[self.redirect_key] = self.wsgi.get_full_path(request)
        if callable(getattr(self.session, 'save', None)):
            self.session.save()
        return self.wsgi.redirect(url_sign_in)

    def _get_url_sign_in(self, request, url_sign_in):
        # Per-call override wins; a callable target is resolved with the
        # request; '/' is the last-resort fallback.
        url_sign_in = url_sign_in or self.url_sign_in
        if callable(url_sign_in):
            url_sign_in = url_sign_in(request)
        return url_sign_in or '/'

    def _get_csrf_token_from_request(self, request):
        # Accept the token from request params or either header name.
        token = self.wsgi.get_from_params(request, self.csrf_key) or \
            self.wsgi.get_from_headers(request, self.csrf_header) or \
            self.wsgi.get_from_headers(request, self.csrf_header_alt)
        return token
|
jpscaletti/authcode
|
authcode/auth_authorization_mixin.py
|
AuthorizationMixin.replace_flask_route
|
python
|
def replace_flask_route(self, bp, *args, **kwargs):
    """Monkey-patch ``bp.route`` (a Flask app or blueprint) so that every
    view registered through it is first wrapped with :meth:`protected`,
    using ``*args``/``**kwargs`` as the protection options.
    """
    guard = self.protected

    def protected_route(rule, **options):
        """Drop-in replacement for ``route``: protects the view before
        registering it under the usual endpoint name."""
        def register(view):
            endpoint = options.pop("endpoint", view.__name__)
            wrapped = guard(*args, **kwargs)(view)
            bp.add_url_rule(rule, endpoint, wrapped, **options)
            return view
        return register

    bp.route = protected_route
|
Replace the Flask `app.route` or `blueprint.route` with a version
that first apply the protected decorator to the view, so all views
are automatically protected.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/auth_authorization_mixin.py#L119-L136
| null |
class AuthorizationMixin(object):
# Useful for setting a cookie only if the CSRF token has changed.
csrf_token_has_changed = False
def get_csrf_token(self, session=None):
logger = logging.getLogger(__name__)
if session is None:
session = self.session
csrf_token = session.get(self.csrf_key)
if not csrf_token:
logger.debug(u'New CSFR token')
csrf_token = self.make_csrf_token()
session[self.csrf_key] = csrf_token
if callable(getattr(session, 'save', None)):
session.save()
return csrf_token
def make_csrf_token(self):
self.csrf_token_has_changed = True
return str(uuid4()).replace('-', '')
def protected(self, *tests, **kwargs):
"""Factory of decorators for limit the access to views.
:tests: *function, optional
One or more functions that takes the args and kwargs of the
view and returns either `True` or `False`.
All test must return True to show the view.
Options:
:role: str, optional
Test for the user having a role with this name.
:roles: list, optional
Test for the user having **any** role in this list of names.
:csrf: bool, None, optional
If ``None`` (the default), the decorator will check the value
of the CSFR token for POST, PUT or DELETE requests.
If ``True`` it will do the same also for all requests.
If ``False``, the value of the CSFR token will not be checked.
:url_sign_in: str, function, optional
If any required condition fail, redirect to this place.
Override the default URL. This can also be a callable.
:request: obj, optional
Overwrite the request for testing.
The rest of the ``key=value`` pairs in ``kwargs`` are interpreted as tests.
The user must have a property `key` with a value equals to `value`.
If the user has a method named `key`, that method is called with
`value` as a single argument and must return True to show the view.
"""
_role = kwargs.pop('role', None)
_roles = kwargs.pop('roles', None) or []
_csrf = kwargs.pop('csrf', None)
_url_sign_in = kwargs.pop('url_sign_in', None)
_request = kwargs.pop('request', None)
if _role:
_roles.append(_role)
_roles = [to_unicode(r) for r in _roles]
_tests = tests
_user_tests = kwargs
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
logger = logging.getLogger(__name__)
request = _request or self.request or args and args[0]
url_sign_in = self._get_url_sign_in(request, _url_sign_in)
user = self.get_user()
if not user:
return self._login_required(request, url_sign_in)
if hasattr(user, 'has_role') and _roles:
if not user.has_role(*_roles):
logger.debug(u'User `{0}`: has_role fail'.format(user.login))
logger.debug(u'User roles: {0}'.format([r.name for r in user.roles]))
return self.wsgi.raise_forbidden()
for test in _tests:
test_pass = test(user, *args, **kwargs)
if not test_pass:
logger.debug(u'User `{0}`: test fail'.format(user.login))
return self.wsgi.raise_forbidden()
for name, value in _user_tests.items():
user_test = getattr(user, name)
test_pass = user_test(value, *args, **kwargs)
if not test_pass:
logger.debug(u'User `{0}`: test fail'.format(user.login))
return self.wsgi.raise_forbidden()
disable_csrf = _csrf == False # noqa
if (not self.wsgi.is_idempotent(request) and not disable_csrf) or _csrf:
if not self.csrf_token_is_valid(request):
logger.debug(u'User `{0}`: invalid CSFR token'.format(user.login))
return self.wsgi.raise_forbidden("CSFR token isn't valid")
return f(*args, **kwargs)
return wrapper
return decorator
def csrf_token_is_valid(self, request, session=None):
token = self._get_csrf_token_from_request(request)
return token and self._csrf_token_is_valid(token, session)
def _csrf_token_is_valid(self, token, session=None):
new_token = self.get_csrf_token(session=session)
return new_token == token
def _login_required(self, request, url_sign_in):
self.session[self.redirect_key] = self.wsgi.get_full_path(request)
if callable(getattr(self.session, 'save', None)):
self.session.save()
return self.wsgi.redirect(url_sign_in)
def _get_url_sign_in(self, request, url_sign_in):
url_sign_in = url_sign_in or self.url_sign_in
if callable(url_sign_in):
url_sign_in = url_sign_in(request)
return url_sign_in or '/'
def _get_csrf_token_from_request(self, request):
token = self.wsgi.get_from_params(request, self.csrf_key) or \
self.wsgi.get_from_headers(request, self.csrf_header) or \
self.wsgi.get_from_headers(request, self.csrf_header_alt)
return token
|
jpscaletti/authcode
|
authcode/wsgi/werkzeug.py
|
get_full_path
|
python
|
def get_full_path(request):
    """Return the request's relative path, with ``?<query>`` appended
    when a query string is present. Eg: "/foo/bar/?page=1".
    """
    full = request.path
    qs = request.query_string
    if qs:
        full = full + '?' + to_native(qs)
    return full
|
Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/werkzeug.py#L10-L17
|
[
"def to_native(x, charset='utf8', errors='ignore'):\n bb = to_bytes(x, charset=charset, errors=errors)\n if not bb:\n return bb\n return bb.decode('utf8')\n"
] |
# coding=utf-8
from __future__ import absolute_import
from .._compat import to_native
HTTP_SEE_OTHER = 303
def make_full_url(request, url):
    """Turn a relative *url* absolute by prefixing the request's root URL.
    Eg: "/foo/bar?q=is-open" ==> "http://example.com/foo/bar?q=is-open"
    """
    relative = url.lstrip('/')
    return '{0}{1}'.format(request.url_root, relative)
def is_post(request):
    """Return ``True`` if the request's HTTP method is POST
    (compared case-insensitively)."""
    method = request.method.upper()
    return method == 'POST'
def is_idempotent(request):
    """Return ``True`` for GET or HEAD requests (safe to repeat),
    comparing the method case-insensitively."""
    method = request.method.upper()
    return method in ('GET', 'HEAD')
def redirect(url):
    """Return an HTTP 303 See Other response for this url, in the
    idiom of the framework.
    """
    # Imported lazily so this adapter module does not require werkzeug
    # at import time.
    from werkzeug.utils import redirect
    return redirect(url, code=HTTP_SEE_OTHER)
def raise_forbidden(msg='You are not allowed to access this.'):
    """Return an HTTP 403 Forbidden response (with the passed message), in the
    idiom of the framework.
    """
    # Werkzeug signals HTTP errors by raising; callers never receive a
    # return value from this function.
    from werkzeug.exceptions import Forbidden
    raise Forbidden(msg)
def get_from_params(request, key):
"""Try to read a value named ``key`` from the GET parameters.
"""
data = getattr(request, 'json', None) or request.values
value = data.get(key)
return to_native(value)
def get_from_headers(request, key):
"""Try to read a value named ``key`` from the headers.
"""
value = request.headers.get(key)
return to_native(value)
def get_post_data(request):
    """Return the request's POST payload: the JSON body when present and
    truthy, otherwise the form data, otherwise an empty dict."""
    payload = getattr(request, 'json', None)
    if payload:
        return payload
    return request.form or {}
def make_response(body, mimetype='text/html'):
"""Build a framework specific HTPP response, containing ``body`` and
marked as the type ``mimetype``.
"""
from werkzeug.wrappers import Response
if isinstance(body, Response):
body.mimetype = mimetype
return body
return Response(body, mimetype=mimetype)
|
jpscaletti/authcode
|
authcode/wsgi/werkzeug.py
|
get_from_params
|
python
|
def get_from_params(request, key):
    """Look up ``key`` in the request parameters and return it as a
    native string. A truthy JSON body takes precedence over the
    combined form/query values.
    """
    source = getattr(request, 'json', None) or request.values
    return to_native(source.get(key))
|
Try to read a value named ``key`` from the GET parameters.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/werkzeug.py#L55-L60
|
[
"def to_native(x, charset='utf8', errors='ignore'):\n bb = to_bytes(x, charset=charset, errors=errors)\n if not bb:\n return bb\n return bb.decode('utf8')\n"
] |
# coding=utf-8
from __future__ import absolute_import
from .._compat import to_native
HTTP_SEE_OTHER = 303
def get_full_path(request):
"""Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
"""
path = request.path
if request.query_string:
path += '?' + to_native(request.query_string)
return path
def make_full_url(request, url):
"""Get a relative URL and returns the absolute version.
Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open”
"""
return request.url_root + url.lstrip('/')
def is_post(request):
"""Return ``True`` if the method of the request is ``POST``.
"""
return request.method.upper() == 'POST'
def is_idempotent(request):
"""Return ``True`` if the method of the request is ``GET`` or ``HEAD``.
"""
return request.method.upper() in ('GET', 'HEAD')
def redirect(url):
"""Return an HTTP 303 See Other response for this url, in the
idiom of the framework.
"""
from werkzeug.utils import redirect
return redirect(url, code=HTTP_SEE_OTHER)
def raise_forbidden(msg='You are not allowed to access this.'):
"""Return an HTTP 403 Forbidden response (with the passed message), in the
idiom of the framework.
"""
from werkzeug.exceptions import Forbidden
raise Forbidden(msg)
def get_from_headers(request, key):
"""Try to read a value named ``key`` from the headers.
"""
value = request.headers.get(key)
return to_native(value)
def get_post_data(request):
"""Return all the POST data from the request.
"""
return getattr(request, 'json', None) or request.form or {}
def make_response(body, mimetype='text/html'):
"""Build a framework specific HTPP response, containing ``body`` and
marked as the type ``mimetype``.
"""
from werkzeug.wrappers import Response
if isinstance(body, Response):
body.mimetype = mimetype
return body
return Response(body, mimetype=mimetype)
|
jpscaletti/authcode
|
authcode/wsgi/werkzeug.py
|
get_from_headers
|
python
|
def get_from_headers(request, key):
    """Return the ``key`` header of *request* as a native string
    (``None`` stays ``None``)."""
    raw = request.headers.get(key)
    return to_native(raw)
|
Try to read a value named ``key`` from the headers.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/werkzeug.py#L63-L67
|
[
"def to_native(x, charset='utf8', errors='ignore'):\n bb = to_bytes(x, charset=charset, errors=errors)\n if not bb:\n return bb\n return bb.decode('utf8')\n"
] |
# coding=utf-8
from __future__ import absolute_import
from .._compat import to_native
HTTP_SEE_OTHER = 303
def get_full_path(request):
"""Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
"""
path = request.path
if request.query_string:
path += '?' + to_native(request.query_string)
return path
def make_full_url(request, url):
"""Get a relative URL and returns the absolute version.
Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open”
"""
return request.url_root + url.lstrip('/')
def is_post(request):
"""Return ``True`` if the method of the request is ``POST``.
"""
return request.method.upper() == 'POST'
def is_idempotent(request):
"""Return ``True`` if the method of the request is ``GET`` or ``HEAD``.
"""
return request.method.upper() in ('GET', 'HEAD')
def redirect(url):
"""Return an HTTP 303 See Other response for this url, in the
idiom of the framework.
"""
from werkzeug.utils import redirect
return redirect(url, code=HTTP_SEE_OTHER)
def raise_forbidden(msg='You are not allowed to access this.'):
"""Return an HTTP 403 Forbidden response (with the passed message), in the
idiom of the framework.
"""
from werkzeug.exceptions import Forbidden
raise Forbidden(msg)
def get_from_params(request, key):
"""Try to read a value named ``key`` from the GET parameters.
"""
data = getattr(request, 'json', None) or request.values
value = data.get(key)
return to_native(value)
def get_post_data(request):
"""Return all the POST data from the request.
"""
return getattr(request, 'json', None) or request.form or {}
def make_response(body, mimetype='text/html'):
"""Build a framework specific HTPP response, containing ``body`` and
marked as the type ``mimetype``.
"""
from werkzeug.wrappers import Response
if isinstance(body, Response):
body.mimetype = mimetype
return body
return Response(body, mimetype=mimetype)
|
jpscaletti/authcode
|
authcode/wsgi/werkzeug.py
|
make_response
|
python
|
def make_response(body, mimetype='text/html'):
    """Build a werkzeug ``Response`` containing *body*, tagged with
    *mimetype*. An existing ``Response`` instance is reused in place:
    only its mimetype is updated.
    """
    from werkzeug.wrappers import Response
    if not isinstance(body, Response):
        return Response(body, mimetype=mimetype)
    body.mimetype = mimetype
    return body
|
Build a framework-specific HTTP response, containing ``body`` and
marked as the type ``mimetype``.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/wsgi/werkzeug.py#L76-L84
| null |
# coding=utf-8
from __future__ import absolute_import
from .._compat import to_native
HTTP_SEE_OTHER = 303
def get_full_path(request):
"""Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
"""
path = request.path
if request.query_string:
path += '?' + to_native(request.query_string)
return path
def make_full_url(request, url):
"""Get a relative URL and returns the absolute version.
Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open”
"""
return request.url_root + url.lstrip('/')
def is_post(request):
"""Return ``True`` if the method of the request is ``POST``.
"""
return request.method.upper() == 'POST'
def is_idempotent(request):
"""Return ``True`` if the method of the request is ``GET`` or ``HEAD``.
"""
return request.method.upper() in ('GET', 'HEAD')
def redirect(url):
"""Return an HTTP 303 See Other response for this url, in the
idiom of the framework.
"""
from werkzeug.utils import redirect
return redirect(url, code=HTTP_SEE_OTHER)
def raise_forbidden(msg='You are not allowed to access this.'):
"""Return an HTTP 403 Forbidden response (with the passed message), in the
idiom of the framework.
"""
from werkzeug.exceptions import Forbidden
raise Forbidden(msg)
def get_from_params(request, key):
"""Try to read a value named ``key`` from the GET parameters.
"""
data = getattr(request, 'json', None) or request.values
value = data.get(key)
return to_native(value)
def get_from_headers(request, key):
"""Try to read a value named ``key`` from the headers.
"""
value = request.headers.get(key)
return to_native(value)
def get_post_data(request):
"""Return all the POST data from the request.
"""
return getattr(request, 'json', None) or request.form or {}
|
jpscaletti/authcode
|
authcode/utils.py
|
get_uhmac
|
python
|
def get_uhmac(user, secret):
    """Make a unique identifier for *user*, suitable for storing in the
    session to keep them logged in between requests.

    The HMAC key mixes the application *secret*, the user id and a
    snippet of the current password hash salt, so changing (or
    re-saving) the password invalidates every existing session for
    that user.
    """
    secret = to_bytes(secret)
    key = '|'.join([
        hashlib.sha1(secret).hexdigest(),
        str(user.id),
        get_hash_extract(user.password),
    ])
    key = key.encode('utf8', 'ignore')
    mac = hmac.new(key, msg=None, digestmod=hashlib.sha512)
    # Truncated hex digest keeps the stored session value short.
    mac = mac.hexdigest()[:50]
    uhmac = '{0}${1}'.format(user.id, mac)
    return uhmac
|
Make an unique identifier for the user (stored in the session),
so it can stay logged between requests.
By hashing a snippet of the current password hash salt, it makes possible
to automatically logout from all other devices just by changing
(or refreshing) the password.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/utils.py#L42-L60
|
[
"def to_bytes(x, charset='utf8', errors='ignore'):\n if x is None:\n return None\n if isinstance(x, (bytes, bytearray, memoryview)):\n return bytes(x)\n if isinstance(x, str):\n return x.encode(charset, errors)\n raise TypeError('Expected bytes')\n",
"def get_hash_extract(hash):\n if not hash:\n return u''\n half = hash.rsplit('$', 1)[0]\n return half[-10:]\n"
] |
# coding=utf-8
import hashlib
import hmac
from time import time
from ._compat import to_bytes, to_unicode
def eval_url(url):
if callable(url):
url = url()
return url
def test_hasher(hasher):
hasher.encrypt('test', rounds=hasher.min_rounds)
def to36(number):
    """Encode a non-negative integer in base 36 (digits then uppercase
    letters); returns '0' for zero."""
    assert int(number) >= 0, 'Must be a positive integer'
    digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    encoded = ''
    n = number
    while n:
        n, rem = divmod(n, 36)
        encoded = digits[rem] + encoded
    return encoded or digits[0]
def from36(snumber):
    """Decode a base-36 string (case-insensitive) back to an integer."""
    return int(snumber.upper(), 36)
def get_hash_extract(hash):
    """Return a short fingerprint of a password hash: the last 10 chars
    of everything before its final '$'-separated field. A falsy hash
    yields the empty string.
    """
    if not hash:
        return u''
    head = hash.rsplit('$', 1)[0]
    return head[-10:]
def get_token(user, secret, timestamp=None):
"""Make a timestamped one-time-use token that can be used to
identifying the user.
By hashing the `last_sign_in` attribute and a snippet of the current
password hash salt, it produces a token that will be invalidated as soon
as the user log in again or the is changed.
A hash of the user ID is used, so the HMAC part of the token is always
unique for each user.
It also hash a secret key, so without access to the source code,
fake tokens cannot be generated even if the database is compromised.
"""
timestamp = int(timestamp or time())
secret = to_bytes(secret)
key = '|'.join([
hashlib.sha1(secret).hexdigest(),
str(user.id),
get_hash_extract(user.password),
str(getattr(user, 'last_sign_in', 0)),
str(timestamp),
])
key = key.encode('utf8', 'ignore')
mac = hmac.new(key, msg=None, digestmod=hashlib.sha512)
mac = mac.hexdigest()[:50]
token = '{0}${1}${2}'.format(user.id, to36(timestamp), mac)
return token
def split_uhmac(uhmac):
uid, mac = uhmac.split('$', 1)
return uid
def split_token(token):
uid, t36, mac = token.split('$', 2)
return from36(t36), uid
class LazyUser(object):
"""Acts as a proxy for the current user. Forwards all operations to
the proxied user. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
"""
__slots__ = ('__auth', '__storage', '__dict__')
def __init__(self, auth, storage, user_name='user'):
object.__setattr__(self, '_LazyUser__auth', auth)
object.__setattr__(self, '_LazyUser__storage', storage)
object.__setattr__(self, '_LazyUser__user_name', user_name)
setattr(storage, user_name, self)
def __get_user(self):
"""Return the real user object.
"""
storage = object.__getattribute__(self, '_LazyUser__storage')
user = getattr(self.__auth, 'get_user')()
setattr(storage, self.__user_name, user)
return user
@property
def __dict__(self):
return self.__get_user().__dict__
@property
def __doc__(self):
return self.__get_user().__doc__
def __repr__(self):
return repr(self.__get_user())
def __bool__(self):
user = self.__get_user()
return user is not None
__nonzero__ = __bool__
def __str__(self):
return str(self.__get_user())
def __unicode__(self):
return to_unicode(self.__get_user())
def __dir__(self):
return dir(self.__get_user())
def __getattr__(self, name):
return getattr(self.__get_user(), name)
def __setattr__(self, name, value):
setattr(self.__get_user(), name, value)
def __delattr__(self, name):
delattr(self.__get_user(), name)
def __hash__(self):
return hash(self.__get_user())
def __call__(self, *args, **kwargs):
return self.__get_user()(*args, **kwargs)
def __eq__(self, other):
return self.__get_user() == other
def __ne__(self, other):
return self.__get_user() != other
def __setitem__(self, key, value):
self.__get_user()[key] = value
def __delitem__(self, key):
del self.__get_user()[key]
def __lt__(self, other):
return self.__get_user() < other
def __le__(self, other):
return self.__get_user() <= other
def __gt__(self, other):
return self.__get_user() > other
def __ge__(self, other):
return self.__get_user() >= other
def __getitem__(self, i):
return self.__get_user()[i]
def __iter__(self):
return iter(self.__get_user())
def __contains__(self, i):
return i in self.__get_user()
def __add__(self, other):
return self.__get_user() + other
def __sub__(self, other):
return self.__get_user() - other
def __mul__(self, other):
return self.__get_user() * other
def __floordiv__(self, other):
return self.__get_user() // other
def __mod__(self, other):
return self.__get_user() % other
def __divmod__(self, other):
return self.__get_user().__divmod__(other)
def __pow__(self, other):
return self.__get_user() ** other
def __lshift__(self, other):
return self.__get_user() << other
def __rshift__(self, other):
return self.__get_user() >> other
def __and__(self, other):
return self.__get_user() & other
def __xor__(self, other):
return self.__get_user() ^ other
def __or__(self, other):
return self.__get_user() | other
def __div__(self, other):
return self.__get_user().__div__(other)
def __truediv__(self, other):
return self.__get_user().__truediv__(other)
def __neg__(self):
return -(self.__get_user())
def __pos__(self):
return +(self.__get_user())
def __abs__(self):
return abs(self.__get_user())
def __len__(self):
return len(self.__get_user())
def __invert__(self):
return ~(self.__get_user())
def __complex__(self):
return complex(self.__get_user())
def __int__(self):
return int(self.__get_user())
def __float__(self):
return float(self.__get_user())
def __enter__(self):
return self.__get_user().__enter__()
def __exit__(self, *args, **kwargs):
return self.__get_user().__exit__(*args, **kwargs)
|
jpscaletti/authcode
|
authcode/utils.py
|
get_token
|
python
|
def get_token(user, secret, timestamp=None):
    """Make a timestamped one-time-use token identifying *user*.

    The HMAC key mixes the application *secret*, the user id, a snippet
    of the password hash salt, ``last_sign_in`` and the timestamp, so
    the token is invalidated as soon as the user signs in again or the
    password changes. Defaults to the current time when *timestamp* is
    not given.
    """
    timestamp = int(timestamp or time())
    secret = to_bytes(secret)
    key = '|'.join([
        hashlib.sha1(secret).hexdigest(),
        str(user.id),
        get_hash_extract(user.password),
        str(getattr(user, 'last_sign_in', 0)),
        str(timestamp),
    ])
    key = key.encode('utf8', 'ignore')
    mac = hmac.new(key, msg=None, digestmod=hashlib.sha512)
    mac = mac.hexdigest()[:50]
    # Token format: "<uid>$<base36 timestamp>$<mac>".
    token = '{0}${1}${2}'.format(user.id, to36(timestamp), mac)
    return token
|
Make a timestamped one-time-use token that can be used to
identify the user.
By hashing the `last_sign_in` attribute and a snippet of the current
password hash salt, it produces a token that will be invalidated as soon
as the user logs in again or the password is changed.
A hash of the user ID is used, so the HMAC part of the token is always
unique for each user.
It also hash a secret key, so without access to the source code,
fake tokens cannot be generated even if the database is compromised.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/utils.py#L63-L90
|
[
"def to_bytes(x, charset='utf8', errors='ignore'):\n if x is None:\n return None\n if isinstance(x, (bytes, bytearray, memoryview)):\n return bytes(x)\n if isinstance(x, str):\n return x.encode(charset, errors)\n raise TypeError('Expected bytes')\n",
"def get_hash_extract(hash):\n if not hash:\n return u''\n half = hash.rsplit('$', 1)[0]\n return half[-10:]\n",
"def to36(number):\n assert int(number) >= 0, 'Must be a positive integer'\n alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n base36 = ''\n while number:\n number, i = divmod(number, 36)\n base36 = alphabet[i] + base36\n\n return base36 or alphabet[0]\n"
] |
# coding=utf-8
import hashlib
import hmac
from time import time
from ._compat import to_bytes, to_unicode
def eval_url(url):
if callable(url):
url = url()
return url
def test_hasher(hasher):
hasher.encrypt('test', rounds=hasher.min_rounds)
def to36(number):
assert int(number) >= 0, 'Must be a positive integer'
alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
base36 = ''
while number:
number, i = divmod(number, 36)
base36 = alphabet[i] + base36
return base36 or alphabet[0]
def from36(snumber):
snumber = snumber.upper()
return int(snumber, 36)
def get_hash_extract(hash):
if not hash:
return u''
half = hash.rsplit('$', 1)[0]
return half[-10:]
def get_uhmac(user, secret):
"""Make an unique identifier for the user (stored in the session),
so it can stay logged between requests.
By hashing a snippet of the current password hash salt, it makes possible
to automatically logout from all other devices just by changing
(or refreshing) the password.
"""
secret = to_bytes(secret)
key = '|'.join([
hashlib.sha1(secret).hexdigest(),
str(user.id),
get_hash_extract(user.password),
])
key = key.encode('utf8', 'ignore')
mac = hmac.new(key, msg=None, digestmod=hashlib.sha512)
mac = mac.hexdigest()[:50]
uhmac = '{0}${1}'.format(user.id, mac)
return uhmac
def split_uhmac(uhmac):
uid, mac = uhmac.split('$', 1)
return uid
def split_token(token):
uid, t36, mac = token.split('$', 2)
return from36(t36), uid
class LazyUser(object):
"""Acts as a proxy for the current user. Forwards all operations to
the proxied user. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
"""
__slots__ = ('__auth', '__storage', '__dict__')
def __init__(self, auth, storage, user_name='user'):
object.__setattr__(self, '_LazyUser__auth', auth)
object.__setattr__(self, '_LazyUser__storage', storage)
object.__setattr__(self, '_LazyUser__user_name', user_name)
setattr(storage, user_name, self)
def __get_user(self):
"""Return the real user object.
"""
storage = object.__getattribute__(self, '_LazyUser__storage')
user = getattr(self.__auth, 'get_user')()
setattr(storage, self.__user_name, user)
return user
@property
def __dict__(self):
return self.__get_user().__dict__
@property
def __doc__(self):
return self.__get_user().__doc__
def __repr__(self):
return repr(self.__get_user())
def __bool__(self):
user = self.__get_user()
return user is not None
__nonzero__ = __bool__
def __str__(self):
return str(self.__get_user())
def __unicode__(self):
return to_unicode(self.__get_user())
def __dir__(self):
return dir(self.__get_user())
def __getattr__(self, name):
return getattr(self.__get_user(), name)
def __setattr__(self, name, value):
setattr(self.__get_user(), name, value)
def __delattr__(self, name):
delattr(self.__get_user(), name)
def __hash__(self):
return hash(self.__get_user())
def __call__(self, *args, **kwargs):
return self.__get_user()(*args, **kwargs)
def __eq__(self, other):
return self.__get_user() == other
def __ne__(self, other):
return self.__get_user() != other
def __setitem__(self, key, value):
self.__get_user()[key] = value
def __delitem__(self, key):
del self.__get_user()[key]
def __lt__(self, other):
return self.__get_user() < other
def __le__(self, other):
return self.__get_user() <= other
def __gt__(self, other):
return self.__get_user() > other
def __ge__(self, other):
return self.__get_user() >= other
def __getitem__(self, i):
return self.__get_user()[i]
def __iter__(self):
return iter(self.__get_user())
def __contains__(self, i):
return i in self.__get_user()
def __add__(self, other):
return self.__get_user() + other
def __sub__(self, other):
return self.__get_user() - other
def __mul__(self, other):
return self.__get_user() * other
def __floordiv__(self, other):
return self.__get_user() // other
def __mod__(self, other):
return self.__get_user() % other
def __divmod__(self, other):
return self.__get_user().__divmod__(other)
def __pow__(self, other):
return self.__get_user() ** other
def __lshift__(self, other):
return self.__get_user() << other
def __rshift__(self, other):
return self.__get_user() >> other
def __and__(self, other):
return self.__get_user() & other
def __xor__(self, other):
return self.__get_user() ^ other
def __or__(self, other):
return self.__get_user() | other
def __div__(self, other):
return self.__get_user().__div__(other)
def __truediv__(self, other):
return self.__get_user().__truediv__(other)
def __neg__(self):
return -(self.__get_user())
def __pos__(self):
return +(self.__get_user())
def __abs__(self):
return abs(self.__get_user())
def __len__(self):
return len(self.__get_user())
def __invert__(self):
return ~(self.__get_user())
def __complex__(self):
return complex(self.__get_user())
def __int__(self):
return int(self.__get_user())
def __float__(self):
return float(self.__get_user())
def __enter__(self):
return self.__get_user().__enter__()
def __exit__(self, *args, **kwargs):
return self.__get_user().__exit__(*args, **kwargs)
|
jpscaletti/authcode
|
authcode/utils.py
|
LazyUser.__get_user
|
python
|
def __get_user(self):
storage = object.__getattribute__(self, '_LazyUser__storage')
user = getattr(self.__auth, 'get_user')()
setattr(storage, self.__user_name, user)
return user
|
Return the real user object.
|
train
|
https://github.com/jpscaletti/authcode/blob/91529b6d0caec07d1452758d937e1e0745826139/authcode/utils.py#L116-L122
| null |
class LazyUser(object):
"""Acts as a proxy for the current user. Forwards all operations to
the proxied user. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
"""
__slots__ = ('__auth', '__storage', '__dict__')
def __init__(self, auth, storage, user_name='user'):
object.__setattr__(self, '_LazyUser__auth', auth)
object.__setattr__(self, '_LazyUser__storage', storage)
object.__setattr__(self, '_LazyUser__user_name', user_name)
setattr(storage, user_name, self)
@property
def __dict__(self):
return self.__get_user().__dict__
@property
def __doc__(self):
return self.__get_user().__doc__
def __repr__(self):
return repr(self.__get_user())
def __bool__(self):
user = self.__get_user()
return user is not None
__nonzero__ = __bool__
def __str__(self):
return str(self.__get_user())
def __unicode__(self):
return to_unicode(self.__get_user())
def __dir__(self):
return dir(self.__get_user())
def __getattr__(self, name):
return getattr(self.__get_user(), name)
def __setattr__(self, name, value):
setattr(self.__get_user(), name, value)
def __delattr__(self, name):
delattr(self.__get_user(), name)
def __hash__(self):
return hash(self.__get_user())
def __call__(self, *args, **kwargs):
return self.__get_user()(*args, **kwargs)
def __eq__(self, other):
return self.__get_user() == other
def __ne__(self, other):
return self.__get_user() != other
def __setitem__(self, key, value):
self.__get_user()[key] = value
def __delitem__(self, key):
del self.__get_user()[key]
def __lt__(self, other):
return self.__get_user() < other
def __le__(self, other):
return self.__get_user() <= other
def __gt__(self, other):
return self.__get_user() > other
def __ge__(self, other):
return self.__get_user() >= other
def __getitem__(self, i):
return self.__get_user()[i]
def __iter__(self):
return iter(self.__get_user())
def __contains__(self, i):
return i in self.__get_user()
def __add__(self, other):
return self.__get_user() + other
def __sub__(self, other):
return self.__get_user() - other
def __mul__(self, other):
return self.__get_user() * other
def __floordiv__(self, other):
return self.__get_user() // other
def __mod__(self, other):
return self.__get_user() % other
def __divmod__(self, other):
return self.__get_user().__divmod__(other)
def __pow__(self, other):
return self.__get_user() ** other
def __lshift__(self, other):
return self.__get_user() << other
def __rshift__(self, other):
return self.__get_user() >> other
def __and__(self, other):
return self.__get_user() & other
def __xor__(self, other):
return self.__get_user() ^ other
def __or__(self, other):
return self.__get_user() | other
def __div__(self, other):
return self.__get_user().__div__(other)
def __truediv__(self, other):
return self.__get_user().__truediv__(other)
def __neg__(self):
return -(self.__get_user())
def __pos__(self):
return +(self.__get_user())
def __abs__(self):
return abs(self.__get_user())
def __len__(self):
return len(self.__get_user())
def __invert__(self):
return ~(self.__get_user())
def __complex__(self):
return complex(self.__get_user())
def __int__(self):
return int(self.__get_user())
def __float__(self):
return float(self.__get_user())
def __enter__(self):
return self.__get_user().__enter__()
def __exit__(self, *args, **kwargs):
return self.__get_user().__exit__(*args, **kwargs)
|
bearyinnovative/bearychat.py
|
bearychat/openapi/client.py
|
Requester.request
|
python
|
def request(self, request_method, api_method, *args, **kwargs):
url = self._build_url(api_method)
resp = requests.request(request_method, url, *args, **kwargs)
try:
rv = resp.json()
except ValueError:
raise RequestFailedError(resp, 'not a json body')
if not resp.ok:
raise RequestFailedError(resp, rv.get('error'))
return rv
|
Perform a request.
Args:
request_method: HTTP method for this request.
api_method: API method name for this request.
*args: Extra arguments to pass to the request.
**kwargs: Extra keyword arguments to pass to the request.
Returns:
A dict contains the request response data.
Raises:
RequestFailedError: Raises when BearyChat's OpenAPI responses
with status code != 2xx
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/openapi/client.py#L82-L109
|
[
"def _build_url(self, api_method):\n path = '{}/{}'.format(self.base_url.path, api_method.lstrip('/'))\n url = self.base_url._replace(path=path)\n return urlparse.urlunparse(url).rstrip('/')\n"
] |
class Requester(object):
def __init__(self, base_url):
self.base_url = urlparse.urlparse(base_url.rstrip('/'))
def _build_url(self, api_method):
path = '{}/{}'.format(self.base_url.path, api_method.lstrip('/'))
url = self.base_url._replace(path=path)
return urlparse.urlunparse(url).rstrip('/')
|
bearyinnovative/bearychat.py
|
bearychat/incoming.py
|
validate
|
python
|
def validate(data):
text = data.get('text')
if not isinstance(text, _string_types) or len(text) == 0:
raise ValueError('text field is required and should not be empty')
if 'markdown' in data and not type(data['markdown']) is bool:
raise ValueError('markdown field should be bool')
if 'attachments' in data:
if not isinstance(data['attachments'], (list, tuple)):
raise ValueError('attachments field should be list or tuple')
for attachment in data['attachments']:
if 'text' not in attachment and 'title' not in attachment:
raise ValueError('text or title is required in attachment')
return True
|
Validates incoming data
Args:
data(dict): the incoming data
Returns:
True if the data is valid
Raises:
ValueError: the data is not valid
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/incoming.py#L13-L40
| null |
import sys
PY3 = sys.version_info[0] == 3
import requests
if PY3:
_string_types = str,
else:
_string_types = basestring, # noqa
def send(url, data):
"""Sends an incoming message
Args:
url(str): the incoming hook url
data(dict): the sending data
Returns:
requests.Response
"""
validate(data)
return requests.post(url, json=data)
|
bearyinnovative/bearychat.py
|
bearychat/incoming.py
|
send
|
python
|
def send(url, data):
validate(data)
return requests.post(url, json=data)
|
Sends an incoming message
Args:
url(str): the incoming hook url
data(dict): the sending data
Returns:
requests.Response
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/incoming.py#L43-L55
|
[
"def validate(data):\n \"\"\"Validates incoming data\n\n Args:\n data(dict): the incoming data\n\n Returns:\n True if the data is valid\n\n Raises:\n ValueError: the data is not valid\n \"\"\"\n text = data.get('text')\n if not isinstance(text, _string_types) or len(text) == 0:\n raise ValueError('text field is required and should not be empty')\n\n if 'markdown' in data and not type(data['markdown']) is bool:\n raise ValueError('markdown field should be bool')\n\n if 'attachments' in data:\n if not isinstance(data['attachments'], (list, tuple)):\n raise ValueError('attachments field should be list or tuple')\n\n for attachment in data['attachments']:\n if 'text' not in attachment and 'title' not in attachment:\n raise ValueError('text or title is required in attachment')\n\n return True\n"
] |
import sys
PY3 = sys.version_info[0] == 3
import requests
if PY3:
_string_types = str,
else:
_string_types = basestring, # noqa
def validate(data):
"""Validates incoming data
Args:
data(dict): the incoming data
Returns:
True if the data is valid
Raises:
ValueError: the data is not valid
"""
text = data.get('text')
if not isinstance(text, _string_types) or len(text) == 0:
raise ValueError('text field is required and should not be empty')
if 'markdown' in data and not type(data['markdown']) is bool:
raise ValueError('markdown field should be bool')
if 'attachments' in data:
if not isinstance(data['attachments'], (list, tuple)):
raise ValueError('attachments field should be list or tuple')
for attachment in data['attachments']:
if 'text' not in attachment and 'title' not in attachment:
raise ValueError('text or title is required in attachment')
return True
|
bearyinnovative/bearychat.py
|
examples/rtm_loop.py
|
RTMLoop.on_open
|
python
|
def on_open(self, ws):
def keep_alive(interval):
while True:
time.sleep(interval)
self.ping()
start_new_thread(keep_alive, (self.keep_alive_interval, ))
|
Websocket on_open event handler
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/examples/rtm_loop.py#L50-L57
| null |
class RTMLoop(object):
"""Real Time Message loop
_errors(Queue): contains error message(dict("result", "msg")),
looks self._set_error()
_inbox(Queue): contains RTMMessage
_worker(threading.Thread): a thread for running the loop
Args:
ws_host(str): websocket host
"""
def __init__(self, ws_host):
self._call_id = 0
self._inbox = Queue()
self._errors = Queue()
self._ws = websocket.WebSocketApp(
ws_host,
on_open=self.on_open,
on_message=self.on_message,
on_close=self.on_close,
on_error=self.on_error)
self._worker = threading.Thread(
target=self._ws.run_forever,
kwargs={'sslopt': sslopt_with_ca_certs})
def on_message(self, ws, message):
"""Websocket on_message event handler
Saves message as RTMMessage in self._inbox
"""
try:
data = json.loads(message)
except Exception:
self._set_error(message, "decode message failed")
else:
self._inbox.put(RTMMessage(data))
def on_error(self, ws, error):
"""Websocket on_error event handler
Saves error message in self._errors
"""
self._set_error(error, "read socket failed")
def on_close(self, ws):
"""Websocket on_close event handler"""
self._set_error("closed", "websocket closed")
def _set_error(self, result, msg):
"""Puts a error to self._errors
Args:
result(mix): received data
msg(str): message
"""
self._errors.put({"result": result, "msg": msg})
def start(self, keep_alive_interval=2):
"""Starts the main loop
Args:
keep_alive_interval(int): the interval(second) of sending keep
alive message
"""
self.keep_alive_interval = keep_alive_interval
self._worker.start()
def stop(self):
"""Stops the main loop
"""
self._ws.close()
def ping(self):
"""Sends ping message
"""
self.send(RTMMessage({"type": RTMMessageType.Ping}))
def gen_call_id(self):
"""Generates a call_id
Returns:
int: the call_id
"""
self._call_id += 1
return self._call_id
def send(self, message):
"""Sends a RTMMessage
Should be called after starting the loop
Args:
message(RTMMessage): the sending message
Raises:
WebSocketConnectionClosedException: if the loop is closed
"""
if "call_id" not in message:
message["call_id"] = self.gen_call_id()
self._ws.send(message.to_json())
def get_message(self, block=False, timeout=None):
"""Removes and returns a RTMMessage from self._inbox
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
RTMMessage if self._inbox is not empty, else None
"""
try:
message = self._inbox.get(block=block, timeout=timeout)
return message
except Exception:
return None
def get_error(self, block=False, timeout=None):
"""Removes and returns an error from self._errors
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
error if inbox is not empty, else None
"""
try:
error = self._errors.get(block=block, timeout=timeout)
return error
except Exception:
return None
|
bearyinnovative/bearychat.py
|
examples/rtm_loop.py
|
RTMLoop.on_message
|
python
|
def on_message(self, ws, message):
try:
data = json.loads(message)
except Exception:
self._set_error(message, "decode message failed")
else:
self._inbox.put(RTMMessage(data))
|
Websocket on_message event handler
Saves message as RTMMessage in self._inbox
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/examples/rtm_loop.py#L59-L69
|
[
"def _set_error(self, result, msg):\n \"\"\"Puts a error to self._errors\n\n Args:\n result(mix): received data\n msg(str): message\n \"\"\"\n self._errors.put({\"result\": result, \"msg\": msg})\n"
] |
class RTMLoop(object):
"""Real Time Message loop
_errors(Queue): contains error message(dict("result", "msg")),
looks self._set_error()
_inbox(Queue): contains RTMMessage
_worker(threading.Thread): a thread for running the loop
Args:
ws_host(str): websocket host
"""
def __init__(self, ws_host):
self._call_id = 0
self._inbox = Queue()
self._errors = Queue()
self._ws = websocket.WebSocketApp(
ws_host,
on_open=self.on_open,
on_message=self.on_message,
on_close=self.on_close,
on_error=self.on_error)
self._worker = threading.Thread(
target=self._ws.run_forever,
kwargs={'sslopt': sslopt_with_ca_certs})
def on_open(self, ws):
"""Websocket on_open event handler"""
def keep_alive(interval):
while True:
time.sleep(interval)
self.ping()
start_new_thread(keep_alive, (self.keep_alive_interval, ))
def on_error(self, ws, error):
"""Websocket on_error event handler
Saves error message in self._errors
"""
self._set_error(error, "read socket failed")
def on_close(self, ws):
"""Websocket on_close event handler"""
self._set_error("closed", "websocket closed")
def _set_error(self, result, msg):
"""Puts a error to self._errors
Args:
result(mix): received data
msg(str): message
"""
self._errors.put({"result": result, "msg": msg})
def start(self, keep_alive_interval=2):
"""Starts the main loop
Args:
keep_alive_interval(int): the interval(second) of sending keep
alive message
"""
self.keep_alive_interval = keep_alive_interval
self._worker.start()
def stop(self):
"""Stops the main loop
"""
self._ws.close()
def ping(self):
"""Sends ping message
"""
self.send(RTMMessage({"type": RTMMessageType.Ping}))
def gen_call_id(self):
"""Generates a call_id
Returns:
int: the call_id
"""
self._call_id += 1
return self._call_id
def send(self, message):
"""Sends a RTMMessage
Should be called after starting the loop
Args:
message(RTMMessage): the sending message
Raises:
WebSocketConnectionClosedException: if the loop is closed
"""
if "call_id" not in message:
message["call_id"] = self.gen_call_id()
self._ws.send(message.to_json())
def get_message(self, block=False, timeout=None):
"""Removes and returns a RTMMessage from self._inbox
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
RTMMessage if self._inbox is not empty, else None
"""
try:
message = self._inbox.get(block=block, timeout=timeout)
return message
except Exception:
return None
def get_error(self, block=False, timeout=None):
"""Removes and returns an error from self._errors
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
error if inbox is not empty, else None
"""
try:
error = self._errors.get(block=block, timeout=timeout)
return error
except Exception:
return None
|
bearyinnovative/bearychat.py
|
examples/rtm_loop.py
|
RTMLoop.send
|
python
|
def send(self, message):
if "call_id" not in message:
message["call_id"] = self.gen_call_id()
self._ws.send(message.to_json())
|
Sends a RTMMessage
Should be called after starting the loop
Args:
message(RTMMessage): the sending message
Raises:
WebSocketConnectionClosedException: if the loop is closed
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/examples/rtm_loop.py#L120-L133
|
[
"def gen_call_id(self):\n \"\"\"Generates a call_id\n\n Returns:\n int: the call_id\n \"\"\"\n self._call_id += 1\n return self._call_id\n",
"def to_json(self):\n \"\"\"Transfers current message to json\n\n Returns:\n json\n \"\"\"\n return json.dumps(self._data)\n"
] |
class RTMLoop(object):
"""Real Time Message loop
_errors(Queue): contains error message(dict("result", "msg")),
looks self._set_error()
_inbox(Queue): contains RTMMessage
_worker(threading.Thread): a thread for running the loop
Args:
ws_host(str): websocket host
"""
def __init__(self, ws_host):
self._call_id = 0
self._inbox = Queue()
self._errors = Queue()
self._ws = websocket.WebSocketApp(
ws_host,
on_open=self.on_open,
on_message=self.on_message,
on_close=self.on_close,
on_error=self.on_error)
self._worker = threading.Thread(
target=self._ws.run_forever,
kwargs={'sslopt': sslopt_with_ca_certs})
def on_open(self, ws):
"""Websocket on_open event handler"""
def keep_alive(interval):
while True:
time.sleep(interval)
self.ping()
start_new_thread(keep_alive, (self.keep_alive_interval, ))
def on_message(self, ws, message):
"""Websocket on_message event handler
Saves message as RTMMessage in self._inbox
"""
try:
data = json.loads(message)
except Exception:
self._set_error(message, "decode message failed")
else:
self._inbox.put(RTMMessage(data))
def on_error(self, ws, error):
"""Websocket on_error event handler
Saves error message in self._errors
"""
self._set_error(error, "read socket failed")
def on_close(self, ws):
"""Websocket on_close event handler"""
self._set_error("closed", "websocket closed")
def _set_error(self, result, msg):
"""Puts a error to self._errors
Args:
result(mix): received data
msg(str): message
"""
self._errors.put({"result": result, "msg": msg})
def start(self, keep_alive_interval=2):
"""Starts the main loop
Args:
keep_alive_interval(int): the interval(second) of sending keep
alive message
"""
self.keep_alive_interval = keep_alive_interval
self._worker.start()
def stop(self):
"""Stops the main loop
"""
self._ws.close()
def ping(self):
"""Sends ping message
"""
self.send(RTMMessage({"type": RTMMessageType.Ping}))
def gen_call_id(self):
"""Generates a call_id
Returns:
int: the call_id
"""
self._call_id += 1
return self._call_id
def get_message(self, block=False, timeout=None):
"""Removes and returns a RTMMessage from self._inbox
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
RTMMessage if self._inbox is not empty, else None
"""
try:
message = self._inbox.get(block=block, timeout=timeout)
return message
except Exception:
return None
def get_error(self, block=False, timeout=None):
"""Removes and returns an error from self._errors
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
error if inbox is not empty, else None
"""
try:
error = self._errors.get(block=block, timeout=timeout)
return error
except Exception:
return None
|
bearyinnovative/bearychat.py
|
examples/rtm_loop.py
|
RTMLoop.get_message
|
python
|
def get_message(self, block=False, timeout=None):
try:
message = self._inbox.get(block=block, timeout=timeout)
return message
except Exception:
return None
|
Removes and returns a RTMMessage from self._inbox
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
RTMMessage if self._inbox is not empty, else None
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/examples/rtm_loop.py#L135-L150
| null |
class RTMLoop(object):
"""Real Time Message loop
_errors(Queue): contains error message(dict("result", "msg")),
looks self._set_error()
_inbox(Queue): contains RTMMessage
_worker(threading.Thread): a thread for running the loop
Args:
ws_host(str): websocket host
"""
def __init__(self, ws_host):
self._call_id = 0
self._inbox = Queue()
self._errors = Queue()
self._ws = websocket.WebSocketApp(
ws_host,
on_open=self.on_open,
on_message=self.on_message,
on_close=self.on_close,
on_error=self.on_error)
self._worker = threading.Thread(
target=self._ws.run_forever,
kwargs={'sslopt': sslopt_with_ca_certs})
def on_open(self, ws):
"""Websocket on_open event handler"""
def keep_alive(interval):
while True:
time.sleep(interval)
self.ping()
start_new_thread(keep_alive, (self.keep_alive_interval, ))
def on_message(self, ws, message):
"""Websocket on_message event handler
Saves message as RTMMessage in self._inbox
"""
try:
data = json.loads(message)
except Exception:
self._set_error(message, "decode message failed")
else:
self._inbox.put(RTMMessage(data))
def on_error(self, ws, error):
"""Websocket on_error event handler
Saves error message in self._errors
"""
self._set_error(error, "read socket failed")
def on_close(self, ws):
"""Websocket on_close event handler"""
self._set_error("closed", "websocket closed")
def _set_error(self, result, msg):
"""Puts a error to self._errors
Args:
result(mix): received data
msg(str): message
"""
self._errors.put({"result": result, "msg": msg})
def start(self, keep_alive_interval=2):
"""Starts the main loop
Args:
keep_alive_interval(int): the interval(second) of sending keep
alive message
"""
self.keep_alive_interval = keep_alive_interval
self._worker.start()
def stop(self):
"""Stops the main loop
"""
self._ws.close()
def ping(self):
"""Sends ping message
"""
self.send(RTMMessage({"type": RTMMessageType.Ping}))
def gen_call_id(self):
"""Generates a call_id
Returns:
int: the call_id
"""
self._call_id += 1
return self._call_id
def send(self, message):
"""Sends a RTMMessage
Should be called after starting the loop
Args:
message(RTMMessage): the sending message
Raises:
WebSocketConnectionClosedException: if the loop is closed
"""
if "call_id" not in message:
message["call_id"] = self.gen_call_id()
self._ws.send(message.to_json())
def get_error(self, block=False, timeout=None):
"""Removes and returns an error from self._errors
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
error if inbox is not empty, else None
"""
try:
error = self._errors.get(block=block, timeout=timeout)
return error
except Exception:
return None
|
bearyinnovative/bearychat.py
|
examples/rtm_loop.py
|
RTMLoop.get_error
|
python
|
def get_error(self, block=False, timeout=None):
try:
error = self._errors.get(block=block, timeout=timeout)
return error
except Exception:
return None
|
Removes and returns an error from self._errors
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
error if inbox is not empty, else None
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/examples/rtm_loop.py#L152-L167
| null |
class RTMLoop(object):
"""Real Time Message loop
_errors(Queue): contains error message(dict("result", "msg")),
looks self._set_error()
_inbox(Queue): contains RTMMessage
_worker(threading.Thread): a thread for running the loop
Args:
ws_host(str): websocket host
"""
def __init__(self, ws_host):
self._call_id = 0
self._inbox = Queue()
self._errors = Queue()
self._ws = websocket.WebSocketApp(
ws_host,
on_open=self.on_open,
on_message=self.on_message,
on_close=self.on_close,
on_error=self.on_error)
self._worker = threading.Thread(
target=self._ws.run_forever,
kwargs={'sslopt': sslopt_with_ca_certs})
def on_open(self, ws):
"""Websocket on_open event handler"""
def keep_alive(interval):
while True:
time.sleep(interval)
self.ping()
start_new_thread(keep_alive, (self.keep_alive_interval, ))
def on_message(self, ws, message):
"""Websocket on_message event handler
Saves message as RTMMessage in self._inbox
"""
try:
data = json.loads(message)
except Exception:
self._set_error(message, "decode message failed")
else:
self._inbox.put(RTMMessage(data))
def on_error(self, ws, error):
"""Websocket on_error event handler
Saves error message in self._errors
"""
self._set_error(error, "read socket failed")
def on_close(self, ws):
"""Websocket on_close event handler"""
self._set_error("closed", "websocket closed")
def _set_error(self, result, msg):
"""Puts a error to self._errors
Args:
result(mix): received data
msg(str): message
"""
self._errors.put({"result": result, "msg": msg})
def start(self, keep_alive_interval=2):
"""Starts the main loop
Args:
keep_alive_interval(int): the interval(second) of sending keep
alive message
"""
self.keep_alive_interval = keep_alive_interval
self._worker.start()
def stop(self):
"""Stops the main loop
"""
self._ws.close()
def ping(self):
"""Sends ping message
"""
self.send(RTMMessage({"type": RTMMessageType.Ping}))
def gen_call_id(self):
"""Generates a call_id
Returns:
int: the call_id
"""
self._call_id += 1
return self._call_id
def send(self, message):
"""Sends a RTMMessage
Should be called after starting the loop
Args:
message(RTMMessage): the sending message
Raises:
WebSocketConnectionClosedException: if the loop is closed
"""
if "call_id" not in message:
message["call_id"] = self.gen_call_id()
self._ws.send(message.to_json())
def get_message(self, block=False, timeout=None):
"""Removes and returns a RTMMessage from self._inbox
Args:
block(bool): if True block until a RTMMessage is available,
else it will return None when self._inbox is empty
timeout(int): it blocks at most timeout seconds
Returns:
RTMMessage if self._inbox is not empty, else None
"""
try:
message = self._inbox.get(block=block, timeout=timeout)
return message
except Exception:
return None
|
bearyinnovative/bearychat.py
|
bearychat/rtm_client_service.py
|
RTMCurrentTeam.members
|
python
|
def members(self):
resp = self._rtm_client.get('v1/current_team.members?all=true')
if resp.is_fail():
raise RTMServiceError(
'Failed to get members of current team',
resp
)
return resp.data['result']
|
Gets members of current team
Returns:
list of User
Throws:
RTMServiceError when request failed
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_client_service.py#L33-L48
| null |
class RTMCurrentTeam(RTMService):
def info(self):
"""Gets current team infomation
Returns:
Team if success
Throws:
RTMServiceError when request failed
"""
resp = self._rtm_client.get('v1/current_team.info')
if resp.is_fail():
raise RTMServiceError(
'Failed to get current team infomation',
resp
)
return resp.data['result']
def channels(self):
"""Gets channels of current team
Returns:
list of Channel
Throws:
RTMServiceError when request failed
"""
resp = self._rtm_client.get('v1/current_team.channels')
if resp.is_fail():
raise RTMServiceError(
'Failed to get channels of current team',
resp
)
return resp.data['result']
|
bearyinnovative/bearychat.py
|
bearychat/rtm_client_service.py
|
RTMCurrentTeam.channels
|
python
|
def channels(self):
resp = self._rtm_client.get('v1/current_team.channels')
if resp.is_fail():
raise RTMServiceError(
'Failed to get channels of current team',
resp
)
return resp.data['result']
|
Gets channels of current team
Returns:
list of Channel
Throws:
RTMServiceError when request failed
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_client_service.py#L50-L65
| null |
class RTMCurrentTeam(RTMService):
def info(self):
"""Gets current team infomation
Returns:
Team if success
Throws:
RTMServiceError when request failed
"""
resp = self._rtm_client.get('v1/current_team.info')
if resp.is_fail():
raise RTMServiceError(
'Failed to get current team infomation',
resp
)
return resp.data['result']
def members(self):
"""Gets members of current team
Returns:
list of User
Throws:
RTMServiceError when request failed
"""
resp = self._rtm_client.get('v1/current_team.members?all=true')
if resp.is_fail():
raise RTMServiceError(
'Failed to get members of current team',
resp
)
return resp.data['result']
|
bearyinnovative/bearychat.py
|
bearychat/rtm_client_service.py
|
RTMUser.info
|
python
|
def info(self, user_id):
resp = self._rtm_client.get('v1/user.info?user_id={}'.format(user_id))
if resp.is_fail():
raise RTMServiceError('Failed to get user information', resp)
return resp.data['result']
|
Gets user information by user id
Args:
user_id(int): the id of user
Returns:
User
Throws:
RTMServiceError when request failed
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_client_service.py#L69-L85
| null |
class RTMUser(RTMService):
|
bearyinnovative/bearychat.py
|
bearychat/rtm_client_service.py
|
RTMChannel.info
|
python
|
def info(self, channel_id):
resource = 'v1/channel.info?channel_id={}'.format(channel_id)
resp = self._rtm_client.get(resource)
if resp.is_fail():
raise RTMServiceError("Failed to get channel information", resp)
return resp.data['result']
|
Gets channel information by channel id
Args:
channel_id(int): the id of channel
Returns:
Channel
Throws:
RTMServiceError when request failed
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_client_service.py#L89-L106
| null |
class RTMChannel(RTMService):
|
bearyinnovative/bearychat.py
|
bearychat/rtm_message.py
|
RTMMessage.reply
|
python
|
def reply(self, text):
data = {'text': text, 'vchannel_id': self['vchannel_id']}
if self.is_p2p():
data['type'] = RTMMessageType.P2PMessage
data['to_uid'] = self['uid']
else:
data['type'] = RTMMessageType.ChannelMessage
data['channel_id'] = self['channel_id']
return RTMMessage(data)
|
Replys a text message
Args:
text(str): message content
Returns:
RTMMessage
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_message.py#L53-L69
|
[
"def is_p2p(self):\n \"\"\"\n Returns:\n True if current message is p2p message\n \"\"\"\n return self['type'] in (RTMMessageType.P2PMessage,\n RTMMessageType.P2PTyping)\n"
] |
class RTMMessage(object):
"""RTM Message
Args:
data(dict): message dict
"""
def __init__(self, data):
self._data = data
self.mention_user_ids = set()
self.parse_mention_user_ids()
def __setitem__(self, name, value):
self._data[name] = value
def __getitem__(self, name):
return self._data[name]
def __contains__(self, name):
return name in self._data
def parse_mention_user_ids(self):
if not self.is_chat_message():
return
if len(self['text']) == 0:
return
self.mention_user_ids = set(mention_regex.findall(self['text']))
def refer(self, text):
"""Refers current message and replys a new message
Args:
text(str): message content
Returns:
RTMMessage
"""
data = self.reply(text)
data['refer_key'] = self['key']
return data
def is_p2p(self):
"""
Returns:
True if current message is p2p message
"""
return self['type'] in (RTMMessageType.P2PMessage,
RTMMessageType.P2PTyping)
def is_chat_message(self):
"""
Returns:
True if current message is chatting message
"""
return self['type'] in (RTMMessageType.P2PMessage,
RTMMessageType.ChannelMessage)
def is_mention_user(self, user):
"""Check if current message mentions user
Args:
user(User)
Returns:
True if message mentions user
"""
return user.get('id') in self.mention_user_ids
def is_from(self, user):
"""Checks if current message is sent by user
Args:
user(User)
Returns:
True if current message is sent by the user
"""
return self['uid'] == user.get('id')
def to_json(self):
"""Transfers current message to json
Returns:
json
"""
return json.dumps(self._data)
|
bearyinnovative/bearychat.py
|
bearychat/rtm_message.py
|
RTMMessage.refer
|
python
|
def refer(self, text):
data = self.reply(text)
data['refer_key'] = self['key']
return data
|
Refers current message and replys a new message
Args:
text(str): message content
Returns:
RTMMessage
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_message.py#L71-L82
|
[
"def reply(self, text):\n \"\"\"Replys a text message\n\n Args:\n text(str): message content\n\n Returns:\n RTMMessage\n \"\"\"\n data = {'text': text, 'vchannel_id': self['vchannel_id']}\n if self.is_p2p():\n data['type'] = RTMMessageType.P2PMessage\n data['to_uid'] = self['uid']\n else:\n data['type'] = RTMMessageType.ChannelMessage\n data['channel_id'] = self['channel_id']\n return RTMMessage(data)\n"
] |
class RTMMessage(object):
"""RTM Message
Args:
data(dict): message dict
"""
def __init__(self, data):
self._data = data
self.mention_user_ids = set()
self.parse_mention_user_ids()
def __setitem__(self, name, value):
self._data[name] = value
def __getitem__(self, name):
return self._data[name]
def __contains__(self, name):
return name in self._data
def parse_mention_user_ids(self):
if not self.is_chat_message():
return
if len(self['text']) == 0:
return
self.mention_user_ids = set(mention_regex.findall(self['text']))
def reply(self, text):
"""Replys a text message
Args:
text(str): message content
Returns:
RTMMessage
"""
data = {'text': text, 'vchannel_id': self['vchannel_id']}
if self.is_p2p():
data['type'] = RTMMessageType.P2PMessage
data['to_uid'] = self['uid']
else:
data['type'] = RTMMessageType.ChannelMessage
data['channel_id'] = self['channel_id']
return RTMMessage(data)
def is_p2p(self):
"""
Returns:
True if current message is p2p message
"""
return self['type'] in (RTMMessageType.P2PMessage,
RTMMessageType.P2PTyping)
def is_chat_message(self):
"""
Returns:
True if current message is chatting message
"""
return self['type'] in (RTMMessageType.P2PMessage,
RTMMessageType.ChannelMessage)
def is_mention_user(self, user):
"""Check if current message mentions user
Args:
user(User)
Returns:
True if message mentions user
"""
return user.get('id') in self.mention_user_ids
def is_from(self, user):
"""Checks if current message is sent by user
Args:
user(User)
Returns:
True if current message is sent by the user
"""
return self['uid'] == user.get('id')
def to_json(self):
"""Transfers current message to json
Returns:
json
"""
return json.dumps(self._data)
|
bearyinnovative/bearychat.py
|
bearychat/rtm_client.py
|
RTMClient.start
|
python
|
def start(self):
resp = self.post('start')
if resp.is_fail():
return None
if 'result' not in resp.data:
return None
result = resp.data['result']
return {
'user': result['user'],
'ws_host': result['ws_host'],
}
|
Gets the rtm ws_host and user information
Returns:
None if request failed,
else a dict containing "user"(User) and "ws_host"
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_client.py#L52-L70
|
[
"def is_fail(self):\n return not self.is_ok()\n",
"def post(self, resource, data=None, json=None):\n \"\"\"Sends a POST request\n\n Returns:\n RTMResponse\n \"\"\"\n return self.do(resource, 'POST', data=data, json=json)\n"
] |
class RTMClient(object):
"""Real Time Message client
Attributes:
current_team(RTMCurrentTeam): service of current team
user(RTMUser): service of current user
channel(RTMChannel): service of current channel
"""
def __init__(self, token, api_base="https://rtm.bearychat.com"):
"""
Args:
token(str): rtm token
api_base(str): api url base
"""
self._token = token
self._api_base = api_base
self.current_team = RTMCurrentTeam(self)
self.user = RTMUser(self)
self.channel = RTMChannel(self)
def do(self,
resource,
method,
params=None,
data=None,
json=None,
headers=None):
"""Does the request job
Args:
resource(str): resource uri(relative path)
method(str): HTTP method
params(dict): uri queries
data(dict): HTTP body(form)
json(dict): HTTP body(json)
headers(dict): HTTP headers
Returns:
RTMResponse
"""
uri = "{0}/{1}".format(self._api_base, resource)
if not params:
params = {}
params.update({'token': self._token})
req = Request(
method=method,
url=uri,
params=params,
headers=headers,
data=data,
json=json)
s = Session()
prepped = s.prepare_request(req)
resp = s.send(prepped)
return RTMResponse(resp)
def get(self, resource, params=None, headers=None):
"""Sends a GET request
Returns:
RTMResponse
"""
return self.do(resource, 'GET', params=params, headers=headers)
def post(self, resource, data=None, json=None):
"""Sends a POST request
Returns:
RTMResponse
"""
return self.do(resource, 'POST', data=data, json=json)
|
bearyinnovative/bearychat.py
|
bearychat/rtm_client.py
|
RTMClient.do
|
python
|
def do(self,
resource,
method,
params=None,
data=None,
json=None,
headers=None):
uri = "{0}/{1}".format(self._api_base, resource)
if not params:
params = {}
params.update({'token': self._token})
req = Request(
method=method,
url=uri,
params=params,
headers=headers,
data=data,
json=json)
s = Session()
prepped = s.prepare_request(req)
resp = s.send(prepped)
return RTMResponse(resp)
|
Does the request job
Args:
resource(str): resource uri(relative path)
method(str): HTTP method
params(dict): uri queries
data(dict): HTTP body(form)
json(dict): HTTP body(json)
headers(dict): HTTP headers
Returns:
RTMResponse
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_client.py#L72-L108
| null |
class RTMClient(object):
"""Real Time Message client
Attributes:
current_team(RTMCurrentTeam): service of current team
user(RTMUser): service of current user
channel(RTMChannel): service of current channel
"""
def __init__(self, token, api_base="https://rtm.bearychat.com"):
"""
Args:
token(str): rtm token
api_base(str): api url base
"""
self._token = token
self._api_base = api_base
self.current_team = RTMCurrentTeam(self)
self.user = RTMUser(self)
self.channel = RTMChannel(self)
def start(self):
"""Gets the rtm ws_host and user information
Returns:
None if request failed,
else a dict containing "user"(User) and "ws_host"
"""
resp = self.post('start')
if resp.is_fail():
return None
if 'result' not in resp.data:
return None
result = resp.data['result']
return {
'user': result['user'],
'ws_host': result['ws_host'],
}
def get(self, resource, params=None, headers=None):
"""Sends a GET request
Returns:
RTMResponse
"""
return self.do(resource, 'GET', params=params, headers=headers)
def post(self, resource, data=None, json=None):
"""Sends a POST request
Returns:
RTMResponse
"""
return self.do(resource, 'POST', data=data, json=json)
|
bearyinnovative/bearychat.py
|
bearychat/rtm_client.py
|
RTMClient.get
|
python
|
def get(self, resource, params=None, headers=None):
return self.do(resource, 'GET', params=params, headers=headers)
|
Sends a GET request
Returns:
RTMResponse
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_client.py#L110-L116
|
[
"def do(self,\n resource,\n method,\n params=None,\n data=None,\n json=None,\n headers=None):\n \"\"\"Does the request job\n\n Args:\n resource(str): resource uri(relative path)\n method(str): HTTP method\n params(dict): uri queries\n data(dict): HTTP body(form)\n json(dict): HTTP body(json)\n headers(dict): HTTP headers\n\n Returns:\n RTMResponse\n \"\"\"\n uri = \"{0}/{1}\".format(self._api_base, resource)\n if not params:\n params = {}\n params.update({'token': self._token})\n\n req = Request(\n method=method,\n url=uri,\n params=params,\n headers=headers,\n data=data,\n json=json)\n s = Session()\n prepped = s.prepare_request(req)\n resp = s.send(prepped)\n\n return RTMResponse(resp)\n"
] |
class RTMClient(object):
"""Real Time Message client
Attributes:
current_team(RTMCurrentTeam): service of current team
user(RTMUser): service of current user
channel(RTMChannel): service of current channel
"""
def __init__(self, token, api_base="https://rtm.bearychat.com"):
"""
Args:
token(str): rtm token
api_base(str): api url base
"""
self._token = token
self._api_base = api_base
self.current_team = RTMCurrentTeam(self)
self.user = RTMUser(self)
self.channel = RTMChannel(self)
def start(self):
"""Gets the rtm ws_host and user information
Returns:
None if request failed,
else a dict containing "user"(User) and "ws_host"
"""
resp = self.post('start')
if resp.is_fail():
return None
if 'result' not in resp.data:
return None
result = resp.data['result']
return {
'user': result['user'],
'ws_host': result['ws_host'],
}
def do(self,
resource,
method,
params=None,
data=None,
json=None,
headers=None):
"""Does the request job
Args:
resource(str): resource uri(relative path)
method(str): HTTP method
params(dict): uri queries
data(dict): HTTP body(form)
json(dict): HTTP body(json)
headers(dict): HTTP headers
Returns:
RTMResponse
"""
uri = "{0}/{1}".format(self._api_base, resource)
if not params:
params = {}
params.update({'token': self._token})
req = Request(
method=method,
url=uri,
params=params,
headers=headers,
data=data,
json=json)
s = Session()
prepped = s.prepare_request(req)
resp = s.send(prepped)
return RTMResponse(resp)
def post(self, resource, data=None, json=None):
"""Sends a POST request
Returns:
RTMResponse
"""
return self.do(resource, 'POST', data=data, json=json)
|
bearyinnovative/bearychat.py
|
bearychat/rtm_client.py
|
RTMClient.post
|
python
|
def post(self, resource, data=None, json=None):
return self.do(resource, 'POST', data=data, json=json)
|
Sends a POST request
Returns:
RTMResponse
|
train
|
https://github.com/bearyinnovative/bearychat.py/blob/6c7af2d215c2ff7135bb5af66ca333d0ea1089fd/bearychat/rtm_client.py#L118-L124
|
[
"def do(self,\n resource,\n method,\n params=None,\n data=None,\n json=None,\n headers=None):\n \"\"\"Does the request job\n\n Args:\n resource(str): resource uri(relative path)\n method(str): HTTP method\n params(dict): uri queries\n data(dict): HTTP body(form)\n json(dict): HTTP body(json)\n headers(dict): HTTP headers\n\n Returns:\n RTMResponse\n \"\"\"\n uri = \"{0}/{1}\".format(self._api_base, resource)\n if not params:\n params = {}\n params.update({'token': self._token})\n\n req = Request(\n method=method,\n url=uri,\n params=params,\n headers=headers,\n data=data,\n json=json)\n s = Session()\n prepped = s.prepare_request(req)\n resp = s.send(prepped)\n\n return RTMResponse(resp)\n"
] |
class RTMClient(object):
"""Real Time Message client
Attributes:
current_team(RTMCurrentTeam): service of current team
user(RTMUser): service of current user
channel(RTMChannel): service of current channel
"""
def __init__(self, token, api_base="https://rtm.bearychat.com"):
"""
Args:
token(str): rtm token
api_base(str): api url base
"""
self._token = token
self._api_base = api_base
self.current_team = RTMCurrentTeam(self)
self.user = RTMUser(self)
self.channel = RTMChannel(self)
def start(self):
"""Gets the rtm ws_host and user information
Returns:
None if request failed,
else a dict containing "user"(User) and "ws_host"
"""
resp = self.post('start')
if resp.is_fail():
return None
if 'result' not in resp.data:
return None
result = resp.data['result']
return {
'user': result['user'],
'ws_host': result['ws_host'],
}
def do(self,
resource,
method,
params=None,
data=None,
json=None,
headers=None):
"""Does the request job
Args:
resource(str): resource uri(relative path)
method(str): HTTP method
params(dict): uri queries
data(dict): HTTP body(form)
json(dict): HTTP body(json)
headers(dict): HTTP headers
Returns:
RTMResponse
"""
uri = "{0}/{1}".format(self._api_base, resource)
if not params:
params = {}
params.update({'token': self._token})
req = Request(
method=method,
url=uri,
params=params,
headers=headers,
data=data,
json=json)
s = Session()
prepped = s.prepare_request(req)
resp = s.send(prepped)
return RTMResponse(resp)
def get(self, resource, params=None, headers=None):
"""Sends a GET request
Returns:
RTMResponse
"""
return self.do(resource, 'GET', params=params, headers=headers)
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/base_request.py
|
BaseRequest.set_logger
|
python
|
def set_logger(self):
"""Method to build the base logging system. By default, logging level
is set to INFO."""
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
logger_file = os.path.join(self.logs_path, 'dingtalk_sdk.logs')
logger_handler = logging.FileHandler(logger_file)
logger_handler.setLevel(logging.INFO)
logger_formatter = logging.Formatter(
'[%(asctime)s | %(name)s | %(levelname)s] %(message)s'
)
logger_handler.setFormatter(logger_formatter)
logger.addHandler(logger_handler)
return logger
|
Method to build the base logging system. By default, logging level
is set to INFO.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/base_request.py#L22-L35
| null |
class BaseRequest(object):
"""The base request for dingtalk"""
logs_path = os.path.dirname(os.path.abspath(__file__))
request_url = None
request_methods_valid = [
"get", "post", "put", "delete", "head", "options", "patch"
]
def __init__(self, **kwargs):
self.kwargs = kwargs
self.logger = self.set_logger()
self.response = None
self.json_response = None
self.call_status = False
self._request_method = "get"
def set_logger(self):
"""Method to build the base logging system. By default, logging level
is set to INFO."""
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
logger_file = os.path.join(self.logs_path, 'dingtalk_sdk.logs')
logger_handler = logging.FileHandler(logger_file)
logger_handler.setLevel(logging.INFO)
logger_formatter = logging.Formatter(
'[%(asctime)s | %(name)s | %(levelname)s] %(message)s'
)
logger_handler.setFormatter(logger_formatter)
logger.addHandler(logger_handler)
return logger
@property
def request_method(self):
"""Mostly, the get method is used to request wanted json data, as a
result, the property of request_method is set to get by default."""
return self._request_method
@request_method.setter
def request_method(self, method_str):
request_method_lower = method_str.lower()
if request_method_lower in self.request_methods_valid:
self._request_method = request_method_lower
else:
raise ValueError(
"%s is not a valid HTTP request method, please choose one"
"of %s to perform a normal http request, correct it now."
"" % (method_str, ",".join(self.request_methods_valid))
)
def get_response(self):
"""Get the original response of requests"""
request = getattr(requests, self.request_method, None)
if request is None and self._request_method is None:
raise ValueError("A effective http request method must be set")
if self.request_url is None:
raise ValueError(
"Fatal error occurred, the class property \"request_url\" is"
"set to None, reset it with an effective url of dingtalk api."
)
response = request(self.request_url, **self.kwargs)
self.response = response
return response
def get_json_response(self):
"""This method aims at catching the exception of ValueError, detail:
http://docs.python-requests.org/zh_CN/latest/user/quickstart.html#json
"""
self.json_response = self.get_response().json()
if self.json_response is not None:
error_code = self.json_response.get("errcode", None)
self.call_status = True if error_code == 0 else False
return self.json_response
def get_call_status(self):
"""The global status of api calling."""
return self.call_status
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/base_request.py
|
BaseRequest.get_response
|
python
|
def get_response(self):
"""Get the original response of requests"""
request = getattr(requests, self.request_method, None)
if request is None and self._request_method is None:
raise ValueError("A effective http request method must be set")
if self.request_url is None:
raise ValueError(
"Fatal error occurred, the class property \"request_url\" is"
"set to None, reset it with an effective url of dingtalk api."
)
response = request(self.request_url, **self.kwargs)
self.response = response
return response
|
Get the original response of requests
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/base_request.py#L55-L67
| null |
class BaseRequest(object):
"""The base request for dingtalk"""
logs_path = os.path.dirname(os.path.abspath(__file__))
request_url = None
request_methods_valid = [
"get", "post", "put", "delete", "head", "options", "patch"
]
def __init__(self, **kwargs):
self.kwargs = kwargs
self.logger = self.set_logger()
self.response = None
self.json_response = None
self.call_status = False
self._request_method = "get"
def set_logger(self):
"""Method to build the base logging system. By default, logging level
is set to INFO."""
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
logger_file = os.path.join(self.logs_path, 'dingtalk_sdk.logs')
logger_handler = logging.FileHandler(logger_file)
logger_handler.setLevel(logging.INFO)
logger_formatter = logging.Formatter(
'[%(asctime)s | %(name)s | %(levelname)s] %(message)s'
)
logger_handler.setFormatter(logger_formatter)
logger.addHandler(logger_handler)
return logger
@property
def request_method(self):
"""Mostly, the get method is used to request wanted json data, as a
result, the property of request_method is set to get by default."""
return self._request_method
@request_method.setter
def request_method(self, method_str):
request_method_lower = method_str.lower()
if request_method_lower in self.request_methods_valid:
self._request_method = request_method_lower
else:
raise ValueError(
"%s is not a valid HTTP request method, please choose one"
"of %s to perform a normal http request, correct it now."
"" % (method_str, ",".join(self.request_methods_valid))
)
def get_response(self):
"""Get the original response of requests"""
request = getattr(requests, self.request_method, None)
if request is None and self._request_method is None:
raise ValueError("A effective http request method must be set")
if self.request_url is None:
raise ValueError(
"Fatal error occurred, the class property \"request_url\" is"
"set to None, reset it with an effective url of dingtalk api."
)
response = request(self.request_url, **self.kwargs)
self.response = response
return response
def get_json_response(self):
"""This method aims at catching the exception of ValueError, detail:
http://docs.python-requests.org/zh_CN/latest/user/quickstart.html#json
"""
self.json_response = self.get_response().json()
if self.json_response is not None:
error_code = self.json_response.get("errcode", None)
self.call_status = True if error_code == 0 else False
return self.json_response
def get_call_status(self):
"""The global status of api calling."""
return self.call_status
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/base_request.py
|
BaseRequest.get_json_response
|
python
|
def get_json_response(self):
"""This method aims at catching the exception of ValueError, detail:
http://docs.python-requests.org/zh_CN/latest/user/quickstart.html#json
"""
self.json_response = self.get_response().json()
if self.json_response is not None:
error_code = self.json_response.get("errcode", None)
self.call_status = True if error_code == 0 else False
return self.json_response
|
This method aims at catching the exception of ValueError, detail:
http://docs.python-requests.org/zh_CN/latest/user/quickstart.html#json
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/base_request.py#L69-L77
|
[
"def get_response(self):\n \"\"\"Get the original response of requests\"\"\"\n request = getattr(requests, self.request_method, None)\n if request is None and self._request_method is None:\n raise ValueError(\"A effective http request method must be set\")\n if self.request_url is None:\n raise ValueError(\n \"Fatal error occurred, the class property \\\"request_url\\\" is\"\n \"set to None, reset it with an effective url of dingtalk api.\"\n )\n response = request(self.request_url, **self.kwargs)\n self.response = response\n return response\n"
] |
class BaseRequest(object):
"""The base request for dingtalk"""
logs_path = os.path.dirname(os.path.abspath(__file__))
request_url = None
request_methods_valid = [
"get", "post", "put", "delete", "head", "options", "patch"
]
def __init__(self, **kwargs):
self.kwargs = kwargs
self.logger = self.set_logger()
self.response = None
self.json_response = None
self.call_status = False
self._request_method = "get"
def set_logger(self):
"""Method to build the base logging system. By default, logging level
is set to INFO."""
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
logger_file = os.path.join(self.logs_path, 'dingtalk_sdk.logs')
logger_handler = logging.FileHandler(logger_file)
logger_handler.setLevel(logging.INFO)
logger_formatter = logging.Formatter(
'[%(asctime)s | %(name)s | %(levelname)s] %(message)s'
)
logger_handler.setFormatter(logger_formatter)
logger.addHandler(logger_handler)
return logger
@property
def request_method(self):
"""Mostly, the get method is used to request wanted json data, as a
result, the property of request_method is set to get by default."""
return self._request_method
@request_method.setter
def request_method(self, method_str):
request_method_lower = method_str.lower()
if request_method_lower in self.request_methods_valid:
self._request_method = request_method_lower
else:
raise ValueError(
"%s is not a valid HTTP request method, please choose one"
"of %s to perform a normal http request, correct it now."
"" % (method_str, ",".join(self.request_methods_valid))
)
def get_response(self):
"""Get the original response of requests"""
request = getattr(requests, self.request_method, None)
if request is None and self._request_method is None:
raise ValueError("A effective http request method must be set")
if self.request_url is None:
raise ValueError(
"Fatal error occurred, the class property \"request_url\" is"
"set to None, reset it with an effective url of dingtalk api."
)
response = request(self.request_url, **self.kwargs)
self.response = response
return response
def get_json_response(self):
"""This method aims at catching the exception of ValueError, detail:
http://docs.python-requests.org/zh_CN/latest/user/quickstart.html#json
"""
self.json_response = self.get_response().json()
if self.json_response is not None:
error_code = self.json_response.get("errcode", None)
self.call_status = True if error_code == 0 else False
return self.json_response
def get_call_status(self):
"""The global status of api calling."""
return self.call_status
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/authority_request.py
|
PersistentCodeRequest.get_ticket_for_sns_token
|
python
|
def get_ticket_for_sns_token(self):
"""This is a shortcut for getting the sns_token, as a post data of
request body."""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return {
"openid": self.get_openid(),
"persistent_code": self.get_persistent_code(),
}
|
This is a shortcut for getting the sns_token, as a post data of
request body.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/authority_request.py#L86-L93
|
[
"def get_openid(self):\n openid = self.json_response.get(\"openid\", None)\n self.logger.info(\"%s\\t%s\" % (self.request_method, self.request_url))\n return openid\n",
"def get_persistent_code(self):\n persistent_code = self.json_response.get(\"persistent_code\", None)\n self.logger.info(\"%s\\t%s\" % (self.request_method, self.request_url))\n return persistent_code\n"
] |
class PersistentCodeRequest(BaseRequest):
"""
Description: The persistent_code, openid and unionid are contained in the
json response of this api, the parameter <access_token> and post data
<tmp_auth_code> is required.
parameter_R: <access_token>
parameter_O: None
post_data_R: <tmp_auth_code>
post_data_O: None
Return: the persistent_code, openid and unionid of dingtalk api.
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/athq8o
"""
request_url = settings.GET_PERSISTENT_CODE
def get_openid(self):
openid = self.json_response.get("openid", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return openid
def get_unionid(self):
unionid = self.json_response.get("unionid", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return unionid
def get_persistent_code(self):
persistent_code = self.json_response.get("persistent_code", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return persistent_code
def get_ticket_for_sns_token(self):
"""This is a shortcut for getting the sns_token, as a post data of
request body."""
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return {
"openid": self.get_openid(),
"persistent_code": self.get_persistent_code(),
}
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/message_request.py
|
WorkNoticeRequest.get_task_id
|
python
|
def get_task_id(self):
"""Method to get all department members."""
task_id = self.json_response.get("task_id", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return task_id
|
Method to get all department members.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/message_request.py#L24-L28
| null |
class WorkNoticeRequest(BaseRequest):
"""
Description: The WorkNoticeRequest send work notice to specified user
(userid), parameters of <userid_list> and <dept_id_list> should NOT keep
null simultaneously, or request would fail
parameter_R: <access_token>
parameter_O: None
post_data_R: <agent_id>, <msg>
post_data_O: <userid_list>, <dept_id_list>, <to_all_user>
Return: send result json response
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/pgoxpy
"""
request_url = settings.SEND_WORK_NOTICE
task_id = None
def get_task_id(self):
"""Method to get all department members."""
task_id = self.json_response.get("task_id", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return task_id
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/message_request.py
|
GetWorkNoticeSendProgressRequest.get_progress
|
python
|
def get_progress(self):
"""Method to get the progress of work notice sending."""
progress = self.json_response.get("progress", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return progress
|
Method to get the progress of work notice sending.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/message_request.py#L48-L52
| null |
class GetWorkNoticeSendProgressRequest(BaseRequest):
"""
Description: The response of GetWorkNoticeSendProgressRequest shows the
progress of sending work notice
parameter_R: <access_token>
parameter_O: None
post_data_R: <agent_id>, <msg>
post_data_O: <userid_list>, <dept_id_list>, <to_all_user>
Return: the progress of sending work notice
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/pgoxpy
"""
request_url = settings.GET_WORK_NOTICE_SEND_PROGRESS
def get_progress(self):
"""Method to get the progress of work notice sending."""
progress = self.json_response.get("progress", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return progress
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/message_request.py
|
GetWorkNoticeSendResultRequest.get_send_result
|
python
|
def get_send_result(self):
"""Method to get the progress of work notice sending."""
send_result = self.json_response.get("send_result", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return send_result
|
Method to get the progress of work notice sending.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/message_request.py#L72-L76
| null |
class GetWorkNoticeSendResultRequest(BaseRequest):
"""
Description: The response of GetWorkNoticeSendResultRequest shows the
result of sending work notice
parameter_R: <access_token>
parameter_O: None
post_data_R: None
post_data_O: <agent_id>, <task_id>
Return: send result json response
doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/pgoxpy
"""
request_url = settings.GET_WORK_NOTICE_SEND_RESULT
def get_send_result(self):
"""Method to get the progress of work notice sending."""
send_result = self.json_response.get("send_result", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return send_result
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/message_request.py
|
CreateGroupChatRequest.get_chat_id
|
python
|
def get_chat_id(self):
"""Method to get chatid of group created."""
chat_id = self.json_response.get("chatid", None)
self.logger.info("%s\t%s" % (self.request_method, self.request_url))
return chat_id
|
Method to get chatid of group created.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/message_request.py#L96-L100
| null |
class CreateGroupChatRequest(BaseRequest):
    """
    Description: create a group chat owned by a given user (userid)
    parameter_R: <access_token>
    parameter_O: None
    post_data_R: <name>, <owner>, <useridlist>
    post_data_O: <showHistoryType>
    Return: json response containing the chatid of the created group
    doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/isu6nk
    """
    request_url = settings.CREATE_GROUP_CHAT

    def get_chat_id(self):
        """Return the "chatid" of the newly created group chat."""
        # Trace which request produced this response.
        self.logger.info("%s\t%s" % (self.request_method, self.request_url))
        return self.json_response.get("chatid")
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/message_request.py
|
GetGroupChatRequest.get_chat_info
|
python
|
def get_chat_info(self):
    """Return the chat info of the group chat and cache it on ``self``.

    Fix: the previous docstring was copy-pasted from ``get_chat_id`` and
    claimed to return a chatid; this method reads the "chat_info" key,
    stores it in ``self.chat_info`` for later accessors, and returns it
    (``None`` when the key is absent).
    """
    chat_info = self.json_response.get("chat_info", None)
    # Cache so sibling accessors can read members without re-parsing.
    self.chat_info = chat_info
    self.logger.info("%s\t%s" % (self.request_method, self.request_url))
    return chat_info
|
Method to get the chat info of an existing group chat.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/message_request.py#L139-L144
| null |
class GetGroupChatRequest(BaseRequest):
    """
    Description: The GetGroupChatRequest aims to get information of an
        existing group chat by chatid
    parameter_R: <access_token>
    parameter_O: None
    post_data_R: <chatid>
    post_data_O: None
    Return: chat info json response
    doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/isu6nk
    """
    # NOTE(review): this "get" request points at UPDATE_GROUP_CHAT — looks
    # like a copy-paste; confirm the intended endpoint before changing it.
    request_url = settings.UPDATE_GROUP_CHAT
    # Cached "chat_info" dict from the last response; None until
    # get_chat_info() has run.
    chat_info = None

    def get_chat_info(self):
        """Return the chat info of the group chat and cache it on ``self``.

        Fix: the previous docstring was copy-pasted from ``get_chat_id``;
        this method reads the "chat_info" key and stores it in
        ``self.chat_info``. Returns ``None`` when the key is absent.
        """
        chat_info = self.json_response.get("chat_info", None)
        self.chat_info = chat_info
        self.logger.info("%s\t%s" % (self.request_method, self.request_url))
        return chat_info

    def get_specified_group_user_ids(self):
        """Return the member user ids ("useridlist") of the cached chat info.

        Must be called after ``get_chat_info()``; otherwise ``self.chat_info``
        is still ``None`` and this raises ``AttributeError``.
        """
        user_ids = self.chat_info.get("useridlist", None)
        self.logger.info("%s\t%s" % (self.request_method, self.request_url))
        return user_ids
|
gmdzy2010/dingtalk_sdk_gmdzy2010
|
dingtalk_sdk_gmdzy2010/message_request.py
|
SendGroupChatRequest.get_message_id
|
python
|
def get_message_id(self):
    """Return the "messageId" of the sent group chat message.

    Reads the "messageId" key of ``self.json_response``; returns ``None``
    when the key is absent. Logs the request method and URL as a trace.
    """
    self.logger.info("%s\t%s" % (self.request_method, self.request_url))
    return self.json_response.get("messageId")
|
Method to get messageId of group created.
|
train
|
https://github.com/gmdzy2010/dingtalk_sdk_gmdzy2010/blob/b06cb1f78f89be9554dcb6101af8bc72718a9ecd/dingtalk_sdk_gmdzy2010/message_request.py#L168-L172
| null |
class SendGroupChatRequest(BaseRequest):
    """
    Description: send a message to an existing group chat
    parameter_R: <access_token>
    parameter_O: None
    post_data_R: <chatid>, <msg>
    post_data_O: None
    Return: json response containing the message id of the group chat
    doc_links: https://open-doc.dingtalk.com/microapp/serverapi2/isu6nk
    """
    request_url = settings.SEND_GROUP_CHAT

    def get_message_id(self):
        """Return the "messageId" of the sent group chat message."""
        # Trace which request produced this response.
        self.logger.info("%s\t%s" % (self.request_method, self.request_url))
        return self.json_response.get("messageId")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.