text stringlengths 81 112k |
|---|
Return the score of *member*, or *default* if it is not in the
collection.
def get_score(self, member, default=None, pipe=None):
    """
    Return the score of *member*, or *default* if it is not in the
    collection.
    """
    if pipe is None:
        pipe = self.redis
    result = pipe.zscore(self.key, self._pickle(member))
    if result is not None:
        return result
    # Member absent: fall back to *default* (coerced to float), or None.
    return result if default is None else float(default)
If *member* is in the collection, return its value. If not, store it
with a score of *default* and return *default*. *default* defaults to
0.
def get_or_set_score(self, member, default=0):
    """
    If *member* is in the collection, return its value. If not, store it
    with a score of *default* and return *default*. *default* defaults to
    0.
    """
    default = float(default)

    def get_or_set_score_trans(pipe):
        # Pickle exactly once so the ZSCORE lookup and the ZADD write
        # refer to the same serialized member (the original pickled the
        # member a second time for the ZADD).
        pickled_member = self._pickle(member)
        score = pipe.zscore(self.key, pickled_member)
        if score is None:
            pipe.zadd(self.key, {pickled_member: default})
            return default
        return score

    return self._transaction(get_or_set_score_trans)
Return the rank of *member* in the collection.
By default, the member with the lowest score has rank 0.
If *reverse* is ``True``, the member with the highest score has rank 0.
def get_rank(self, member, reverse=False, pipe=None):
    """
    Return the rank of *member* in the collection.

    By default, the member with the lowest score has rank 0.
    If *reverse* is ``True``, the member with the highest score has rank 0.
    """
    client = self.redis if pipe is None else pipe
    pickled_member = self._pickle(member)
    if reverse:
        return client.zrevrank(self.key, pickled_member)
    return client.zrank(self.key, pickled_member)
Adjust the score of *member* by *amount*. If *member* is not in the
collection it will be stored with a score of *amount*.
def increment_score(self, member, amount=1):
    """
    Adjust the score of *member* by *amount*. If *member* is not in the
    collection it will be stored with a score of *amount*.
    """
    pickled_member = self._pickle(member)
    # ZINCRBY creates missing members with an initial score of 0.
    return self.redis.zincrby(self.key, float(amount), pickled_member)
Return a list of ``(member, score)`` tuples whose ranking is between
*min_rank* and *max_rank* AND whose score is between *min_score* and
*max_score* (both ranges inclusive). If no bounds are specified, all
items will be returned.
def items(
    self,
    min_rank=None,
    max_rank=None,
    min_score=None,
    max_score=None,
    reverse=False,
    pipe=None,
):
    """
    Return a list of ``(member, score)`` tuples whose ranking is between
    *min_rank* and *max_rank* AND whose score is between *min_score* and
    *max_score* (both ranges inclusive). If no bounds are specified, all
    items will be returned.
    """
    pipe = self.redis if pipe is None else pipe
    no_ranks = (min_rank is None) and (max_rank is None)
    no_scores = (min_score is None) and (max_score is None)

    # With no rank bounds, one score-range query covers both the
    # "no bounds at all" and the "score bounds only" cases, since
    # items_by_score treats None bounds as unbounded. (The original had
    # two separate branches with identical bodies for these cases.)
    if no_ranks:
        return self.items_by_score(min_score, max_score, reverse, pipe)

    # Rank bounds only: a single rank-range query suffices.
    if no_scores:
        return self.items_by_rank(min_rank, max_rank, reverse, pipe)

    # Both kinds of bounds: narrow by rank in Redis, then filter the
    # results by score in Python.
    results = self.items_by_rank(min_rank, max_rank, reverse, pipe)
    ret = []
    for member, score in results:
        if (min_score is not None) and (score < min_score):
            continue
        if (max_score is not None) and (score > max_score):
            continue
        ret.append((member, score))
    return ret
Set the score of *member* to *score*.
def set_score(self, member, score, pipe=None):
    """
    Set the score of *member* to *score*.
    """
    client = self.redis if pipe is None else pipe
    mapping = {self._pickle(member): float(score)}
    client.zadd(self.key, mapping)
Return the great-circle distance between *place_1* and *place_2*,
in the *unit* specified.
The default unit is ``'km'``, but ``'m'``, ``'mi'``, and ``'ft'`` can
also be specified.
def distance_between(self, place_1, place_2, unit='km'):
    """
    Return the great-circle distance between *place_1* and *place_2*,
    in the *unit* specified.

    The default unit is ``'km'``, but ``'m'``, ``'mi'``, and ``'ft'`` can
    also be specified.
    """
    pickled_1 = self._pickle(place_1)
    pickled_2 = self._pickle(place_2)
    try:
        return self.redis.geodist(self.key, pickled_1, pickled_2, unit=unit)
    except TypeError:
        # Raised when one of the places is missing from the collection.
        return None
Return the Geohash of *place*.
If it's not present in the collection, ``None`` will be returned
instead.
def get_hash(self, place):
    """
    Return the Geohash of *place*.

    If it's not present in the collection, ``None`` will be returned
    instead.
    """
    pickled_place = self._pickle(place)
    try:
        hashes = self.redis.geohash(self.key, pickled_place)
        return hashes[0]
    except (AttributeError, TypeError):
        # Missing members surface as these errors from the client.
        return None
Return a dict with the coordinates *place*. The dict's keys are
``'latitude'`` and ``'longitude'``.
If it's not present in the collection, ``None`` will be returned
instead.
def get_location(self, place):
    """
    Return a dict with the coordinates of *place*. The dict's keys are
    ``'latitude'`` and ``'longitude'``.

    If it's not present in the collection, ``None`` will be returned
    instead.
    """
    pickled_place = self._pickle(place)
    try:
        positions = self.redis.geopos(self.key, pickled_place)
        # GEOPOS reports (longitude, latitude) pairs.
        longitude, latitude = positions[0]
    except (AttributeError, TypeError):
        return None
    return {'latitude': latitude, 'longitude': longitude}
Return descriptions of the places stored in the collection that are
within the circle specified by the given location and radius.
A list of dicts will be returned.
The center of the circle can be specified by the identifier of another
place in the collection with the *place* keyword argument.
Or, it can be specified by using both the *latitude* and *longitude*
keyword arguments.
By default the *radius* is given in kilometers, but you may also set
the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``.
Limit the number of results returned with the *count* keyword argument.
Change the sorted order by setting the *sort* keyword argument to
``b'DESC'``.
def places_within_radius(
    self, place=None, latitude=None, longitude=None, radius=0, **kwargs
):
    """
    Return descriptions of the places stored in the collection that are
    within the circle specified by the given location and radius.
    A list of dicts will be returned.

    The center of the circle can be specified by the identifier of another
    place in the collection with the *place* keyword argument.
    Or, it can be specified by using both the *latitude* and *longitude*
    keyword arguments.

    By default the *radius* is given in kilometers, but you may also set
    the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``.

    Limit the number of results returned with the *count* keyword argument.

    Change the sorted order by setting the *sort* keyword argument to
    ``b'DESC'``.

    :raises ValueError: if neither *place* nor a *latitude*/*longitude*
        pair is given.
    """
    # Distance and coordinates are needed to assemble the result dicts
    # below; the raw Geohash is not.
    kwargs['withdist'] = True
    kwargs['withcoord'] = True
    kwargs['withhash'] = False
    # NOTE(review): the docstring suggests passing b'DESC' while the
    # default here is the str 'ASC' — confirm which form the client
    # expects before changing either.
    kwargs.setdefault('sort', 'ASC')
    # Remember the unit so each result dict can echo it back.
    unit = kwargs.setdefault('unit', 'km')
    # Make the query
    if place is not None:
        response = self.redis.georadiusbymember(
            self.key, self._pickle(place), radius, **kwargs
        )
    elif (latitude is not None) and (longitude is not None):
        # Redis expects longitude before latitude.
        response = self.redis.georadius(
            self.key, longitude, latitude, radius, **kwargs
        )
    else:
        raise ValueError(
            'Must specify place, or both latitude and longitude'
        )
    # Assemble the result. With withdist and withcoord set, each item is
    # (member, distance, (longitude, latitude)).
    ret = []
    for item in response:
        ret.append(
            {
                'place': self._unpickle(item[0]),
                'distance': item[1],
                'unit': unit,
                'latitude': item[2][1],
                'longitude': item[2][0],
            }
        )
    return ret |
Set the location of *place* to the location specified by
*latitude* and *longitude*.
*place* can be any pickle-able Python object.
def set_location(self, place, latitude, longitude, pipe=None):
    """
    Set the location of *place* to the location specified by
    *latitude* and *longitude*.

    *place* can be any pickle-able Python object.
    """
    client = self.redis if pipe is None else pipe
    pickled_place = self._pickle(place)
    # Redis GEOADD takes longitude before latitude.
    client.geoadd(self.key, longitude, latitude, pickled_place)
Update the collection with items from *other*. Accepts other
:class:`GeoDB` instances, dictionaries mapping places to
``{'latitude': latitude, 'longitude': longitude}`` dicts,
or sequences of ``(place, latitude, longitude)`` tuples.
def update(self, other):
    """
    Update the collection with items from *other*. Accepts other
    :class:`GeoDB` instances, dictionaries mapping places to
    ``{'latitude': latitude, 'longitude': longitude}`` dicts,
    or sequences of ``(place, latitude, longitude)`` tuples.

    :raises TypeError: if *other* matches none of those shapes. (The
        original fell through with *func* unbound and raised
        UnboundLocalError instead.)
    """
    # other is another Sorted Set
    def update_sortedset_trans(pipe):
        items = other._data(pipe=pipe) if use_redis else other._data()
        pipe.multi()
        for member, score in items:
            pipe.zadd(self.key, {self._pickle(member): float(score)})

    # other is dict-like
    def update_mapping_trans(pipe):
        items = other.items(pipe=pipe) if use_redis else other.items()
        pipe.multi()
        for place, value in items:
            self.set_location(
                place, value['latitude'], value['longitude'], pipe=pipe
            )

    # other is a list of tuples
    def update_tuples_trans(pipe):
        items = (
            other.__iter__(pipe=pipe) if use_redis else other.__iter__()
        )
        pipe.multi()
        for place, latitude, longitude in items:
            self.set_location(place, latitude, longitude, pipe=pipe)

    # Watch the source key when both collections live on the same Redis,
    # so concurrent modification retries the transaction.
    watches = []
    if self._same_redis(other, RedisCollection):
        use_redis = True
        watches.append(other.key)
    else:
        use_redis = False

    if isinstance(other, SortedSetBase):
        func = update_sortedset_trans
    elif hasattr(other, 'items'):
        func = update_mapping_trans
    elif hasattr(other, '__iter__'):
        func = update_tuples_trans
    else:
        # Fail loudly on unsupported input instead of UnboundLocalError.
        raise TypeError(
            'Unsupported type for update: {}'.format(type(other))
        )

    self._transaction(func, *watches)
Creates another collection with the same items and maxsize with
the given *key*.
def copy(self, key=None):
    """
    Creates another collection with the same items and maxsize with
    the given *key*.
    """
    cls = self.__class__
    clone = cls(
        maxsize=self.maxsize, redis=self.persistence.redis, key=key
    )
    clone.update(self)
    return clone
Create a new collection with keys from *seq* and values set to
*value*. The keyword arguments are passed to the persistent ``Dict``.
def fromkeys(cls, seq, value=None, **kwargs):
    """
    Create a new collection with keys from *seq* and values set to
    *value*. The keyword arguments are passed to the persistent ``Dict``.
    """
    collection = cls(**kwargs)
    pairs = ((key, value) for key in seq)
    collection.update(pairs)
    return collection
Copy items from the local cache to the persistent Dict.
If *clear_cache* is ``True``, clear out the local cache after
pushing its items to Redis.
def sync(self, clear_cache=False):
    """
    Copy items from the local cache to the persistent Dict.

    If *clear_cache* is ``True``, clear out the local cache after
    pushing its items to Redis.
    """
    # Push everything currently cached into the persistent store first.
    self.persistence.update(self)
    if not clear_cache:
        return
    self.cache.clear()
Return a :obj:`list` of all values from Redis
(without checking the local cache).
def _data(self, pipe=None):
    """
    Return a :obj:`list` of all values from Redis
    (without checking the local cache).
    """
    if pipe is None:
        pipe = self.redis
    pickled_values = pipe.lrange(self.key, 0, -1)
    return [self._unpickle(pickled) for pickled in pickled_values]
Insert *value* at the end of this collection.
def append(self, value):
    """Insert *value* at the end of this collection."""
    new_length = self.redis.rpush(self.key, self._pickle(value))
    if self.writeback:
        # RPUSH returns the new list length, so the value landed at
        # index new_length - 1.
        self.cache[new_length - 1] = value
Return a new collection with the same items as this one.
If *key* is specified, create the new collection with the given
Redis key.
def copy(self, key=None):
    """
    Return a new collection with the same items as this one.

    If *key* is specified, create the new collection with the given
    Redis key.
    """
    clone = self.__class__(
        redis=self.redis, key=key, writeback=self.writeback
    )
    clone.extend(self)
    return clone
Adds the values from the iterable *other* to the end of this
collection.
def extend(self, other):
    """
    Adds the values from the iterable *other* to the end of this
    collection.
    """
    def extend_trans(pipe):
        # Materialize in all cases: generators previously crashed on
        # len(values) below after being consumed by RPUSH.
        values = list(other.__iter__(pipe)) if use_redis else list(other)
        # RPUSH with zero values is a Redis error; treat an empty
        # *other* as a no-op instead.
        if not values:
            return
        len_self = pipe.rpush(self.key, *(self._pickle(v) for v in values))
        if self.writeback:
            # Cache the new values at their freshly assigned indices.
            for i, v in enumerate(values, len_self - len(values)):
                self.cache[i] = v

    if self._same_redis(other, RedisCollection):
        use_redis = True
        self._transaction(extend_trans, other.key)
    else:
        use_redis = False
        self._transaction(extend_trans)
Return the index of the first occurrence of *value*.
If *start* or *stop* are provided, return the smallest
index such that ``s[index] == value`` and ``start <= index < stop``.
def index(self, value, start=None, stop=None):
    """
    Return the index of the first occurrence of *value*.

    If *start* or *stop* are provided, return the smallest
    index such that ``s[index] == value`` and ``start <= index < stop``.
    """
    def index_trans(pipe):
        # Resolve negative / missing bounds against the current length.
        len_self, lo = self._normalize_index(start or 0, pipe)
        __, hi = self._normalize_index(stop or len_self, pipe)
        for i, candidate in enumerate(self.__iter__(pipe=pipe)):
            if candidate != value:
                continue
            if i < lo:
                continue
            if i >= hi:
                break
            return i
        raise ValueError

    return self._transaction(index_trans)
Insert *value* into the collection at *index*.
def insert(self, index, value):
    """
    Insert *value* into the collection at *index*.
    """
    # Index 0 maps directly onto a left push.
    if index == 0:
        return self._insert_left(value)
    return self._transaction(
        lambda pipe: self._insert_middle(index, value, pipe=pipe)
    )
Retrieve the value at *index*, remove it from the collection, and
return it.
def pop(self, index=-1):
    """
    Retrieve the value at *index*, remove it from the collection, and
    return it.
    """
    # Pops at either end have dedicated fast paths.
    if index == -1:
        return self._pop_right()
    if index == 0:
        return self._pop_left()
    return self._pop_middle(index)
Remove the first occurrence of *value*.
def remove(self, value):
    """Remove the first occurrence of *value*."""
    def remove_trans(pipe):
        # Flush cached writes first so LREM sees the true list state.
        if self.writeback:
            self._sync_helper(pipe)
        removed = pipe.lrem(self.key, 1, self._pickle(value))
        if not removed:
            raise ValueError

    self._transaction(remove_trans)
Reverses the items of this collection "in place" (only two values are
retrieved from Redis at a time).
def reverse(self):
    """
    Reverses the items of this collection "in place" (only two values are
    retrieved from Redis at a time).
    """
    def reverse_trans(pipe):
        if self.writeback:
            self._sync_helper(pipe)
        length = self.__len__(pipe)
        # Swap symmetric pairs, walking inward from both ends.
        for left_index in range(length // 2):
            right_index = length - left_index - 1
            left_value = pipe.lindex(self.key, left_index)
            right_value = pipe.lindex(self.key, right_index)
            pipe.lset(self.key, left_index, right_value)
            pipe.lset(self.key, right_index, left_value)

    self._transaction(reverse_trans)
Sort the items of this collection according to the optional callable
*key*. If *reverse* is set then the sort order is reversed.
.. note::
This sort requires all items to be retrieved from Redis and stored
in memory.
def sort(self, key=None, reverse=False):
    """
    Sort the items of this collection according to the optional callable
    *key*. If *reverse* is set then the sort order is reversed.

    .. note::
        This sort requires all items to be retrieved from Redis and stored
        in memory.
    """
    def sort_trans(pipe):
        # Pull everything down, sort locally, then rewrite the key.
        ordered = sorted(self.__iter__(pipe), key=key, reverse=reverse)
        pipe.multi()
        pipe.delete(self.key)
        pipe.rpush(self.key, *(self._pickle(v) for v in ordered))
        if self.writeback:
            # The old cache indices are all stale after the rewrite.
            self.cache = {}

    return self._transaction(sort_trans)
Add *value* to the right side of the collection.
def append(self, value):
    """Add *value* to the right side of the collection."""
    self._transaction(lambda pipe: self._append_helper(value, pipe))
Add *value* to the left side of the collection.
def appendleft(self, value):
    """Add *value* to the left side of the collection."""
    self._transaction(lambda pipe: self._appendleft_helper(value, pipe))
Return a new collection with the same items as this one.
If *key* is specified, create the new collection with the given
Redis key.
def copy(self, key=None):
    """
    Return a new collection with the same items as this one.

    If *key* is specified, create the new collection with the given
    Redis key.
    """
    # The constructor consumes the iterable and honors maxlen itself.
    return self.__class__(
        self.__iter__(),
        self.maxlen,
        redis=self.redis,
        key=key,
        writeback=self.writeback,
    )
Extend the right side of the collection by appending values from
the iterable *other*.
def extend(self, other):
    """
    Extend the right side of the collection by appending values from
    the iterable *other*.
    """
    def extend_trans(pipe):
        values = list(other.__iter__(pipe)) if use_redis else other
        for value in values:
            self._append_helper(value, pipe)

    use_redis = self._same_redis(other, RedisCollection)
    if use_redis:
        # Watch the source key so concurrent changes retry the transaction.
        self._transaction(extend_trans, other.key)
    else:
        self._transaction(extend_trans)
Extend the left side of the collection by appending values from
the iterable *other*. Note that the appends will reverse the order
of the given values.
def extendleft(self, other):
    """
    Extend the left side of the collection by appending values from
    the iterable *other*. Note that the appends will reverse the order
    of the given values.
    """
    def extendleft_trans(pipe):
        values = list(other.__iter__(pipe)) if use_redis else other
        for value in values:
            self._appendleft_helper(value, pipe)

    use_redis = self._same_redis(other, RedisCollection)
    if use_redis:
        # Watch the source key so concurrent changes retry the transaction.
        self._transaction(extendleft_trans, other.key)
    else:
        self._transaction(extendleft_trans)
Insert *value* into the collection at *index*.
If the insertion would cause the collection to grow beyond ``maxlen``,
raise ``IndexError``.
def insert(self, index, value):
    """
    Insert *value* into the collection at *index*.

    If the insertion would cause the collection to grow beyond
    ``maxlen``, raise ``IndexError``.
    """
    def insert_trans(pipe):
        # Enforce the bounded-deque invariant before touching the key.
        current_length = self.__len__(pipe)
        if (self.maxlen is not None) and (current_length >= self.maxlen):
            raise IndexError
        if index == 0:
            self._insert_left(value, pipe)
        else:
            self._insert_middle(index, value, pipe=pipe)

    self._transaction(insert_trans)
Rotate the deque n steps to the right.
If n is negative, rotate to the left.
def rotate(self, n=1):
    """
    Rotate the deque n steps to the right.
    If n is negative, rotate to the left.
    """
    # No work to do for a 0-step rotate.
    if n == 0:
        return

    forward = n >= 0
    abs_n = abs(n)

    def rotate_trans(pipe):
        # Synchronize the cache before rotating.
        if self.writeback:
            self._sync_helper(pipe)

        # Rotating len(self) times has no effect.
        steps = abs_n % self.__len__(pipe)

        if forward:
            # Rightward rotation maps onto the built-in RPOPLPUSH.
            pipe.multi()
            for __ in range(steps):
                pipe.rpoplpush(self.key, self.key)
        else:
            # Leftward rotation has no single Redis command; pop from
            # the left and push onto the right one step at a time.
            for __ in range(steps):
                pipe.rpush(self.key, pipe.lpop(self.key))

    self._transaction(rotate_trans)
Builds a dict keyed on entity kind id whose values are another dict. Each of these inner dicts is keyed
off of a super entity id and optionally has an 'all' key for any group membership that has a null super entity.
Example structure:
{
entity_kind_id: {
entity1_id: [1, 2, 3],
entity2_id: [4, 5, 6],
'all': [1, 2, 3, 4, 5, 6]
}
}
:rtype: dict
def get_entities_by_kind(membership_cache=None, is_active=True):
    """
    Build a dict keyed on entity kind id whose values are another dict.
    Each of these inner dicts is keyed off of a super entity id and
    optionally has an 'all' key for any group membership that has a null
    super entity.

    Example structure:
    {
        entity_kind_id: {
            entity1_id: [1, 2, 3],
            entity2_id: [4, 5, 6],
            'all': [1, 2, 3, 4, 5, 6]
        }
    }

    :param membership_cache: Optional precomputed cache from
        `EntityGroup.objects.get_membership_cache()`
    :param is_active: Flag used when building a fresh membership cache
    :rtype: dict
    """
    # Accept an existing cache or build a new one
    if membership_cache is None:
        membership_cache = EntityGroup.objects.get_membership_cache(is_active=is_active)

    entities_by_kind = {}
    kinds_with_all = set()
    kinds_with_supers = set()
    super_ids = set()

    # Loop over each group
    for group_id, memberships in membership_cache.items():
        # Look at each membership
        for entity_id, entity_kind_id in memberships:
            # Only care about memberships with entity kind
            if entity_kind_id:
                # Make sure a dict exists for this kind
                entities_by_kind.setdefault(entity_kind_id, {})

                # Check if this is all entities of a kind under a specific entity
                if entity_id:
                    entities_by_kind[entity_kind_id][entity_id] = []
                    kinds_with_supers.add(entity_kind_id)
                    super_ids.add(entity_id)
                else:
                    # This is all entities of this kind
                    entities_by_kind[entity_kind_id]['all'] = []
                    kinds_with_all.add(entity_kind_id)

    # Get entities for 'all'
    all_entities_for_types = Entity.objects.filter(
        entity_kind_id__in=kinds_with_all
    ).values_list('id', 'entity_kind_id')

    # Add entity ids to entity kind's all list. The loop variable is named
    # entity_id (not `id`) so it no longer shadows the builtin.
    for entity_id, entity_kind_id in all_entities_for_types:
        entities_by_kind[entity_kind_id]['all'].append(entity_id)

    # Get relationships
    relationships = EntityRelationship.objects.filter(
        super_entity_id__in=super_ids,
        sub_entity__entity_kind_id__in=kinds_with_supers
    ).values_list(
        'super_entity_id', 'sub_entity_id', 'sub_entity__entity_kind_id'
    )

    # Add entity ids to each super entity's list
    for super_entity_id, sub_entity_id, sub_entity_kind_id in relationships:
        entities_by_kind[sub_entity_kind_id].setdefault(super_entity_id, [])
        entities_by_kind[sub_entity_kind_id][super_entity_id].append(sub_entity_id)

    return entities_by_kind
Given a list of super entities, return the entities that have those as a subset of their super entities.
def is_sub_to_all(self, *super_entities):
    """
    Given a list of super entities, return the entities that have those as
    a subset of their super entities.
    """
    if not super_entities:
        return self

    if len(super_entities) == 1:
        # Optimize for the case of just one super entity since this is a
        # much less intensive query.
        has_subset = EntityRelationship.objects.filter(
            super_entity=super_entities[0],
        ).values_list('sub_entity', flat=True)
    else:
        # Get a list of entities that have super entities with all types.
        has_subset = EntityRelationship.objects.filter(
            super_entity__in=super_entities,
        ).values('sub_entity').annotate(
            Count('super_entity'),
        ).filter(
            super_entity__count=len(set(super_entities)),
        ).values_list('sub_entity', flat=True)

    return self.filter(id__in=has_subset)
Given a list of super entities, return the entities that have super entities that intersect with those provided.
def is_sub_to_any(self, *super_entities):
    """
    Given a list of super entities, return the entities whose super
    entities intersect with those provided.
    """
    if not super_entities:
        return self
    sub_entity_ids = EntityRelationship.objects.filter(
        super_entity__in=super_entities,
    ).values_list('sub_entity', flat=True)
    return self.filter(id__in=sub_entity_ids)
Each returned entity will have super entities whose combined entity kinds include *super_entity_kinds*
def is_sub_to_all_kinds(self, *super_entity_kinds):
    """
    Each returned entity will have super entities whose combined entity
    kinds include *super_entity_kinds*.
    """
    if not super_entity_kinds:
        return self

    if len(super_entity_kinds) == 1:
        # Optimize for the case of just one kind.
        has_subset = EntityRelationship.objects.filter(
            super_entity__entity_kind=super_entity_kinds[0],
        ).values_list('sub_entity', flat=True)
    else:
        # Get a list of entities that have super entities with all kinds.
        has_subset = EntityRelationship.objects.filter(
            super_entity__entity_kind__in=super_entity_kinds,
        ).values('sub_entity').annotate(
            Count('super_entity'),
        ).filter(
            super_entity__count=len(set(super_entity_kinds)),
        ).values_list('sub_entity', flat=True)

    return self.filter(pk__in=has_subset)
Find all entities that have super_entities of any of the specified kinds
def is_sub_to_any_kind(self, *super_entity_kinds):
    """
    Find all entities that have super_entities of any of the specified
    kinds.
    """
    if not super_entity_kinds:
        return self

    # Get the pks of the desired subs from the relationships table.
    # NOTE: the original chained select_related() onto these queries; it
    # has no effect on a values_list() query and was dropped.
    if len(super_entity_kinds) == 1:
        entity_pks = EntityRelationship.objects.filter(
            super_entity__entity_kind=super_entity_kinds[0],
        ).values_list('sub_entity', flat=True)
    else:
        entity_pks = EntityRelationship.objects.filter(
            super_entity__entity_kind__in=super_entity_kinds,
        ).values_list('sub_entity', flat=True)

    # Return a queryset limited to only those pks
    return self.filter(pk__in=entity_pks)
Caches the super and sub relationships by doing a prefetch_related.
def cache_relationships(self, cache_super=True, cache_sub=True):
    """
    Caches the super and sub relationships by doing a prefetch_related.
    """
    candidates = [
        ('super_relationships__super_entity', cache_super),
        ('sub_relationships__sub_entity', cache_sub),
    ]
    relationships_to_cache = [name for name, wanted in candidates if wanted]
    return self.prefetch_related(*relationships_to_cache)
Given a saved entity model object, return the associated entity.
def get_for_obj(self, entity_model_obj):
    """
    Given a saved entity model object, return the associated entity.
    """
    entity_type = ContentType.objects.get_for_model(
        entity_model_obj, for_concrete_model=False,
    )
    return self.get(entity_type=entity_type, entity_id=entity_model_obj.id)
Delete the entities associated with a model object.
def delete_for_obj(self, entity_model_obj):
    """
    Delete the entities associated with a model object.
    """
    entity_type = ContentType.objects.get_for_model(
        entity_model_obj, for_concrete_model=False,
    )
    return self.filter(
        entity_type=entity_type, entity_id=entity_model_obj.id,
    ).delete(force=True)
Caches the super and sub relationships by doing a prefetch_related.
def cache_relationships(self, cache_super=True, cache_sub=True):
    """
    Caches the super and sub relationships by doing a prefetch_related.
    """
    queryset = self.get_queryset()
    return queryset.cache_relationships(
        cache_super=cache_super, cache_sub=cache_sub,
    )
Build a dict cache with the group membership info. Keyed off the group id and the values are
a 2 element list of entity id and entity kind id (same values as the membership model). If no group ids
are passed, then all groups will be fetched
:param is_active: Flag indicating whether to filter on entity active status. None will not filter.
:rtype: dict
def get_membership_cache(self, group_ids=None, is_active=True):
    """
    Build a dict cache with the group membership info. Keyed off the group
    id and the values are a 2 element list of entity id and entity kind id
    (same values as the membership model). If no group ids are passed, then
    all groups will be fetched.

    :param group_ids: Optional iterable of group ids to restrict the cache
    :param is_active: Flag indicating whether to filter on entity active
        status. None will not filter.
    :rtype: dict
    """
    # Build the base queryset once. The previous version always built the
    # filtered queryset and then unconditionally replaced it when
    # is_active was None, leaving a confusing dead construction.
    if is_active is None:
        membership_queryset = EntityGroupMembership.objects.all()
    else:
        # Keep kind-only memberships (null entity) and filter real
        # entities on their active flag.
        membership_queryset = EntityGroupMembership.objects.filter(
            Q(entity__isnull=True) | (Q(entity__isnull=False) & Q(entity__is_active=is_active))
        )

    if group_ids:
        membership_queryset = membership_queryset.filter(entity_group_id__in=group_ids)

    membership_queryset = membership_queryset.values_list('entity_group_id', 'entity_id', 'sub_entity_kind_id')

    # Iterate over the query results and build the cache dict
    membership_cache = {}
    for entity_group_id, entity_id, sub_entity_kind_id in membership_queryset:
        membership_cache.setdefault(entity_group_id, [])
        membership_cache[entity_group_id].append([entity_id, sub_entity_kind_id])

    return membership_cache
Return all the entities in the group.
Because groups can contain both individual entities, as well
as whole groups of entities, this method acts as a convenient
way to get a queryset of all the entities in the group.
def all_entities(self, is_active=True):
    """
    Return all the entities in the group.

    Because groups can contain both individual entities, as well as whole
    groups of entities, this method acts as a convenient way to get a
    queryset of all the entities in the group.
    """
    return self.get_all_entities(is_active=is_active, return_models=True)
Returns a list of all entity ids in this group or optionally returns a queryset for all entity models.
In order to reduce queries for multiple group lookups, it is expected that the membership_cache and
entities_by_kind are built outside of this method and passed in as arguments.
:param membership_cache: A group cache dict generated from `EntityGroup.objects.get_membership_cache()`
:type membership_cache: dict
:param entities_by_kind: An entities by kind dict generated from the `get_entities_by_kind` function
:type entities_by_kind: dict
:param return_models: If True, returns an Entity queryset, if False, returns a set of entity ids
:type return_models: bool
:param is_active: Flag to control entities being returned. Defaults to True for active entities only
:type is_active: bool
def get_all_entities(self, membership_cache=None, entities_by_kind=None, return_models=False, is_active=True):
    """
    Returns a list of all entity ids in this group or optionally returns a
    queryset for all entity models.

    In order to reduce queries for multiple group lookups, it is expected
    that the membership_cache and entities_by_kind are built outside of
    this method and passed in as arguments.

    :param membership_cache: A group cache dict generated from
        `EntityGroup.objects.get_membership_cache()`
    :type membership_cache: dict
    :param entities_by_kind: An entities by kind dict generated from the
        `get_entities_by_kind` function
    :type entities_by_kind: dict
    :param return_models: If True, returns an Entity queryset, if False,
        returns a set of entity ids
    :type return_models: bool
    :param is_active: Flag to control entities being returned. Defaults to
        True for active entities only
    :type is_active: bool
    """
    # If cache args were not passed, generate the cache
    if membership_cache is None:
        membership_cache = EntityGroup.objects.get_membership_cache([self.id], is_active=is_active)
    if entities_by_kind is None:
        # The original `entities_by_kind or ...` here was redundant:
        # entities_by_kind is always None inside this branch.
        entities_by_kind = get_entities_by_kind(membership_cache=membership_cache)

    # Build set of all entity ids for this group
    entity_ids = set()

    # This group does have entities
    if membership_cache.get(self.id):
        # Loop over each membership in this group
        for entity_id, entity_kind_id in membership_cache[self.id]:
            if entity_id:
                if entity_kind_id:
                    # All sub entities of this kind under this entity
                    entity_ids.update(entities_by_kind[entity_kind_id][entity_id])
                else:
                    # Individual entity
                    entity_ids.add(entity_id)
            else:
                # All entities of this kind
                entity_ids.update(entities_by_kind[entity_kind_id]['all'])

    # Check if a queryset needs to be returned
    if return_models:
        return Entity.objects.filter(id__in=entity_ids)

    return entity_ids
Add an entity, or sub-entity group to this EntityGroup.
:type entity: Entity
:param entity: The entity to add.
:type sub_entity_kind: Optional EntityKind
:param sub_entity_kind: If a sub_entity_kind is given, all
sub_entities of the entity will be added to this
EntityGroup.
def add_entity(self, entity, sub_entity_kind=None):
    """
    Add an entity, or sub-entity group, to this EntityGroup.

    :type entity: Entity
    :param entity: The entity to add.
    :type sub_entity_kind: Optional EntityKind
    :param sub_entity_kind: If a sub_entity_kind is given, all
        sub_entities of the entity will be added to this EntityGroup.
    """
    return EntityGroupMembership.objects.create(
        entity_group=self,
        entity=entity,
        sub_entity_kind=sub_entity_kind,
    )
Add many entities and sub-entity groups to this EntityGroup.
:type entities_and_kinds: List of (Entity, EntityKind) pairs.
:param entities_and_kinds: A list of entity, entity-kind pairs
to add to the group. In the pairs the entity-kind can be
``None``, to add a single entity, or some entity kind to
add all sub-entities of that kind.
def bulk_add_entities(self, entities_and_kinds):
    """
    Add many entities and sub-entity groups to this EntityGroup.

    :type entities_and_kinds: List of (Entity, EntityKind) pairs.
    :param entities_and_kinds: A list of entity, entity-kind pairs to add
        to the group. In the pairs the entity-kind can be ``None``, to add
        a single entity, or some entity kind to add all sub-entities of
        that kind.
    """
    memberships = []
    for entity, sub_entity_kind in entities_and_kinds:
        memberships.append(EntityGroupMembership(
            entity_group=self,
            entity=entity,
            sub_entity_kind=sub_entity_kind,
        ))
    return EntityGroupMembership.objects.bulk_create(memberships)
Remove an entity, or sub-entity group, from this EntityGroup.
:type entity: Entity
:param entity: The entity to remove.
:type sub_entity_kind: Optional EntityKind
:param sub_entity_kind: If a sub_entity_kind is given, all
sub_entities of the entity will be removed from this
EntityGroup.
def remove_entity(self, entity, sub_entity_kind=None):
    """
    Remove an entity, or sub-entity group, from this EntityGroup.

    :type entity: Entity
    :param entity: The entity to remove.
    :type sub_entity_kind: Optional EntityKind
    :param sub_entity_kind: If a sub_entity_kind is given, all
        sub-entities of the entity will be removed from this
        EntityGroup.
    """
    membership = EntityGroupMembership.objects.get(
        entity_group=self,
        entity=entity,
        sub_entity_kind=sub_entity_kind,
    )
    membership.delete()
Remove many entities and sub-entity groups from this EntityGroup.
:type entities_and_kinds: List of (Entity, EntityKind) pairs.
:param entities_and_kinds: A list of entity, entity-kind pairs
to remove from the group. In the pairs, the entity-kind
can be ``None``, to add a single entity, or some entity
kind to add all sub-entities of that kind.
def bulk_remove_entities(self, entities_and_kinds):
    """
    Remove many entities and sub-entity groups from this EntityGroup.

    :type entities_and_kinds: List of (Entity, EntityKind) pairs.
    :param entities_and_kinds: A list of entity, entity-kind pairs
        to remove from the group. In the pairs, the entity-kind can
        be ``None``, to remove a single entity, or some entity kind
        to remove all sub-entities of that kind.
    """
    # OR together one Q object per (entity, kind) pair so a single
    # filtered delete removes every matching membership.
    combined = Q()
    for entity, entity_kind in entities_and_kinds:
        combined = combined | Q(entity=entity, sub_entity_kind=entity_kind)
    EntityGroupMembership.objects.filter(
        combined, entity_group=self).delete()
Update the group to the given entities and sub-entity groups.
After this operation, the only members of this EntityGroup
will be the given entities, and sub-entity groups.
:type entities_and_kinds: List of (Entity, EntityKind) pairs.
:param entities_and_kinds: A list of entity, entity-kind pairs
to set to the EntityGroup. In the pairs the entity-kind
can be ``None``, to add a single entity, or some entity
kind to add all sub-entities of that kind.
def bulk_overwrite(self, entities_and_kinds):
    """
    Update the group to the given entities and sub-entity groups.

    After this operation, the only members of this EntityGroup
    will be the given entities, and sub-entity groups.

    :type entities_and_kinds: List of (Entity, EntityKind) pairs.
    :param entities_and_kinds: A list of entity, entity-kind pairs
        to set on the EntityGroup. In the pairs the entity-kind
        can be ``None``, to add a single entity, or some entity
        kind to add all sub-entities of that kind.
    """
    # Clear out every existing membership first, then re-add.
    existing = EntityGroupMembership.objects.filter(entity_group=self)
    existing.delete()
    return self.bulk_add_entities(entities_and_kinds)
A copy of spectator.core.models.SluggedModelMixin._generate_slug()
def generate_slug(value):
    "A copy of spectator.core.models.SluggedModelMixin._generate_slug()"
    # Settings may override the hashids alphabet and salt.
    alphabet = getattr(
        settings, 'SPECTATOR_SLUG_ALPHABET',
        'abcdefghijkmnopqrstuvwxyz23456789')
    salt = getattr(settings, 'SPECTATOR_SLUG_SALT', 'Django Spectator')
    hashids = Hashids(alphabet=alphabet, salt=salt, min_length=5)
    return hashids.encode(value)
Create a slug for each Work already in the DB.
def set_slug(apps, schema_editor, class_name):
    """
    Create a slug for each existing instance of `class_name` in the DB.
    """
    model = apps.get_model('spectator_events', class_name)
    for instance in model.objects.all():
        instance.slug = generate_slug(instance.pk)
        instance.save(update_fields=['slug'])
e.g. 'Gig' or 'Movie'.
def kind_name(self):
    "e.g. 'Gig' or 'Movie'."
    # KIND_CHOICES is a sequence of (kind, name) pairs, so dict() maps
    # kind values straight to their display names.
    return dict(self.KIND_CHOICES)[self.kind]
e.g. 'Gigs' or 'Movies'.
def get_kind_name_plural(kind):
    "e.g. 'Gigs' or 'Movies'."
    # 'museum' has an irregular plural; a handful of kinds just
    # capitalize; everything else gets a plain 's' suffix.
    if kind == 'museum':
        return 'Galleries/Museums'
    if kind in ('comedy', 'cinema', 'dance', 'theatre'):
        return kind.title()
    return '{}s'.format(Event.get_kind_name(kind))
Returns a dict of all the data about the kinds, keyed to the kind
value. e.g:
{
'gig': {
'name': 'Gig',
'slug': 'gigs',
'name_plural': 'Gigs',
},
# etc
}
def get_kinds_data():
    """
    Returns a dict of all the data about the kinds, keyed to the kind
    value. e.g:
        {
            'gig': {
                'name': 'Gig',
                'slug': 'gigs',
                'name_plural': 'Gigs',
            },
            # etc
        }
    """
    data = {}
    for kind, name in Event.KIND_CHOICES:
        data[kind] = {
            'name': name,
            'slug': Event.KIND_SLUGS[kind],
            'name_plural': Event.get_kind_name_plural(kind),
        }
    return data
Get the list URL for this Work.
You can also pass a kind_slug in (e.g. 'movies') and it will use that
instead of the Work's kind_slug. (Why? Useful in views. Or tests of
views, at least.)
def get_list_url(self, kind_slug=None):
    """
    Get the list URL for this Work.

    You can also pass a kind_slug in (e.g. 'movies') and it will use that
    instead of the Work's kind_slug. (Why? Useful in views. Or tests of
    views, at least.)
    """
    slug = kind_slug if kind_slug is not None else self.KIND_SLUGS[self.kind]
    return reverse('spectator:events:work_list',
                   kwargs={'kind_slug': slug})
Convert descriptor and rows to Pandas
def convert_descriptor_and_rows(self, descriptor, rows):
    """Convert a Table Schema descriptor and rows to a Pandas dataframe.

    descriptor -- a Table Schema descriptor dict.
    rows -- an iterable of row value sequences matching the schema.

    Returns a pd.DataFrame; a single-column primary key, if any, becomes
    the dataframe's index. Raises tableschema.exceptions.StorageError
    for multi-column primary keys, which aren't supported here.
    """
    # Prepare
    primary_key = None
    schema = tableschema.Schema(descriptor)
    if len(schema.primary_key) == 1:
        primary_key = schema.primary_key[0]
    elif len(schema.primary_key) > 1:
        message = 'Multi-column primary keys are not supported'
        raise tableschema.exceptions.StorageError(message)
    # Get data/index
    data_rows = []
    index_rows = []
    jtstypes_map = {}
    for row in rows:
        values = []
        index = None
        for field, value in zip(schema.fields, row):
            try:
                # NaN means "missing"; integers may arrive as floats.
                if isinstance(value, float) and np.isnan(value):
                    value = None
                if value and field.type == 'integer':
                    value = int(value)
                value = field.cast_value(value)
            except tableschema.exceptions.CastError:
                # Fall back to treating the cell as embedded JSON.
                value = json.loads(value)
            # Missing numbers/integers become float NaN columns:
            # http://pandas.pydata.org/pandas-docs/stable/gotchas.html#support-for-integer-na
            # NOTE: np.nan, not np.NaN — the NaN alias was removed in NumPy 2.0.
            if value is None and field.type in ('number', 'integer'):
                jtstypes_map[field.name] = 'number'
                value = np.nan
            if field.name == primary_key:
                index = value
            else:
                values.append(value)
        data_rows.append(tuple(values))
        index_rows.append(index)
    # Get dtypes
    dtypes = []
    for field in schema.fields:
        if field.name != primary_key:
            field_name = field.name
            if six.PY2:
                # Structured-array field names must be bytes on Python 2.
                field_name = field.name.encode('utf-8')
            dtype = self.convert_type(jtstypes_map.get(field.name, field.type))
            dtypes.append((field_name, dtype))
    # Create dataframe
    index = None
    columns = schema.headers
    array = np.array(data_rows, dtype=dtypes)
    if primary_key:
        index_field = schema.get_field(primary_key)
        index_dtype = self.convert_type(index_field.type)
        index_class = pd.Index
        if index_field.type in ['datetime', 'date']:
            index_class = pd.DatetimeIndex
        index = index_class(index_rows, name=primary_key, dtype=index_dtype)
        # The primary key lives in the index, not the columns.
        columns = filter(lambda column: column != primary_key, schema.headers)
    dataframe = pd.DataFrame(array, index=index, columns=columns)
    return dataframe
Convert type to Pandas
def convert_type(self, type):
    """Map a Table Schema field type to the matching Pandas/NumPy dtype.

    Raises tableschema.exceptions.StorageError for unknown types.
    """
    mapping = {
        'any': np.dtype('O'),
        'array': np.dtype(list),
        'boolean': np.dtype(bool),
        'date': np.dtype('O'),
        'datetime': np.dtype('datetime64[ns]'),
        'duration': np.dtype('O'),
        'geojson': np.dtype('O'),
        'geopoint': np.dtype('O'),
        'integer': np.dtype(int),
        'number': np.dtype(float),
        'object': np.dtype(dict),
        'string': np.dtype('O'),
        'time': np.dtype('O'),
        'year': np.dtype(int),
        'yearmonth': np.dtype('O'),
    }
    if type in mapping:
        return mapping[type]
    message = 'Type "%s" is not supported' % type
    raise tableschema.exceptions.StorageError(message)
Restore descriptor from Pandas
def restore_descriptor(self, dataframe):
    """Restore a Table Schema descriptor from a Pandas dataframe.

    dataframe -- the pd.DataFrame to describe.

    Returns a descriptor dict with a 'fields' list; if the dataframe
    has a named index, that index becomes the primary key field and
    is marked as required.
    """
    # Prepare
    fields = []
    primary_key = None
    # Primary key
    if dataframe.index.name:
        field_type = self.restore_type(dataframe.index.dtype)
        field = {
            'name': dataframe.index.name,
            'type': field_type,
            'constraints': {'required': True},
        }
        fields.append(field)
        primary_key = dataframe.index.name
    # Fields
    # Series.items(), not iteritems(): iteritems() was removed in pandas 2.0.
    for column, dtype in dataframe.dtypes.items():
        # A sample value helps distinguish object-dtype columns.
        sample = dataframe[column].iloc[0] if len(dataframe) else None
        field_type = self.restore_type(dtype, sample=sample)
        field = {'name': column, 'type': field_type}
        # TODO: provide better required indication
        # if dataframe[column].isnull().sum() == 0:
        #     field['constraints'] = {'required': True}
        fields.append(field)
    # Descriptor
    descriptor = {}
    descriptor['fields'] = fields
    if primary_key:
        descriptor['primaryKey'] = primary_key
    return descriptor
Restore row from Pandas
def restore_row(self, row, schema, pk):
    """Restore one Pandas row to a list of values cast via the schema.

    row -- a mapping of field name to raw value.
    schema -- a tableschema.Schema.
    pk -- the row's primary-key value (used for the PK field, if any).
    """
    result = []
    for field in schema.fields:
        is_pk = bool(schema.primary_key) and schema.primary_key[0] == field.name
        if is_pk:
            # NaN means a missing number; integers may come back as floats.
            if field.type == 'number' and np.isnan(pk):
                pk = None
            if pk and field.type == 'integer':
                pk = int(pk)
            result.append(field.cast_value(pk))
            continue
        value = row[field.name]
        if field.type == 'number' and np.isnan(value):
            value = None
        if value and field.type == 'integer':
            value = int(value)
        elif field.type == 'datetime':
            # Pandas Timestamps become plain datetime objects.
            value = value.to_pydatetime()
        result.append(field.cast_value(value))
    return result
Restore type from Pandas
def restore_type(self, dtype, sample=None):
    """Restore a Table Schema type from a Pandas dtype (plus sample value).

    The dtype decides for boolean/datetime/numeric columns; for
    object-dtype columns a sample Python value is inspected. Defaults
    to 'string' when nothing more specific matches.
    """
    # Pandas dtypes first.
    if pdc.is_bool_dtype(dtype):
        return 'boolean'
    if pdc.is_datetime64_any_dtype(dtype):
        return 'datetime'
    if pdc.is_integer_dtype(dtype):
        return 'integer'
    if pdc.is_numeric_dtype(dtype):
        return 'number'
    # Fall back to inspecting a sample Python value; order matters
    # (e.g. datetimes are instances of datetime.date).
    if sample is not None:
        checks = (
            ((list, tuple), 'array'),
            (datetime.date, 'date'),
            (isodate.Duration, 'duration'),
            (dict, 'object'),
            (six.string_types, 'string'),
            (datetime.time, 'time'),
        )
        for sample_types, type_name in checks:
            if isinstance(sample, sample_types):
                return type_name
    return 'string'
If the user has permission to change `obj`, show a link to its Admin page.
obj -- An object like Movie, Play, ClassicalWork, Publication, etc.
perms -- The `perms` object that's in the template context.
def change_object_link_card(obj, perms):
    """
    If the user has permission to change `obj`, show a link to its Admin page.

    obj -- An object like Movie, Play, ClassicalWork, Publication, etc.
    perms -- The `perms` object that's in the template context.
    """
    # e.g. 'movie' or 'classicalwork':
    model_name = obj.__class__.__name__.lower()
    permission = 'spectator.can_edit_{}'.format(model_name)
    # e.g. 'admin:events_classicalwork_change':
    url_name = 'admin:{}_{}_change'.format(obj._meta.app_label, model_name)
    return {
        'display_link': permission in perms,
        'change_url': reverse(url_name, args=[obj.id]),
    }
Returns an HTML link to the supplied URL, but only using the domain as the
text. Strips 'www.' from the start of the domain, if present.
e.g. if `my_url` is 'http://www.example.org/foo/' then:
{{ my_url|domain_urlize }}
returns:
<a href="http://www.example.org/foo/" rel="nofollow">example.org</a>
def domain_urlize(value):
    """
    Returns an HTML link to the supplied URL, but only using the domain as the
    text. Strips 'www.' from the start of the domain, if present.

    e.g. if `my_url` is 'http://www.example.org/foo/' then:
        {{ my_url|domain_urlize }}
    returns:
        <a href="http://www.example.org/foo/" rel="nofollow">example.org</a>
    """
    domain = '{uri.netloc}'.format(uri=urlparse(value))
    if domain.startswith('www.'):
        domain = domain[len('www.'):]
    return format_html(
        '<a href="{}" rel="nofollow">{}</a>', value, domain)
Returns the name of the current URL, namespaced, or False.
Example usage:
{% current_url_name as url_name %}
<a href="#"{% if url_name == 'myapp:home' %} class="active"{% endif %}">Home</a>
def current_url_name(context):
    """
    Returns the name of the current URL, namespaced, or False.

    Example usage:
        {% current_url_name as url_name %}
        <a href="#"{% if url_name == 'myapp:home' %} class="active"{% endif %}">Home</a>
    """
    match = context.request.resolver_match
    if not match:
        # e.g. a 404 page, where there's no resolved URL.
        return False
    return "{}:{}".format(match.namespace, match.url_name)
For adding/replacing a key=value pair to the GET string for a URL.
eg, if we're viewing ?p=3 and we do {% query_string order 'taken' %}
then this returns "p=3&order=taken"
And, if we're viewing ?p=3&order=uploaded and we do the same thing, we get
the same result (ie, the existing "order=uploaded" is replaced).
Expects the request object in context to do the above; otherwise it will
just return a query string with the supplied key=value pair.
def query_string(context, key, value):
    """
    For adding/replacing a key=value pair in the GET string for a URL.

    eg, if we're viewing ?p=3 and we do {% query_string order 'taken' %}
    then this returns "p=3&order=taken"

    And, if we're viewing ?p=3&order=uploaded and we do the same thing, we get
    the same result (ie, the existing "order=uploaded" is replaced).

    Expects the request object in context to do the above; otherwise it will
    just return a query string with the supplied key=value pair.
    """
    try:
        args = context['request'].GET.copy()
    except KeyError:
        # No request in context; start from an empty query string.
        args = QueryDict('').copy()
    args[key] = value
    return args.urlencode()
Displays a card showing the Creators who have the most Readings
associated with their Publications.
In spectator_core tags, rather than spectator_reading so it can still be
used on core pages, even if spectator_reading isn't installed.
def most_read_creators_card(num=10):
    """
    Displays a card showing the Creators who have the most Readings
    associated with their Publications.

    In spectator_core tags, rather than spectator_reading so it can still be
    used on core pages, even if spectator_reading isn't installed.
    """
    if not spectator_apps.is_enabled('reading'):
        # Nothing to show; matches the original's implicit None.
        return None
    creators = chartify(most_read_creators(num=num), 'num_readings', cutoff=1)
    return {
        'card_title': 'Most read authors',
        'score_attr': 'num_readings',
        'object_list': creators,
    }
Displays a card showing the Venues that have the most Events.
In spectator_core tags, rather than spectator_events so it can still be
used on core pages, even if spectator_events isn't installed.
def most_visited_venues_card(num=10):
    """
    Displays a card showing the Venues that have the most Events.

    In spectator_core tags, rather than spectator_events so it can still be
    used on core pages, even if spectator_events isn't installed.
    """
    if not spectator_apps.is_enabled('events'):
        # Nothing to show; matches the original's implicit None.
        return None
    venues = chartify(most_visited_venues(num=num), 'num_visits', cutoff=1)
    return {
        'card_title': 'Most visited venues',
        'score_attr': 'num_visits',
        'object_list': venues,
    }
Handy for templates.
def has_urls(self):
    "Handy for templates."
    # True if any of the URL-ish fields is non-empty.
    return bool(
        self.isbn_uk or self.isbn_us or self.official_url or self.notes_url
    )
eg, get_entity('spectator', 'version') returns `__version__` value in
`__init__.py`.
def get_entity(package, entity):
    """
    Return the value of a dunder attribute from `package`'s `__init__.py`.

    eg, get_entity('spectator', 'version') returns the `__version__` value
    in `__init__.py`.

    Raises AttributeError (via re.search returning None) if the entity
    isn't found, and OSError if the file can't be read.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original left it open).
    with open(os.path.join(package, '__init__.py')) as f:
        init_py = f.read()
    find = "__%s__ = ['\"]([^'\"]+)['\"]" % entity
    return re.search(find, init_py).group(1)
Reduce the number of queries and speed things up.
def get_queryset(self):
    "Reduce the number of queries and speed things up."
    # Pull in the related series and creators alongside each reading.
    return (
        super().get_queryset()
        .select_related('publication__series')
        .prefetch_related('publication__roles__creator')
    )
Create a slug for each Creator already in the DB.
def set_slug(apps, schema_editor):
    """
    Create a slug for each Creator already in the DB.
    """
    Creator = apps.get_model('spectator_core', 'Creator')
    for creator in Creator.objects.all():
        creator.slug = generate_slug(creator.pk)
        creator.save(update_fields=['slug'])
Copy the ClassicalWork and DancePiece data to use the new through models.
def forwards(apps, schema_editor):
    """
    Copy the ClassicalWork and DancePiece data to use the new through models.
    """
    Event = apps.get_model('spectator_events', 'Event')
    ClassicalWorkSelection = apps.get_model(
        'spectator_events', 'ClassicalWorkSelection')
    DancePieceSelection = apps.get_model(
        'spectator_events', 'DancePieceSelection')
    for event in Event.objects.all():
        # One through-model row per associated work/piece.
        for work in event.classicalworks.all():
            ClassicalWorkSelection(classical_work=work, event=event).save()
        for piece in event.dancepieces.all():
            DancePieceSelection(dance_piece=piece, event=event).save()
Set the venue_name field of all Events that have a Venue.
def forwards(apps, schema_editor):
    """
    Set the venue_name field of all Events that have a Venue.
    """
    Event = apps.get_model('spectator_events', 'Event')
    for event in Event.objects.all():
        if event.venue is None:
            continue
        event.venue_name = event.venue.name
        event.save()
Migrate all 'exhibition' Events to the new 'museum' Event kind.
def forwards(apps, schema_editor):
    """
    Migrate all 'exhibition' Events to the new 'museum' Event kind.
    """
    Event = apps.get_model('spectator_events', 'Event')
    exhibitions = Event.objects.filter(kind='exhibition')
    for event in exhibitions:
        event.kind = 'museum'
        event.save()
Truncate a string to a certain length, removing line breaks and multiple
spaces, optionally removing HTML, and appending a 'truncate' string.
Keyword arguments:
strip_html -- boolean.
chars -- Number of characters to return.
at_word_boundary -- Only truncate at a word boundary, which will probably
result in a string shorter than chars.
truncate -- String to add to the end.
def truncate_string(text, strip_html=True, chars=255, truncate='…', at_word_boundary=False):
    """Truncate a string to a certain length, removing line breaks and multiple
    spaces, optionally removing HTML, and appending a 'truncate' string.

    Keyword arguments:
    strip_html -- boolean.
    chars -- Number of characters to return.
    at_word_boundary -- Only truncate at a word boundary, which will probably
        result in a string shorter than chars.
    truncate -- String to add to the end.
    """
    if strip_html:
        text = strip_tags(text)
    # Collapse line breaks and runs of whitespace into single spaces.
    text = ' '.join(text.replace('\n', ' ').replace('\r', '').split())
    if not at_word_boundary:
        return Truncator(text).chars(chars, html=False, truncate=truncate)
    if len(text) > chars:
        # Cut at chars, then drop the (probably partial) final word.
        text = text[:chars].rsplit(' ', 1)[0] + truncate
    return text
Given a QuerySet it will go through and add a `chart_position` property to
each object returning a list of the objects.
If adjacent objects have the same 'score' (based on `score_field`) then
they will have the same `chart_position`. This can then be used in
templates for the `value` of <li> elements in an <ol>.
By default any objects with a score of 0 or less will be removed.
By default, if all the items in the chart have the same position, no items
will be returned (it's not much of a chart).
Keyword arguments:
qs -- The QuerySet
score_field -- The name of the numeric field that each object in the
QuerySet has, that will be used to compare their positions.
cutoff -- Any objects with a score of this value or below will be removed
from the list. Set to None to disable this.
ensure_chartiness -- If True, then if all items in the list have the same
score, an empty list will be returned.
def chartify(qs, score_field, cutoff=0, ensure_chartiness=True):
    """
    Given a QuerySet it will go through and add a `chart_position` property to
    each object, returning a list of the objects.

    If adjacent objects have the same 'score' (based on `score_field`) then
    they will share a `chart_position`. This can then be used in templates
    for the `value` of <li> elements in an <ol>.

    Keyword arguments:
    qs -- The QuerySet (assumed to be sorted by score already).
    score_field -- The name of the numeric field that each object in the
        QuerySet has, that will be used to compare their positions.
    cutoff -- Any objects with a score of this value or below will be removed
        from the list. Set to None to disable this.
    ensure_chartiness -- If True, then if all items in the list have the same
        score, an empty list will be returned (it's not much of a chart).
    """
    chart = []
    position = 0
    previous_score = None
    for index, obj in enumerate(qs):
        score = getattr(obj, score_field)
        # A tie keeps the previous position; otherwise jump to 1-based rank.
        if score != previous_score:
            position = index + 1
        if cutoff is None or score > cutoff:
            obj.chart_position = position
            chart.append(obj)
        previous_score = score
    if ensure_chartiness and chart:
        if getattr(chart[0], score_field) == getattr(chart[-1], score_field):
            chart = []
    return chart
Gets Venues in order of how many Events have been held there.
Adds a `num_visits` field to each one.
event_kind filters by kind of Event, e.g. 'theatre', 'cinema', etc.
def by_visits(self, event_kind=None):
    """
    Gets Venues in order of how many Events have been held there.
    Adds a `num_visits` field to each one.

    event_kind filters by kind of Event, e.g. 'theatre', 'cinema', etc.
    """
    venues = self.get_queryset()
    if event_kind is not None:
        venues = venues.filter(event__kind=event_kind)
    return venues.annotate(num_visits=Count('event')) \
                 .order_by('-num_visits', 'name_sort')
Gets Works in order of how many times they've been attached to
Events.
kind is the kind of Work, e.g. 'play', 'movie', etc.
def by_views(self, kind=None):
    """
    Gets Works in order of how many times they've been attached to
    Events. Adds a `num_views` field to each one.

    kind is the kind of Work, e.g. 'play', 'movie', etc.
    """
    works = self.get_queryset()
    if kind is not None:
        works = works.filter(kind=kind)
    return works.annotate(num_views=Count('event')) \
                .order_by('-num_views', 'title_sort')
Make a naturalized version of a general string, not a person's name.
e.g., title of a book, a band's name, etc.
string -- a lowercase string.
def naturalize_thing(self, string):
    """
    Make a naturalized version of a general string, not a person's name.
    e.g., title of a book, a band's name, etc.

    string -- a lowercase string.
    """
    # Leading articles get moved to the end: 'the long blondes'
    # becomes 'long blondes, the'.
    articles = [
        'a', 'an', 'the',
        'un', 'une', 'le', 'la', 'les', "l'", "l’",
        'ein', 'eine', 'der', 'die', 'das',
        'una', 'el', 'los', 'las',
    ]
    sort_string = string
    words = string.split(' ')
    # Only move the article when there's more than one word, and the
    # first two words differ (so 'the the' or 'la la land' stay put).
    if len(words) > 1 and words[0] in articles and words[0] != words[1]:
        sort_string = '{}, {}'.format(' '.join(words[1:]), words[0])
    return self._naturalize_numbers(sort_string)
Attempt to make a version of the string that has the surname, if any,
at the start.
'John, Brown' to 'Brown, John'
'Sir John Brown Jr' to 'Brown, Sir John Jr'
'Prince' to 'Prince'
string -- The string to change.
def naturalize_person(self, string):
    """
    Attempt to make a version of the string that has the surname, if any,
    at the start.

    'John, Brown' to 'Brown, John'
    'Sir John Brown Jr' to 'Brown, Sir John Jr'
    'Prince' to 'Prince'

    string -- The string to change.
    """
    suffixes = [
        'Jr', 'Jr.', 'Sr', 'Sr.',
        'I', 'II', 'III', 'IV', 'V',
    ]
    # Add lowercase versions:
    suffixes = suffixes + [s.lower() for s in suffixes]
    # If a name has a capitalised particle in we use that to sort.
    # So 'Le Carre, John' but 'Carre, John le'.
    particles = [
        'Le', 'La',
        'Von', 'Van',
        'Du', 'De',
    ]
    parts = string.split(' ')
    suffix = ''  # e.g. 'Jr'
    if parts[-1] in suffixes:
        # Strip the suffix for now; it's re-appended at the end.
        suffix = parts.pop()
    sort_string = ' '.join(parts)
    if len(parts) > 1:
        if parts[-2] in particles:
            # Glue the particle onto the surname:
            # ['Alan', 'Barry', 'Le', 'Carré'] -> ['Alan', 'Barry', 'Le Carré']
            parts = parts[:-2] + [' '.join(parts[-2:])]
        # 'David Foster Wallace' -> 'Wallace, David Foster':
        sort_string = '{}, {}'.format(parts[-1], ' '.join(parts[:-1]))
    if suffix:
        # Add it back on.
        sort_string = '{} {}'.format(sort_string, suffix)
    # In case this name has any numbers in it.
    return self._naturalize_numbers(sort_string)
Makes any integers into very zero-padded numbers.
e.g. '1' becomes '00000001'.
def _naturalize_numbers(self, string):
"""
Makes any integers into very zero-padded numbers.
e.g. '1' becomes '00000001'.
"""
def naturalize_int_match(match):
return '%08d' % (int(match.group(0)),)
string = re.sub(r'\d+', naturalize_int_match, string)
return string |
Returns a list of dicts, one per year of reading. In year order.
Each dict is like this (if kind is 'all'):
{'year': datetime.date(2003, 1, 1),
'book': 12, # only included if kind is 'all' or 'book'
'periodical': 18, # only included if kind is 'all' or 'periodical'
'total': 30, # only included if kind is 'all'
}
We use the end_date of a Reading to count when that thing was read.
kind is one of 'book', 'periodical' or 'all', for both.
def annual_reading_counts(kind='all'):
    """
    Returns a list of dicts, one per year of reading. In year order.
    Each dict is like this (if kind is 'all'):

        {'year': datetime.date(2003, 1, 1),
         'book': 12,        # only included if kind is 'all' or 'book'
         'periodical': 18,  # only included if kind is 'all' or 'periodical'
         'total': 30,       # only included if kind is 'all'
        }

    We use the end_date of a Reading to count when that thing was read.

    kind is one of 'book', 'periodical' or 'all', for both.
    """
    if kind == 'all':
        kinds = ['book', 'periodical']
    else:
        kinds = [kind]
    # This will have keys of years (strings) and dicts of data:
    # {
    #     '2003': {'books': 12, 'periodicals': 18},
    # }
    counts = OrderedDict()
    # One aggregate query per kind, grouping finished Readings by the
    # year of their end_date.
    for k in kinds:
        qs = Reading.objects.exclude(end_date__isnull=True) \
            .filter(publication__kind=k) \
            .annotate(year=TruncYear('end_date')) \
            .values('year') \
            .annotate(count=Count('id')) \
            .order_by('year')
        for year_data in qs:
            year_str = year_data['year'].strftime('%Y')
            if year_str not in counts:
                counts[year_str] = {
                    'year': year_data['year'],
                }
            counts[year_str][k] = year_data['count']
    # Now translate counts into our final list, with totals, and 0s for kinds
    # when they have no Readings for that year.
    counts_list = []
    for year_str, data in counts.items():
        year_data = {
            'year': data['year'],
        }
        if kind == 'all':
            year_data['total'] = 0
        for k in kinds:
            if k in data:
                year_data[k] = data[k]
                if kind == 'all':
                    year_data['total'] += data[k]
            else:
                year_data[k] = 0
        counts_list.append(year_data)
    return counts_list
Returns a list of tuples like:
[
('AU', 'Australia'),
('GB', 'UK'),
('US', 'USA'),
]
One for each country that has at least one Venue.
Sorted by the label names.
def lookups(self, request, model_admin):
    """
    Returns a list of tuples like:

        [
            ('AU', 'Australia'),
            ('GB', 'UK'),
            ('US', 'USA'),
        ]

    One for each country that has at least one Venue.
    Sorted by the label names.
    """
    # We don't need the country_count but annotating groups the
    # results by country.
    qs = Venue.objects.exclude(country='') \
                      .values('country') \
                      .annotate(country_count=Count('country')) \
                      .order_by('country')
    list_of_countries = [
        (row['country'], Venue.COUNTRIES[row['country']])
        for row in qs
    ]
    return sorted(list_of_countries, key=lambda pair: pair[1])
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
def queryset(self, request, queryset):
    """
    Returns the filtered queryset based on the value
    provided in the query string and retrievable via
    `self.value()`.
    """
    country = self.value()
    if country:
        return queryset.filter(country=country)
    # No country selected; leave the queryset untouched.
    return queryset
Copying data from the old `Event.movie` and `Event.play` ForeignKey fields
into the new `Event.movies` and `Event.plays` ManyToManyFields.
def forward(apps, schema_editor):
    """
    Copying data from the old `Event.movie` and `Event.play` ForeignKey fields
    into the new `Event.movies` and `Event.plays` ManyToManyFields.
    """
    Event = apps.get_model('spectator_events', 'Event')
    MovieSelection = apps.get_model('spectator_events', 'MovieSelection')
    PlaySelection = apps.get_model('spectator_events', 'PlaySelection')
    for event in Event.objects.all():
        # One through-model row per old FK value, when set.
        if event.movie is not None:
            MovieSelection(event=event, movie=event.movie).save()
        if event.play is not None:
            PlaySelection(event=event, play=event.play).save()
Create a slug for each Event already in the DB.
def set_slug(apps, schema_editor):
    """
    Create a slug for each Event already in the DB.
    """
    Event = apps.get_model('spectator_events', 'Event')
    for event in Event.objects.all():
        event.slug = generate_slug(event.pk)
        event.save(update_fields=['slug'])
Return a standard ``Page`` instance with custom, digg-specific
page ranges attached.
def page(self, number, *args, **kwargs):
    """Return a standard ``Page`` instance with custom, digg-specific
    page ranges attached.

    The returned page gains four extra attributes: ``main_range``,
    ``leading_range`` and ``trailing_range`` (lists of page numbers for
    the middle, start and end sections of the digg-style control), and
    ``page_range`` (the three concatenated, with ``False`` entries
    marking the gaps between non-adjacent sections — presumably
    rendered as '...' by templates; confirm against the template code).
    """
    page = super().page(number, *args, **kwargs)
    number = int(number) # we know this will work
    # easier access
    num_pages, body, tail, padding, margin = \
        self.num_pages, self.body, self.tail, self.padding, self.margin
    # put active page in middle of main range
    main_range = list(map(int, [
        math.floor(number-body/2.0)+1, # +1 = shift odd body to right
        math.floor(number+body/2.0)]))
    # adjust bounds
    if main_range[0] < 1:
        main_range = list(map(abs(main_range[0]-1).__add__, main_range))
    if main_range[1] > num_pages:
        main_range = list(map((num_pages-main_range[1]).__add__, main_range))
    # Determine leading and trailing ranges; if possible and appropriate,
    # combine them with the main range, in which case the resulting main
    # block might end up considerable larger than requested. While we
    # can't guarantee the exact size in those cases, we can at least try
    # to come as close as possible: we can reduce the other boundary to
    # max padding, instead of using half the body size, which would
    # otherwise be the case. If the padding is large enough, this will
    # of course have no effect.
    # Example:
    #     total pages=100, page=4, body=5, (default padding=2)
    #     1 2 3 [4] 5 6 ... 99 100
    #     total pages=100, page=4, body=5, padding=1
    #     1 2 3 [4] 5 ... 99 100
    # If it were not for this adjustment, both cases would result in the
    # first output, regardless of the padding value.
    if main_range[0] <= tail+margin:
        leading = []
        main_range = [1, max(body, min(number+padding, main_range[1]))]
        main_range[0] = 1
    else:
        leading = list(range(1, tail+1))
    # basically same for trailing range, but not in ``left_align`` mode
    if self.align_left:
        trailing = []
    else:
        if main_range[1] >= num_pages-(tail+margin)+1:
            trailing = []
            if not leading:
                # ... but handle the special case of neither leading nor
                # trailing ranges; otherwise, we would now modify the
                # main range low bound, which we just set in the previous
                # section, again.
                main_range = [1, num_pages]
            else:
                main_range = [min(num_pages-body+1, max(number-padding, main_range[0])), num_pages]
        else:
            trailing = list(range(num_pages-tail+1, num_pages+1))
    # finally, normalize values that are out of bound; this basically
    # fixes all the things the above code screwed up in the simple case
    # of few enough pages where one range would suffice.
    main_range = [max(main_range[0], 1), min(main_range[1], num_pages)]
    # make the result of our calculations available as custom ranges
    # on the ``Page`` instance.
    page.main_range = list(range(main_range[0], main_range[1]+1))
    page.leading_range = leading
    page.trailing_range = trailing
    # Concatenate the three ranges, inserting a single False between any
    # two that are both non-empty, to mark the gap.
    page.page_range = reduce(lambda x, y: x+((x and y) and [False])+y,
        [page.leading_range, page.main_range, page.trailing_range])
    # Swap in the DiggPage class so the result presents itself as such.
    page.__class__ = DiggPage
    return page
Get the version number without importing the mrcfile package.
def version():
    """Get the version number without importing the mrcfile package."""
    # Execute version.py in an isolated namespace and pull out the value.
    namespace = {}
    with open(os.path.join('mrcfile', 'version.py')) as f:
        source = f.read()
    exec(source, namespace)
    return namespace['__version__']
Returns a dict like:
{'counts': {
'all': 30,
'movie': 12,
'gig': 10,
}}
def get_event_counts(self):
    """
    Returns a dict like:
        {'counts': {
            'all': 30,
            'movie': 12,
            'gig': 10,
        }}
    """
    counts = {'all': Event.objects.count()}
    # One count per Event kind, keyed by the kind value.
    for kind, _name in Event.KIND_CHOICES:
        counts[kind] = Event.objects.filter(kind=kind).count()
    return {'counts': counts}
Unless we're on the front page we'll have a kind_slug like 'movies'.
We need to translate that into an event `kind` like 'movie'.
def get_event_kind(self):
    """
    Unless we're on the front page we'll have a kind_slug like 'movies'.
    We need to translate that into an event `kind` like 'movie'.
    """
    slug = self.kwargs.get('kind_slug', None)
    if slug is None:
        # Front page; showing all Event kinds.
        return None
    # Invert KIND_SLUGS (kind -> slug) to look up by slug.
    kinds_by_slug = {s: k for k, s in Event.KIND_SLUGS.items()}
    return kinds_by_slug.get(slug, None)
Restrict to a single kind of event, if any, and include Venue data.
def get_queryset(self):
    "Restrict to a single kind of event, if any, and include Venue data."
    queryset = super().get_queryset()
    kind = self.get_event_kind()
    if kind is not None:
        queryset = queryset.filter(kind=kind)
    # Fetch each Event's Venue in the same query.
    return queryset.select_related('venue')
We'll have a kind_slug like 'movies'.
We need to translate that into a work `kind` like 'movie'.
def get_work_kind(self):
    """
    We'll have a kind_slug like 'movies'.
    We need to translate that into a work `kind` like 'movie'.
    """
    # KIND_SLUGS maps kind -> slug; invert it so we can look up by slug.
    kinds_by_slug = {slug: kind for kind, slug in Work.KIND_SLUGS.items()}
    return kinds_by_slug.get(self.kind_slug)
Returns a list of dicts, one per country that has at least one Venue
in it.
Each dict has 'code' and 'name' elements.
The list is sorted by the country 'name's.
def get_countries(self):
    """
    Returns a list of dicts, one per country that has at least one Venue
    in it.

    Each dict has 'code' and 'name' elements.
    The list is sorted by the country 'name's.
    """
    rows = (
        Venue.objects.values('country')
                     .exclude(country='')
                     .distinct()
                     .order_by('country')
    )
    countries = [
        {
            'code': row['country'],
            'name': Venue.get_country_name(row['country']),
        }
        for row in rows
    ]
    # The queryset is ordered by code; re-sort by human-readable name.
    countries.sort(key=lambda country: country['name'])
    return countries
Re-save all the Works because something earlier didn't create their slugs.
def forwards(apps, schema_editor):
    """
    Re-save all the Works because something earlier didn't create their slugs.
    """
    # Use the historical model, as migrations should.
    Work = apps.get_model('spectator_events', 'Work')
    for work in Work.objects.all():
        if work.slug:
            continue
        work.slug = generate_slug(work.pk)
        work.save()
Returns a QuerySet of dicts, each one with these keys:
* year - a date object representing the year
* total - the number of events of `kind` that year
kind - The Event `kind`, or 'all' for all kinds (default).
def annual_event_counts(kind='all'):
    """
    Returns a QuerySet of dicts, each one with these keys:
        * year - a date object representing the year
        * total - the number of events of `kind` that year

    kind - The Event `kind`, or 'all' for all kinds (default).
    """
    events = Event.objects
    if kind != 'all':
        events = events.filter(kind=kind)
    # Group by calendar year and count the Events in each one.
    return (
        events.annotate(year=TruncYear('date'))
              .values('year')
              .annotate(total=Count('id'))
              .order_by('year')
    )
Displays years and the number of events per year.
kind is an Event kind (like 'cinema', 'gig', etc.) or 'all' (default).
current_year is an optional date object representing the year we're already
showing information about.
def annual_event_counts_card(kind='all', current_year=None):
    """
    Displays years and the number of events per year.

    kind is an Event kind (like 'cinema', 'gig', etc.) or 'all' (default).
    current_year is an optional date object representing the year we're
    already showing information about.
    """
    if kind == 'all':
        title = 'Events per year'
    else:
        title = '{} per year'.format(Event.get_kind_name_plural(kind))
    return {
        'card_title': title,
        'kind': kind,
        'years': annual_event_counts(kind=kind),
        'current_year': current_year,
    }
Render a date/datetime (d) as a date, using the SPECTATOR_DATE_FORMAT
setting. Wrap the output in a <time> tag.
Time tags: http://www.brucelawson.co.uk/2012/best-of-time/
def display_date(d):
    """
    Render a date/datetime (d) as a date, using the SPECTATOR_DATE_FORMAT
    setting. Wrap the output in a <time> tag.

    Time tags: http://www.brucelawson.co.uk/2012/best-of-time/
    """
    stamp = d.strftime('%Y-%m-%d')
    visible_date = d.strftime(app_settings.DATE_FORMAT)
    # Pass the values as format_html() arguments so they get HTML-escaped.
    # The old code %-interpolated them first, which meant format_html()
    # received a finished string and marked it safe WITHOUT escaping.
    return format_html(
        '<time datetime="{}">{}</time>', stamp, visible_date
    )
Displays the tabs to different event_list pages.
`counts` is a dict of number of events for each kind, like:
{'all': 30, 'gig': 12, 'movie': 18,}
`current_kind` is the event kind that's active, if any. e.g. 'gig',
'movie', etc.
`page_number` is the current page of this kind of events we're on.
def event_list_tabs(counts, current_kind, page_number=1):
    """
    Displays the tabs to different event_list pages.

    `counts` is a dict of number of events for each kind, like:
        {'all': 30, 'gig': 12, 'movie': 18,}

    `current_kind` is the event kind that's active, if any. e.g. 'gig',
    'movie', etc.

    `page_number` is the current page of this kind of events we're on.
    """
    context = {
        'counts': counts,
        'current_kind': current_kind,
        'page_number': page_number,
    }
    # A list of all the kinds we might show tabs for, like
    # ['gig', 'movie', 'play', ...]
    context['event_kinds'] = Event.get_kinds()
    # A dict of data about each kind, keyed by kind ('gig'), including
    # data about 'name', 'name_plural' and 'slug'.
    context['event_kinds_data'] = Event.get_kinds_data()
    return context
Displays Events that happened on the supplied date.
`date` is a date object.
def day_events_card(date):
    """
    Displays Events that happened on the supplied date.
    `date` is a date object.
    """
    formatted_date = date.strftime(app_settings.DATE_FORMAT)
    return {
        'card_title': 'Events on {}'.format(formatted_date),
        'event_list': day_events(date=date),
    }
Displays a card showing the Creators that are associated with the most Events.
def most_seen_creators_card(event_kind=None, num=10):
    """
    Displays a card showing the Creators that are associated with the
    most Events.
    """
    # Rank the creators by num_events; cutoff=1 drops anyone with none.
    creators = chartify(
        most_seen_creators(event_kind=event_kind, num=num),
        'num_events',
        cutoff=1,
    )
    return {
        'card_title': 'Most seen people/groups',
        'score_attr': 'num_events',
        'object_list': creators,
    }
Returns a QuerySet of the Creators that are associated with the most Works.
def most_seen_creators_by_works(work_kind=None, role_name=None, num=10):
    """
    Returns a QuerySet of the Creators that are associated with the most
    Works.
    """
    creators = Creator.objects.by_works(kind=work_kind, role_name=role_name)
    return creators[:num]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.