repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
β | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
michaelliao/sinaweibopy
|
weibo.py
|
APIClient._parse_access_token
|
python
|
def _parse_access_token(self, r):
'''
new:return access token as a JsonDict: {"access_token":"your-access-token","expires_in":12345678,"uid":1234}, expires_in is represented using standard unix-epoch-time
'''
current = int(time.time())
expires = r.expires_in + current
remind_in = r.get('remind_in', None)
if remind_in:
rtime = int(remind_in) + current
if rtime < expires:
expires = rtime
return JsonDict(access_token=r.access_token, expires=expires, expires_in=expires, uid=r.get('uid', None))
|
new:return access token as a JsonDict: {"access_token":"your-access-token","expires_in":12345678,"uid":1234}, expires_in is represented using standard unix-epoch-time
|
train
|
https://github.com/michaelliao/sinaweibopy/blob/0f19dd71c1fbd16ee539620c7e9e986887f5c665/weibo.py#L272-L283
| null |
class APIClient(object):
'''
API client using synchronized invocation.
'''
def __init__(self, app_key, app_secret, redirect_uri=None, response_type='code', domain='api.weibo.com', version='2'):
self.client_id = str(app_key)
self.client_secret = str(app_secret)
self.redirect_uri = redirect_uri
self.response_type = response_type
self.auth_url = 'https://%s/oauth2/' % domain
self.api_url = 'https://%s/%s/' % (domain, version)
self.access_token = None
self.expires = 0.0
self.get = HttpObject(self, _HTTP_GET)
self.post = HttpObject(self, _HTTP_POST)
self.upload = HttpObject(self, _HTTP_UPLOAD)
def parse_signed_request(self, signed_request):
'''
parse signed request when using in-site app.
Returns:
dict object like { 'uid': 12345, 'access_token': 'ABC123XYZ', 'expires': unix-timestamp },
or None if parse failed.
'''
def _b64_normalize(s):
appendix = '=' * (4 - len(s) % 4)
return s.replace('-', '+').replace('_', '/') + appendix
sr = str(signed_request)
logging.info('parse signed request: %s' % sr)
enc_sig, enc_payload = sr.split('.', 1)
sig = base64.b64decode(_b64_normalize(enc_sig))
data = _parse_json(base64.b64decode(_b64_normalize(enc_payload)))
if data['algorithm'] != u'HMAC-SHA256':
return None
expected_sig = hmac.new(self.client_secret, enc_payload, hashlib.sha256).digest()
if expected_sig == sig:
data.user_id = data.uid = data.get('user_id', None)
data.access_token = data.get('oauth_token', None)
expires = data.get('expires', None)
if expires:
data.expires = data.expires_in = time.time() + expires
return data
return None
def set_access_token(self, access_token, expires):
self.access_token = str(access_token)
self.expires = float(expires)
def get_authorize_url(self, redirect_uri=None, **kw):
'''
return the authorization url that the user should be redirected to.
'''
redirect = redirect_uri if redirect_uri else self.redirect_uri
if not redirect:
raise APIError('21305', 'Parameter absent: redirect_uri', 'OAuth2 request')
response_type = kw.pop('response_type', 'code')
return '%s%s?%s' % (self.auth_url, 'authorize',
_encode_params(client_id=self.client_id,
response_type=response_type,
redirect_uri=redirect, **kw))
def request_access_token(self, code, redirect_uri=None):
redirect = redirect_uri if redirect_uri else self.redirect_uri
if not redirect:
raise APIError('21305', 'Parameter absent: redirect_uri', 'OAuth2 request')
r = _http_post('%s%s' % (self.auth_url, 'access_token'),
client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=redirect,
code=code,
grant_type='authorization_code')
return self._parse_access_token(r)
def refresh_token(self, refresh_token):
req_str = '%s%s' % (self.auth_url, 'access_token')
r = _http_post(req_str,
client_id=self.client_id,
client_secret=self.client_secret,
refresh_token=refresh_token,
grant_type='refresh_token')
return self._parse_access_token(r)
def is_expires(self):
return not self.access_token or time.time() > self.expires
def __getattr__(self, attr):
if '__' in attr:
return getattr(self.get, attr)
return _Callable(self, attr)
|
c0fec0de/anytree
|
anytree/node/nodemixin.py
|
NodeMixin.siblings
|
python
|
def siblings(self):
parent = self.parent
if parent is None:
return tuple()
else:
return tuple([node for node in parent.children if node != self])
|
Tuple of nodes with the same parent.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> loui = Node("Loui", parent=marc)
>>> lazy = Node("Lazy", parent=marc)
>>> udo.siblings
()
>>> marc.siblings
()
>>> lian.siblings
(Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy'))
>>> loui.siblings
(Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lazy'))
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/node/nodemixin.py#L384-L407
| null |
class NodeMixin(object):
__slots__ = ("__parent", "__children")
separator = "/"
u"""
The :any:`NodeMixin` class extends any Python class to a tree node.
The only tree relevant information is the `parent` attribute.
If `None` the :any:`NodeMixin` is root node.
If set to another node, the :any:`NodeMixin` becomes the child of it.
The `children` attribute can be used likewise.
If `None` the :any:`NodeMixin` has no children (unless the node is set *as* parent).
If set to any iterable of :any:`NodeMixin` instances, the nodes become children.
>>> from anytree import NodeMixin, RenderTree
>>> class MyBaseClass(object):
... foo = 4
>>> class MyClass(MyBaseClass, NodeMixin): # Add Node feature
... def __init__(self, name, length, width, parent=None, children=None):
... super(MyClass, self).__init__()
... self.name = name
... self.length = length
... self.width = width
... self.parent = parent
... if children:
... self.children = children
Construction via `parent`:
>>> my0 = MyClass('my0', 0, 0)
>>> my1 = MyClass('my1', 1, 0, parent=my0)
>>> my2 = MyClass('my2', 0, 2, parent=my0)
>>> for pre, _, node in RenderTree(my0):
... treestr = u"%s%s" % (pre, node.name)
... print(treestr.ljust(8), node.length, node.width)
my0 0 0
βββ my1 1 0
βββ my2 0 2
Construction via `children`:
>>> my0 = MyClass('my0', 0, 0, children=[
... MyClass('my1', 1, 0),
... MyClass('my2', 0, 2),
... ]
>>> for pre, _, node in RenderTree(my0):
... treestr = u"%s%s" % (pre, node.name)
... print(treestr.ljust(8), node.length, node.width)
my0 0 0
βββ my1 1 0
βββ my2 0 2
Both approaches can be mixed:
>>> my0 = MyClass('my0', 0, 0, children=[
... MyClass('my1', 1, 0),
... ]
>>> my2 = MyClass('my2', 0, 2, parent=my0)
>>> for pre, _, node in RenderTree(my0):
... treestr = u"%s%s" % (pre, node.name)
... print(treestr.ljust(8), node.length, node.width)
my0 0 0
βββ my1 1 0
βββ my2 0 2
"""
@property
def parent(self):
u"""
Parent Node.
On set, the node is detached from any previous parent node and attached
to the new node.
>>> from anytree import Node, RenderTree
>>> udo = Node("Udo")
>>> marc = Node("Marc")
>>> lian = Node("Lian", parent=marc)
>>> print(RenderTree(udo))
Node('/Udo')
>>> print(RenderTree(marc))
Node('/Marc')
βββ Node('/Marc/Lian')
**Attach**
>>> marc.parent = udo
>>> print(RenderTree(udo))
Node('/Udo')
βββ Node('/Udo/Marc')
βββ Node('/Udo/Marc/Lian')
**Detach**
To make a node to a root node, just set this attribute to `None`.
>>> marc.is_root
False
>>> marc.parent = None
>>> marc.is_root
True
"""
try:
return self.__parent
except AttributeError:
return None
@parent.setter
def parent(self, value):
if value is not None and not isinstance(value, NodeMixin):
msg = "Parent node %r is not of type 'NodeMixin'." % (value)
raise TreeError(msg)
try:
parent = self.__parent
except AttributeError:
parent = None
if parent is not value:
self.__check_loop(value)
self.__detach(parent)
self.__attach(value)
def __check_loop(self, node):
if node is not None:
if node is self:
msg = "Cannot set parent. %r cannot be parent of itself."
raise LoopError(msg % self)
if self in node.path:
msg = "Cannot set parent. %r is parent of %r."
raise LoopError(msg % (self, node))
def __detach(self, parent):
if parent is not None:
self._pre_detach(parent)
parentchildren = parent.__children_
assert any([child is self for child in parentchildren]), "Tree internal data is corrupt."
# ATOMIC START
parentchildren.remove(self)
self.__parent = None
# ATOMIC END
self._post_detach(parent)
def __attach(self, parent):
if parent is not None:
self._pre_attach(parent)
parentchildren = parent.__children_
assert not any([child is self for child in parentchildren]), "Tree internal data is corrupt."
# ATOMIC START
parentchildren.append(self)
self.__parent = parent
# ATOMIC END
self._post_attach(parent)
@property
def __children_(self):
try:
return self.__children
except AttributeError:
self.__children = []
return self.__children
@property
def children(self):
"""
All child nodes.
>>> from anytree import Node
>>> n = Node("n")
>>> a = Node("a", parent=n)
>>> b = Node("b", parent=n)
>>> c = Node("c", parent=n)
>>> n.children
(Node('/n/a'), Node('/n/b'), Node('/n/c'))
Modifying the children attribute modifies the tree.
**Detach**
The children attribute can be updated by setting to an iterable.
>>> n.children = [a, b]
>>> n.children
(Node('/n/a'), Node('/n/b'))
Node `c` is removed from the tree.
In case of an existing reference, the node `c` does not vanish and is the root of its own tree.
>>> c
Node('/c')
**Attach**
>>> d = Node("d")
>>> d
Node('/d')
>>> n.children = [a, b, d]
>>> n.children
(Node('/n/a'), Node('/n/b'), Node('/n/d'))
>>> d
Node('/n/d')
**Duplicate**
A node can just be the children once. Duplicates cause a :any:`TreeError`:
>>> n.children = [a, b, d, a]
Traceback (most recent call last):
...
anytree.node.exceptions.TreeError: Cannot add node Node('/n/a') multiple times as child.
"""
return tuple(self.__children_)
@staticmethod
def __check_children(children):
seen = set()
for child in children:
if not isinstance(child, NodeMixin):
msg = ("Cannot add non-node object %r. "
"It is not a subclass of 'NodeMixin'.") % child
raise TreeError(msg)
if child not in seen:
seen.add(child)
else:
msg = "Cannot add node %r multiple times as child." % child
raise TreeError(msg)
@children.setter
def children(self, children):
# convert iterable to tuple
children = tuple(children)
NodeMixin.__check_children(children)
# ATOMIC start
old_children = self.children
del self.children
try:
self._pre_attach_children(children)
for child in children:
child.parent = self
self._post_attach_children(children)
assert len(self.children) == len(children)
except Exception:
self.children = old_children
raise
# ATOMIC end
@children.deleter
def children(self):
children = self.children
self._pre_detach_children(children)
for child in self.children:
child.parent = None
assert len(self.children) == 0
self._post_detach_children(children)
def _pre_detach_children(self, children):
"""Method call before detaching `children`."""
pass
def _post_detach_children(self, children):
"""Method call after detaching `children`."""
pass
def _pre_attach_children(self, children):
"""Method call before attaching `children`."""
pass
def _post_attach_children(self, children):
"""Method call after attaching `children`."""
pass
@property
def path(self):
"""
Path of this `Node`.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.path
(Node('/Udo'),)
>>> marc.path
(Node('/Udo'), Node('/Udo/Marc'))
>>> lian.path
(Node('/Udo'), Node('/Udo/Marc'), Node('/Udo/Marc/Lian'))
"""
return self._path
@property
def _path(self):
path = []
node = self
while node:
path.insert(0, node)
node = node.parent
return tuple(path)
@property
def ancestors(self):
"""
All parent nodes and their parent nodes.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.ancestors
()
>>> marc.ancestors
(Node('/Udo'),)
>>> lian.ancestors
(Node('/Udo'), Node('/Udo/Marc'))
"""
return self._path[:-1]
@property
def anchestors(self):
"""
All parent nodes and their parent nodes - see :any:`ancestors`.
The attribute `anchestors` is just a typo of `ancestors`. Please use `ancestors`.
This attribute will be removed in the 2.0.0 release.
"""
warnings.warn(".anchestors was a typo and will be removed in version 3.0.0", DeprecationWarning)
return self.ancestors
@property
def descendants(self):
"""
All child nodes and all their child nodes.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> loui = Node("Loui", parent=marc)
>>> soe = Node("Soe", parent=lian)
>>> udo.descendants
(Node('/Udo/Marc'), Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lian/Soe'), Node('/Udo/Marc/Loui'))
>>> marc.descendants
(Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lian/Soe'), Node('/Udo/Marc/Loui'))
>>> lian.descendants
(Node('/Udo/Marc/Lian/Soe'),)
"""
return tuple(PreOrderIter(self))[1:]
@property
def root(self):
"""
Tree Root Node.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.root
Node('/Udo')
>>> marc.root
Node('/Udo')
>>> lian.root
Node('/Udo')
"""
if self.parent:
return self._path[0]
else:
return self
@property
@property
def leaves(self):
"""
Tuple of all leaf nodes.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> loui = Node("Loui", parent=marc)
>>> lazy = Node("Lazy", parent=marc)
>>> udo.leaves
(Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy'))
>>> marc.leaves
(Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy'))
"""
return tuple(PreOrderIter(self, filter_=lambda node: node.is_leaf))
@property
def is_leaf(self):
"""
`Node` has no children (External Node).
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.is_leaf
False
>>> marc.is_leaf
False
>>> lian.is_leaf
True
"""
return len(self.__children_) == 0
@property
def is_root(self):
"""
`Node` is tree root.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.is_root
True
>>> marc.is_root
False
>>> lian.is_root
False
"""
return self.parent is None
@property
def height(self):
"""
Number of edges on the longest path to a leaf `Node`.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.height
2
>>> marc.height
1
>>> lian.height
0
"""
if self.__children_:
return max([child.height for child in self.__children_]) + 1
else:
return 0
@property
def depth(self):
"""
Number of edges to the root `Node`.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.depth
0
>>> marc.depth
1
>>> lian.depth
2
"""
return len(self._path) - 1
def _pre_detach(self, parent):
"""Method call before detaching from `parent`."""
pass
def _post_detach(self, parent):
"""Method call after detaching from `parent`."""
pass
def _pre_attach(self, parent):
"""Method call before attaching to `parent`."""
pass
def _post_attach(self, parent):
"""Method call after attaching to `parent`."""
pass
|
c0fec0de/anytree
|
anytree/node/nodemixin.py
|
NodeMixin.height
|
python
|
def height(self):
if self.__children_:
return max([child.height for child in self.__children_]) + 1
else:
return 0
|
Number of edges on the longest path to a leaf `Node`.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.height
2
>>> marc.height
1
>>> lian.height
0
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/node/nodemixin.py#L464-L482
| null |
class NodeMixin(object):
__slots__ = ("__parent", "__children")
separator = "/"
u"""
The :any:`NodeMixin` class extends any Python class to a tree node.
The only tree relevant information is the `parent` attribute.
If `None` the :any:`NodeMixin` is root node.
If set to another node, the :any:`NodeMixin` becomes the child of it.
The `children` attribute can be used likewise.
If `None` the :any:`NodeMixin` has no children (unless the node is set *as* parent).
If set to any iterable of :any:`NodeMixin` instances, the nodes become children.
>>> from anytree import NodeMixin, RenderTree
>>> class MyBaseClass(object):
... foo = 4
>>> class MyClass(MyBaseClass, NodeMixin): # Add Node feature
... def __init__(self, name, length, width, parent=None, children=None):
... super(MyClass, self).__init__()
... self.name = name
... self.length = length
... self.width = width
... self.parent = parent
... if children:
... self.children = children
Construction via `parent`:
>>> my0 = MyClass('my0', 0, 0)
>>> my1 = MyClass('my1', 1, 0, parent=my0)
>>> my2 = MyClass('my2', 0, 2, parent=my0)
>>> for pre, _, node in RenderTree(my0):
... treestr = u"%s%s" % (pre, node.name)
... print(treestr.ljust(8), node.length, node.width)
my0 0 0
βββ my1 1 0
βββ my2 0 2
Construction via `children`:
>>> my0 = MyClass('my0', 0, 0, children=[
... MyClass('my1', 1, 0),
... MyClass('my2', 0, 2),
... ]
>>> for pre, _, node in RenderTree(my0):
... treestr = u"%s%s" % (pre, node.name)
... print(treestr.ljust(8), node.length, node.width)
my0 0 0
βββ my1 1 0
βββ my2 0 2
Both approaches can be mixed:
>>> my0 = MyClass('my0', 0, 0, children=[
... MyClass('my1', 1, 0),
... ]
>>> my2 = MyClass('my2', 0, 2, parent=my0)
>>> for pre, _, node in RenderTree(my0):
... treestr = u"%s%s" % (pre, node.name)
... print(treestr.ljust(8), node.length, node.width)
my0 0 0
βββ my1 1 0
βββ my2 0 2
"""
@property
def parent(self):
u"""
Parent Node.
On set, the node is detached from any previous parent node and attached
to the new node.
>>> from anytree import Node, RenderTree
>>> udo = Node("Udo")
>>> marc = Node("Marc")
>>> lian = Node("Lian", parent=marc)
>>> print(RenderTree(udo))
Node('/Udo')
>>> print(RenderTree(marc))
Node('/Marc')
βββ Node('/Marc/Lian')
**Attach**
>>> marc.parent = udo
>>> print(RenderTree(udo))
Node('/Udo')
βββ Node('/Udo/Marc')
βββ Node('/Udo/Marc/Lian')
**Detach**
To make a node to a root node, just set this attribute to `None`.
>>> marc.is_root
False
>>> marc.parent = None
>>> marc.is_root
True
"""
try:
return self.__parent
except AttributeError:
return None
@parent.setter
def parent(self, value):
if value is not None and not isinstance(value, NodeMixin):
msg = "Parent node %r is not of type 'NodeMixin'." % (value)
raise TreeError(msg)
try:
parent = self.__parent
except AttributeError:
parent = None
if parent is not value:
self.__check_loop(value)
self.__detach(parent)
self.__attach(value)
def __check_loop(self, node):
if node is not None:
if node is self:
msg = "Cannot set parent. %r cannot be parent of itself."
raise LoopError(msg % self)
if self in node.path:
msg = "Cannot set parent. %r is parent of %r."
raise LoopError(msg % (self, node))
def __detach(self, parent):
if parent is not None:
self._pre_detach(parent)
parentchildren = parent.__children_
assert any([child is self for child in parentchildren]), "Tree internal data is corrupt."
# ATOMIC START
parentchildren.remove(self)
self.__parent = None
# ATOMIC END
self._post_detach(parent)
def __attach(self, parent):
if parent is not None:
self._pre_attach(parent)
parentchildren = parent.__children_
assert not any([child is self for child in parentchildren]), "Tree internal data is corrupt."
# ATOMIC START
parentchildren.append(self)
self.__parent = parent
# ATOMIC END
self._post_attach(parent)
@property
def __children_(self):
try:
return self.__children
except AttributeError:
self.__children = []
return self.__children
@property
def children(self):
"""
All child nodes.
>>> from anytree import Node
>>> n = Node("n")
>>> a = Node("a", parent=n)
>>> b = Node("b", parent=n)
>>> c = Node("c", parent=n)
>>> n.children
(Node('/n/a'), Node('/n/b'), Node('/n/c'))
Modifying the children attribute modifies the tree.
**Detach**
The children attribute can be updated by setting to an iterable.
>>> n.children = [a, b]
>>> n.children
(Node('/n/a'), Node('/n/b'))
Node `c` is removed from the tree.
In case of an existing reference, the node `c` does not vanish and is the root of its own tree.
>>> c
Node('/c')
**Attach**
>>> d = Node("d")
>>> d
Node('/d')
>>> n.children = [a, b, d]
>>> n.children
(Node('/n/a'), Node('/n/b'), Node('/n/d'))
>>> d
Node('/n/d')
**Duplicate**
A node can just be the children once. Duplicates cause a :any:`TreeError`:
>>> n.children = [a, b, d, a]
Traceback (most recent call last):
...
anytree.node.exceptions.TreeError: Cannot add node Node('/n/a') multiple times as child.
"""
return tuple(self.__children_)
@staticmethod
def __check_children(children):
seen = set()
for child in children:
if not isinstance(child, NodeMixin):
msg = ("Cannot add non-node object %r. "
"It is not a subclass of 'NodeMixin'.") % child
raise TreeError(msg)
if child not in seen:
seen.add(child)
else:
msg = "Cannot add node %r multiple times as child." % child
raise TreeError(msg)
@children.setter
def children(self, children):
# convert iterable to tuple
children = tuple(children)
NodeMixin.__check_children(children)
# ATOMIC start
old_children = self.children
del self.children
try:
self._pre_attach_children(children)
for child in children:
child.parent = self
self._post_attach_children(children)
assert len(self.children) == len(children)
except Exception:
self.children = old_children
raise
# ATOMIC end
@children.deleter
def children(self):
children = self.children
self._pre_detach_children(children)
for child in self.children:
child.parent = None
assert len(self.children) == 0
self._post_detach_children(children)
def _pre_detach_children(self, children):
"""Method call before detaching `children`."""
pass
def _post_detach_children(self, children):
"""Method call after detaching `children`."""
pass
def _pre_attach_children(self, children):
"""Method call before attaching `children`."""
pass
def _post_attach_children(self, children):
"""Method call after attaching `children`."""
pass
@property
def path(self):
"""
Path of this `Node`.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.path
(Node('/Udo'),)
>>> marc.path
(Node('/Udo'), Node('/Udo/Marc'))
>>> lian.path
(Node('/Udo'), Node('/Udo/Marc'), Node('/Udo/Marc/Lian'))
"""
return self._path
@property
def _path(self):
path = []
node = self
while node:
path.insert(0, node)
node = node.parent
return tuple(path)
@property
def ancestors(self):
"""
All parent nodes and their parent nodes.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.ancestors
()
>>> marc.ancestors
(Node('/Udo'),)
>>> lian.ancestors
(Node('/Udo'), Node('/Udo/Marc'))
"""
return self._path[:-1]
@property
def anchestors(self):
"""
All parent nodes and their parent nodes - see :any:`ancestors`.
The attribute `anchestors` is just a typo of `ancestors`. Please use `ancestors`.
This attribute will be removed in the 2.0.0 release.
"""
warnings.warn(".anchestors was a typo and will be removed in version 3.0.0", DeprecationWarning)
return self.ancestors
@property
def descendants(self):
"""
All child nodes and all their child nodes.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> loui = Node("Loui", parent=marc)
>>> soe = Node("Soe", parent=lian)
>>> udo.descendants
(Node('/Udo/Marc'), Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lian/Soe'), Node('/Udo/Marc/Loui'))
>>> marc.descendants
(Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lian/Soe'), Node('/Udo/Marc/Loui'))
>>> lian.descendants
(Node('/Udo/Marc/Lian/Soe'),)
"""
return tuple(PreOrderIter(self))[1:]
@property
def root(self):
"""
Tree Root Node.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.root
Node('/Udo')
>>> marc.root
Node('/Udo')
>>> lian.root
Node('/Udo')
"""
if self.parent:
return self._path[0]
else:
return self
@property
def siblings(self):
"""
Tuple of nodes with the same parent.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> loui = Node("Loui", parent=marc)
>>> lazy = Node("Lazy", parent=marc)
>>> udo.siblings
()
>>> marc.siblings
()
>>> lian.siblings
(Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy'))
>>> loui.siblings
(Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lazy'))
"""
parent = self.parent
if parent is None:
return tuple()
else:
return tuple([node for node in parent.children if node != self])
@property
def leaves(self):
"""
Tuple of all leaf nodes.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> loui = Node("Loui", parent=marc)
>>> lazy = Node("Lazy", parent=marc)
>>> udo.leaves
(Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy'))
>>> marc.leaves
(Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy'))
"""
return tuple(PreOrderIter(self, filter_=lambda node: node.is_leaf))
@property
def is_leaf(self):
"""
`Node` has no children (External Node).
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.is_leaf
False
>>> marc.is_leaf
False
>>> lian.is_leaf
True
"""
return len(self.__children_) == 0
@property
def is_root(self):
"""
`Node` is tree root.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.is_root
True
>>> marc.is_root
False
>>> lian.is_root
False
"""
return self.parent is None
@property
@property
def depth(self):
"""
Number of edges to the root `Node`.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> udo.depth
0
>>> marc.depth
1
>>> lian.depth
2
"""
return len(self._path) - 1
def _pre_detach(self, parent):
"""Method call before detaching from `parent`."""
pass
def _post_detach(self, parent):
"""Method call after detaching from `parent`."""
pass
def _pre_attach(self, parent):
"""Method call before attaching to `parent`."""
pass
def _post_attach(self, parent):
"""Method call after attaching to `parent`."""
pass
|
c0fec0de/anytree
|
anytree/importer/jsonimporter.py
|
JsonImporter.import_
|
python
|
def import_(self, data):
return self.__import(json.loads(data, **self.kwargs))
|
Read JSON from `data`.
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/importer/jsonimporter.py#L60-L62
|
[
"def __import(self, data):\n dictimporter = self.dictimporter or DictImporter()\n return dictimporter.import_(data)\n"
] |
class JsonImporter(object):
def __init__(self, dictimporter=None, **kwargs):
u"""
Import Tree from JSON.
The JSON is read and converted to a dictionary via `dictimporter`.
Keyword Arguments:
dictimporter: Dictionary Importer used (see :any:`DictImporter`).
kwargs: All other arguments are passed to
:any:`json.load`/:any:`json.loads`.
See documentation for reference.
>>> from anytree.importer import JsonImporter
>>> from anytree import RenderTree
>>> importer = JsonImporter()
>>> data = '''
... {
... "a": "root",
... "children": [
... {
... "a": "sub0",
... "children": [
... {
... "a": "sub0A",
... "b": "foo"
... },
... {
... "a": "sub0B"
... }
... ]
... },
... {
... "a": "sub1"
... }
... ]
... }'''
>>> root = importer.import_(data)
>>> print(RenderTree(root))
AnyNode(a='root')
βββ AnyNode(a='sub0')
β βββ AnyNode(a='sub0A', b='foo')
β βββ AnyNode(a='sub0B')
βββ AnyNode(a='sub1')
"""
self.dictimporter = dictimporter
self.kwargs = kwargs
def __import(self, data):
dictimporter = self.dictimporter or DictImporter()
return dictimporter.import_(data)
def read(self, filehandle):
"""Read JSON from `filehandle`."""
return self.__import(json.load(filehandle, **self.kwargs))
|
c0fec0de/anytree
|
anytree/importer/jsonimporter.py
|
JsonImporter.read
|
python
|
def read(self, filehandle):
return self.__import(json.load(filehandle, **self.kwargs))
|
Read JSON from `filehandle`.
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/importer/jsonimporter.py#L64-L66
|
[
"def __import(self, data):\n dictimporter = self.dictimporter or DictImporter()\n return dictimporter.import_(data)\n"
] |
class JsonImporter(object):
def __init__(self, dictimporter=None, **kwargs):
u"""
Import Tree from JSON.
The JSON is read and converted to a dictionary via `dictimporter`.
Keyword Arguments:
dictimporter: Dictionary Importer used (see :any:`DictImporter`).
kwargs: All other arguments are passed to
:any:`json.load`/:any:`json.loads`.
See documentation for reference.
>>> from anytree.importer import JsonImporter
>>> from anytree import RenderTree
>>> importer = JsonImporter()
>>> data = '''
... {
... "a": "root",
... "children": [
... {
... "a": "sub0",
... "children": [
... {
... "a": "sub0A",
... "b": "foo"
... },
... {
... "a": "sub0B"
... }
... ]
... },
... {
... "a": "sub1"
... }
... ]
... }'''
>>> root = importer.import_(data)
>>> print(RenderTree(root))
AnyNode(a='root')
βββ AnyNode(a='sub0')
β βββ AnyNode(a='sub0A', b='foo')
β βββ AnyNode(a='sub0B')
βββ AnyNode(a='sub1')
"""
self.dictimporter = dictimporter
self.kwargs = kwargs
def __import(self, data):
dictimporter = self.dictimporter or DictImporter()
return dictimporter.import_(data)
def import_(self, data):
"""Read JSON from `data`."""
return self.__import(json.loads(data, **self.kwargs))
|
c0fec0de/anytree
|
anytree/walker.py
|
Walker.walk
|
python
|
def walk(self, start, end):
s = start.path
e = end.path
if start.root != end.root:
msg = "%r and %r are not part of the same tree." % (start, end)
raise WalkError(msg)
# common
c = Walker.__calc_common(s, e)
assert c[0] is start.root
len_c = len(c)
# up
if start is c[-1]:
up = tuple()
else:
up = tuple(reversed(s[len_c:]))
# down
if end is c[-1]:
down = tuple()
else:
down = e[len_c:]
return up, c[-1], down
|
Walk from `start` node to `end` node.
Returns:
(upwards, common, downwards): `upwards` is a list of nodes to go upward to.
`common` top node. `downwards` is a list of nodes to go downward to.
Raises:
WalkError: on no common root node.
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()))
Node('/f')
|-- Node('/f/b')
| |-- Node('/f/b/a')
| +-- Node('/f/b/d')
| |-- Node('/f/b/d/c')
| +-- Node('/f/b/d/e')
+-- Node('/f/g')
+-- Node('/f/g/i')
+-- Node('/f/g/i/h')
Create a walker:
>>> w = Walker()
This class is made for walking:
>>> w.walk(f, f)
((), Node('/f'), ())
>>> w.walk(f, b)
((), Node('/f'), (Node('/f/b'),))
>>> w.walk(b, f)
((Node('/f/b'),), Node('/f'), ())
>>> w.walk(h, e)
((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e')))
>>> w.walk(d, e)
((), Node('/f/b/d'), (Node('/f/b/d/e'),))
For a proper walking the nodes need to be part of the same tree:
>>> w.walk(Node("a"), Node("b"))
Traceback (most recent call last):
...
anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree.
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/walker.py#L10-L85
|
[
"def __calc_common(s, e):\n return tuple([si for si, ei in zip(s, e) if si is ei])\n"
] |
class Walker(object):
def __init__(self):
"""Walk from one node to another."""
super(Walker, self).__init__()
@staticmethod
def __calc_common(s, e):
return tuple([si for si, ei in zip(s, e) if si is ei])
|
c0fec0de/anytree
|
anytree/exporter/jsonexporter.py
|
JsonExporter.export
|
python
|
def export(self, node):
    """Return JSON for the tree starting at `node`.

    The tree is first converted to a dictionary (via the configured
    `dictexporter`, or a fresh `DictExporter` when none is set) and then
    serialized with `json.dumps` using `self.kwargs`.
    """
    exporter = self.dictexporter if self.dictexporter else DictExporter()
    return json.dumps(exporter.export(node), **self.kwargs)
|
Return JSON for tree starting at `node`.
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/exporter/jsonexporter.py#L54-L58
| null |
class JsonExporter(object):
def __init__(self, dictexporter=None, **kwargs):
"""
Tree to JSON exporter.
The tree is converted to a dictionary via `dictexporter` and exported to JSON.
Keyword Arguments:
dictexporter: Dictionary Exporter used (see :any:`DictExporter`).
kwargs: All other arguments are passed to
:any:`json.dump`/:any:`json.dumps`.
See documentation for reference.
>>> from anytree import AnyNode
>>> from anytree.exporter import JsonExporter
>>> root = AnyNode(a="root")
>>> s0 = AnyNode(a="sub0", parent=root)
>>> s0a = AnyNode(a="sub0A", b="foo", parent=s0)
>>> s0b = AnyNode(a="sub0B", parent=s0)
>>> s1 = AnyNode(a="sub1", parent=root)
>>> exporter = JsonExporter(indent=2, sort_keys=True)
>>> print(exporter.export(root))
{
"a": "root",
"children": [
{
"a": "sub0",
"children": [
{
"a": "sub0A",
"b": "foo"
},
{
"a": "sub0B"
}
]
},
{
"a": "sub1"
}
]
}
"""
self.dictexporter = dictexporter
self.kwargs = kwargs
def write(self, node, filehandle):
"""Write JSON to `filehandle` starting at `node`."""
dictexporter = self.dictexporter or DictExporter()
data = dictexporter.export(node)
return json.dump(data, filehandle, **self.kwargs)
|
c0fec0de/anytree
|
anytree/exporter/jsonexporter.py
|
JsonExporter.write
|
python
|
def write(self, node, filehandle):
    """Write JSON for the tree starting at `node` to `filehandle`.

    Mirrors `export`, but streams the serialization through `json.dump`
    instead of returning a string.
    """
    exporter = self.dictexporter if self.dictexporter else DictExporter()
    return json.dump(exporter.export(node), filehandle, **self.kwargs)
|
Write JSON to `filehandle` starting at `node`.
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/exporter/jsonexporter.py#L60-L64
| null |
class JsonExporter(object):
def __init__(self, dictexporter=None, **kwargs):
"""
Tree to JSON exporter.
The tree is converted to a dictionary via `dictexporter` and exported to JSON.
Keyword Arguments:
dictexporter: Dictionary Exporter used (see :any:`DictExporter`).
kwargs: All other arguments are passed to
:any:`json.dump`/:any:`json.dumps`.
See documentation for reference.
>>> from anytree import AnyNode
>>> from anytree.exporter import JsonExporter
>>> root = AnyNode(a="root")
>>> s0 = AnyNode(a="sub0", parent=root)
>>> s0a = AnyNode(a="sub0A", b="foo", parent=s0)
>>> s0b = AnyNode(a="sub0B", parent=s0)
>>> s1 = AnyNode(a="sub1", parent=root)
>>> exporter = JsonExporter(indent=2, sort_keys=True)
>>> print(exporter.export(root))
{
"a": "root",
"children": [
{
"a": "sub0",
"children": [
{
"a": "sub0A",
"b": "foo"
},
{
"a": "sub0B"
}
]
},
{
"a": "sub1"
}
]
}
"""
self.dictexporter = dictexporter
self.kwargs = kwargs
def export(self, node):
"""Return JSON for tree starting at `node`."""
dictexporter = self.dictexporter or DictExporter()
data = dictexporter.export(node)
return json.dumps(data, **self.kwargs)
|
c0fec0de/anytree
|
anytree/search.py
|
findall
|
python
|
def findall(node, filter_=None, stop=None, maxlevel=None, mincount=None, maxcount=None):
    """Search the tree rooted at `node` and return a tuple of nodes matching `filter_`.

    Traversal is pruned at nodes where `stop` returns True and bounded to
    `maxlevel` levels; the `_findall` helper raises CountError when the
    number of matches falls outside the `mincount`/`maxcount` bounds.
    """
    return _findall(node, filter_=filter_, stop=stop,
                    maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
|
Search nodes matching `filter_` but stop at `maxlevel` or `stop`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum descending in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall(f, filter_=lambda node: node.name in ("a", "b"))
(Node('/f/b'), Node('/f/b/a'))
>>> findall(f, filter_=lambda node: d in node.path)
(Node('/f/b/d'), Node('/f/b/d/c'), Node('/f/b/d/e'))
The number of matches can be limited:
>>> findall(f, filter_=lambda node: d in node.path, mincount=4) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting at least 4 elements, but found 3. ... Node('/f/b/d/e'))
>>> findall(f, filter_=lambda node: d in node.path, maxcount=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 2 elements at maximum, but found 3. ... Node('/f/b/d/e'))
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/search.py#L6-L62
|
[
"def _findall(node, filter_, stop=None, maxlevel=None, mincount=None, maxcount=None):\n result = tuple(PreOrderIter(node, filter_, stop, maxlevel))\n resultlen = len(result)\n if mincount is not None and resultlen < mincount:\n msg = \"Expecting at least %d elements, but found %d.\"\n raise CountError(msg % (mincount, resultlen), result)\n if maxcount is not None and resultlen > maxcount:\n msg = \"Expecting %d elements at maximum, but found %d.\"\n raise CountError(msg % (maxcount, resultlen), result)\n return result\n"
] |
"""Node Searching."""
from anytree.iterators import PreOrderIter
def findall_by_attr(node, value, name="name", maxlevel=None, mincount=None, maxcount=None):
"""
Search nodes with attribute `name` having `value` but stop at `maxlevel`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
value: value which need to match
Keyword Args:
name (str): attribute name need to match
maxlevel (int): maximum decending in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall_by_attr(f, "d")
(Node('/f/b/d'),)
"""
return _findall(node, filter_=lambda n: _filter_by_name(n, name, value),
maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
def find(node, filter_=None, stop=None, maxlevel=None):
"""
Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.
Return matching node.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum decending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find(f, lambda node: node.name == "d")
Node('/f/b/d')
>>> find(f, lambda node: node.name == "z")
>>> find(f, lambda node: b in node.path) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 1 elements at maximum, but found 5. (Node('/f/b')... Node('/f/b/d/e'))
"""
return _find(node, filter_=filter_, stop=stop, maxlevel=maxlevel)
def find_by_attr(node, value, name="name", maxlevel=None):
"""
Search for *single* node with attribute `name` having `value` but stop at `maxlevel`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
value: value which need to match
Keyword Args:
name (str): attribute name need to match
maxlevel (int): maximum decending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d, foo=4)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find_by_attr(f, "d")
Node('/f/b/d')
>>> find_by_attr(f, name="foo", value=4)
Node('/f/b/d/c', foo=4)
>>> find_by_attr(f, name="foo", value=8)
"""
return _find(node, filter_=lambda n: _filter_by_name(n, name, value),
maxlevel=maxlevel)
def _find(node, filter_, stop=None, maxlevel=None):
items = _findall(node, filter_, stop=stop, maxlevel=maxlevel, maxcount=1)
return items[0] if items else None
def _findall(node, filter_, stop=None, maxlevel=None, mincount=None, maxcount=None):
result = tuple(PreOrderIter(node, filter_, stop, maxlevel))
resultlen = len(result)
if mincount is not None and resultlen < mincount:
msg = "Expecting at least %d elements, but found %d."
raise CountError(msg % (mincount, resultlen), result)
if maxcount is not None and resultlen > maxcount:
msg = "Expecting %d elements at maximum, but found %d."
raise CountError(msg % (maxcount, resultlen), result)
return result
def _filter_by_name(node, name, value):
try:
return getattr(node, name) == value
except AttributeError:
return False
class CountError(RuntimeError):
def __init__(self, msg, result):
"""Error raised on `mincount` or `maxcount` mismatch."""
if result:
msg += " " + repr(result)
super(CountError, self).__init__(msg)
|
c0fec0de/anytree
|
anytree/search.py
|
findall_by_attr
|
python
|
def findall_by_attr(node, value, name="name", maxlevel=None, mincount=None, maxcount=None):
    """Return a tuple of nodes whose attribute `name` equals `value`.

    Thin wrapper over `_findall` with a `_filter_by_name` predicate;
    `maxlevel` bounds the descent and `mincount`/`maxcount` bound the
    accepted number of matches (CountError on violation).
    """
    return _findall(node, filter_=lambda n: _filter_by_name(n, name, value),
                    maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
|
Search nodes with attribute `name` having `value` but stop at `maxlevel`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
value: value which need to match
Keyword Args:
name (str): attribute name need to match
maxlevel (int): maximum descending in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall_by_attr(f, "d")
(Node('/f/b/d'),)
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/search.py#L65-L108
|
[
"def _findall(node, filter_, stop=None, maxlevel=None, mincount=None, maxcount=None):\n result = tuple(PreOrderIter(node, filter_, stop, maxlevel))\n resultlen = len(result)\n if mincount is not None and resultlen < mincount:\n msg = \"Expecting at least %d elements, but found %d.\"\n raise CountError(msg % (mincount, resultlen), result)\n if maxcount is not None and resultlen > maxcount:\n msg = \"Expecting %d elements at maximum, but found %d.\"\n raise CountError(msg % (maxcount, resultlen), result)\n return result\n"
] |
"""Node Searching."""
from anytree.iterators import PreOrderIter
def findall(node, filter_=None, stop=None, maxlevel=None, mincount=None, maxcount=None):
"""
Search nodes matching `filter_` but stop at `maxlevel` or `stop`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum decending in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall(f, filter_=lambda node: node.name in ("a", "b"))
(Node('/f/b'), Node('/f/b/a'))
>>> findall(f, filter_=lambda node: d in node.path)
(Node('/f/b/d'), Node('/f/b/d/c'), Node('/f/b/d/e'))
The number of matches can be limited:
>>> findall(f, filter_=lambda node: d in node.path, mincount=4) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting at least 4 elements, but found 3. ... Node('/f/b/d/e'))
>>> findall(f, filter_=lambda node: d in node.path, maxcount=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 2 elements at maximum, but found 3. ... Node('/f/b/d/e'))
"""
return _findall(node, filter_=filter_, stop=stop,
maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
def find(node, filter_=None, stop=None, maxlevel=None):
"""
Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.
Return matching node.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum decending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find(f, lambda node: node.name == "d")
Node('/f/b/d')
>>> find(f, lambda node: node.name == "z")
>>> find(f, lambda node: b in node.path) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 1 elements at maximum, but found 5. (Node('/f/b')... Node('/f/b/d/e'))
"""
return _find(node, filter_=filter_, stop=stop, maxlevel=maxlevel)
def find_by_attr(node, value, name="name", maxlevel=None):
"""
Search for *single* node with attribute `name` having `value` but stop at `maxlevel`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
value: value which need to match
Keyword Args:
name (str): attribute name need to match
maxlevel (int): maximum decending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d, foo=4)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find_by_attr(f, "d")
Node('/f/b/d')
>>> find_by_attr(f, name="foo", value=4)
Node('/f/b/d/c', foo=4)
>>> find_by_attr(f, name="foo", value=8)
"""
return _find(node, filter_=lambda n: _filter_by_name(n, name, value),
maxlevel=maxlevel)
def _find(node, filter_, stop=None, maxlevel=None):
items = _findall(node, filter_, stop=stop, maxlevel=maxlevel, maxcount=1)
return items[0] if items else None
def _findall(node, filter_, stop=None, maxlevel=None, mincount=None, maxcount=None):
result = tuple(PreOrderIter(node, filter_, stop, maxlevel))
resultlen = len(result)
if mincount is not None and resultlen < mincount:
msg = "Expecting at least %d elements, but found %d."
raise CountError(msg % (mincount, resultlen), result)
if maxcount is not None and resultlen > maxcount:
msg = "Expecting %d elements at maximum, but found %d."
raise CountError(msg % (maxcount, resultlen), result)
return result
def _filter_by_name(node, name, value):
try:
return getattr(node, name) == value
except AttributeError:
return False
class CountError(RuntimeError):
def __init__(self, msg, result):
"""Error raised on `mincount` or `maxcount` mismatch."""
if result:
msg += " " + repr(result)
super(CountError, self).__init__(msg)
|
c0fec0de/anytree
|
anytree/search.py
|
find
|
python
|
def find(node, filter_=None, stop=None, maxlevel=None):
    """Return the single node matching `filter_`, or None when nothing matches.

    Delegates to `_find`, which enforces at most one match (CountError is
    raised when several nodes match).
    """
    return _find(node, filter_=filter_, stop=stop, maxlevel=maxlevel)
|
Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.
Return matching node.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum descending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find(f, lambda node: node.name == "d")
Node('/f/b/d')
>>> find(f, lambda node: node.name == "z")
>>> find(f, lambda node: b in node.path) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 1 elements at maximum, but found 5. (Node('/f/b')... Node('/f/b/d/e'))
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/search.py#L111-L156
|
[
"def _find(node, filter_, stop=None, maxlevel=None):\n items = _findall(node, filter_, stop=stop, maxlevel=maxlevel, maxcount=1)\n return items[0] if items else None\n"
] |
"""Node Searching."""
from anytree.iterators import PreOrderIter
def findall(node, filter_=None, stop=None, maxlevel=None, mincount=None, maxcount=None):
"""
Search nodes matching `filter_` but stop at `maxlevel` or `stop`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum decending in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall(f, filter_=lambda node: node.name in ("a", "b"))
(Node('/f/b'), Node('/f/b/a'))
>>> findall(f, filter_=lambda node: d in node.path)
(Node('/f/b/d'), Node('/f/b/d/c'), Node('/f/b/d/e'))
The number of matches can be limited:
>>> findall(f, filter_=lambda node: d in node.path, mincount=4) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting at least 4 elements, but found 3. ... Node('/f/b/d/e'))
>>> findall(f, filter_=lambda node: d in node.path, maxcount=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 2 elements at maximum, but found 3. ... Node('/f/b/d/e'))
"""
return _findall(node, filter_=filter_, stop=stop,
maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
def findall_by_attr(node, value, name="name", maxlevel=None, mincount=None, maxcount=None):
"""
Search nodes with attribute `name` having `value` but stop at `maxlevel`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
value: value which need to match
Keyword Args:
name (str): attribute name need to match
maxlevel (int): maximum decending in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall_by_attr(f, "d")
(Node('/f/b/d'),)
"""
return _findall(node, filter_=lambda n: _filter_by_name(n, name, value),
maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
def find_by_attr(node, value, name="name", maxlevel=None):
"""
Search for *single* node with attribute `name` having `value` but stop at `maxlevel`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
value: value which need to match
Keyword Args:
name (str): attribute name need to match
maxlevel (int): maximum decending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d, foo=4)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find_by_attr(f, "d")
Node('/f/b/d')
>>> find_by_attr(f, name="foo", value=4)
Node('/f/b/d/c', foo=4)
>>> find_by_attr(f, name="foo", value=8)
"""
return _find(node, filter_=lambda n: _filter_by_name(n, name, value),
maxlevel=maxlevel)
def _find(node, filter_, stop=None, maxlevel=None):
items = _findall(node, filter_, stop=stop, maxlevel=maxlevel, maxcount=1)
return items[0] if items else None
def _findall(node, filter_, stop=None, maxlevel=None, mincount=None, maxcount=None):
result = tuple(PreOrderIter(node, filter_, stop, maxlevel))
resultlen = len(result)
if mincount is not None and resultlen < mincount:
msg = "Expecting at least %d elements, but found %d."
raise CountError(msg % (mincount, resultlen), result)
if maxcount is not None and resultlen > maxcount:
msg = "Expecting %d elements at maximum, but found %d."
raise CountError(msg % (maxcount, resultlen), result)
return result
def _filter_by_name(node, name, value):
try:
return getattr(node, name) == value
except AttributeError:
return False
class CountError(RuntimeError):
def __init__(self, msg, result):
"""Error raised on `mincount` or `maxcount` mismatch."""
if result:
msg += " " + repr(result)
super(CountError, self).__init__(msg)
|
c0fec0de/anytree
|
anytree/search.py
|
find_by_attr
|
python
|
def find_by_attr(node, value, name="name", maxlevel=None):
    """Return the single node whose attribute `name` equals `value`, or None.

    Delegates to `_find` with a `_filter_by_name` predicate; CountError is
    raised when more than one node matches.
    """
    return _find(node, filter_=lambda n: _filter_by_name(n, name, value),
                 maxlevel=maxlevel)
|
Search for *single* node with attribute `name` having `value` but stop at `maxlevel`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
value: value which need to match
Keyword Args:
name (str): attribute name need to match
maxlevel (int): maximum descending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d, foo=4)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find_by_attr(f, "d")
Node('/f/b/d')
>>> find_by_attr(f, name="foo", value=4)
Node('/f/b/d/c', foo=4)
>>> find_by_attr(f, name="foo", value=8)
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/search.py#L159-L204
|
[
"def _find(node, filter_, stop=None, maxlevel=None):\n items = _findall(node, filter_, stop=stop, maxlevel=maxlevel, maxcount=1)\n return items[0] if items else None\n"
] |
"""Node Searching."""
from anytree.iterators import PreOrderIter
def findall(node, filter_=None, stop=None, maxlevel=None, mincount=None, maxcount=None):
"""
Search nodes matching `filter_` but stop at `maxlevel` or `stop`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum decending in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall(f, filter_=lambda node: node.name in ("a", "b"))
(Node('/f/b'), Node('/f/b/a'))
>>> findall(f, filter_=lambda node: d in node.path)
(Node('/f/b/d'), Node('/f/b/d/c'), Node('/f/b/d/e'))
The number of matches can be limited:
>>> findall(f, filter_=lambda node: d in node.path, mincount=4) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting at least 4 elements, but found 3. ... Node('/f/b/d/e'))
>>> findall(f, filter_=lambda node: d in node.path, maxcount=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 2 elements at maximum, but found 3. ... Node('/f/b/d/e'))
"""
return _findall(node, filter_=filter_, stop=stop,
maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
def findall_by_attr(node, value, name="name", maxlevel=None, mincount=None, maxcount=None):
"""
Search nodes with attribute `name` having `value` but stop at `maxlevel`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
value: value which need to match
Keyword Args:
name (str): attribute name need to match
maxlevel (int): maximum decending in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall_by_attr(f, "d")
(Node('/f/b/d'),)
"""
return _findall(node, filter_=lambda n: _filter_by_name(n, name, value),
maxlevel=maxlevel, mincount=mincount, maxcount=maxcount)
def find(node, filter_=None, stop=None, maxlevel=None):
"""
Search for *single* node matching `filter_` but stop at `maxlevel` or `stop`.
Return matching node.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum decending in the node hierarchy.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> find(f, lambda node: node.name == "d")
Node('/f/b/d')
>>> find(f, lambda node: node.name == "z")
>>> find(f, lambda node: b in node.path) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 1 elements at maximum, but found 5. (Node('/f/b')... Node('/f/b/d/e'))
"""
return _find(node, filter_=filter_, stop=stop, maxlevel=maxlevel)
def _find(node, filter_, stop=None, maxlevel=None):
items = _findall(node, filter_, stop=stop, maxlevel=maxlevel, maxcount=1)
return items[0] if items else None
def _findall(node, filter_, stop=None, maxlevel=None, mincount=None, maxcount=None):
result = tuple(PreOrderIter(node, filter_, stop, maxlevel))
resultlen = len(result)
if mincount is not None and resultlen < mincount:
msg = "Expecting at least %d elements, but found %d."
raise CountError(msg % (mincount, resultlen), result)
if maxcount is not None and resultlen > maxcount:
msg = "Expecting %d elements at maximum, but found %d."
raise CountError(msg % (maxcount, resultlen), result)
return result
def _filter_by_name(node, name, value):
try:
return getattr(node, name) == value
except AttributeError:
return False
class CountError(RuntimeError):
def __init__(self, msg, result):
"""Error raised on `mincount` or `maxcount` mismatch."""
if result:
msg += " " + repr(result)
super(CountError, self).__init__(msg)
|
c0fec0de/anytree
|
anytree/resolver.py
|
Resolver.get
|
python
|
def get(self, node, path):
    """Return the node reached by following `path` from `node`.

    ``..`` moves to the parent, while ``.`` and empty components are
    skipped; any other component selects the matching child.  Path
    splitting and absolute-path handling are delegated to ``__start``,
    child lookup to ``__get``.
    """
    current, components = self.__start(node, path)
    for component in components:
        if component in ("", "."):
            continue
        if component == "..":
            current = current.parent
        else:
            current = self.__get(current, component)
    return current
|
Return instance at `path`.
An example module tree:
>>> from anytree import Node
>>> top = Node("top", parent=None)
>>> sub0 = Node("sub0", parent=top)
>>> sub0sub0 = Node("sub0sub0", parent=sub0)
>>> sub0sub1 = Node("sub0sub1", parent=sub0)
>>> sub1 = Node("sub1", parent=top)
A resolver using the `name` attribute:
>>> r = Resolver('name')
Relative paths:
>>> r.get(top, "sub0/sub0sub0")
Node('/top/sub0/sub0sub0')
>>> r.get(sub1, "..")
Node('/top')
>>> r.get(sub1, "../sub0/sub0sub1")
Node('/top/sub0/sub0sub1')
>>> r.get(sub1, ".")
Node('/top/sub1')
>>> r.get(sub1, "")
Node('/top/sub1')
>>> r.get(top, "sub2")
Traceback (most recent call last):
...
anytree.resolver.ChildResolverError: Node('/top') has no child sub2. Children are: 'sub0', 'sub1'.
Absolute paths:
>>> r.get(sub0sub0, "/top")
Node('/top')
>>> r.get(sub0sub0, "/top/sub0")
Node('/top/sub0')
>>> r.get(sub0sub0, "/")
Traceback (most recent call last):
...
anytree.resolver.ResolverError: root node missing. root is '/top'.
>>> r.get(sub0sub0, "/bar")
Traceback (most recent call last):
...
anytree.resolver.ResolverError: unknown root node '/bar'. root is '/top'.
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/resolver.py#L20-L77
|
[
"def __get(self, node, name):\n for child in node.children:\n if _getattr(child, self.pathattr) == name:\n return child\n raise ChildResolverError(node, name, self.pathattr)\n",
"def __start(self, node, path):\n sep = node.separator\n parts = path.split(sep)\n if path.startswith(sep):\n node = node.root\n rootpart = _getattr(node, self.pathattr)\n parts.pop(0)\n if not parts[0]:\n msg = \"root node missing. root is '%s%s'.\"\n raise ResolverError(node, \"\", msg % (sep, str(rootpart)))\n elif parts[0] != rootpart:\n msg = \"unknown root node '%s%s'. root is '%s%s'.\"\n raise ResolverError(node, \"\", msg % (sep, parts[0], sep, str(rootpart)))\n parts.pop(0)\n return node, parts\n"
] |
class Resolver(object):
_match_cache = {}
def __init__(self, pathattr='name'):
"""Resolve :any:`NodeMixin` paths using attribute `pathattr`."""
super(Resolver, self).__init__()
self.pathattr = pathattr
def __get(self, node, name):
for child in node.children:
if _getattr(child, self.pathattr) == name:
return child
raise ChildResolverError(node, name, self.pathattr)
def glob(self, node, path):
"""
Return instances at `path` supporting wildcards.
Behaves identical to :any:`get`, but accepts wildcards and returns
a list of found nodes.
* `*` matches any characters, except '/'.
* `?` matches a single character, except '/'.
An example module tree:
>>> from anytree import Node
>>> top = Node("top", parent=None)
>>> sub0 = Node("sub0", parent=top)
>>> sub0sub0 = Node("sub0", parent=sub0)
>>> sub0sub1 = Node("sub1", parent=sub0)
>>> sub1 = Node("sub1", parent=top)
>>> sub1sub0 = Node("sub0", parent=sub1)
A resolver using the `name` attribute:
>>> r = Resolver('name')
Relative paths:
>>> r.glob(top, "sub0/sub?")
[Node('/top/sub0/sub0'), Node('/top/sub0/sub1')]
>>> r.glob(sub1, ".././*")
[Node('/top/sub0'), Node('/top/sub1')]
>>> r.glob(top, "*/*")
[Node('/top/sub0/sub0'), Node('/top/sub0/sub1'), Node('/top/sub1/sub0')]
>>> r.glob(top, "*/sub0")
[Node('/top/sub0/sub0'), Node('/top/sub1/sub0')]
>>> r.glob(top, "sub1/sub1")
Traceback (most recent call last):
...
anytree.resolver.ChildResolverError: Node('/top/sub1') has no child sub1. Children are: 'sub0'.
Non-matching wildcards are no error:
>>> r.glob(top, "bar*")
[]
>>> r.glob(top, "sub2")
Traceback (most recent call last):
...
anytree.resolver.ChildResolverError: Node('/top') has no child sub2. Children are: 'sub0', 'sub1'.
Absolute paths:
>>> r.glob(sub0sub0, "/top/*")
[Node('/top/sub0'), Node('/top/sub1')]
>>> r.glob(sub0sub0, "/")
Traceback (most recent call last):
...
anytree.resolver.ResolverError: root node missing. root is '/top'.
>>> r.glob(sub0sub0, "/bar")
Traceback (most recent call last):
...
anytree.resolver.ResolverError: unknown root node '/bar'. root is '/top'.
"""
node, parts = self.__start(node, path)
return self.__glob(node, parts)
def __start(self, node, path):
sep = node.separator
parts = path.split(sep)
if path.startswith(sep):
node = node.root
rootpart = _getattr(node, self.pathattr)
parts.pop(0)
if not parts[0]:
msg = "root node missing. root is '%s%s'."
raise ResolverError(node, "", msg % (sep, str(rootpart)))
elif parts[0] != rootpart:
msg = "unknown root node '%s%s'. root is '%s%s'."
raise ResolverError(node, "", msg % (sep, parts[0], sep, str(rootpart)))
parts.pop(0)
return node, parts
def __glob(self, node, parts):
nodes = []
name = parts[0]
remainder = parts[1:]
# handle relative
if name == "..":
nodes += self.__glob(node.parent, remainder)
elif name in ("", "."):
nodes += self.__glob(node, remainder)
else:
matches = self.__find(node, name, remainder)
if not matches and not Resolver.is_wildcard(name):
raise ChildResolverError(node, name, self.pathattr)
nodes += matches
return nodes
def __find(self, node, pat, remainder):
matches = []
for child in node.children:
name = _getattr(child, self.pathattr)
try:
if Resolver.__match(name, pat):
if remainder:
matches += self.__glob(child, remainder)
else:
matches.append(child)
except ResolverError as exc:
if not Resolver.is_wildcard(pat):
raise exc
return matches
@staticmethod
def is_wildcard(path):
"""Return `True` is a wildcard."""
return "?" in path or "*" in path
@staticmethod
def __match(name, pat):
try:
re_pat = Resolver._match_cache[pat]
except KeyError:
res = Resolver.__translate(pat)
if len(Resolver._match_cache) >= _MAXCACHE:
Resolver._match_cache.clear()
Resolver._match_cache[pat] = re_pat = re.compile(res)
return re_pat.match(name) is not None
@staticmethod
def __translate(pat):
re_pat = ''
for char in pat:
if char == "*":
re_pat += ".*"
elif char == "?":
re_pat += "."
else:
re_pat += re.escape(char)
return re_pat + r'\Z(?ms)'
|
c0fec0de/anytree
|
anytree/resolver.py
|
Resolver.glob
|
python
|
def glob(self, node, path):
node, parts = self.__start(node, path)
return self.__glob(node, parts)
|
Return instances at `path` supporting wildcards.
Behaves identical to :any:`get`, but accepts wildcards and returns
a list of found nodes.
* `*` matches any characters, except '/'.
* `?` matches a single character, except '/'.
An example module tree:
>>> from anytree import Node
>>> top = Node("top", parent=None)
>>> sub0 = Node("sub0", parent=top)
>>> sub0sub0 = Node("sub0", parent=sub0)
>>> sub0sub1 = Node("sub1", parent=sub0)
>>> sub1 = Node("sub1", parent=top)
>>> sub1sub0 = Node("sub0", parent=sub1)
A resolver using the `name` attribute:
>>> r = Resolver('name')
Relative paths:
>>> r.glob(top, "sub0/sub?")
[Node('/top/sub0/sub0'), Node('/top/sub0/sub1')]
>>> r.glob(sub1, ".././*")
[Node('/top/sub0'), Node('/top/sub1')]
>>> r.glob(top, "*/*")
[Node('/top/sub0/sub0'), Node('/top/sub0/sub1'), Node('/top/sub1/sub0')]
>>> r.glob(top, "*/sub0")
[Node('/top/sub0/sub0'), Node('/top/sub1/sub0')]
>>> r.glob(top, "sub1/sub1")
Traceback (most recent call last):
...
anytree.resolver.ChildResolverError: Node('/top/sub1') has no child sub1. Children are: 'sub0'.
Non-matching wildcards are no error:
>>> r.glob(top, "bar*")
[]
>>> r.glob(top, "sub2")
Traceback (most recent call last):
...
anytree.resolver.ChildResolverError: Node('/top') has no child sub2. Children are: 'sub0', 'sub1'.
Absolute paths:
>>> r.glob(sub0sub0, "/top/*")
[Node('/top/sub0'), Node('/top/sub1')]
>>> r.glob(sub0sub0, "/")
Traceback (most recent call last):
...
anytree.resolver.ResolverError: root node missing. root is '/top'.
>>> r.glob(sub0sub0, "/bar")
Traceback (most recent call last):
...
anytree.resolver.ResolverError: unknown root node '/bar'. root is '/top'.
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/resolver.py#L85-L147
|
[
"def __start(self, node, path):\n sep = node.separator\n parts = path.split(sep)\n if path.startswith(sep):\n node = node.root\n rootpart = _getattr(node, self.pathattr)\n parts.pop(0)\n if not parts[0]:\n msg = \"root node missing. root is '%s%s'.\"\n raise ResolverError(node, \"\", msg % (sep, str(rootpart)))\n elif parts[0] != rootpart:\n msg = \"unknown root node '%s%s'. root is '%s%s'.\"\n raise ResolverError(node, \"\", msg % (sep, parts[0], sep, str(rootpart)))\n parts.pop(0)\n return node, parts\n",
"def __glob(self, node, parts):\n nodes = []\n name = parts[0]\n remainder = parts[1:]\n # handle relative\n if name == \"..\":\n nodes += self.__glob(node.parent, remainder)\n elif name in (\"\", \".\"):\n nodes += self.__glob(node, remainder)\n else:\n matches = self.__find(node, name, remainder)\n if not matches and not Resolver.is_wildcard(name):\n raise ChildResolverError(node, name, self.pathattr)\n nodes += matches\n return nodes\n"
] |
class Resolver(object):
_match_cache = {}
def __init__(self, pathattr='name'):
"""Resolve :any:`NodeMixin` paths using attribute `pathattr`."""
super(Resolver, self).__init__()
self.pathattr = pathattr
def get(self, node, path):
"""
Return instance at `path`.
An example module tree:
>>> from anytree import Node
>>> top = Node("top", parent=None)
>>> sub0 = Node("sub0", parent=top)
>>> sub0sub0 = Node("sub0sub0", parent=sub0)
>>> sub0sub1 = Node("sub0sub1", parent=sub0)
>>> sub1 = Node("sub1", parent=top)
A resolver using the `name` attribute:
>>> r = Resolver('name')
Relative paths:
>>> r.get(top, "sub0/sub0sub0")
Node('/top/sub0/sub0sub0')
>>> r.get(sub1, "..")
Node('/top')
>>> r.get(sub1, "../sub0/sub0sub1")
Node('/top/sub0/sub0sub1')
>>> r.get(sub1, ".")
Node('/top/sub1')
>>> r.get(sub1, "")
Node('/top/sub1')
>>> r.get(top, "sub2")
Traceback (most recent call last):
...
anytree.resolver.ChildResolverError: Node('/top') has no child sub2. Children are: 'sub0', 'sub1'.
Absolute paths:
>>> r.get(sub0sub0, "/top")
Node('/top')
>>> r.get(sub0sub0, "/top/sub0")
Node('/top/sub0')
>>> r.get(sub0sub0, "/")
Traceback (most recent call last):
...
anytree.resolver.ResolverError: root node missing. root is '/top'.
>>> r.get(sub0sub0, "/bar")
Traceback (most recent call last):
...
anytree.resolver.ResolverError: unknown root node '/bar'. root is '/top'.
"""
node, parts = self.__start(node, path)
for part in parts:
if part == "..":
node = node.parent
elif part in ("", "."):
pass
else:
node = self.__get(node, part)
return node
def __get(self, node, name):
for child in node.children:
if _getattr(child, self.pathattr) == name:
return child
raise ChildResolverError(node, name, self.pathattr)
def __start(self, node, path):
sep = node.separator
parts = path.split(sep)
if path.startswith(sep):
node = node.root
rootpart = _getattr(node, self.pathattr)
parts.pop(0)
if not parts[0]:
msg = "root node missing. root is '%s%s'."
raise ResolverError(node, "", msg % (sep, str(rootpart)))
elif parts[0] != rootpart:
msg = "unknown root node '%s%s'. root is '%s%s'."
raise ResolverError(node, "", msg % (sep, parts[0], sep, str(rootpart)))
parts.pop(0)
return node, parts
def __glob(self, node, parts):
nodes = []
name = parts[0]
remainder = parts[1:]
# handle relative
if name == "..":
nodes += self.__glob(node.parent, remainder)
elif name in ("", "."):
nodes += self.__glob(node, remainder)
else:
matches = self.__find(node, name, remainder)
if not matches and not Resolver.is_wildcard(name):
raise ChildResolverError(node, name, self.pathattr)
nodes += matches
return nodes
def __find(self, node, pat, remainder):
matches = []
for child in node.children:
name = _getattr(child, self.pathattr)
try:
if Resolver.__match(name, pat):
if remainder:
matches += self.__glob(child, remainder)
else:
matches.append(child)
except ResolverError as exc:
if not Resolver.is_wildcard(pat):
raise exc
return matches
@staticmethod
def is_wildcard(path):
"""Return `True` is a wildcard."""
return "?" in path or "*" in path
@staticmethod
def __match(name, pat):
try:
re_pat = Resolver._match_cache[pat]
except KeyError:
res = Resolver.__translate(pat)
if len(Resolver._match_cache) >= _MAXCACHE:
Resolver._match_cache.clear()
Resolver._match_cache[pat] = re_pat = re.compile(res)
return re_pat.match(name) is not None
@staticmethod
def __translate(pat):
re_pat = ''
for char in pat:
if char == "*":
re_pat += ".*"
elif char == "?":
re_pat += "."
else:
re_pat += re.escape(char)
return re_pat + r'\Z(?ms)'
|
c0fec0de/anytree
|
anytree/exporter/dotexporter.py
|
DotExporter.to_dotfile
|
python
|
def to_dotfile(self, filename):
with codecs.open(filename, "w", "utf-8") as file:
for line in self:
file.write("%s\n" % line)
|
Write graph to `filename`.
>>> from anytree import Node
>>> root = Node("root")
>>> s0 = Node("sub0", parent=root)
>>> s0b = Node("sub0B", parent=s0)
>>> s0a = Node("sub0A", parent=s0)
>>> s1 = Node("sub1", parent=root)
>>> s1a = Node("sub1A", parent=s1)
>>> s1b = Node("sub1B", parent=s1)
>>> s1c = Node("sub1C", parent=s1)
>>> s1ca = Node("sub1Ca", parent=s1c)
>>> from anytree.exporter import DotExporter
>>> DotExporter(root).to_dotfile("tree.dot")
The generated file should be handed over to the `dot` tool from the
http://www.graphviz.org/ package::
$ dot tree.dot -T png -o tree.png
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/exporter/dotexporter.py#L190-L215
| null |
class DotExporter(object):
def __init__(self, node, graph="digraph", name="tree", options=None,
indent=4, nodenamefunc=None, nodeattrfunc=None,
edgeattrfunc=None, edgetypefunc=None):
"""
Dot Language Exporter.
Args:
node (Node): start node.
Keyword Args:
graph: DOT graph type.
name: DOT graph name.
options: list of options added to the graph.
indent (int): number of spaces for indent.
nodenamefunc: Function to extract node name from `node` object.
The function shall accept one `node` object as
argument and return the name of it.
nodeattrfunc: Function to decorate a node with attributes.
The function shall accept one `node` object as
argument and return the attributes.
edgeattrfunc: Function to decorate a edge with attributes.
The function shall accept two `node` objects as
argument. The first the node and the second the child
and return the attributes.
edgetypefunc: Function to which gives the edge type.
The function shall accept two `node` objects as
argument. The first the node and the second the child
and return the edge (i.e. '->').
>>> from anytree import Node
>>> root = Node("root")
>>> s0 = Node("sub0", parent=root, edge=2)
>>> s0b = Node("sub0B", parent=s0, foo=4, edge=109)
>>> s0a = Node("sub0A", parent=s0, edge="")
>>> s1 = Node("sub1", parent=root, edge="")
>>> s1a = Node("sub1A", parent=s1, edge=7)
>>> s1b = Node("sub1B", parent=s1, edge=8)
>>> s1c = Node("sub1C", parent=s1, edge=22)
>>> s1ca = Node("sub1Ca", parent=s1c, edge=42)
A directed graph:
>>> from anytree.exporter import DotExporter
>>> for line in DotExporter(root):
... print(line)
digraph tree {
"root";
"sub0";
"sub0B";
"sub0A";
"sub1";
"sub1A";
"sub1B";
"sub1C";
"sub1Ca";
"root" -> "sub0";
"root" -> "sub1";
"sub0" -> "sub0B";
"sub0" -> "sub0A";
"sub1" -> "sub1A";
"sub1" -> "sub1B";
"sub1" -> "sub1C";
"sub1C" -> "sub1Ca";
}
An undirected graph:
>>> def nodenamefunc(node):
... return '%s:%s' % (node.name, node.depth)
>>> def edgeattrfunc(node, child):
... return 'label="%s:%s"' % (node.name, child.name)
>>> def edgetypefunc(node, child):
... return '--'
>>> from anytree.exporter import DotExporter
>>> for line in DotExporter(root, graph="graph",
... nodenamefunc=nodenamefunc,
... nodeattrfunc=lambda node: "shape=box",
... edgeattrfunc=edgeattrfunc,
... edgetypefunc=edgetypefunc):
... print(line)
graph tree {
"root:0" [shape=box];
"sub0:1" [shape=box];
"sub0B:2" [shape=box];
"sub0A:2" [shape=box];
"sub1:1" [shape=box];
"sub1A:2" [shape=box];
"sub1B:2" [shape=box];
"sub1C:2" [shape=box];
"sub1Ca:3" [shape=box];
"root:0" -- "sub0:1" [label="root:sub0"];
"root:0" -- "sub1:1" [label="root:sub1"];
"sub0:1" -- "sub0B:2" [label="sub0:sub0B"];
"sub0:1" -- "sub0A:2" [label="sub0:sub0A"];
"sub1:1" -- "sub1A:2" [label="sub1:sub1A"];
"sub1:1" -- "sub1B:2" [label="sub1:sub1B"];
"sub1:1" -- "sub1C:2" [label="sub1:sub1C"];
"sub1C:2" -- "sub1Ca:3" [label="sub1C:sub1Ca"];
}
"""
self.node = node
self.graph = graph
self.name = name
self.options = options
self.indent = indent
self.nodenamefunc = nodenamefunc
self.nodeattrfunc = nodeattrfunc
self.edgeattrfunc = edgeattrfunc
self.edgetypefunc = edgetypefunc
def __iter__(self):
# prepare
indent = " " * self.indent
nodenamefunc = self.nodenamefunc or DotExporter.__default_nodenamefunc
nodeattrfunc = self.nodeattrfunc or DotExporter.__default_nodeattrfunc
edgeattrfunc = self.edgeattrfunc or DotExporter.__default_edgeattrfunc
edgetypefunc = self.edgetypefunc or DotExporter.__default_edgetypefunc
return self.__iter(indent, nodenamefunc, nodeattrfunc, edgeattrfunc,
edgetypefunc)
@staticmethod
def __default_nodenamefunc(node):
return node.name
@staticmethod
def __default_nodeattrfunc(node):
return None
@staticmethod
def __default_edgeattrfunc(node, child):
return None
@staticmethod
def __default_edgetypefunc(node, child):
return "->"
def __iter(self, indent, nodenamefunc, nodeattrfunc, edgeattrfunc, edgetypefunc):
yield "{self.graph} {self.name} {{".format(self=self)
for option in self.__iter_options(indent):
yield option
for node in self.__iter_nodes(indent, nodenamefunc, nodeattrfunc):
yield node
for edge in self.__iter_edges(indent, nodenamefunc, edgeattrfunc, edgetypefunc):
yield edge
yield "}"
def __iter_options(self, indent):
options = self.options
if options:
for option in options:
yield "%s%s" % (indent, option)
def __iter_nodes(self, indent, nodenamefunc, nodeattrfunc):
for node in PreOrderIter(self.node):
nodename = nodenamefunc(node)
nodeattr = nodeattrfunc(node)
nodeattr = " [%s]" % nodeattr if nodeattr is not None else ""
yield '%s"%s"%s;' % (indent, DotExporter.esc(nodename), nodeattr)
def __iter_edges(self, indent, nodenamefunc, edgeattrfunc, edgetypefunc):
for node in PreOrderIter(self.node):
nodename = nodenamefunc(node)
for child in node.children:
childname = nodenamefunc(child)
edgeattr = edgeattrfunc(node, child)
edgetype = edgetypefunc(node, child)
edgeattr = " [%s]" % edgeattr if edgeattr is not None else ""
yield '%s"%s" %s "%s"%s;' % (indent, DotExporter.esc(nodename), edgetype,
DotExporter.esc(childname), edgeattr)
def to_picture(self, filename):
"""
Write graph to a temporary file and invoke `dot`.
The output file type is automatically detected from the file suffix.
*`graphviz` needs to be installed, before usage of this method.*
"""
fileformat = path.splitext(filename)[1][1:]
with NamedTemporaryFile("wb", delete=False) as dotfile:
dotfilename = dotfile.name
for line in self:
dotfile.write(("%s\n" % line).encode("utf-8"))
dotfile.flush()
cmd = ["dot", dotfilename, "-T", fileformat, "-o", filename]
check_call(cmd)
try:
remove(dotfilename)
except Exception: # pragma: no cover
msg = 'Could not remove temporary file %s' % dotfilename
logging.getLogger(__name__).warn(msg)
@staticmethod
def esc(str):
"""Escape Strings."""
return str.replace("\"", "\\\"")
|
c0fec0de/anytree
|
anytree/exporter/dotexporter.py
|
DotExporter.to_picture
|
python
|
def to_picture(self, filename):
fileformat = path.splitext(filename)[1][1:]
with NamedTemporaryFile("wb", delete=False) as dotfile:
dotfilename = dotfile.name
for line in self:
dotfile.write(("%s\n" % line).encode("utf-8"))
dotfile.flush()
cmd = ["dot", dotfilename, "-T", fileformat, "-o", filename]
check_call(cmd)
try:
remove(dotfilename)
except Exception: # pragma: no cover
msg = 'Could not remove temporary file %s' % dotfilename
logging.getLogger(__name__).warn(msg)
|
Write graph to a temporary file and invoke `dot`.
The output file type is automatically detected from the file suffix.
*`graphviz` needs to be installed, before usage of this method.*
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/exporter/dotexporter.py#L217-L237
| null |
class DotExporter(object):
def __init__(self, node, graph="digraph", name="tree", options=None,
indent=4, nodenamefunc=None, nodeattrfunc=None,
edgeattrfunc=None, edgetypefunc=None):
"""
Dot Language Exporter.
Args:
node (Node): start node.
Keyword Args:
graph: DOT graph type.
name: DOT graph name.
options: list of options added to the graph.
indent (int): number of spaces for indent.
nodenamefunc: Function to extract node name from `node` object.
The function shall accept one `node` object as
argument and return the name of it.
nodeattrfunc: Function to decorate a node with attributes.
The function shall accept one `node` object as
argument and return the attributes.
edgeattrfunc: Function to decorate a edge with attributes.
The function shall accept two `node` objects as
argument. The first the node and the second the child
and return the attributes.
edgetypefunc: Function to which gives the edge type.
The function shall accept two `node` objects as
argument. The first the node and the second the child
and return the edge (i.e. '->').
>>> from anytree import Node
>>> root = Node("root")
>>> s0 = Node("sub0", parent=root, edge=2)
>>> s0b = Node("sub0B", parent=s0, foo=4, edge=109)
>>> s0a = Node("sub0A", parent=s0, edge="")
>>> s1 = Node("sub1", parent=root, edge="")
>>> s1a = Node("sub1A", parent=s1, edge=7)
>>> s1b = Node("sub1B", parent=s1, edge=8)
>>> s1c = Node("sub1C", parent=s1, edge=22)
>>> s1ca = Node("sub1Ca", parent=s1c, edge=42)
A directed graph:
>>> from anytree.exporter import DotExporter
>>> for line in DotExporter(root):
... print(line)
digraph tree {
"root";
"sub0";
"sub0B";
"sub0A";
"sub1";
"sub1A";
"sub1B";
"sub1C";
"sub1Ca";
"root" -> "sub0";
"root" -> "sub1";
"sub0" -> "sub0B";
"sub0" -> "sub0A";
"sub1" -> "sub1A";
"sub1" -> "sub1B";
"sub1" -> "sub1C";
"sub1C" -> "sub1Ca";
}
An undirected graph:
>>> def nodenamefunc(node):
... return '%s:%s' % (node.name, node.depth)
>>> def edgeattrfunc(node, child):
... return 'label="%s:%s"' % (node.name, child.name)
>>> def edgetypefunc(node, child):
... return '--'
>>> from anytree.exporter import DotExporter
>>> for line in DotExporter(root, graph="graph",
... nodenamefunc=nodenamefunc,
... nodeattrfunc=lambda node: "shape=box",
... edgeattrfunc=edgeattrfunc,
... edgetypefunc=edgetypefunc):
... print(line)
graph tree {
"root:0" [shape=box];
"sub0:1" [shape=box];
"sub0B:2" [shape=box];
"sub0A:2" [shape=box];
"sub1:1" [shape=box];
"sub1A:2" [shape=box];
"sub1B:2" [shape=box];
"sub1C:2" [shape=box];
"sub1Ca:3" [shape=box];
"root:0" -- "sub0:1" [label="root:sub0"];
"root:0" -- "sub1:1" [label="root:sub1"];
"sub0:1" -- "sub0B:2" [label="sub0:sub0B"];
"sub0:1" -- "sub0A:2" [label="sub0:sub0A"];
"sub1:1" -- "sub1A:2" [label="sub1:sub1A"];
"sub1:1" -- "sub1B:2" [label="sub1:sub1B"];
"sub1:1" -- "sub1C:2" [label="sub1:sub1C"];
"sub1C:2" -- "sub1Ca:3" [label="sub1C:sub1Ca"];
}
"""
self.node = node
self.graph = graph
self.name = name
self.options = options
self.indent = indent
self.nodenamefunc = nodenamefunc
self.nodeattrfunc = nodeattrfunc
self.edgeattrfunc = edgeattrfunc
self.edgetypefunc = edgetypefunc
def __iter__(self):
# prepare
indent = " " * self.indent
nodenamefunc = self.nodenamefunc or DotExporter.__default_nodenamefunc
nodeattrfunc = self.nodeattrfunc or DotExporter.__default_nodeattrfunc
edgeattrfunc = self.edgeattrfunc or DotExporter.__default_edgeattrfunc
edgetypefunc = self.edgetypefunc or DotExporter.__default_edgetypefunc
return self.__iter(indent, nodenamefunc, nodeattrfunc, edgeattrfunc,
edgetypefunc)
@staticmethod
def __default_nodenamefunc(node):
return node.name
@staticmethod
def __default_nodeattrfunc(node):
return None
@staticmethod
def __default_edgeattrfunc(node, child):
return None
@staticmethod
def __default_edgetypefunc(node, child):
return "->"
def __iter(self, indent, nodenamefunc, nodeattrfunc, edgeattrfunc, edgetypefunc):
yield "{self.graph} {self.name} {{".format(self=self)
for option in self.__iter_options(indent):
yield option
for node in self.__iter_nodes(indent, nodenamefunc, nodeattrfunc):
yield node
for edge in self.__iter_edges(indent, nodenamefunc, edgeattrfunc, edgetypefunc):
yield edge
yield "}"
def __iter_options(self, indent):
options = self.options
if options:
for option in options:
yield "%s%s" % (indent, option)
def __iter_nodes(self, indent, nodenamefunc, nodeattrfunc):
for node in PreOrderIter(self.node):
nodename = nodenamefunc(node)
nodeattr = nodeattrfunc(node)
nodeattr = " [%s]" % nodeattr if nodeattr is not None else ""
yield '%s"%s"%s;' % (indent, DotExporter.esc(nodename), nodeattr)
def __iter_edges(self, indent, nodenamefunc, edgeattrfunc, edgetypefunc):
for node in PreOrderIter(self.node):
nodename = nodenamefunc(node)
for child in node.children:
childname = nodenamefunc(child)
edgeattr = edgeattrfunc(node, child)
edgetype = edgetypefunc(node, child)
edgeattr = " [%s]" % edgeattr if edgeattr is not None else ""
yield '%s"%s" %s "%s"%s;' % (indent, DotExporter.esc(nodename), edgetype,
DotExporter.esc(childname), edgeattr)
def to_dotfile(self, filename):
"""
Write graph to `filename`.
>>> from anytree import Node
>>> root = Node("root")
>>> s0 = Node("sub0", parent=root)
>>> s0b = Node("sub0B", parent=s0)
>>> s0a = Node("sub0A", parent=s0)
>>> s1 = Node("sub1", parent=root)
>>> s1a = Node("sub1A", parent=s1)
>>> s1b = Node("sub1B", parent=s1)
>>> s1c = Node("sub1C", parent=s1)
>>> s1ca = Node("sub1Ca", parent=s1c)
>>> from anytree.exporter import DotExporter
>>> DotExporter(root).to_dotfile("tree.dot")
The generated file should be handed over to the `dot` tool from the
http://www.graphviz.org/ package::
$ dot tree.dot -T png -o tree.png
"""
with codecs.open(filename, "w", "utf-8") as file:
for line in self:
file.write("%s\n" % line)
@staticmethod
def esc(str):
"""Escape Strings."""
return str.replace("\"", "\\\"")
|
c0fec0de/anytree
|
anytree/util/__init__.py
|
commonancestors
|
python
|
def commonancestors(*nodes):
ancestors = [node.ancestors for node in nodes]
common = []
for parentnodes in zip(*ancestors):
parentnode = parentnodes[0]
if all([parentnode is p for p in parentnodes[1:]]):
common.append(parentnode)
else:
break
return tuple(common)
|
Determine common ancestors of `nodes`.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> dan = Node("Dan", parent=udo)
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> commonancestors(jet, joe)
(Node('/Udo'), Node('/Udo/Dan'))
>>> commonancestors(jet, marc)
(Node('/Udo'),)
>>> commonancestors(jet)
(Node('/Udo'), Node('/Udo/Dan'))
>>> commonancestors()
()
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/util/__init__.py#L4-L34
| null |
"""Utilities."""
def leftsibling(node):
"""
Return Left Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> leftsibling(dan)
>>> leftsibling(jet)
>>> leftsibling(jan)
Node('/Dan/Jet')
>>> leftsibling(joe)
Node('/Dan/Jan')
"""
if node.parent:
pchildren = node.parent.children
idx = pchildren.index(node)
if idx:
return pchildren[idx - 1]
else:
return None
else:
return None
def rightsibling(node):
"""
Return Right Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> rightsibling(dan)
>>> rightsibling(jet)
Node('/Dan/Jan')
>>> rightsibling(jan)
Node('/Dan/Joe')
>>> rightsibling(joe)
"""
if node.parent:
pchildren = node.parent.children
idx = pchildren.index(node)
try:
return pchildren[idx + 1]
except IndexError:
return None
else:
return None
|
c0fec0de/anytree
|
anytree/util/__init__.py
|
leftsibling
|
python
|
def leftsibling(node):
if node.parent:
pchildren = node.parent.children
idx = pchildren.index(node)
if idx:
return pchildren[idx - 1]
else:
return None
else:
return None
|
Return Left Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> leftsibling(dan)
>>> leftsibling(jet)
>>> leftsibling(jan)
Node('/Dan/Jet')
>>> leftsibling(joe)
Node('/Dan/Jan')
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/util/__init__.py#L37-L61
| null |
"""Utilities."""
def commonancestors(*nodes):
"""
Determine common ancestors of `nodes`.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> dan = Node("Dan", parent=udo)
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> commonancestors(jet, joe)
(Node('/Udo'), Node('/Udo/Dan'))
>>> commonancestors(jet, marc)
(Node('/Udo'),)
>>> commonancestors(jet)
(Node('/Udo'), Node('/Udo/Dan'))
>>> commonancestors()
()
"""
ancestors = [node.ancestors for node in nodes]
common = []
for parentnodes in zip(*ancestors):
parentnode = parentnodes[0]
if all([parentnode is p for p in parentnodes[1:]]):
common.append(parentnode)
else:
break
return tuple(common)
def rightsibling(node):
"""
Return Right Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> rightsibling(dan)
>>> rightsibling(jet)
Node('/Dan/Jan')
>>> rightsibling(jan)
Node('/Dan/Joe')
>>> rightsibling(joe)
"""
if node.parent:
pchildren = node.parent.children
idx = pchildren.index(node)
try:
return pchildren[idx + 1]
except IndexError:
return None
else:
return None
|
c0fec0de/anytree
|
anytree/util/__init__.py
|
rightsibling
|
python
|
def rightsibling(node):
if node.parent:
pchildren = node.parent.children
idx = pchildren.index(node)
try:
return pchildren[idx + 1]
except IndexError:
return None
else:
return None
|
Return Right Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> rightsibling(dan)
>>> rightsibling(jet)
Node('/Dan/Jan')
>>> rightsibling(jan)
Node('/Dan/Joe')
>>> rightsibling(joe)
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/util/__init__.py#L64-L88
| null |
"""Utilities."""
def commonancestors(*nodes):
"""
Determine common ancestors of `nodes`.
>>> from anytree import Node
>>> udo = Node("Udo")
>>> marc = Node("Marc", parent=udo)
>>> lian = Node("Lian", parent=marc)
>>> dan = Node("Dan", parent=udo)
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> commonancestors(jet, joe)
(Node('/Udo'), Node('/Udo/Dan'))
>>> commonancestors(jet, marc)
(Node('/Udo'),)
>>> commonancestors(jet)
(Node('/Udo'), Node('/Udo/Dan'))
>>> commonancestors()
()
"""
ancestors = [node.ancestors for node in nodes]
common = []
for parentnodes in zip(*ancestors):
parentnode = parentnodes[0]
if all([parentnode is p for p in parentnodes[1:]]):
common.append(parentnode)
else:
break
return tuple(common)
def leftsibling(node):
"""
Return Left Sibling of `node`.
>>> from anytree import Node
>>> dan = Node("Dan")
>>> jet = Node("Jet", parent=dan)
>>> jan = Node("Jan", parent=dan)
>>> joe = Node("Joe", parent=dan)
>>> leftsibling(dan)
>>> leftsibling(jet)
>>> leftsibling(jan)
Node('/Dan/Jet')
>>> leftsibling(joe)
Node('/Dan/Jan')
"""
if node.parent:
pchildren = node.parent.children
idx = pchildren.index(node)
if idx:
return pchildren[idx - 1]
else:
return None
else:
return None
|
c0fec0de/anytree
|
anytree/exporter/dictexporter.py
|
DictExporter.export
|
python
|
def export(self, node):
attriter = self.attriter or (lambda attr_values: attr_values)
return self.__export(node, self.dictcls, attriter, self.childiter)
|
Export tree starting at `node`.
|
train
|
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/exporter/dictexporter.py#L70-L73
|
[
"def __export(self, node, dictcls, attriter, childiter):\n attr_values = attriter(self._iter_attr_values(node))\n data = dictcls(attr_values)\n children = [self.__export(child, dictcls, attriter, childiter)\n for child in childiter(node.children)]\n if children:\n data['children'] = children\n return data\n"
] |
class DictExporter(object):
def __init__(self, dictcls=dict, attriter=None, childiter=list):
"""
Tree to dictionary exporter.
Every node is converted to a dictionary with all instance
attributes as key-value pairs.
Child nodes are exported to the children attribute.
A list of dictionaries.
Keyword Args:
dictcls: class used as dictionary. :any:`dict` by default.
attriter: attribute iterator for sorting and/or filtering.
childiter: child iterator for sorting and/or filtering.
>>> from pprint import pprint # just for nice printing
>>> from anytree import AnyNode
>>> from anytree.exporter import DictExporter
>>> root = AnyNode(a="root")
>>> s0 = AnyNode(a="sub0", parent=root)
>>> s0a = AnyNode(a="sub0A", b="foo", parent=s0)
>>> s0b = AnyNode(a="sub0B", parent=s0)
>>> s1 = AnyNode(a="sub1", parent=root)
>>> exporter = DictExporter()
>>> pprint(exporter.export(root)) # order within dictionary might vary!
{'a': 'root',
'children': [{'a': 'sub0',
'children': [{'a': 'sub0A', 'b': 'foo'}, {'a': 'sub0B'}]},
{'a': 'sub1'}]}
Pythons dictionary `dict` does not preserve order.
:any:`collections.OrderedDict` does.
In this case attributes can be ordered via `attriter`.
>>> from collections import OrderedDict
>>> exporter = DictExporter(dictcls=OrderedDict, attriter=sorted)
>>> pprint(exporter.export(root))
OrderedDict([('a', 'root'),
('children',
[OrderedDict([('a', 'sub0'),
('children',
[OrderedDict([('a', 'sub0A'), ('b', 'foo')]),
OrderedDict([('a', 'sub0B')])])]),
OrderedDict([('a', 'sub1')])])])
The attribute iterator `attriter` may be used for filtering too.
For example, just dump attributes named `a`:
>>> exporter = DictExporter(attriter=lambda attrs: [(k, v) for k, v in attrs if k == "a"])
>>> pprint(exporter.export(root))
{'a': 'root',
'children': [{'a': 'sub0', 'children': [{'a': 'sub0A'}, {'a': 'sub0B'}]},
{'a': 'sub1'}]}
The child iterator `childiter` can be used for sorting and filtering likewise:
>>> exporter = DictExporter(childiter=lambda children: [child for child in children if "0" in child.a])
>>> pprint(exporter.export(root))
{'a': 'root',
'children': [{'a': 'sub0',
'children': [{'a': 'sub0A', 'b': 'foo'}, {'a': 'sub0B'}]}]}
"""
self.dictcls = dictcls
self.attriter = attriter
self.childiter = childiter
def __export(self, node, dictcls, attriter, childiter):
    """Recursively convert *node* and its subtree into nested dictionaries.

    Attribute pairs are ordered/filtered by *attriter* and children by
    *childiter*; non-empty child lists are stored under the 'children' key.
    """
    attr_values = attriter(self._iter_attr_values(node))
    data = dictcls(attr_values)
    children = [self.__export(child, dictcls, attriter, childiter)
                for child in childiter(node.children)]
    if children:
        data['children'] = children
    return data
def _iter_attr_values(self, node):
    """Return the (name, value) pairs of *node*'s instance attributes."""
    return node.__dict__.items()
|
gouthambs/Flask-Blogging
|
flask_blogging/engine.py
|
BloggingEngine.init_app
|
python
|
def init_app(self, app, storage=None, cache=None, file_upload=None):
    """Bind this BloggingEngine to a Flask *app* (deferred-init pattern).

    :param app: the Flask application to attach to
    :param storage: optional ``Storage`` implementation; falls back to the
        one given at construction time
    :param cache: optional Flask-Cache object to enable caching
    :param file_upload: optional flask_fileupload object for media uploads
    """
    self.app = app
    self.config = self.app.config
    # Explicit arguments override whatever __init__ stored, if provided.
    self.storage = storage or self.storage
    self.file_upload = file_upload or self.file_upload
    self.cache = cache or self.cache
    # Plugins may hook signals, so register them before the blueprint
    # is created and the blueprint_created signal fires.
    self._register_plugins(self.app, self.config)
    from .views import create_blueprint
    blog_app = create_blueprint(__name__, self)
    # external urls
    blueprint_created.send(self.app, engine=self, blueprint=blog_app)
    self.app.register_blueprint(
        blog_app, url_prefix=self.config.get("BLOGGING_URL_PREFIX"))
    self.app.extensions["FLASK_BLOGGING_ENGINE"] = self # duplicate
    self.app.extensions["blogging"] = self
    self.principal = Principal(self.app)
    engine_initialised.send(self.app, engine=self)
    # File uploads are on by default; set BLOGGING_ALLOW_FILEUPLOAD=False
    # to disable.
    if self.config.get("BLOGGING_ALLOW_FILEUPLOAD", True):
        self.ffu = self.file_upload or FlaskFileUpload(app)
|
Initialize the engine.
:param app: The app to use
:type app: Object
:param storage: The blog storage instance that implements the
:type storage: Object
:param cache: (Optional) A Flask-Cache object to enable caching
:type cache: Object
``Storage`` class interface.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/engine.py#L78-L111
|
[
"def create_blueprint(import_name, blogging_engine):\n\n blog_app = Blueprint(\"blogging\", import_name, template_folder='templates')\n\n # register index\n index_func = cached_func(blogging_engine, index)\n blog_app.add_url_rule(\"/\", defaults={\"count\": None, \"page\": 1},\n view_func=index_func)\n blog_app.add_url_rule(\"/<int:count>/\", defaults={\"page\": 1},\n view_func=index_func)\n blog_app.add_url_rule(\"/<int:count>/<int:page>/\", view_func=index_func)\n\n # register page_by_id\n page_by_id_func = cached_func(blogging_engine, page_by_id)\n blog_app.add_url_rule(\"/page/<post_id>/\", defaults={\"slug\": \"\"},\n view_func=page_by_id_func)\n blog_app.add_url_rule(\"/page/<post_id>/<slug>/\",\n view_func=page_by_id_func)\n\n # register posts_by_tag\n posts_by_tag_func = cached_func(blogging_engine, posts_by_tag)\n blog_app.add_url_rule(\"/tag/<tag>/\", defaults=dict(count=None, page=1),\n view_func=posts_by_tag_func)\n blog_app.add_url_rule(\"/tag/<tag>/<int:count>/\", defaults=dict(page=1),\n view_func=posts_by_tag_func)\n blog_app.add_url_rule(\"/tag/<tag>/<int:count>/<int:page>/\",\n view_func=posts_by_tag_func)\n\n # register posts_by_author\n posts_by_author_func = cached_func(blogging_engine, posts_by_author)\n blog_app.add_url_rule(\"/author/<user_id>/\",\n defaults=dict(count=None, page=1),\n view_func=posts_by_author_func)\n blog_app.add_url_rule(\"/author/<user_id>/<int:count>/\",\n defaults=dict(page=1),\n view_func=posts_by_author_func)\n blog_app.add_url_rule(\"/author/<user_id>/<int:count>/<int:page>/\",\n view_func=posts_by_author_func)\n\n # register editor\n editor_func = editor # For now lets not cache this\n blog_app.add_url_rule('/editor/', methods=[\"GET\", \"POST\"],\n defaults={\"post_id\": None},\n view_func=editor_func)\n blog_app.add_url_rule('/editor/<post_id>/', methods=[\"GET\", \"POST\"],\n view_func=editor_func)\n\n # register delete\n delete_func = delete # For now lets not cache this\n 
blog_app.add_url_rule(\"/delete/<post_id>/\", methods=[\"POST\"],\n view_func=delete_func)\n\n # register sitemap\n sitemap_func = cached_func(blogging_engine, sitemap)\n blog_app.add_url_rule(\"/sitemap.xml\", view_func=sitemap_func)\n\n # register feed\n feed_func = cached_func(blogging_engine, feed)\n blog_app.add_url_rule('/feeds/all.atom.xml', view_func=feed_func)\n\n return blog_app\n",
"def _register_plugins(cls, app, config):\n plugins = config.get(\"BLOGGING_PLUGINS\")\n if plugins:\n for plugin in plugins:\n lib = __import__(plugin, globals(), locals(), str(\"module\"))\n lib.register(app)\n"
] |
class BloggingEngine(object):
    """
    The BloggingEngine is the class for initializing the blog support for your
    web app. Here is an example usage:
    .. code:: python
        from flask import Flask
        from flask_blogging import BloggingEngine, SQLAStorage
        from sqlalchemy import create_engine
        app = Flask(__name__)
        db_engine = create_engine("sqlite:////tmp/sqlite.db")
        meta = MetaData()
        storage = SQLAStorage(db_engine, metadata=meta)
        blog_engine = BloggingEngine(app, storage)
    """
    def __init__(self, app=None, storage=None, post_processor=None,
                 extensions=None, cache=None, file_upload=None):
        """
        :param app: Optional app to use
        :type app: object
        :param storage: The blog storage instance that implements the
            ``Storage`` class interface.
        :type storage: object
        :param post_processor: (optional) The post processor object. If none
            provided, the default post processor is used.
        :type post_processor: object
        :param extensions: (optional) A list of markdown extensions to add to
            post processing step.
        :type extensions: list
        :param cache: (Optional) A Flask-Cache object to enable caching
        :type cache: Object
        :param file_upload: (Optional) A FileUpload object from
            flask_fileupload extension
        :type file_upload: Object
        :return:
        """
        self.app = None
        self.storage = storage
        self.config = None
        self.ffu = None
        self.cache = cache
        self._blogger_permission = None
        # principal default must be assigned BEFORE init_app runs;
        # previously it was assigned afterwards and clobbered the
        # Principal created inside init_app.
        self.principal = None
        self.post_processor = PostProcessor() if post_processor is None \
            else post_processor
        if extensions:
            self.post_processor.set_custom_extensions(extensions)
        self.user_callback = None
        self.file_upload = file_upload
        if app is not None and storage is not None:
            self.init_app(app, storage)
    @classmethod
    def _register_plugins(cls, app, config):
        """Import and register every plugin listed in BLOGGING_PLUGINS."""
        plugins = config.get("BLOGGING_PLUGINS")
        if plugins:
            for plugin in plugins:
                lib = __import__(plugin, globals(), locals(), str("module"))
                lib.register(app)
    @property
    def blogger_permission(self):
        """Lazily-built Permission gating blogging actions.

        Role-based when BLOGGING_PERMISSIONS is enabled; otherwise an
        allow-all Permission.
        """
        if self._blogger_permission is None:
            if self.config.get("BLOGGING_PERMISSIONS", False):
                self._blogger_permission = Permission(RoleNeed(
                    self.config.get("BLOGGING_PERMISSIONNAME", "blogger")))
            else:
                self._blogger_permission = Permission()
        return self._blogger_permission
    def user_loader(self, callback):
        """
        The decorator for loading the user.
        :param callback: The callback function that can load a user given a
            unicode ``user_id``.
        :return: The callback function
        """
        self.user_callback = callback
        return callback
    def is_user_blogger(self):
        """Return True when the current user holds the blogger permission."""
        return self.blogger_permission.require().can()
    def get_posts(self, count=10, offset=0, recent=True, tag=None,
                  user_id=None, include_draft=False, render=False):
        """Fetch posts from storage, process each one, and return them.

        Fixes over the previous version: delegate to the storage's
        ``get_posts`` method (the storage object itself is not callable),
        honour the ``render`` flag instead of hard-coding ``False``, and
        return the list of posts instead of ``None``.
        """
        posts = self.storage.get_posts(count, offset, recent, tag, user_id,
                                       include_draft)
        for post in posts:
            self.process_post(post, render=render)
        return posts
    def process_post(self, post, render=True):
        """
        A high level view to create post processing.
        :param post: Dictionary representing the post
        :type post: dict
        :param render: Choice if the markdown text has to be converted or not
        :type render: bool
        :return:
        """
        post_processor = self.post_processor
        post_processor.process(post, render)
        try:
            author = self.user_callback(post["user_id"])
        except Exception:
            raise Exception("No user_loader has been installed for this "
                            "BloggingEngine. Add one with the "
                            "'BloggingEngine.user_loader' decorator.")
        if author is not None:
            post["user_name"] = self.get_user_name(author)
        post_processed.send(self.app, engine=self, post=post, render=render)
    @classmethod
    def get_user_name(cls, user):
        """Display name of *user*: its ``get_name()`` if present, else str()."""
        user_name = user.get_name() if hasattr(user, "get_name") else str(user)
        return user_name
|
gouthambs/Flask-Blogging
|
flask_blogging/engine.py
|
BloggingEngine.process_post
|
python
|
def process_post(self, post, render=True):
    """Run a post dict through the configured processing pipeline.

    Applies the post processor (optionally rendering the markdown),
    resolves the author's display name through the registered user
    loader, and fires the ``post_processed`` signal.

    :param post: dictionary representing the post
    :param render: whether the markdown text should be converted
    """
    self.post_processor.process(post, render)
    try:
        author = self.user_callback(post["user_id"])
    except Exception:
        raise Exception("No user_loader has been installed for this "
                        "BloggingEngine. Add one with the "
                        "'BloggingEngine.user_loader' decorator.")
    if author is not None:
        post["user_name"] = self.get_user_name(author)
    post_processed.send(self.app, engine=self, post=post, render=render)
|
A high level view to create post processing.
:param post: Dictionary representing the post
:type post: dict
:param render: Whether the markdown text should be converted or not
:type render: bool
:return:
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/engine.py#L144-L163
|
[
"def get_user_name(cls, user):\n user_name = user.get_name() if hasattr(user, \"get_name\") else str(user)\n return user_name\n"
] |
class BloggingEngine(object):
    """
    The BloggingEngine is the class for initializing the blog support for your
    web app. Here is an example usage:
    .. code:: python
        from flask import Flask
        from flask_blogging import BloggingEngine, SQLAStorage
        from sqlalchemy import create_engine
        app = Flask(__name__)
        db_engine = create_engine("sqlite:////tmp/sqlite.db")
        meta = MetaData()
        storage = SQLAStorage(db_engine, metadata=meta)
        blog_engine = BloggingEngine(app, storage)
    """
    def __init__(self, app=None, storage=None, post_processor=None,
                 extensions=None, cache=None, file_upload=None):
        """
        :param app: Optional app to use
        :type app: object
        :param storage: The blog storage instance that implements the
            ``Storage`` class interface.
        :type storage: object
        :param post_processor: (optional) The post processor object. If none
            provided, the default post processor is used.
        :type post_processor: object
        :param extensions: (optional) A list of markdown extensions to add to
            post processing step.
        :type extensions: list
        :param cache: (Optional) A Flask-Cache object to enable caching
        :type cache: Object
        :param file_upload: (Optional) A FileUpload object from
            flask_fileupload extension
        :type file_upload: Object
        :return:
        """
        self.app = None
        self.storage = storage
        self.config = None
        self.ffu = None
        self.cache = cache
        self._blogger_permission = None
        # principal default must be assigned BEFORE init_app runs;
        # previously it was assigned afterwards and clobbered the
        # Principal created inside init_app.
        self.principal = None
        self.post_processor = PostProcessor() if post_processor is None \
            else post_processor
        if extensions:
            self.post_processor.set_custom_extensions(extensions)
        self.user_callback = None
        self.file_upload = file_upload
        if app is not None and storage is not None:
            self.init_app(app, storage)
    @classmethod
    def _register_plugins(cls, app, config):
        """Import and register every plugin listed in BLOGGING_PLUGINS."""
        plugins = config.get("BLOGGING_PLUGINS")
        if plugins:
            for plugin in plugins:
                lib = __import__(plugin, globals(), locals(), str("module"))
                lib.register(app)
    def init_app(self, app, storage=None, cache=None, file_upload=None):
        """
        Initialize the engine.
        :param app: The app to use
        :type app: Object
        :param storage: The blog storage instance that implements the
            ``Storage`` class interface.
        :type storage: Object
        :param cache: (Optional) A Flask-Cache object to enable caching
        :type cache: Object
        """
        self.app = app
        self.config = self.app.config
        self.storage = storage or self.storage
        self.file_upload = file_upload or self.file_upload
        self.cache = cache or self.cache
        # Plugins may hook signals, so register them before the blueprint.
        self._register_plugins(self.app, self.config)
        from .views import create_blueprint
        blog_app = create_blueprint(__name__, self)
        # external urls
        blueprint_created.send(self.app, engine=self, blueprint=blog_app)
        self.app.register_blueprint(
            blog_app, url_prefix=self.config.get("BLOGGING_URL_PREFIX"))
        self.app.extensions["FLASK_BLOGGING_ENGINE"] = self # duplicate
        self.app.extensions["blogging"] = self
        self.principal = Principal(self.app)
        engine_initialised.send(self.app, engine=self)
        if self.config.get("BLOGGING_ALLOW_FILEUPLOAD", True):
            self.ffu = self.file_upload or FlaskFileUpload(app)
    @property
    def blogger_permission(self):
        """Lazily-built Permission gating blogging actions.

        Role-based when BLOGGING_PERMISSIONS is enabled; otherwise an
        allow-all Permission.
        """
        if self._blogger_permission is None:
            if self.config.get("BLOGGING_PERMISSIONS", False):
                self._blogger_permission = Permission(RoleNeed(
                    self.config.get("BLOGGING_PERMISSIONNAME", "blogger")))
            else:
                self._blogger_permission = Permission()
        return self._blogger_permission
    def user_loader(self, callback):
        """
        The decorator for loading the user.
        :param callback: The callback function that can load a user given a
            unicode ``user_id``.
        :return: The callback function
        """
        self.user_callback = callback
        return callback
    def is_user_blogger(self):
        """Return True when the current user holds the blogger permission."""
        return self.blogger_permission.require().can()
    def get_posts(self, count=10, offset=0, recent=True, tag=None,
                  user_id=None, include_draft=False, render=False):
        """Fetch posts from storage, process each one, and return them.

        Fixes over the previous version: delegate to the storage's
        ``get_posts`` method (the storage object itself is not callable),
        honour the ``render`` flag instead of hard-coding ``False``, and
        return the list of posts instead of ``None``.
        """
        posts = self.storage.get_posts(count, offset, recent, tag, user_id,
                                       include_draft)
        for post in posts:
            self.process_post(post, render=render)
        return posts
    @classmethod
    def get_user_name(cls, user):
        """Display name of *user*: its ``get_name()`` if present, else str()."""
        user_name = user.get_name() if hasattr(user, "get_name") else str(user)
        return user_name
|
gouthambs/Flask-Blogging
|
flask_blogging/processor.py
|
PostProcessor.process
|
python
|
def process(cls, post, render=True):
    """
    This method takes the post data and renders it.
    :param post: dictionary holding the post's fields (mutated in place)
    :param render: if True, convert the markdown text to HTML
    :return:
    """
    post["slug"] = cls.create_slug(post["title"])
    # NOTE(review): current_user comes from flask_login and assumes an
    # active request context -- confirm callers only run inside a request.
    post["editable"] = cls.is_author(post, current_user)
    post["url"] = cls.construct_url(post)
    post["priority"] = 0.8  # sitemap priority hint
    if render:
        cls.render_text(post)
        # render_text populates post["meta"]; image URLs can only be
        # extracted from the rendered HTML.
        post["meta"]["images"] = cls.extract_images(post)
|
This method takes the post data and renders it
:param post:
:param render:
:return:
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/processor.py#L67-L80
| null |
class PostProcessor(object):
    """Transforms raw post data: slug, URL, editability flag and
    markdown rendering."""
    # Markdown extensions shared class-wide; extended via
    # set_custom_extensions.
    _markdown_extensions = [MathJaxExtension(), MetaExtension()]
    @staticmethod
    def create_slug(title):
        """Return a URL-safe slug derived from *title*."""
        return slugify(title)
    @staticmethod
    def extract_images(post):
        """Return the image ``src`` URLs found in the rendered HTML."""
        regex = re.compile(r'<\s*img [^>]*src="([^"]+)')
        return regex.findall(post["rendered_text"])
    @classmethod
    def construct_url(cls, post):
        """Build the canonical URL for *post* from its id and slug."""
        url = url_for("blogging.page_by_id", post_id=post["post_id"],
                      slug=cls.create_slug(post["title"]))
        return url
    @classmethod
    def render_text(cls, post):
        """Convert the post's markdown to HTML and capture its metadata."""
        md = markdown.Markdown(extensions=cls.all_extensions())
        post["rendered_text"] = md.convert(post["text"])
        post["meta"] = md.Meta
    @classmethod
    def is_author(cls, post, user):
        """Return True when *user* is the author of *post*."""
        return user.get_id() == u''+str(post['user_id'])
    # Fix: the previous version stacked two @classmethod decorators on
    # all_extensions, which made the method uncallable
    # (classmethod wrapping a classmethod object).
    @classmethod
    def all_extensions(cls):
        """Return the markdown extensions currently configured."""
        return cls._markdown_extensions
    @classmethod
    def set_custom_extensions(cls, extensions):
        """Append user-supplied markdown *extensions* to the shared list."""
        if type(extensions) == list:
            cls._markdown_extensions.extend(extensions)
|
gouthambs/Flask-Blogging
|
flask_blogging/storage.py
|
Storage.save_post
|
python
|
def save_post(self, title, text, user_id, tags, draft=False,
              post_date=None, last_modified_date=None, meta_data=None,
              post_id=None):
    """Persist the blog post data (abstract).

    Concrete ``Storage`` subclasses must override this method: insert a
    new post when ``post_id`` is missing or invalid, update it when the
    id is valid, and return the resulting post id (``None`` on error).
    """
    message = ("This method needs to be implemented by "
               "the inheriting class")
    raise NotImplementedError(message)
|
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
is invalid, the post must be inserted into the storage. If ``post_id``
is a valid id, then the data must be updated.
:param title: The title of the blog post
:type title: str
:param text: The text of the blog post
:type text: str
:param user_id: The user identifier
:type user_id: str
:param tags: A list of tags
:type tags: list
:param draft: If the post is a draft or if it needs to be published.
:type draft: bool
:param post_date: (Optional) The date the blog was posted (default
datetime.datetime.utcnow())
:type post_date: datetime.datetime
:param last_modified_date: (Optional) The date when blog was last
modified (default datetime.datetime.utcnow())
:type last_modified_date: datetime.datetime
:param meta_data: The meta data for the blog post
:type meta_data: dict
:param post_id: The post identifier. This should be ``None`` for an
insert call, and a valid value for update.
:type post_id: int
:return: The post_id value, in case of a successful insert or update.
Return ``None`` if there were errors.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/storage.py#L9-L43
| null |
class Storage(object):
    """Abstract base class defining the blog persistence interface.

    Concrete back-ends (SQL, cloud datastore, ...) override the CRUD
    methods; the tag-normalization helpers are shared by all of them.
    """
    def get_post_by_id(self, post_id):
        """Fetch the blog post given by ``post_id``.

        Returns the post data for a valid id, ``None`` otherwise.
        """
        raise NotImplementedError("This method needs to be implemented by the "
                                  "inheriting class")
    def get_posts(self, count=10, offset=0, recent=True, tag=None,
                  user_id=None, include_draft=False):
        """Retrieve posts matching the filter criteria.

        ``count``/``offset`` page the results (all posts when ``count``
        is ``None``); ``recent`` controls ordering; ``tag``, ``user_id``
        and ``include_draft`` filter. Returns a list of post dicts with
        title, text, draft, post_date and last_modified_date.
        """
        raise NotImplementedError("This method needs to be implemented by the "
                                  "inheriting class")
    def count_posts(self, tag=None, user_id=None, include_draft=False):
        """Return the total number of posts for the given filter."""
        raise NotImplementedError("This method needs to be implemented by the "
                                  "inheriting class")
    def delete_post(self, post_id):
        """Delete the post ``post_id``; True on success, False otherwise."""
        raise NotImplementedError("This method needs to be implemented by the "
                                  "inheriting class")
    @classmethod
    def normalize_tags(cls, tags):
        """Return every tag in *tags* in canonical form."""
        return list(map(cls.normalize_tag, tags))
    @staticmethod
    def normalize_tag(tag):
        """Canonical form of a tag: upper-cased, surrounding space removed."""
        return tag.upper().strip()
|
gouthambs/Flask-Blogging
|
flask_blogging/storage.py
|
Storage.get_posts
|
python
|
def get_posts(self, count=10, offset=0, recent=True, tag=None,
              user_id=None, include_draft=False):
    """Retrieve posts matching the filter criteria (abstract).

    Concrete ``Storage`` implementations must override this and return a
    list of post dicts (all posts when *count* is ``None``), paged by
    ``offset``, ordered by recency when ``recent`` is True, and filtered
    by ``tag``, ``user_id`` and ``include_draft``.
    """
    message = ("This method needs to be implemented by the "
               "inheriting class")
    raise NotImplementedError(message)
|
Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10). If count
is ``None``, all posts are returned.
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/storage.py#L57-L82
| null |
class Storage(object):
    """Abstract base class defining the blog persistence interface.

    Back-ends override the CRUD methods below; the tag helpers are
    shared utilities.
    """
    def save_post(self, title, text, user_id, tags, draft=False,
                  post_date=None, last_modified_date=None, meta_data=None,
                  post_id=None):
        """Persist the blog post data.

        Inserts a new post when ``post_id`` is ``None`` or invalid,
        updates the existing one otherwise. ``post_date`` and
        ``last_modified_date`` default to the current UTC time in
        implementations. Returns the post id on success, ``None`` on
        error.
        """
        raise NotImplementedError("This method needs to be implemented by "
                                  "the inheriting class")
    def get_post_by_id(self, post_id):
        """Fetch the post given by ``post_id``; ``None`` when invalid."""
        raise NotImplementedError("This method needs to be implemented by the "
                                  "inheriting class")
    def count_posts(self, tag=None, user_id=None, include_draft=False):
        """Return the total number of posts for the given filter."""
        raise NotImplementedError("This method needs to be implemented by the "
                                  "inheriting class")
    def delete_post(self, post_id):
        """Delete the post ``post_id``; True on success, False otherwise."""
        raise NotImplementedError("This method needs to be implemented by the "
                                  "inheriting class")
    @classmethod
    def normalize_tags(cls, tags):
        """Return every tag in *tags* in canonical form."""
        return list(map(cls.normalize_tag, tags))
    @staticmethod
    def normalize_tag(tag):
        """Canonical form of a tag: upper-cased, surrounding space removed."""
        return tag.upper().strip()
|
gouthambs/Flask-Blogging
|
flask_blogging/gcdatastore.py
|
GoogleCloudDatastore.get_posts
|
python
|
def get_posts(self, count=10, offset=0, recent=True, tag=None,
              user_id=None, include_draft=False):
    """Fetch posts from Datastore matching the filter criteria.

    TODO: implement cursors support, if it will be needed.
    But for the regular blog, it is overhead and cost savings are minimal.
    """
    query = self._client.query(kind='Post')
    if tag:
        # Tag filtering is resolved client-side via a projection query,
        # then the matching entities are fetched by key in one batch.
        norm_tag = self.normalize_tag(tag)
        posts_ids = self._filter_posts_by_tag(norm_tag)
        if posts_ids:
            keys = [self._client.key('Post', id) for id in posts_ids]
            posts = self._client.get_multi(keys)
        else:
            posts = []
    else:
        # NOTE(review): user_id/draft filters are only applied on this
        # branch; the tag branch above ignores them -- confirm intended.
        if user_id:
            query.add_filter('user_id', '=', user_id)
        if include_draft:
            query.add_filter('draft', '=', include_draft)
        if recent:
            query.order = ['-post_date']
        posts = list(query.fetch(offset=offset, limit=count))
    if not posts:
        return []
    res = []
    for post in posts:
        p = dict(post)
        res.append(p)
    # get_multi does not honour ordering or pagination, so the tag branch
    # sorts and slices in memory.
    if tag and recent:
        res = sorted(res, key=itemgetter('post_date'), reverse=True)
    elif tag and not recent:
        res = sorted(res, key=itemgetter('post_date'))
    if tag:
        res = res[offset:offset+count]
    return res
|
TODO: implement cursors support, if it will be needed.
But for the regular blog, it is overhead and
cost savings are minimal.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/gcdatastore.py#L111-L153
|
[
"def _filter_posts_by_tag(self, tag):\n if not tag:\n return []\n else:\n query = self._client.query(kind='Post')\n query.projection = ['post_id', 'tags']\n proj_result = list(query.fetch())\n\n proj_result = [dict(entity) for entity in proj_result]\n ids = set()\n\n for entity in proj_result:\n if entity[\"tags\"] == tag:\n ids.add(entity[\"post_id\"])\n\n return list(ids)\n",
"def normalize_tag(tag):\n return tag.upper().strip()\n"
] |
class GoogleCloudDatastore(Storage):
    """``Storage`` implementation backed by Google Cloud Datastore.

    Posts are stored as 'Post' entities; ids are handed out by a single
    'PostIDCounter' entity named 'Counter'.
    """
    def __init__(self, namespace=None):
        # namespace: optional Datastore namespace (multi-tenancy support).
        self._logger = logging.getLogger("flask-blogging")
        self._client = datastore.Client(namespace=namespace)
    def _get_new_post_id(self):
        """Return the next post id, creating or incrementing the counter.

        NOTE(review): unguarded read-modify-write (no transaction); two
        concurrent saves could be handed the same id -- confirm acceptable.
        """
        key = self._client.key('PostIDCounter', 'Counter')
        query = self._client.get(key)
        if query:
            counter = dict(query)
        else:
            counter = None
        if counter:
            # Bump the stored counter and persist it back.
            counter = counter["value"]+1
            key = self._client.key('PostIDCounter', 'Counter')
            task = self._client.get(key)
            task['value'] = counter
            self._client.put(task)
            return int(counter)
        else:
            # Create a new counter
            key = self._client.key('PostIDCounter', 'Counter')
            counter = datastore.Entity(key=key)
            counter.update({
                'value': 1,
            })
            self._client.put(counter)
            return 1
    def save_post(self, title, text, user_id, tags, draft=False,
                  post_date=None, last_modified_date=None, meta_data=None,
                  post_id=None):
        """Insert or update a 'Post' entity and return the post id.

        Dates default to the current UTC time; tags are normalized
        before storing; the post text is excluded from indexing.
        """
        if post_id is not None:
            update_op = True
        else:
            update_op = False
        post_id = post_id or self._get_new_post_id()
        current_datetime = datetime.datetime.utcnow()
        post_date = post_date or current_datetime
        last_modified_date = last_modified_date or current_datetime
        tags = self.normalize_tags(tags)
        draft = True if draft else False
        if not update_op:
            # Plain insert with a freshly allocated id.
            key = self._client.key('Post', int(post_id))
            post = datastore.Entity(key=key, exclude_from_indexes=['text'])
            post.update({
                'title': title,
                'text': text,
                'user_id': user_id,
                'tags': tags or [],
                'draft': draft,
                'post_date': post_date,
                'last_modified_date': last_modified_date,
                'meta_data': meta_data,
                'post_id': int(post_id)
            })
            self._client.put(post)
            return post_id
        else:
            # Update path: when the given id does not exist, fall back to
            # inserting under a newly allocated id.
            key = self._client.key('Post', int(post_id))
            post = self._client.get(key)
            if not post:
                post_id = self._get_new_post_id()
                key = self._client.key('Post', int(post_id))
                post = datastore.Entity(key=key, exclude_from_indexes=['text'])
            post.update({
                'title': title,
                'text': text,
                'user_id': user_id,
                'tags': tags or [],
                'draft': draft,
                'post_date': post_date,
                'last_modified_date': last_modified_date,
                'meta_data': meta_data,
                'post_id': int(post_id)
            })
            self._client.put(post)
            return int(post_id)
    def _filter_posts_by_tag(self, tag):
        """Return the ids of posts carrying *tag*.

        Uses a projection query over (post_id, tags); rows are compared
        against the single projected tag value.
        """
        if not tag:
            return []
        else:
            query = self._client.query(kind='Post')
            query.projection = ['post_id', 'tags']
            proj_result = list(query.fetch())
            proj_result = [dict(entity) for entity in proj_result]
            ids = set()
            for entity in proj_result:
                if entity["tags"] == tag:
                    ids.add(entity["post_id"])
            return list(ids)
    def count_posts(self, tag=None, user_id=None, include_draft=False):
        """Count posts matching the filters.

        NOTE(review): fetches all matching entities just to count them;
        fine for small blogs, costly at scale.
        """
        query = self._client.query(kind='Post')
        if tag:
            norm_tag = self.normalize_tag(tag)
            query.add_filter('tags', '=', norm_tag)
        if user_id:
            query.add_filter('user_id', '=', user_id)
        if include_draft:
            query.add_filter('draft', '=', include_draft)
        posts = list(query.fetch())
        result = len(posts)
        return result
    def get_post_by_id(self, post_id):
        """Return the post dict for *post_id*, or None when absent."""
        if post_id:
            query = self._client.query(kind='Post')
            query.add_filter('post_id', '=', int(post_id))
            post = list(query.fetch())
            if post:
                res = dict(post[0])
                return res
        return None
    def delete_post(self, post_id):
        """Delete the post by id; True on success, False otherwise.

        NOTE(review): Datastore delete is a no-op for missing keys, so
        this returns True even when the post never existed.
        """
        if post_id:
            key = self._client.key('Post', int(post_id))
            try:
                self._client.delete(key)
            except Exception as ex:
                self._logger.error(str(ex))
                return False
            return True
        else:
            return False
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage.save_post
|
python
|
def save_post(self, title, text, user_id, tags, draft=False,
              post_date=None, last_modified_date=None, meta_data=None,
              post_id=None):
    """Insert or update a blog post in the SQL store.

    Inserts when ``post_id`` is ``None`` or does not exist, updates
    otherwise; the post's tags and the user-post link are saved in the
    same transaction. Dates default to the current UTC time. Returns the
    post id, or ``None`` when an error occurred (errors are logged, not
    raised).

    NOTE(review): ``meta_data`` is accepted but never persisted here --
    confirm whether that is intentional.
    """
    # Fix: dropped the unused local ``new_post`` that the previous
    # version computed and never read.
    post_id = _as_int(post_id)
    current_datetime = datetime.datetime.utcnow()
    draft = 1 if draft is True else 0
    post_date = post_date if post_date is not None else current_datetime
    last_modified_date = last_modified_date if last_modified_date is not \
        None else current_datetime
    with self._engine.begin() as conn:
        try:
            if post_id is not None:  # validate post_id
                exists_statement = sqla.select([self._post_table]).where(
                    self._post_table.c.id == post_id)
                exists = \
                    conn.execute(exists_statement).fetchone() is not None
                post_id = post_id if exists else None
            # Insert when there is no valid id, update otherwise.
            post_statement = \
                self._post_table.insert() if post_id is None else \
                self._post_table.update().where(
                    self._post_table.c.id == post_id)
            post_statement = post_statement.values(
                title=title, text=text, post_date=post_date,
                last_modified_date=last_modified_date, draft=draft
            )
            post_result = conn.execute(post_statement)
            post_id = post_result.inserted_primary_key[0] \
                if post_id is None else post_id
            self._save_tags(tags, post_id, conn)
            self._save_user_post(user_id, post_id, conn)
        except Exception as e:
            self._logger.exception(str(e))
            post_id = None
    return post_id
|
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
is invalid, the post must be inserted into the storage. If ``post_id``
is a valid id, then the data must be updated.
:param title: The title of the blog post
:type title: str
:param text: The text of the blog post
:type text: str
:param user_id: The user identifier
:type user_id: str
:param tags: A list of tags
:type tags: list
:param draft: (Optional) If the post is a draft of if needs to be
published. (default ``False``)
:type draft: bool
:param post_date: (Optional) The date the blog was posted (default
datetime.datetime.utcnow() )
:type post_date: datetime.datetime
:param last_modified_date: (Optional) The date when blog was last
modified (default datetime.datetime.utcnow() )
:type last_modified_date: datetime.datetime
:param post_id: (Optional) The post identifier. This should be ``None``
for an insert call,
and a valid value for update. (default ``None``)
:type post_id: str
:return: The post_id value, in case of a successful insert or update.
Return ``None`` if there were errors.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L130-L197
|
[
"def _as_int(s):\n try:\n n = int(s) if s is not None else None\n return n\n except ValueError:\n return None\n",
"def _save_tags(self, tags, post_id, conn):\n\n tags = self.normalize_tags(tags)\n tag_ids = []\n\n for tag in tags: # iterate over given tags\n try:\n # check if the tag exists\n statement = self._tag_table.select().where(\n self._tag_table.c.text == tag)\n tag_result = conn.execute(statement).fetchone()\n if tag_result is None:\n # insert if it is a new tag\n tag_insert_statement = self._tag_table.insert().\\\n values(text=tag)\n result = conn.execute(tag_insert_statement)\n tag_id = result.inserted_primary_key[0]\n else:\n # tag already exists\n tag_id = tag_result[0]\n\n except sqla.exc.IntegrityError as e:\n # some database error occurred;\n tag_id = None\n self._logger.exception(str(e))\n\n except Exception as e:\n # unknown exception occurred\n tag_id = None\n self._logger.exception(str(e))\n\n if tag_id is not None:\n # for a valid tag_id\n tag_ids.append(tag_id)\n\n try:\n # check if given post has tag given by tag_id\n statement = self._tag_posts_table.select().where(\n sqla.and_(self._tag_posts_table.c.tag_id == tag_id,\n self._tag_posts_table.c.post_id == post_id))\n tag_post_result = conn.execute(statement).fetchone()\n\n if tag_post_result is None:\n # if tag_id not present for the post given by post_id\n tag_post_statement = self._tag_posts_table.insert().\\\n values(tag_id=tag_id, post_id=post_id)\n conn.execute(tag_post_statement)\n\n except sqla.exc.IntegrityError as e:\n self._logger.exception(str(e))\n except Exception as e:\n self._logger.exception(str(e))\n try:\n # remove tags that have been deleted\n statement = self._tag_posts_table.delete().where(\n sqla.and_(sqla.not_(\n self._tag_posts_table.c.tag_id.in_(tag_ids)),\n self._tag_posts_table.c.post_id == post_id\n )\n )\n conn.execute(statement)\n except Exception as e:\n self._logger.exception(str(e))\n",
"def _save_user_post(self, user_id, post_id, conn):\n user_id = str(user_id)\n statement = sqla.select([self._user_posts_table]).where(\n self._user_posts_table.c.post_id == post_id)\n result = conn.execute(statement).fetchone()\n if result is None:\n try:\n statement = self._user_posts_table.insert().values(\n user_id=user_id, post_id=post_id)\n conn.execute(statement)\n except Exception as e:\n self._logger.exception(str(e))\n else:\n if result[0] != user_id:\n try:\n statement = self._user_posts_table.update().where(\n self._user_posts_table.c.post_id == post_id). \\\n values(user_id=user_id)\n conn.execute(statement)\n except Exception as e:\n self._logger.exception(str(e))\n"
] |
class SQLAStorage(Storage):
"""
The ``SQLAStorage`` implements the interface specified by the ``Storage``
class. This class uses SQLAlchemy to implement storage and retrieval of
data from any of the databases supported by SQLAlchemy.
"""
_db = None
_logger = logging.getLogger("flask-blogging")
    def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
                 bind=None):
        """
        The constructor for the ``SQLAStorage`` class.

        :param engine: The ``SQLAlchemy`` engine instance created by calling
            ``create_engine``. One can also use Flask-SQLAlchemy, and pass the
            engine property.
        :type engine: object
        :param table_prefix: (Optional) Prefix to use for the tables created
            (default ``""``).
        :type table_prefix: str
        :param metadata: (Optional) The SQLAlchemy MetaData object
        :type metadata: object
        :param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
        :type db: object
        :param bind: (Optional) Reference the database to bind for multiple
            database scenario with binds
        :type bind: str
        :raises ValueError: if neither ``db`` nor ``engine`` is supplied.
        """
        self._bind = bind
        if db:
            # Flask-SQLAlchemy path: the extension supplies both the engine
            # (honouring the bind key) and the shared metadata.
            self._engine = db.get_engine(db.get_app(), bind=self._bind)
            self._metadata = db.metadata
        else:
            if engine is None:
                raise ValueError("Both db and engine args cannot be None")
            self._engine = engine
            self._metadata = metadata or sqla.MetaData()
        # ``info`` is attached to every table created below so multi-database
        # (bind) setups can route the tables to the right database.
        self._info = {} if self._bind is None else {"bind_key": self._bind}
        self._table_prefix = table_prefix
        self._metadata.reflect(bind=self._engine)
        self._create_all_tables()
        # automap base and restrict to the required tables here.
        table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
        table_names = [self._table_name(t) for t in table_suffix]
        self._metadata.create_all(bind=self._engine, tables=self.all_tables)
        meta = sqla.MetaData()
        meta.reflect(bind=self._engine, only=table_names)
        self._Base = automap_base(metadata=meta)
        self._Base.prepare()
        self._inject_models()
        # Notify listeners that the storage layer is fully initialised.
        sqla_initialized.send(self, engine=self._engine,
                              table_prefix=self._table_prefix,
                              meta=self.metadata,
                              bind=self._bind)
def _inject_models(self):
global this
this.Post = getattr(self._Base.classes, self._table_name("post"))
this.Post.__name__ = 'Post'
this.Tag = getattr(self._Base.classes, self._table_name("tag"))
this.Tag.__name__ = 'Tag'
    @property
    def metadata(self):
        """The SQLAlchemy ``MetaData`` holding the storage table definitions."""
        return self._metadata
    @property
    def post_table(self):
        """The table storing the blog posts."""
        return self._post_table
    @property
    def post_model(self):
        """The automapped ORM model class for the post table."""
        return getattr(self._Base.classes, self._table_name("post"))
    @property
    def tag_model(self):
        """The automapped ORM model class for the tag table."""
        return getattr(self._Base.classes, self._table_name("tag"))
    @property
    def tag_table(self):
        """The table storing the tags."""
        return self._tag_table
    @property
    def tag_posts_table(self):
        """The tag-to-post association table."""
        return self._tag_posts_table
    @property
    def user_posts_table(self):
        """The user-to-post association table."""
        return self._user_posts_table
    @property
    def all_tables(self):
        """All four storage tables, as a list."""
        return [self._post_table, self._tag_table,
                self._user_posts_table, self._tag_posts_table]
    @property
    def engine(self):
        """The SQLAlchemy engine used for all database access."""
        return self._engine
@classmethod
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
"""
Translates multiple rows of joined post and tag information
into the dictionary format expected by flask-blogging.
There will be one row per post/tag pairing.
"""
posts_by_id = OrderedDict()
tags_by_post_id = defaultdict(list)
for joined_row in joined_rows:
post_id = joined_row.post_id
post = cls._serialise_post_from_joined_row(joined_row)
posts_by_id[post_id] = post
tags_by_post_id[post_id].append(joined_row.tag_text)
for id, post in posts_by_id.items():
tags = tags_by_post_id.get(id)
if tags:
post["tags"] = tags
return [post for post in posts_by_id.values()]
@staticmethod
def _serialise_post_from_joined_row(joined_row):
return dict(
post_id=joined_row.post_id,
title=joined_row.post_title,
text=joined_row.post_text,
post_date=joined_row.post_post_date,
last_modified_date=joined_row.post_last_modified_date,
draft=joined_row.post_draft,
user_id=joined_row.user_posts_user_id
)
def get_post_by_id(self, post_id):
"""
Fetch the blog post given by ``post_id``
:param post_id: The post identifier for the blog post
:type post_id: str
:return: If the ``post_id`` is valid, the post data is retrieved, else
returns ``None``.
"""
r = None
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_statement = sqla.select([self._post_table]) \
.where(self._post_table.c.id == post_id) \
.alias('post')
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
# Note this will retrieve one row per tag
all_rows = conn.execute(
sqla.select([joined_statement])
).fetchall()
r = self._serialise_posts_and_tags_from_joined_rows(
all_rows
)[0]
except Exception as e:
self._logger.exception(str(e))
r = None
return r
    def get_posts(self, count=10, offset=0, recent=True, tag=None,
                  user_id=None, include_draft=False):
        """
        Get posts given by filter criteria

        :param count: The number of posts to retrieve (default 10)
        :type count: int
        :param offset: The number of posts to offset (default 0)
        :type offset: int
        :param recent: Order by recent posts or not
        :type recent: bool
        :param tag: Filter by a specific tag
        :type tag: str
        :param user_id: Filter by a specific user
        :type user_id: str
        :param include_draft: Whether to include posts marked as draft or not
        :type include_draft: bool
        :return: A list of posts, with each element a dict containing values
            for the following keys: (title, text, draft, post_date,
            last_modified_date). If count is ``None``, then all the posts are
            returned.
        """
        user_id = str(user_id) if user_id else user_id
        with self._engine.begin() as conn:
            try:
                # post_statement ensures the correct posts are selected
                # in the correct order; limit/offset must apply to posts,
                # not to the joined (one-row-per-tag) result.
                post_statement = sqla.select([self._post_table])
                post_filter = self._get_filter(
                    tag, user_id, include_draft, conn
                )
                if post_filter is not None:
                    post_statement = post_statement.where(post_filter)
                if count:
                    post_statement = post_statement.limit(count)
                if offset:
                    post_statement = post_statement.offset(offset)
                post_ordering = \
                    sqla.desc(self._post_table.c.post_date) if recent \
                    else self._post_table.c.post_date
                post_statement = post_statement.order_by(post_ordering)
                post_statement = post_statement.alias('post')
                # joined_statement ensures other data is retrieved;
                # it yields one row per (post, tag) pairing.
                joined_statement = post_statement.join(self._tag_posts_table) \
                    .join(self._tag_table) \
                    .join(self._user_posts_table) \
                    .alias('join')
                # Ordering is re-applied on the outer select, presumably
                # because the inner ORDER BY is not guaranteed to survive
                # the join on all backends — TODO confirm.
                joined_ordering = \
                    sqla.desc(joined_statement.c.post_post_date) if recent \
                    else joined_statement.c.post_post_date
                joined_statement = sqla.select([joined_statement]) \
                    .order_by(joined_ordering)
                all_rows = conn.execute(joined_statement).fetchall()
                result = \
                    self._serialise_posts_and_tags_from_joined_rows(all_rows)
            except Exception as e:
                self._logger.exception(str(e))
                result = []
        return result
def count_posts(self, tag=None, user_id=None, include_draft=False):
"""
Returns the total number of posts for the give filter
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: The number of posts for the given filter.
"""
result = 0
with self._engine.begin() as conn:
try:
count_statement = sqla.select([sqla.func.count()]). \
select_from(self._post_table)
sql_filter = self._get_filter(tag, user_id, include_draft,
conn)
count_statement = count_statement.where(sql_filter)
result = conn.execute(count_statement).scalar()
except Exception as e:
self._logger.exception(str(e))
result = 0
return result
def delete_post(self, post_id):
"""
Delete the post defined by ``post_id``
:param post_id: The identifier corresponding to a post
:type post_id: int
:return: Returns True if the post was successfully deleted and False
otherwise.
"""
status = False
success = 0
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_del_statement = self._post_table.delete().where(
self._post_table.c.id == post_id)
conn.execute(post_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
user_posts_del_statement = self._user_posts_table.delete(). \
where(self._user_posts_table.c.post_id == post_id)
conn.execute(user_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
tag_posts_del_statement = self._tag_posts_table.delete(). \
where(self._tag_posts_table.c.post_id == post_id)
conn.execute(tag_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
status = success == 3
return status
def _get_filter(self, tag, user_id, include_draft, conn):
filters = []
if tag:
tag = tag.upper()
tag_statement = sqla.select([self._tag_table.c.id]).where(
self._tag_table.c.text == tag)
tag_result = conn.execute(tag_statement).fetchone()
if tag_result is not None:
tag_id = tag_result[0]
tag_filter = sqla.and_(
self._tag_posts_table.c.tag_id == tag_id,
self._post_table.c.id == self._tag_posts_table.c.post_id
)
filters.append(tag_filter)
if user_id:
user_filter = sqla.and_(
self._user_posts_table.c.user_id == user_id,
self._post_table.c.id == self._user_posts_table.c.post_id
)
filters.append(user_filter)
draft_filter = self._post_table.c.draft == 1 if include_draft else \
self._post_table.c.draft == 0
filters.append(draft_filter)
sql_filter = sqla.and_(*filters)
return sql_filter
    def _save_tags(self, tags, post_id, conn):
        """Synchronise tag rows and tag-post links for ``post_id``.

        Creates any tags that do not yet exist, links each given tag to the
        post, and finally unlinks tags no longer present in ``tags``.  Each
        step logs-and-continues on error so one bad tag does not abort the
        rest of the save.
        """
        tags = self.normalize_tags(tags)
        tag_ids = []
        for tag in tags:  # iterate over given tags
            try:
                # check if the tag exists
                statement = self._tag_table.select().where(
                    self._tag_table.c.text == tag)
                tag_result = conn.execute(statement).fetchone()
                if tag_result is None:
                    # insert if it is a new tag
                    tag_insert_statement = self._tag_table.insert().\
                        values(text=tag)
                    result = conn.execute(tag_insert_statement)
                    tag_id = result.inserted_primary_key[0]
                else:
                    # tag already exists
                    tag_id = tag_result[0]
            except sqla.exc.IntegrityError as e:
                # some database error occurred; skip this tag
                tag_id = None
                self._logger.exception(str(e))
            except Exception as e:
                # unknown exception occurred; skip this tag
                tag_id = None
                self._logger.exception(str(e))
            if tag_id is not None:
                # for a valid tag_id
                tag_ids.append(tag_id)
                try:
                    # check if given post has tag given by tag_id
                    statement = self._tag_posts_table.select().where(
                        sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
                                  self._tag_posts_table.c.post_id == post_id))
                    tag_post_result = conn.execute(statement).fetchone()
                    if tag_post_result is None:
                        # if tag_id not present for the post given by post_id
                        tag_post_statement = self._tag_posts_table.insert().\
                            values(tag_id=tag_id, post_id=post_id)
                        conn.execute(tag_post_statement)
                except sqla.exc.IntegrityError as e:
                    self._logger.exception(str(e))
                except Exception as e:
                    self._logger.exception(str(e))
        try:
            # remove tags that have been deleted; note an empty ``tags``
            # list unlinks every tag from this post
            statement = self._tag_posts_table.delete().where(
                sqla.and_(sqla.not_(
                    self._tag_posts_table.c.tag_id.in_(tag_ids)),
                    self._tag_posts_table.c.post_id == post_id
                )
            )
            conn.execute(statement)
        except Exception as e:
            self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
user_id = str(user_id)
statement = sqla.select([self._user_posts_table]).where(
self._user_posts_table.c.post_id == post_id)
result = conn.execute(statement).fetchone()
if result is None:
try:
statement = self._user_posts_table.insert().values(
user_id=user_id, post_id=post_id)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
else:
if result[0] != user_id:
try:
statement = self._user_posts_table.update().where(
self._user_posts_table.c.post_id == post_id). \
values(user_id=user_id)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
def _table_name(self, table_name):
return self._table_prefix + table_name
    def _create_all_tables(self):
        """
        Creates all the required tables by calling the required functions.

        :return:
        """
        # Order matters: the two association tables declare foreign keys
        # into the post and tag tables, so those are defined first.
        self._create_post_table()
        self._create_tag_table()
        self._create_tag_posts_table()
        self._create_user_posts_table()
def _create_post_table(self):
"""
Creates the table to store the blog posts.
:return:
"""
with self._engine.begin() as conn:
post_table_name = self._table_name("post")
if not conn.dialect.has_table(conn, post_table_name):
self._post_table = sqla.Table(
post_table_name, self._metadata,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("title", sqla.String(256)),
sqla.Column("text", sqla.Text),
sqla.Column("post_date", sqla.DateTime),
sqla.Column("last_modified_date", sqla.DateTime),
# if 1 then make it a draft
sqla.Column("draft", sqla.SmallInteger, default=0),
info=self._info
)
self._logger.debug("Created table with table name %s" %
post_table_name)
else:
self._post_table = self._metadata.tables[post_table_name]
self._logger.debug("Reflecting to table with table name %s" %
post_table_name)
def _create_tag_table(self):
"""
Creates the table to store blog post tags.
:return:
"""
with self._engine.begin() as conn:
tag_table_name = self._table_name("tag")
if not conn.dialect.has_table(conn, tag_table_name):
self._tag_table = sqla.Table(
tag_table_name, self._metadata,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("text", sqla.String(128), unique=True,
index=True),
info=self._info
)
self._logger.debug("Created table with table name %s" %
tag_table_name)
else:
self._tag_table = self._metadata.tables[tag_table_name]
self._logger.debug("Reflecting to table with table name %s" %
tag_table_name)
def _create_tag_posts_table(self):
"""
Creates the table to store association info between blog posts and
tags.
:return:
"""
with self._engine.begin() as conn:
tag_posts_table_name = self._table_name("tag_posts")
if not conn.dialect.has_table(conn, tag_posts_table_name):
tag_id_key = self._table_name("tag") + ".id"
post_id_key = self._table_name("post") + ".id"
self._tag_posts_table = sqla.Table(
tag_posts_table_name, self._metadata,
sqla.Column('tag_id', sqla.Integer,
sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.Column('post_id', sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
tag_posts_table_name)
else:
self._tag_posts_table = \
self._metadata.tables[tag_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
tag_posts_table_name)
def _create_user_posts_table(self):
"""
Creates the table to store association info between user and blog
posts.
:return:
"""
with self._engine.begin() as conn:
user_posts_table_name = self._table_name("user_posts")
if not conn.dialect.has_table(conn, user_posts_table_name):
post_id_key = self._table_name("post") + ".id"
self._user_posts_table = sqla.Table(
user_posts_table_name, self._metadata,
sqla.Column("user_id", sqla.String(128), index=True),
sqla.Column("post_id", sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
user_posts_table_name)
else:
self._user_posts_table = \
self._metadata.tables[user_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
user_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage._serialise_posts_and_tags_from_joined_rows
|
python
|
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
posts_by_id = OrderedDict()
tags_by_post_id = defaultdict(list)
for joined_row in joined_rows:
post_id = joined_row.post_id
post = cls._serialise_post_from_joined_row(joined_row)
posts_by_id[post_id] = post
tags_by_post_id[post_id].append(joined_row.tag_text)
for id, post in posts_by_id.items():
tags = tags_by_post_id.get(id)
if tags:
post["tags"] = tags
return [post for post in posts_by_id.values()]
|
Translates multiple rows of joined post and tag information
into the dictionary format expected by flask-blogging.
There will be one row per post/tag pairing.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L200-L219
|
[
"def _serialise_post_from_joined_row(joined_row):\n return dict(\n post_id=joined_row.post_id,\n title=joined_row.post_title,\n text=joined_row.post_text,\n post_date=joined_row.post_post_date,\n last_modified_date=joined_row.post_last_modified_date,\n draft=joined_row.post_draft,\n user_id=joined_row.user_posts_user_id\n )\n"
] |
class SQLAStorage(Storage):
"""
The ``SQLAStorage`` implements the interface specified by the ``Storage``
class. This class uses SQLAlchemy to implement storage and retrieval of
data from any of the databases supported by SQLAlchemy.
"""
_db = None
_logger = logging.getLogger("flask-blogging")
def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
bind=None):
"""
The constructor for the ``SQLAStorage`` class.
:param engine: The ``SQLAlchemy`` engine instance created by calling
``create_engine``. One can also use Flask-SQLAlchemy, and pass the
engine property.
:type engine: object
:param table_prefix: (Optional) Prefix to use for the tables created
(default ``""``).
:type table_prefix: str
:param metadata: (Optional) The SQLAlchemy MetaData object
:type metadata: object
:param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
:type db: object
:param bind: (Optional) Reference the database to bind for multiple
database scenario with binds
:type bind: str
"""
self._bind = bind
if db:
self._engine = db.get_engine(db.get_app(), bind=self._bind)
self._metadata = db.metadata
else:
if engine is None:
raise ValueError("Both db and engine args cannot be None")
self._engine = engine
self._metadata = metadata or sqla.MetaData()
self._info = {} if self._bind is None else {"bind_key": self._bind}
self._table_prefix = table_prefix
self._metadata.reflect(bind=self._engine)
self._create_all_tables()
# automap base and restrict to the required tables here.
table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
table_names = [self._table_name(t) for t in table_suffix]
self._metadata.create_all(bind=self._engine, tables=self.all_tables)
meta = sqla.MetaData()
meta.reflect(bind=self._engine, only=table_names)
self._Base = automap_base(metadata=meta)
self._Base.prepare()
self._inject_models()
sqla_initialized.send(self, engine=self._engine,
table_prefix=self._table_prefix,
meta=self.metadata,
bind=self._bind)
def _inject_models(self):
global this
this.Post = getattr(self._Base.classes, self._table_name("post"))
this.Post.__name__ = 'Post'
this.Tag = getattr(self._Base.classes, self._table_name("tag"))
this.Tag.__name__ = 'Tag'
@property
def metadata(self):
return self._metadata
@property
def post_table(self):
return self._post_table
@property
def post_model(self):
return getattr(self._Base.classes, self._table_name("post"))
@property
def tag_model(self):
return getattr(self._Base.classes, self._table_name("tag"))
@property
def tag_table(self):
return self._tag_table
@property
def tag_posts_table(self):
return self._tag_posts_table
@property
def user_posts_table(self):
return self._user_posts_table
@property
def all_tables(self):
return [self._post_table, self._tag_table,
self._user_posts_table, self._tag_posts_table]
@property
def engine(self):
return self._engine
def save_post(self, title, text, user_id, tags, draft=False,
post_date=None, last_modified_date=None, meta_data=None,
post_id=None):
"""
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
is invalid, the post must be inserted into the storage. If ``post_id``
is a valid id, then the data must be updated.
:param title: The title of the blog post
:type title: str
:param text: The text of the blog post
:type text: str
:param user_id: The user identifier
:type user_id: str
:param tags: A list of tags
:type tags: list
:param draft: (Optional) If the post is a draft of if needs to be
published. (default ``False``)
:type draft: bool
:param post_date: (Optional) The date the blog was posted (default
datetime.datetime.utcnow() )
:type post_date: datetime.datetime
:param last_modified_date: (Optional) The date when blog was last
modified (default datetime.datetime.utcnow() )
:type last_modified_date: datetime.datetime
:param post_id: (Optional) The post identifier. This should be ``None``
for an insert call,
and a valid value for update. (default ``None``)
:type post_id: str
:return: The post_id value, in case of a successful insert or update.
Return ``None`` if there were errors.
"""
new_post = post_id is None
post_id = _as_int(post_id)
current_datetime = datetime.datetime.utcnow()
draft = 1 if draft is True else 0
post_date = post_date if post_date is not None else current_datetime
last_modified_date = last_modified_date if last_modified_date is not \
None else current_datetime
with self._engine.begin() as conn:
try:
if post_id is not None: # validate post_id
exists_statement = sqla.select([self._post_table]).where(
self._post_table.c.id == post_id)
exists = \
conn.execute(exists_statement).fetchone() is not None
post_id = post_id if exists else None
post_statement = \
self._post_table.insert() if post_id is None else \
self._post_table.update().where(
self._post_table.c.id == post_id)
post_statement = post_statement.values(
title=title, text=text, post_date=post_date,
last_modified_date=last_modified_date, draft=draft
)
post_result = conn.execute(post_statement)
post_id = post_result.inserted_primary_key[0] \
if post_id is None else post_id
self._save_tags(tags, post_id, conn)
self._save_user_post(user_id, post_id, conn)
except Exception as e:
self._logger.exception(str(e))
post_id = None
return post_id
@classmethod
@staticmethod
def _serialise_post_from_joined_row(joined_row):
return dict(
post_id=joined_row.post_id,
title=joined_row.post_title,
text=joined_row.post_text,
post_date=joined_row.post_post_date,
last_modified_date=joined_row.post_last_modified_date,
draft=joined_row.post_draft,
user_id=joined_row.user_posts_user_id
)
def get_post_by_id(self, post_id):
"""
Fetch the blog post given by ``post_id``
:param post_id: The post identifier for the blog post
:type post_id: str
:return: If the ``post_id`` is valid, the post data is retrieved, else
returns ``None``.
"""
r = None
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_statement = sqla.select([self._post_table]) \
.where(self._post_table.c.id == post_id) \
.alias('post')
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
# Note this will retrieve one row per tag
all_rows = conn.execute(
sqla.select([joined_statement])
).fetchall()
r = self._serialise_posts_and_tags_from_joined_rows(
all_rows
)[0]
except Exception as e:
self._logger.exception(str(e))
r = None
return r
def get_posts(self, count=10, offset=0, recent=True, tag=None,
user_id=None, include_draft=False):
"""
Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10)
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned.
"""
user_id = str(user_id) if user_id else user_id
with self._engine.begin() as conn:
try:
# post_statement ensures the correct posts are selected
# in the correct order
post_statement = sqla.select([self._post_table])
post_filter = self._get_filter(
tag, user_id, include_draft, conn
)
if post_filter is not None:
post_statement = post_statement.where(post_filter)
if count:
post_statement = post_statement.limit(count)
if offset:
post_statement = post_statement.offset(offset)
post_ordering = \
sqla.desc(self._post_table.c.post_date) if recent \
else self._post_table.c.post_date
post_statement = post_statement.order_by(post_ordering)
post_statement = post_statement.alias('post')
# joined_statement ensures other data is retrieved
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
joined_ordering = \
sqla.desc(joined_statement.c.post_post_date) if recent \
else joined_statement.c.post_post_date
joined_statement = sqla.select([joined_statement]) \
.order_by(joined_ordering)
all_rows = conn.execute(joined_statement).fetchall()
result = \
self._serialise_posts_and_tags_from_joined_rows(all_rows)
except Exception as e:
self._logger.exception(str(e))
result = []
return result
def count_posts(self, tag=None, user_id=None, include_draft=False):
"""
Returns the total number of posts for the give filter
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: The number of posts for the given filter.
"""
result = 0
with self._engine.begin() as conn:
try:
count_statement = sqla.select([sqla.func.count()]). \
select_from(self._post_table)
sql_filter = self._get_filter(tag, user_id, include_draft,
conn)
count_statement = count_statement.where(sql_filter)
result = conn.execute(count_statement).scalar()
except Exception as e:
self._logger.exception(str(e))
result = 0
return result
def delete_post(self, post_id):
"""
Delete the post defined by ``post_id``
:param post_id: The identifier corresponding to a post
:type post_id: int
:return: Returns True if the post was successfully deleted and False
otherwise.
"""
status = False
success = 0
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_del_statement = self._post_table.delete().where(
self._post_table.c.id == post_id)
conn.execute(post_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
user_posts_del_statement = self._user_posts_table.delete(). \
where(self._user_posts_table.c.post_id == post_id)
conn.execute(user_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
tag_posts_del_statement = self._tag_posts_table.delete(). \
where(self._tag_posts_table.c.post_id == post_id)
conn.execute(tag_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
status = success == 3
return status
def _get_filter(self, tag, user_id, include_draft, conn):
filters = []
if tag:
tag = tag.upper()
tag_statement = sqla.select([self._tag_table.c.id]).where(
self._tag_table.c.text == tag)
tag_result = conn.execute(tag_statement).fetchone()
if tag_result is not None:
tag_id = tag_result[0]
tag_filter = sqla.and_(
self._tag_posts_table.c.tag_id == tag_id,
self._post_table.c.id == self._tag_posts_table.c.post_id
)
filters.append(tag_filter)
if user_id:
user_filter = sqla.and_(
self._user_posts_table.c.user_id == user_id,
self._post_table.c.id == self._user_posts_table.c.post_id
)
filters.append(user_filter)
draft_filter = self._post_table.c.draft == 1 if include_draft else \
self._post_table.c.draft == 0
filters.append(draft_filter)
sql_filter = sqla.and_(*filters)
return sql_filter
def _save_tags(self, tags, post_id, conn):
tags = self.normalize_tags(tags)
tag_ids = []
for tag in tags: # iterate over given tags
try:
# check if the tag exists
statement = self._tag_table.select().where(
self._tag_table.c.text == tag)
tag_result = conn.execute(statement).fetchone()
if tag_result is None:
# insert if it is a new tag
tag_insert_statement = self._tag_table.insert().\
values(text=tag)
result = conn.execute(tag_insert_statement)
tag_id = result.inserted_primary_key[0]
else:
# tag already exists
tag_id = tag_result[0]
except sqla.exc.IntegrityError as e:
# some database error occurred;
tag_id = None
self._logger.exception(str(e))
except Exception as e:
# unknown exception occurred
tag_id = None
self._logger.exception(str(e))
if tag_id is not None:
# for a valid tag_id
tag_ids.append(tag_id)
try:
# check if given post has tag given by tag_id
statement = self._tag_posts_table.select().where(
sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
self._tag_posts_table.c.post_id == post_id))
tag_post_result = conn.execute(statement).fetchone()
if tag_post_result is None:
# if tag_id not present for the post given by post_id
tag_post_statement = self._tag_posts_table.insert().\
values(tag_id=tag_id, post_id=post_id)
conn.execute(tag_post_statement)
except sqla.exc.IntegrityError as e:
self._logger.exception(str(e))
except Exception as e:
self._logger.exception(str(e))
try:
# remove tags that have been deleted
statement = self._tag_posts_table.delete().where(
sqla.and_(sqla.not_(
self._tag_posts_table.c.tag_id.in_(tag_ids)),
self._tag_posts_table.c.post_id == post_id
)
)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
user_id = str(user_id)
statement = sqla.select([self._user_posts_table]).where(
self._user_posts_table.c.post_id == post_id)
result = conn.execute(statement).fetchone()
if result is None:
try:
statement = self._user_posts_table.insert().values(
user_id=user_id, post_id=post_id)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
else:
if result[0] != user_id:
try:
statement = self._user_posts_table.update().where(
self._user_posts_table.c.post_id == post_id). \
values(user_id=user_id)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
def _table_name(self, table_name):
return self._table_prefix + table_name
def _create_all_tables(self):
"""
Creates all the required tables by calling the required functions.
:return:
"""
self._create_post_table()
self._create_tag_table()
self._create_tag_posts_table()
self._create_user_posts_table()
def _create_post_table(self):
"""
Creates the table to store the blog posts.
:return:
"""
with self._engine.begin() as conn:
post_table_name = self._table_name("post")
if not conn.dialect.has_table(conn, post_table_name):
self._post_table = sqla.Table(
post_table_name, self._metadata,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("title", sqla.String(256)),
sqla.Column("text", sqla.Text),
sqla.Column("post_date", sqla.DateTime),
sqla.Column("last_modified_date", sqla.DateTime),
# if 1 then make it a draft
sqla.Column("draft", sqla.SmallInteger, default=0),
info=self._info
)
self._logger.debug("Created table with table name %s" %
post_table_name)
else:
self._post_table = self._metadata.tables[post_table_name]
self._logger.debug("Reflecting to table with table name %s" %
post_table_name)
def _create_tag_table(self):
"""
Creates the table to store blog post tags.
:return:
"""
with self._engine.begin() as conn:
tag_table_name = self._table_name("tag")
if not conn.dialect.has_table(conn, tag_table_name):
self._tag_table = sqla.Table(
tag_table_name, self._metadata,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("text", sqla.String(128), unique=True,
index=True),
info=self._info
)
self._logger.debug("Created table with table name %s" %
tag_table_name)
else:
self._tag_table = self._metadata.tables[tag_table_name]
self._logger.debug("Reflecting to table with table name %s" %
tag_table_name)
def _create_tag_posts_table(self):
"""
Creates the table to store association info between blog posts and
tags.
:return:
"""
with self._engine.begin() as conn:
tag_posts_table_name = self._table_name("tag_posts")
if not conn.dialect.has_table(conn, tag_posts_table_name):
tag_id_key = self._table_name("tag") + ".id"
post_id_key = self._table_name("post") + ".id"
self._tag_posts_table = sqla.Table(
tag_posts_table_name, self._metadata,
sqla.Column('tag_id', sqla.Integer,
sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.Column('post_id', sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
tag_posts_table_name)
else:
self._tag_posts_table = \
self._metadata.tables[tag_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
tag_posts_table_name)
    def _create_user_posts_table(self):
        """
        Creates the table to store association info between user and blog
        posts.

        Creates the ``<prefix>user_posts`` link table if it does not yet
        exist; otherwise reuses the reflected definition.
        :return:
        """
        with self._engine.begin() as conn:
            user_posts_table_name = self._table_name("user_posts")
            if not conn.dialect.has_table(conn, user_posts_table_name):
                post_id_key = self._table_name("post") + ".id"
                self._user_posts_table = sqla.Table(
                    user_posts_table_name, self._metadata,
                    # user_id is a free-form string with no FK --
                    # presumably users live outside this storage; confirm
                    sqla.Column("user_id", sqla.String(128), index=True),
                    sqla.Column("post_id", sqla.Integer,
                                sqla.ForeignKey(post_id_key,
                                                onupdate="CASCADE",
                                                ondelete="CASCADE"),
                                index=True),
                    # each (user, post) pairing may appear only once
                    sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
                    info=self._info
                )
                self._logger.debug("Created table with table name %s" %
                                   user_posts_table_name)
            else:
                self._user_posts_table = \
                    self._metadata.tables[user_posts_table_name]
                self._logger.debug("Reflecting to table with table name %s" %
                                   user_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage.get_post_by_id
|
python
|
def get_post_by_id(self, post_id):
    """
    Fetch the blog post given by ``post_id``.

    :param post_id: The post identifier for the blog post
    :type post_id: str
    :return: The post data dict when ``post_id`` is valid, else ``None``.
    """
    r = None
    post_id = _as_int(post_id)
    with self._engine.begin() as conn:
        try:
            post_statement = sqla.select([self._post_table]) \
                .where(self._post_table.c.id == post_id) \
                .alias('post')
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # Note this will retrieve one row per tag
            all_rows = conn.execute(
                sqla.select([joined_statement])
            ).fetchall()
            # serialiser folds the per-tag rows back into one post dict
            r = self._serialise_posts_and_tags_from_joined_rows(
                all_rows
            )[0]
        except Exception as e:
            # any failure (bad id, no rows -> IndexError) yields None
            self._logger.exception(str(e))
            r = None
    return r
|
Fetch the blog post given by ``post_id``
:param post_id: The post identifier for the blog post
:type post_id: str
:return: If the ``post_id`` is valid, the post data is retrieved, else
returns ``None``.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L233-L266
|
[
"def _as_int(s):\n try:\n n = int(s) if s is not None else None\n return n\n except ValueError:\n return None\n",
"def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):\n \"\"\"\n Translates multiple rows of joined post and tag information\n into the dictionary format expected by flask-blogging.\n There will be one row per post/tag pairing.\n \"\"\"\n posts_by_id = OrderedDict()\n tags_by_post_id = defaultdict(list)\n for joined_row in joined_rows:\n post_id = joined_row.post_id\n post = cls._serialise_post_from_joined_row(joined_row)\n posts_by_id[post_id] = post\n tags_by_post_id[post_id].append(joined_row.tag_text)\n\n for id, post in posts_by_id.items():\n tags = tags_by_post_id.get(id)\n if tags:\n post[\"tags\"] = tags\n\n return [post for post in posts_by_id.values()]\n"
] |
class SQLAStorage(Storage):
"""
The ``SQLAStorage`` implements the interface specified by the ``Storage``
class. This class uses SQLAlchemy to implement storage and retrieval of
data from any of the databases supported by SQLAlchemy.
"""
_db = None
_logger = logging.getLogger("flask-blogging")
def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
bind=None):
"""
The constructor for the ``SQLAStorage`` class.
:param engine: The ``SQLAlchemy`` engine instance created by calling
``create_engine``. One can also use Flask-SQLAlchemy, and pass the
engine property.
:type engine: object
:param table_prefix: (Optional) Prefix to use for the tables created
(default ``""``).
:type table_prefix: str
:param metadata: (Optional) The SQLAlchemy MetaData object
:type metadata: object
:param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
:type db: object
:param bind: (Optional) Reference the database to bind for multiple
database scenario with binds
:type bind: str
"""
self._bind = bind
if db:
self._engine = db.get_engine(db.get_app(), bind=self._bind)
self._metadata = db.metadata
else:
if engine is None:
raise ValueError("Both db and engine args cannot be None")
self._engine = engine
self._metadata = metadata or sqla.MetaData()
self._info = {} if self._bind is None else {"bind_key": self._bind}
self._table_prefix = table_prefix
self._metadata.reflect(bind=self._engine)
self._create_all_tables()
# automap base and restrict to the required tables here.
table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
table_names = [self._table_name(t) for t in table_suffix]
self._metadata.create_all(bind=self._engine, tables=self.all_tables)
meta = sqla.MetaData()
meta.reflect(bind=self._engine, only=table_names)
self._Base = automap_base(metadata=meta)
self._Base.prepare()
self._inject_models()
sqla_initialized.send(self, engine=self._engine,
table_prefix=self._table_prefix,
meta=self.metadata,
bind=self._bind)
def _inject_models(self):
global this
this.Post = getattr(self._Base.classes, self._table_name("post"))
this.Post.__name__ = 'Post'
this.Tag = getattr(self._Base.classes, self._table_name("tag"))
this.Tag.__name__ = 'Tag'
@property
def metadata(self):
return self._metadata
@property
def post_table(self):
return self._post_table
@property
def post_model(self):
return getattr(self._Base.classes, self._table_name("post"))
@property
def tag_model(self):
return getattr(self._Base.classes, self._table_name("tag"))
@property
def tag_table(self):
return self._tag_table
@property
def tag_posts_table(self):
return self._tag_posts_table
@property
def user_posts_table(self):
return self._user_posts_table
@property
def all_tables(self):
return [self._post_table, self._tag_table,
self._user_posts_table, self._tag_posts_table]
@property
def engine(self):
return self._engine
def save_post(self, title, text, user_id, tags, draft=False,
post_date=None, last_modified_date=None, meta_data=None,
post_id=None):
"""
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
is invalid, the post must be inserted into the storage. If ``post_id``
is a valid id, then the data must be updated.
:param title: The title of the blog post
:type title: str
:param text: The text of the blog post
:type text: str
:param user_id: The user identifier
:type user_id: str
:param tags: A list of tags
:type tags: list
:param draft: (Optional) If the post is a draft of if needs to be
published. (default ``False``)
:type draft: bool
:param post_date: (Optional) The date the blog was posted (default
datetime.datetime.utcnow() )
:type post_date: datetime.datetime
:param last_modified_date: (Optional) The date when blog was last
modified (default datetime.datetime.utcnow() )
:type last_modified_date: datetime.datetime
:param post_id: (Optional) The post identifier. This should be ``None``
for an insert call,
and a valid value for update. (default ``None``)
:type post_id: str
:return: The post_id value, in case of a successful insert or update.
Return ``None`` if there were errors.
"""
new_post = post_id is None
post_id = _as_int(post_id)
current_datetime = datetime.datetime.utcnow()
draft = 1 if draft is True else 0
post_date = post_date if post_date is not None else current_datetime
last_modified_date = last_modified_date if last_modified_date is not \
None else current_datetime
with self._engine.begin() as conn:
try:
if post_id is not None: # validate post_id
exists_statement = sqla.select([self._post_table]).where(
self._post_table.c.id == post_id)
exists = \
conn.execute(exists_statement).fetchone() is not None
post_id = post_id if exists else None
post_statement = \
self._post_table.insert() if post_id is None else \
self._post_table.update().where(
self._post_table.c.id == post_id)
post_statement = post_statement.values(
title=title, text=text, post_date=post_date,
last_modified_date=last_modified_date, draft=draft
)
post_result = conn.execute(post_statement)
post_id = post_result.inserted_primary_key[0] \
if post_id is None else post_id
self._save_tags(tags, post_id, conn)
self._save_user_post(user_id, post_id, conn)
except Exception as e:
self._logger.exception(str(e))
post_id = None
return post_id
@classmethod
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
"""
Translates multiple rows of joined post and tag information
into the dictionary format expected by flask-blogging.
There will be one row per post/tag pairing.
"""
posts_by_id = OrderedDict()
tags_by_post_id = defaultdict(list)
for joined_row in joined_rows:
post_id = joined_row.post_id
post = cls._serialise_post_from_joined_row(joined_row)
posts_by_id[post_id] = post
tags_by_post_id[post_id].append(joined_row.tag_text)
for id, post in posts_by_id.items():
tags = tags_by_post_id.get(id)
if tags:
post["tags"] = tags
return [post for post in posts_by_id.values()]
@staticmethod
def _serialise_post_from_joined_row(joined_row):
return dict(
post_id=joined_row.post_id,
title=joined_row.post_title,
text=joined_row.post_text,
post_date=joined_row.post_post_date,
last_modified_date=joined_row.post_last_modified_date,
draft=joined_row.post_draft,
user_id=joined_row.user_posts_user_id
)
def get_posts(self, count=10, offset=0, recent=True, tag=None,
user_id=None, include_draft=False):
"""
Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10)
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned.
"""
user_id = str(user_id) if user_id else user_id
with self._engine.begin() as conn:
try:
# post_statement ensures the correct posts are selected
# in the correct order
post_statement = sqla.select([self._post_table])
post_filter = self._get_filter(
tag, user_id, include_draft, conn
)
if post_filter is not None:
post_statement = post_statement.where(post_filter)
if count:
post_statement = post_statement.limit(count)
if offset:
post_statement = post_statement.offset(offset)
post_ordering = \
sqla.desc(self._post_table.c.post_date) if recent \
else self._post_table.c.post_date
post_statement = post_statement.order_by(post_ordering)
post_statement = post_statement.alias('post')
# joined_statement ensures other data is retrieved
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
joined_ordering = \
sqla.desc(joined_statement.c.post_post_date) if recent \
else joined_statement.c.post_post_date
joined_statement = sqla.select([joined_statement]) \
.order_by(joined_ordering)
all_rows = conn.execute(joined_statement).fetchall()
result = \
self._serialise_posts_and_tags_from_joined_rows(all_rows)
except Exception as e:
self._logger.exception(str(e))
result = []
return result
    def count_posts(self, tag=None, user_id=None, include_draft=False):
        """
        Returns the total number of posts for the given filter
        :param tag: Filter by a specific tag
        :type tag: str
        :param user_id: Filter by a specific user
        :type user_id: str
        :param include_draft: Whether to include posts marked as draft or not
        :type include_draft: bool
        :return: The number of posts for the given filter; 0 on error.
        """
        result = 0
        with self._engine.begin() as conn:
            try:
                count_statement = sqla.select([sqla.func.count()]). \
                    select_from(self._post_table)
                sql_filter = self._get_filter(tag, user_id, include_draft,
                                              conn)
                count_statement = count_statement.where(sql_filter)
                result = conn.execute(count_statement).scalar()
            except Exception as e:
                # best-effort: log the failure and report zero posts
                self._logger.exception(str(e))
                result = 0
        return result
def delete_post(self, post_id):
"""
Delete the post defined by ``post_id``
:param post_id: The identifier corresponding to a post
:type post_id: int
:return: Returns True if the post was successfully deleted and False
otherwise.
"""
status = False
success = 0
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_del_statement = self._post_table.delete().where(
self._post_table.c.id == post_id)
conn.execute(post_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
user_posts_del_statement = self._user_posts_table.delete(). \
where(self._user_posts_table.c.post_id == post_id)
conn.execute(user_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
tag_posts_del_statement = self._tag_posts_table.delete(). \
where(self._tag_posts_table.c.post_id == post_id)
conn.execute(tag_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
status = success == 3
return status
def _get_filter(self, tag, user_id, include_draft, conn):
filters = []
if tag:
tag = tag.upper()
tag_statement = sqla.select([self._tag_table.c.id]).where(
self._tag_table.c.text == tag)
tag_result = conn.execute(tag_statement).fetchone()
if tag_result is not None:
tag_id = tag_result[0]
tag_filter = sqla.and_(
self._tag_posts_table.c.tag_id == tag_id,
self._post_table.c.id == self._tag_posts_table.c.post_id
)
filters.append(tag_filter)
if user_id:
user_filter = sqla.and_(
self._user_posts_table.c.user_id == user_id,
self._post_table.c.id == self._user_posts_table.c.post_id
)
filters.append(user_filter)
draft_filter = self._post_table.c.draft == 1 if include_draft else \
self._post_table.c.draft == 0
filters.append(draft_filter)
sql_filter = sqla.and_(*filters)
return sql_filter
def _save_tags(self, tags, post_id, conn):
tags = self.normalize_tags(tags)
tag_ids = []
for tag in tags: # iterate over given tags
try:
# check if the tag exists
statement = self._tag_table.select().where(
self._tag_table.c.text == tag)
tag_result = conn.execute(statement).fetchone()
if tag_result is None:
# insert if it is a new tag
tag_insert_statement = self._tag_table.insert().\
values(text=tag)
result = conn.execute(tag_insert_statement)
tag_id = result.inserted_primary_key[0]
else:
# tag already exists
tag_id = tag_result[0]
except sqla.exc.IntegrityError as e:
# some database error occurred;
tag_id = None
self._logger.exception(str(e))
except Exception as e:
# unknown exception occurred
tag_id = None
self._logger.exception(str(e))
if tag_id is not None:
# for a valid tag_id
tag_ids.append(tag_id)
try:
# check if given post has tag given by tag_id
statement = self._tag_posts_table.select().where(
sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
self._tag_posts_table.c.post_id == post_id))
tag_post_result = conn.execute(statement).fetchone()
if tag_post_result is None:
# if tag_id not present for the post given by post_id
tag_post_statement = self._tag_posts_table.insert().\
values(tag_id=tag_id, post_id=post_id)
conn.execute(tag_post_statement)
except sqla.exc.IntegrityError as e:
self._logger.exception(str(e))
except Exception as e:
self._logger.exception(str(e))
try:
# remove tags that have been deleted
statement = self._tag_posts_table.delete().where(
sqla.and_(sqla.not_(
self._tag_posts_table.c.tag_id.in_(tag_ids)),
self._tag_posts_table.c.post_id == post_id
)
)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
user_id = str(user_id)
statement = sqla.select([self._user_posts_table]).where(
self._user_posts_table.c.post_id == post_id)
result = conn.execute(statement).fetchone()
if result is None:
try:
statement = self._user_posts_table.insert().values(
user_id=user_id, post_id=post_id)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
else:
if result[0] != user_id:
try:
statement = self._user_posts_table.update().where(
self._user_posts_table.c.post_id == post_id). \
values(user_id=user_id)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
def _table_name(self, table_name):
return self._table_prefix + table_name
def _create_all_tables(self):
"""
Creates all the required tables by calling the required functions.
:return:
"""
self._create_post_table()
self._create_tag_table()
self._create_tag_posts_table()
self._create_user_posts_table()
def _create_post_table(self):
"""
Creates the table to store the blog posts.
:return:
"""
with self._engine.begin() as conn:
post_table_name = self._table_name("post")
if not conn.dialect.has_table(conn, post_table_name):
self._post_table = sqla.Table(
post_table_name, self._metadata,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("title", sqla.String(256)),
sqla.Column("text", sqla.Text),
sqla.Column("post_date", sqla.DateTime),
sqla.Column("last_modified_date", sqla.DateTime),
# if 1 then make it a draft
sqla.Column("draft", sqla.SmallInteger, default=0),
info=self._info
)
self._logger.debug("Created table with table name %s" %
post_table_name)
else:
self._post_table = self._metadata.tables[post_table_name]
self._logger.debug("Reflecting to table with table name %s" %
post_table_name)
def _create_tag_table(self):
"""
Creates the table to store blog post tags.
:return:
"""
with self._engine.begin() as conn:
tag_table_name = self._table_name("tag")
if not conn.dialect.has_table(conn, tag_table_name):
self._tag_table = sqla.Table(
tag_table_name, self._metadata,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("text", sqla.String(128), unique=True,
index=True),
info=self._info
)
self._logger.debug("Created table with table name %s" %
tag_table_name)
else:
self._tag_table = self._metadata.tables[tag_table_name]
self._logger.debug("Reflecting to table with table name %s" %
tag_table_name)
def _create_tag_posts_table(self):
"""
Creates the table to store association info between blog posts and
tags.
:return:
"""
with self._engine.begin() as conn:
tag_posts_table_name = self._table_name("tag_posts")
if not conn.dialect.has_table(conn, tag_posts_table_name):
tag_id_key = self._table_name("tag") + ".id"
post_id_key = self._table_name("post") + ".id"
self._tag_posts_table = sqla.Table(
tag_posts_table_name, self._metadata,
sqla.Column('tag_id', sqla.Integer,
sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.Column('post_id', sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
tag_posts_table_name)
else:
self._tag_posts_table = \
self._metadata.tables[tag_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
tag_posts_table_name)
def _create_user_posts_table(self):
"""
Creates the table to store association info between user and blog
posts.
:return:
"""
with self._engine.begin() as conn:
user_posts_table_name = self._table_name("user_posts")
if not conn.dialect.has_table(conn, user_posts_table_name):
post_id_key = self._table_name("post") + ".id"
self._user_posts_table = sqla.Table(
user_posts_table_name, self._metadata,
sqla.Column("user_id", sqla.String(128), index=True),
sqla.Column("post_id", sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
user_posts_table_name)
else:
self._user_posts_table = \
self._metadata.tables[user_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
user_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage.get_posts
|
python
|
def get_posts(self, count=10, offset=0, recent=True, tag=None,
              user_id=None, include_draft=False):
    """
    Get posts given by filter criteria.

    :param count: The number of posts to retrieve; ``None`` returns all
    :param offset: The number of posts to skip
    :param recent: Order by most recent posts first when ``True``
    :param tag: Filter by a specific tag
    :param user_id: Filter by a specific user
    :param include_draft: Whether to include posts marked as draft or not
    :return: A list of post dicts; ``[]`` on error
    """
    user_id = str(user_id) if user_id else user_id
    with self._engine.begin() as conn:
        try:
            # post_statement ensures the correct posts are selected
            # in the correct order
            post_statement = sqla.select([self._post_table])
            post_filter = self._get_filter(
                tag, user_id, include_draft, conn
            )
            if post_filter is not None:
                post_statement = post_statement.where(post_filter)
            if count:
                post_statement = post_statement.limit(count)
            if offset:
                post_statement = post_statement.offset(offset)
            post_ordering = \
                sqla.desc(self._post_table.c.post_date) if recent \
                else self._post_table.c.post_date
            post_statement = post_statement.order_by(post_ordering)
            post_statement = post_statement.alias('post')
            # joined_statement ensures other data is retrieved
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # re-apply the ordering, since the join yields one row per tag
            joined_ordering = \
                sqla.desc(joined_statement.c.post_post_date) if recent \
                else joined_statement.c.post_post_date
            joined_statement = sqla.select([joined_statement]) \
                .order_by(joined_ordering)
            all_rows = conn.execute(joined_statement).fetchall()
            result = \
                self._serialise_posts_and_tags_from_joined_rows(all_rows)
        except Exception as e:
            self._logger.exception(str(e))
            result = []
    return result
|
Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10)
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L268-L334
|
[
"def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):\n \"\"\"\n Translates multiple rows of joined post and tag information\n into the dictionary format expected by flask-blogging.\n There will be one row per post/tag pairing.\n \"\"\"\n posts_by_id = OrderedDict()\n tags_by_post_id = defaultdict(list)\n for joined_row in joined_rows:\n post_id = joined_row.post_id\n post = cls._serialise_post_from_joined_row(joined_row)\n posts_by_id[post_id] = post\n tags_by_post_id[post_id].append(joined_row.tag_text)\n\n for id, post in posts_by_id.items():\n tags = tags_by_post_id.get(id)\n if tags:\n post[\"tags\"] = tags\n\n return [post for post in posts_by_id.values()]\n",
"def _get_filter(self, tag, user_id, include_draft, conn):\n filters = []\n if tag:\n tag = tag.upper()\n tag_statement = sqla.select([self._tag_table.c.id]).where(\n self._tag_table.c.text == tag)\n tag_result = conn.execute(tag_statement).fetchone()\n if tag_result is not None:\n tag_id = tag_result[0]\n tag_filter = sqla.and_(\n self._tag_posts_table.c.tag_id == tag_id,\n self._post_table.c.id == self._tag_posts_table.c.post_id\n )\n filters.append(tag_filter)\n\n if user_id:\n user_filter = sqla.and_(\n self._user_posts_table.c.user_id == user_id,\n self._post_table.c.id == self._user_posts_table.c.post_id\n )\n filters.append(user_filter)\n\n draft_filter = self._post_table.c.draft == 1 if include_draft else \\\n self._post_table.c.draft == 0\n filters.append(draft_filter)\n sql_filter = sqla.and_(*filters)\n return sql_filter\n"
] |
class SQLAStorage(Storage):
"""
The ``SQLAStorage`` implements the interface specified by the ``Storage``
class. This class uses SQLAlchemy to implement storage and retrieval of
data from any of the databases supported by SQLAlchemy.
"""
_db = None
_logger = logging.getLogger("flask-blogging")
def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
bind=None):
"""
The constructor for the ``SQLAStorage`` class.
:param engine: The ``SQLAlchemy`` engine instance created by calling
``create_engine``. One can also use Flask-SQLAlchemy, and pass the
engine property.
:type engine: object
:param table_prefix: (Optional) Prefix to use for the tables created
(default ``""``).
:type table_prefix: str
:param metadata: (Optional) The SQLAlchemy MetaData object
:type metadata: object
:param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
:type db: object
:param bind: (Optional) Reference the database to bind for multiple
database scenario with binds
:type bind: str
"""
self._bind = bind
if db:
self._engine = db.get_engine(db.get_app(), bind=self._bind)
self._metadata = db.metadata
else:
if engine is None:
raise ValueError("Both db and engine args cannot be None")
self._engine = engine
self._metadata = metadata or sqla.MetaData()
self._info = {} if self._bind is None else {"bind_key": self._bind}
self._table_prefix = table_prefix
self._metadata.reflect(bind=self._engine)
self._create_all_tables()
# automap base and restrict to the required tables here.
table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
table_names = [self._table_name(t) for t in table_suffix]
self._metadata.create_all(bind=self._engine, tables=self.all_tables)
meta = sqla.MetaData()
meta.reflect(bind=self._engine, only=table_names)
self._Base = automap_base(metadata=meta)
self._Base.prepare()
self._inject_models()
sqla_initialized.send(self, engine=self._engine,
table_prefix=self._table_prefix,
meta=self.metadata,
bind=self._bind)
def _inject_models(self):
global this
this.Post = getattr(self._Base.classes, self._table_name("post"))
this.Post.__name__ = 'Post'
this.Tag = getattr(self._Base.classes, self._table_name("tag"))
this.Tag.__name__ = 'Tag'
@property
def metadata(self):
return self._metadata
@property
def post_table(self):
return self._post_table
@property
def post_model(self):
return getattr(self._Base.classes, self._table_name("post"))
@property
def tag_model(self):
return getattr(self._Base.classes, self._table_name("tag"))
@property
def tag_table(self):
return self._tag_table
@property
def tag_posts_table(self):
return self._tag_posts_table
@property
def user_posts_table(self):
return self._user_posts_table
@property
def all_tables(self):
return [self._post_table, self._tag_table,
self._user_posts_table, self._tag_posts_table]
@property
def engine(self):
return self._engine
    def save_post(self, title, text, user_id, tags, draft=False,
                  post_date=None, last_modified_date=None, meta_data=None,
                  post_id=None):
        """
        Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
        is invalid, the post must be inserted into the storage. If ``post_id``
        is a valid id, then the data must be updated.
        :param title: The title of the blog post
        :type title: str
        :param text: The text of the blog post
        :type text: str
        :param user_id: The user identifier
        :type user_id: str
        :param tags: A list of tags
        :type tags: list
        :param draft: (Optional) If the post is a draft or if it needs to be
         published. (default ``False``)
        :type draft: bool
        :param post_date: (Optional) The date the blog was posted (default
         datetime.datetime.utcnow() )
        :type post_date: datetime.datetime
        :param last_modified_date: (Optional) The date when blog was last
         modified (default datetime.datetime.utcnow() )
        :type last_modified_date: datetime.datetime
        :param post_id: (Optional) The post identifier. This should be ``None``
         for an insert call,
         and a valid value for update. (default ``None``)
        :type post_id: str
        :return: The post_id value, in case of a successful insert or update.
         Return ``None`` if there were errors.
        """
        # NOTE(review): new_post is computed but never used below
        new_post = post_id is None
        post_id = _as_int(post_id)
        current_datetime = datetime.datetime.utcnow()
        # draft is stored as 0/1 since the column is a SmallInteger
        draft = 1 if draft is True else 0
        post_date = post_date if post_date is not None else current_datetime
        last_modified_date = last_modified_date if last_modified_date is not \
            None else current_datetime
        with self._engine.begin() as conn:
            try:
                if post_id is not None:  # validate post_id
                    exists_statement = sqla.select([self._post_table]).where(
                        self._post_table.c.id == post_id)
                    exists = \
                        conn.execute(exists_statement).fetchone() is not None
                    post_id = post_id if exists else None
                # insert when post_id is None/invalid, otherwise update
                post_statement = \
                    self._post_table.insert() if post_id is None else \
                    self._post_table.update().where(
                        self._post_table.c.id == post_id)
                post_statement = post_statement.values(
                    title=title, text=text, post_date=post_date,
                    last_modified_date=last_modified_date, draft=draft
                )
                post_result = conn.execute(post_statement)
                post_id = post_result.inserted_primary_key[0] \
                    if post_id is None else post_id
                self._save_tags(tags, post_id, conn)
                self._save_user_post(user_id, post_id, conn)
            except Exception as e:
                self._logger.exception(str(e))
                post_id = None
        return post_id
@classmethod
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
"""
Translates multiple rows of joined post and tag information
into the dictionary format expected by flask-blogging.
There will be one row per post/tag pairing.
"""
posts_by_id = OrderedDict()
tags_by_post_id = defaultdict(list)
for joined_row in joined_rows:
post_id = joined_row.post_id
post = cls._serialise_post_from_joined_row(joined_row)
posts_by_id[post_id] = post
tags_by_post_id[post_id].append(joined_row.tag_text)
for id, post in posts_by_id.items():
tags = tags_by_post_id.get(id)
if tags:
post["tags"] = tags
return [post for post in posts_by_id.values()]
@staticmethod
def _serialise_post_from_joined_row(joined_row):
return dict(
post_id=joined_row.post_id,
title=joined_row.post_title,
text=joined_row.post_text,
post_date=joined_row.post_post_date,
last_modified_date=joined_row.post_last_modified_date,
draft=joined_row.post_draft,
user_id=joined_row.user_posts_user_id
)
def get_post_by_id(self, post_id):
"""
Fetch the blog post given by ``post_id``
:param post_id: The post identifier for the blog post
:type post_id: str
:return: If the ``post_id`` is valid, the post data is retrieved, else
returns ``None``.
"""
r = None
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_statement = sqla.select([self._post_table]) \
.where(self._post_table.c.id == post_id) \
.alias('post')
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
# Note this will retrieve one row per tag
all_rows = conn.execute(
sqla.select([joined_statement])
).fetchall()
r = self._serialise_posts_and_tags_from_joined_rows(
all_rows
)[0]
except Exception as e:
self._logger.exception(str(e))
r = None
return r
    def count_posts(self, tag=None, user_id=None, include_draft=False):
        """
        Returns the total number of posts for the given filter
        :param tag: Filter by a specific tag
        :type tag: str
        :param user_id: Filter by a specific user
        :type user_id: str
        :param include_draft: Whether to include posts marked as draft or not
        :type include_draft: bool
        :return: The number of posts for the given filter; 0 on error.
        """
        result = 0
        with self._engine.begin() as conn:
            try:
                count_statement = sqla.select([sqla.func.count()]). \
                    select_from(self._post_table)
                sql_filter = self._get_filter(tag, user_id, include_draft,
                                              conn)
                count_statement = count_statement.where(sql_filter)
                result = conn.execute(count_statement).scalar()
            except Exception as e:
                # best-effort: log the failure and report zero posts
                self._logger.exception(str(e))
                result = 0
        return result
def delete_post(self, post_id):
"""
Delete the post defined by ``post_id``
:param post_id: The identifier corresponding to a post
:type post_id: int
:return: Returns True if the post was successfully deleted and False
otherwise.
"""
status = False
success = 0
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_del_statement = self._post_table.delete().where(
self._post_table.c.id == post_id)
conn.execute(post_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
user_posts_del_statement = self._user_posts_table.delete(). \
where(self._user_posts_table.c.post_id == post_id)
conn.execute(user_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
tag_posts_del_statement = self._tag_posts_table.delete(). \
where(self._tag_posts_table.c.post_id == post_id)
conn.execute(tag_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
status = success == 3
return status
def _get_filter(self, tag, user_id, include_draft, conn):
        """
        Build the SQLAlchemy WHERE clause shared by post listing and
        counting.

        :param tag: Optional tag text to filter by (matched upper-cased).
        :param user_id: Optional author id to filter by.
        :param include_draft: True selects drafts only, False selects
            published posts only.
        :param conn: Open connection used to resolve the tag id.
        :return: A combined ``sqla.and_`` filter expression.
        """
        filters = []
        if tag:
            # Tags are stored upper-cased; look up the tag's id first.
            tag = tag.upper()
            tag_statement = sqla.select([self._tag_table.c.id]).where(
                self._tag_table.c.text == tag)
            tag_result = conn.execute(tag_statement).fetchone()
            # NOTE(review): when the tag does not exist, no tag filter is
            # appended, so the query falls back to ALL posts instead of
            # none -- confirm this is the intended behavior.
            if tag_result is not None:
                tag_id = tag_result[0]
                tag_filter = sqla.and_(
                    self._tag_posts_table.c.tag_id == tag_id,
                    self._post_table.c.id == self._tag_posts_table.c.post_id
                )
                filters.append(tag_filter)
        if user_id:
            user_filter = sqla.and_(
                self._user_posts_table.c.user_id == user_id,
                self._post_table.c.id == self._user_posts_table.c.post_id
            )
            filters.append(user_filter)
        # Draft flag is stored as 0/1; exactly one of the two states is
        # selected, never both.
        draft_filter = self._post_table.c.draft == 1 if include_draft else \
            self._post_table.c.draft == 0
        filters.append(draft_filter)
        sql_filter = sqla.and_(*filters)
        return sql_filter
def _save_tags(self, tags, post_id, conn):
        """
        Synchronise the tag associations of ``post_id`` with ``tags``.

        Each tag is upserted into the tag table, missing post/tag links
        are inserted, and links to tags no longer in ``tags`` are removed.
        Errors on individual tags are logged and skipped so one bad tag
        does not abort the rest.

        :param tags: Iterable of tag strings (normalized via
            ``self.normalize_tags``, defined elsewhere in the class).
        :param post_id: The post the tags belong to.
        :param conn: Open connection participating in the caller's
            transaction.
        """
        tags = self.normalize_tags(tags)
        tag_ids = []
        for tag in tags: # iterate over given tags
            try:
                # check if the tag exists
                statement = self._tag_table.select().where(
                    self._tag_table.c.text == tag)
                tag_result = conn.execute(statement).fetchone()
                if tag_result is None:
                    # insert if it is a new tag
                    tag_insert_statement = self._tag_table.insert().\
                        values(text=tag)
                    result = conn.execute(tag_insert_statement)
                    tag_id = result.inserted_primary_key[0]
                else:
                    # tag already exists
                    tag_id = tag_result[0]
            except sqla.exc.IntegrityError as e:
                # some database error occurred; skip this tag
                tag_id = None
                self._logger.exception(str(e))
            except Exception as e:
                # unknown exception occurred; skip this tag
                tag_id = None
                self._logger.exception(str(e))
            if tag_id is not None:
                # for a valid tag_id, remember it so the cleanup below
                # does not delete its association
                tag_ids.append(tag_id)
                try:
                    # check if given post has tag given by tag_id
                    statement = self._tag_posts_table.select().where(
                        sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
                                  self._tag_posts_table.c.post_id == post_id))
                    tag_post_result = conn.execute(statement).fetchone()
                    if tag_post_result is None:
                        # if tag_id not present for the post given by post_id
                        tag_post_statement = self._tag_posts_table.insert().\
                            values(tag_id=tag_id, post_id=post_id)
                        conn.execute(tag_post_statement)
                except sqla.exc.IntegrityError as e:
                    self._logger.exception(str(e))
                except Exception as e:
                    self._logger.exception(str(e))
        try:
            # remove associations to tags that were dropped from the post
            statement = self._tag_posts_table.delete().where(
                sqla.and_(sqla.not_(
                    self._tag_posts_table.c.tag_id.in_(tag_ids)),
                    self._tag_posts_table.c.post_id == post_id
                )
            )
            conn.execute(statement)
        except Exception as e:
            self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
    """
    Insert or update the author association for ``post_id``.

    :param user_id: The author's identifier (coerced to str).
    :param post_id: The post to associate with the author.
    :param conn: Open connection participating in the caller's
        transaction.
    """
    user_id = str(user_id)
    lookup = sqla.select([self._user_posts_table]).where(
        self._user_posts_table.c.post_id == post_id)
    existing = conn.execute(lookup).fetchone()
    if existing is None:
        # No author recorded yet for this post: create the link.
        try:
            insert_statement = self._user_posts_table.insert().values(
                user_id=user_id, post_id=post_id)
            conn.execute(insert_statement)
        except Exception as e:
            self._logger.exception(str(e))
    elif existing[0] != user_id:
        # Author changed: repoint the existing link at the new user.
        try:
            update_statement = self._user_posts_table.update().where(
                self._user_posts_table.c.post_id == post_id). \
                values(user_id=user_id)
            conn.execute(update_statement)
        except Exception as e:
            self._logger.exception(str(e))
def _table_name(self, table_name):
return self._table_prefix + table_name
def _create_all_tables(self):
"""
Creates all the required tables by calling the required functions.
:return:
"""
self._create_post_table()
self._create_tag_table()
self._create_tag_posts_table()
self._create_user_posts_table()
def _create_post_table(self):
    """
    Create the table that stores blog posts, or reflect the existing
    one if it is already present in the database.

    :return:
    """
    with self._engine.begin() as conn:
        name = self._table_name("post")
        if conn.dialect.has_table(conn, name):
            # Table already present: reuse the reflected definition.
            self._post_table = self._metadata.tables[name]
            self._logger.debug("Reflecting to table with table name %s" %
                               name)
        else:
            self._post_table = sqla.Table(
                name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                sqla.Column("title", sqla.String(256)),
                sqla.Column("text", sqla.Text),
                sqla.Column("post_date", sqla.DateTime),
                sqla.Column("last_modified_date", sqla.DateTime),
                # if 1 then make it a draft
                sqla.Column("draft", sqla.SmallInteger, default=0),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" % name)
def _create_tag_table(self):
    """
    Create the table that stores blog post tags, or reflect the
    existing one if it is already present in the database.

    :return:
    """
    with self._engine.begin() as conn:
        name = self._table_name("tag")
        if conn.dialect.has_table(conn, name):
            # Table already present: reuse the reflected definition.
            self._tag_table = self._metadata.tables[name]
            self._logger.debug("Reflecting to table with table name %s" %
                               name)
        else:
            self._tag_table = sqla.Table(
                name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                # tag text must be unique; indexed for fast lookup
                sqla.Column("text", sqla.String(128), unique=True,
                            index=True),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" % name)
def _create_tag_posts_table(self):
        """
        Creates the table to store association info between blog posts and
        tags, or reflects the existing one if it is already present.

        :return:
        """
        with self._engine.begin() as conn:
            tag_posts_table_name = self._table_name("tag_posts")
            if not conn.dialect.has_table(conn, tag_posts_table_name):
                # Foreign keys target the (possibly prefixed) tag and
                # post tables; CASCADE keeps associations in sync when a
                # tag or post is updated or removed.
                tag_id_key = self._table_name("tag") + ".id"
                post_id_key = self._table_name("post") + ".id"
                self._tag_posts_table = sqla.Table(
                    tag_posts_table_name, self._metadata,
                    sqla.Column('tag_id', sqla.Integer,
                                sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
                                                ondelete="CASCADE"),
                                index=True),
                    sqla.Column('post_id', sqla.Integer,
                                sqla.ForeignKey(post_id_key,
                                                onupdate="CASCADE",
                                                ondelete="CASCADE"),
                                index=True),
                    # a given tag may be linked to a given post only once
                    sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
                    info=self._info
                )
                self._logger.debug("Created table with table name %s" %
                                   tag_posts_table_name)
            else:
                # Table already present: reuse the reflected definition.
                self._tag_posts_table = \
                    self._metadata.tables[tag_posts_table_name]
                self._logger.debug("Reflecting to table with table name %s" %
                                   tag_posts_table_name)
def _create_user_posts_table(self):
        """
        Creates the table to store association info between user and blog
        posts, or reflects the existing one if it is already present.

        :return:
        """
        with self._engine.begin() as conn:
            user_posts_table_name = self._table_name("user_posts")
            if not conn.dialect.has_table(conn, user_posts_table_name):
                post_id_key = self._table_name("post") + ".id"
                self._user_posts_table = sqla.Table(
                    user_posts_table_name, self._metadata,
                    # user ids are stored as strings; no FK since users
                    # live outside this storage
                    sqla.Column("user_id", sqla.String(128), index=True),
                    sqla.Column("post_id", sqla.Integer,
                                sqla.ForeignKey(post_id_key,
                                                onupdate="CASCADE",
                                                ondelete="CASCADE"),
                                index=True),
                    # a given user/post pair may appear only once
                    sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
                    info=self._info
                )
                self._logger.debug("Created table with table name %s" %
                                   user_posts_table_name)
            else:
                # Table already present: reuse the reflected definition.
                self._user_posts_table = \
                    self._metadata.tables[user_posts_table_name]
                self._logger.debug("Reflecting to table with table name %s" %
                                   user_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage.count_posts
|
python
|
def count_posts(self, tag=None, user_id=None, include_draft=False):
result = 0
with self._engine.begin() as conn:
try:
count_statement = sqla.select([sqla.func.count()]). \
select_from(self._post_table)
sql_filter = self._get_filter(tag, user_id, include_draft,
conn)
count_statement = count_statement.where(sql_filter)
result = conn.execute(count_statement).scalar()
except Exception as e:
self._logger.exception(str(e))
result = 0
return result
|
Returns the total number of posts for the give filter
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: The number of posts for the given filter.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L336-L360
|
[
"def _get_filter(self, tag, user_id, include_draft, conn):\n filters = []\n if tag:\n tag = tag.upper()\n tag_statement = sqla.select([self._tag_table.c.id]).where(\n self._tag_table.c.text == tag)\n tag_result = conn.execute(tag_statement).fetchone()\n if tag_result is not None:\n tag_id = tag_result[0]\n tag_filter = sqla.and_(\n self._tag_posts_table.c.tag_id == tag_id,\n self._post_table.c.id == self._tag_posts_table.c.post_id\n )\n filters.append(tag_filter)\n\n if user_id:\n user_filter = sqla.and_(\n self._user_posts_table.c.user_id == user_id,\n self._post_table.c.id == self._user_posts_table.c.post_id\n )\n filters.append(user_filter)\n\n draft_filter = self._post_table.c.draft == 1 if include_draft else \\\n self._post_table.c.draft == 0\n filters.append(draft_filter)\n sql_filter = sqla.and_(*filters)\n return sql_filter\n"
] |
class SQLAStorage(Storage):
"""
The ``SQLAStorage`` implements the interface specified by the ``Storage``
class. This class uses SQLAlchemy to implement storage and retrieval of
data from any of the databases supported by SQLAlchemy.
"""
_db = None
_logger = logging.getLogger("flask-blogging")
def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
                 bind=None):
        """
        The constructor for the ``SQLAStorage`` class.

        :param engine: The ``SQLAlchemy`` engine instance created by calling
            ``create_engine``. One can also use Flask-SQLAlchemy, and pass the
            engine property.
        :type engine: object
        :param table_prefix: (Optional) Prefix to use for the tables created
            (default ``""``).
        :type table_prefix: str
        :param metadata: (Optional) The SQLAlchemy MetaData object
        :type metadata: object
        :param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
        :type db: object
        :param bind: (Optional) Reference the database to bind for multiple
            database scenario with binds
        :type bind: str
        :raises ValueError: if both ``db`` and ``engine`` are ``None``.
        """
        self._bind = bind
        if db:
            # Flask-SQLAlchemy path: reuse its engine and shared metadata.
            self._engine = db.get_engine(db.get_app(), bind=self._bind)
            self._metadata = db.metadata
        else:
            if engine is None:
                raise ValueError("Both db and engine args cannot be None")
            self._engine = engine
            self._metadata = metadata or sqla.MetaData()
        # NOTE(review): 'bind_key' in Table.info is presumably read by
        # Flask-SQLAlchemy to route tables to the configured bind -- confirm.
        self._info = {} if self._bind is None else {"bind_key": self._bind}
        self._table_prefix = table_prefix
        # Pick up any pre-existing tables before creating the missing ones.
        self._metadata.reflect(bind=self._engine)
        self._create_all_tables()
        # automap base and restrict to the required tables here.
        table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
        table_names = [self._table_name(t) for t in table_suffix]
        self._metadata.create_all(bind=self._engine, tables=self.all_tables)
        # Reflect only our four tables into a fresh MetaData so automap
        # does not generate classes for unrelated tables.
        meta = sqla.MetaData()
        meta.reflect(bind=self._engine, only=table_names)
        self._Base = automap_base(metadata=meta)
        self._Base.prepare()
        self._inject_models()
        # Announce readiness to any listeners of the sqla_initialized signal.
        sqla_initialized.send(self, engine=self._engine,
                              table_prefix=self._table_prefix,
                              meta=self.metadata,
                              bind=self._bind)
def _inject_models(self):
        # NOTE(review): ``this`` is presumably a module-level self-reference
        # defined elsewhere in the file; the automapped classes are attached
        # to it so they can be imported as module attributes -- confirm.
        global this
        this.Post = getattr(self._Base.classes, self._table_name("post"))
        # Automap names the class after the (possibly prefixed) table name;
        # restore a friendly __name__ for repr/debugging.
        this.Post.__name__ = 'Post'
        this.Tag = getattr(self._Base.classes, self._table_name("tag"))
        this.Tag.__name__ = 'Tag'
@property
def metadata(self):
        """The SQLAlchemy ``MetaData`` instance backing this storage."""
        return self._metadata
@property
def post_table(self):
        """The table holding blog posts."""
        return self._post_table
@property
def post_model(self):
        """The automapped ORM class for the (prefixed) post table."""
        return getattr(self._Base.classes, self._table_name("post"))
@property
def tag_model(self):
        """The automapped ORM class for the (prefixed) tag table."""
        return getattr(self._Base.classes, self._table_name("tag"))
@property
def tag_table(self):
        """The table holding blog post tags."""
        return self._tag_table
@property
def tag_posts_table(self):
        """The tag/post association table."""
        return self._tag_posts_table
@property
def user_posts_table(self):
        """The user/post association table."""
        return self._user_posts_table
@property
def all_tables(self):
        """The post, tag, user_posts and tag_posts tables as a list."""
        return [self._post_table, self._tag_table,
                self._user_posts_table, self._tag_posts_table]
@property
def engine(self):
        """The SQLAlchemy engine used for all database access."""
        return self._engine
def save_post(self, title, text, user_id, tags, draft=False,
              post_date=None, last_modified_date=None, meta_data=None,
              post_id=None):
    """
    Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
    is invalid, the post must be inserted into the storage. If ``post_id``
    is a valid id, then the data must be updated.

    :param title: The title of the blog post
    :type title: str
    :param text: The text of the blog post
    :type text: str
    :param user_id: The user identifier
    :type user_id: str
    :param tags: A list of tags
    :type tags: list
    :param draft: (Optional) If the post is a draft of if needs to be
        published. (default ``False``)
    :type draft: bool
    :param post_date: (Optional) The date the blog was posted (default
        datetime.datetime.utcnow() )
    :type post_date: datetime.datetime
    :param last_modified_date: (Optional) The date when blog was last
        modified (default datetime.datetime.utcnow() )
    :type last_modified_date: datetime.datetime
    :param post_id: (Optional) The post identifier. This should be ``None``
        for an insert call, and a valid value for update. (default ``None``)
    :type post_id: str
    :return: The post_id value, in case of a successful insert or update.
        Return ``None`` if there were errors.
    """
    # (fix: the original computed an unused ``new_post`` local here.)
    post_id = _as_int(post_id)
    current_datetime = datetime.datetime.utcnow()
    # Draft flag is stored as 0/1 in the database.
    draft = 1 if draft is True else 0
    post_date = post_date if post_date is not None else current_datetime
    last_modified_date = last_modified_date if last_modified_date is not \
        None else current_datetime

    with self._engine.begin() as conn:
        try:
            if post_id is not None:  # validate post_id
                exists_statement = sqla.select([self._post_table]).where(
                    self._post_table.c.id == post_id)
                exists = \
                    conn.execute(exists_statement).fetchone() is not None
                # An unknown id falls back to an insert.
                post_id = post_id if exists else None
            post_statement = \
                self._post_table.insert() if post_id is None else \
                self._post_table.update().where(
                    self._post_table.c.id == post_id)
            post_statement = post_statement.values(
                title=title, text=text, post_date=post_date,
                last_modified_date=last_modified_date, draft=draft
            )

            post_result = conn.execute(post_statement)
            post_id = post_result.inserted_primary_key[0] \
                if post_id is None else post_id
            # Keep tag and author associations in sync with the post.
            self._save_tags(tags, post_id, conn)
            self._save_user_post(user_id, post_id, conn)
        except Exception as e:
            self._logger.exception(str(e))
            post_id = None
    return post_id
@classmethod
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
    """
    Translate joined post/tag rows (one row per post/tag pairing) into
    the list-of-dicts format expected by flask-blogging, preserving
    first-seen post order.
    """
    posts_by_id = OrderedDict()
    tags_by_post_id = defaultdict(list)
    for row in joined_rows:
        posts_by_id[row.post_id] = cls._serialise_post_from_joined_row(row)
        tags_by_post_id[row.post_id].append(row.tag_text)
    # Attach the accumulated tag lists to their posts.
    for post_id, post in posts_by_id.items():
        post_tags = tags_by_post_id.get(post_id)
        if post_tags:
            post["tags"] = post_tags
    return list(posts_by_id.values())
@staticmethod
def _serialise_post_from_joined_row(joined_row):
return dict(
post_id=joined_row.post_id,
title=joined_row.post_title,
text=joined_row.post_text,
post_date=joined_row.post_post_date,
last_modified_date=joined_row.post_last_modified_date,
draft=joined_row.post_draft,
user_id=joined_row.user_posts_user_id
)
def get_post_by_id(self, post_id):
        """
        Fetch the blog post given by ``post_id``

        :param post_id: The post identifier for the blog post
        :type post_id: str
        :return: If the ``post_id`` is valid, the post data is retrieved, else
            returns ``None``.
        """
        r = None
        # Non-numeric ids become None and will match no rows.
        post_id = _as_int(post_id)
        with self._engine.begin() as conn:
            try:
                post_statement = sqla.select([self._post_table]) \
                    .where(self._post_table.c.id == post_id) \
                    .alias('post')
                # NOTE(review): these are inner joins, so a post with no
                # tag or no user association yields zero rows -- confirm
                # whether untagged posts should be retrievable.
                joined_statement = post_statement.join(self._tag_posts_table) \
                    .join(self._tag_table) \
                    .join(self._user_posts_table) \
                    .alias('join')
                # Note this will retrieve one row per tag
                all_rows = conn.execute(
                    sqla.select([joined_statement])
                ).fetchall()
                # A missing post produces an empty list; the [0] index then
                # raises IndexError, which the broad except below turns into
                # a logged ``None`` result.
                r = self._serialise_posts_and_tags_from_joined_rows(
                    all_rows
                )[0]
            except Exception as e:
                self._logger.exception(str(e))
                r = None
        return r
def get_posts(self, count=10, offset=0, recent=True, tag=None,
                  user_id=None, include_draft=False):
        """
        Get posts given by filter criteria

        :param count: The number of posts to retrieve (default 10)
        :type count: int
        :param offset: The number of posts to offset (default 0)
        :type offset: int
        :param recent: Order by recent posts or not
        :type recent: bool
        :param tag: Filter by a specific tag
        :type tag: str
        :param user_id: Filter by a specific user
        :type user_id: str
        :param include_draft: Whether to include posts marked as draft or not
        :type include_draft: bool

        :return: A list of posts, with each element a dict containing values
            for the following keys: (title, text, draft, post_date,
            last_modified_date). If count is ``None``, then all the posts are
            returned.
        """
        user_id = str(user_id) if user_id else user_id

        with self._engine.begin() as conn:
            try:
                # post_statement ensures the correct posts are selected
                # in the correct order; limit/offset are applied to posts
                # *before* joining, so pagination counts posts rather than
                # post/tag rows.
                post_statement = sqla.select([self._post_table])
                post_filter = self._get_filter(
                    tag, user_id, include_draft, conn
                )
                if post_filter is not None:
                    post_statement = post_statement.where(post_filter)
                if count:
                    post_statement = post_statement.limit(count)
                if offset:
                    post_statement = post_statement.offset(offset)
                post_ordering = \
                    sqla.desc(self._post_table.c.post_date) if recent \
                    else self._post_table.c.post_date
                post_statement = post_statement.order_by(post_ordering)
                post_statement = post_statement.alias('post')

                # joined_statement ensures other data is retrieved.
                # NOTE(review): inner joins drop posts without tags or
                # without a user association -- confirm this is intended.
                joined_statement = post_statement.join(self._tag_posts_table) \
                    .join(self._tag_table) \
                    .join(self._user_posts_table) \
                    .alias('join')
                # Re-apply the ordering, which the join does not preserve.
                joined_ordering = \
                    sqla.desc(joined_statement.c.post_post_date) if recent \
                    else joined_statement.c.post_post_date
                joined_statement = sqla.select([joined_statement]) \
                    .order_by(joined_ordering)
                all_rows = conn.execute(joined_statement).fetchall()
                result = \
                    self._serialise_posts_and_tags_from_joined_rows(all_rows)
            except Exception as e:
                self._logger.exception(str(e))
                result = []
        return result
def delete_post(self, post_id):
"""
Delete the post defined by ``post_id``
:param post_id: The identifier corresponding to a post
:type post_id: int
:return: Returns True if the post was successfully deleted and False
otherwise.
"""
status = False
success = 0
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_del_statement = self._post_table.delete().where(
self._post_table.c.id == post_id)
conn.execute(post_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
user_posts_del_statement = self._user_posts_table.delete(). \
where(self._user_posts_table.c.post_id == post_id)
conn.execute(user_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
tag_posts_del_statement = self._tag_posts_table.delete(). \
where(self._tag_posts_table.c.post_id == post_id)
conn.execute(tag_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
status = success == 3
return status
def _get_filter(self, tag, user_id, include_draft, conn):
filters = []
if tag:
tag = tag.upper()
tag_statement = sqla.select([self._tag_table.c.id]).where(
self._tag_table.c.text == tag)
tag_result = conn.execute(tag_statement).fetchone()
if tag_result is not None:
tag_id = tag_result[0]
tag_filter = sqla.and_(
self._tag_posts_table.c.tag_id == tag_id,
self._post_table.c.id == self._tag_posts_table.c.post_id
)
filters.append(tag_filter)
if user_id:
user_filter = sqla.and_(
self._user_posts_table.c.user_id == user_id,
self._post_table.c.id == self._user_posts_table.c.post_id
)
filters.append(user_filter)
draft_filter = self._post_table.c.draft == 1 if include_draft else \
self._post_table.c.draft == 0
filters.append(draft_filter)
sql_filter = sqla.and_(*filters)
return sql_filter
def _save_tags(self, tags, post_id, conn):
tags = self.normalize_tags(tags)
tag_ids = []
for tag in tags: # iterate over given tags
try:
# check if the tag exists
statement = self._tag_table.select().where(
self._tag_table.c.text == tag)
tag_result = conn.execute(statement).fetchone()
if tag_result is None:
# insert if it is a new tag
tag_insert_statement = self._tag_table.insert().\
values(text=tag)
result = conn.execute(tag_insert_statement)
tag_id = result.inserted_primary_key[0]
else:
# tag already exists
tag_id = tag_result[0]
except sqla.exc.IntegrityError as e:
# some database error occurred;
tag_id = None
self._logger.exception(str(e))
except Exception as e:
# unknown exception occurred
tag_id = None
self._logger.exception(str(e))
if tag_id is not None:
# for a valid tag_id
tag_ids.append(tag_id)
try:
# check if given post has tag given by tag_id
statement = self._tag_posts_table.select().where(
sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
self._tag_posts_table.c.post_id == post_id))
tag_post_result = conn.execute(statement).fetchone()
if tag_post_result is None:
# if tag_id not present for the post given by post_id
tag_post_statement = self._tag_posts_table.insert().\
values(tag_id=tag_id, post_id=post_id)
conn.execute(tag_post_statement)
except sqla.exc.IntegrityError as e:
self._logger.exception(str(e))
except Exception as e:
self._logger.exception(str(e))
try:
# remove tags that have been deleted
statement = self._tag_posts_table.delete().where(
sqla.and_(sqla.not_(
self._tag_posts_table.c.tag_id.in_(tag_ids)),
self._tag_posts_table.c.post_id == post_id
)
)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
user_id = str(user_id)
statement = sqla.select([self._user_posts_table]).where(
self._user_posts_table.c.post_id == post_id)
result = conn.execute(statement).fetchone()
if result is None:
try:
statement = self._user_posts_table.insert().values(
user_id=user_id, post_id=post_id)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
else:
if result[0] != user_id:
try:
statement = self._user_posts_table.update().where(
self._user_posts_table.c.post_id == post_id). \
values(user_id=user_id)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
def _table_name(self, table_name):
return self._table_prefix + table_name
def _create_all_tables(self):
"""
Creates all the required tables by calling the required functions.
:return:
"""
self._create_post_table()
self._create_tag_table()
self._create_tag_posts_table()
self._create_user_posts_table()
def _create_post_table(self):
"""
Creates the table to store the blog posts.
:return:
"""
with self._engine.begin() as conn:
post_table_name = self._table_name("post")
if not conn.dialect.has_table(conn, post_table_name):
self._post_table = sqla.Table(
post_table_name, self._metadata,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("title", sqla.String(256)),
sqla.Column("text", sqla.Text),
sqla.Column("post_date", sqla.DateTime),
sqla.Column("last_modified_date", sqla.DateTime),
# if 1 then make it a draft
sqla.Column("draft", sqla.SmallInteger, default=0),
info=self._info
)
self._logger.debug("Created table with table name %s" %
post_table_name)
else:
self._post_table = self._metadata.tables[post_table_name]
self._logger.debug("Reflecting to table with table name %s" %
post_table_name)
def _create_tag_table(self):
"""
Creates the table to store blog post tags.
:return:
"""
with self._engine.begin() as conn:
tag_table_name = self._table_name("tag")
if not conn.dialect.has_table(conn, tag_table_name):
self._tag_table = sqla.Table(
tag_table_name, self._metadata,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("text", sqla.String(128), unique=True,
index=True),
info=self._info
)
self._logger.debug("Created table with table name %s" %
tag_table_name)
else:
self._tag_table = self._metadata.tables[tag_table_name]
self._logger.debug("Reflecting to table with table name %s" %
tag_table_name)
def _create_tag_posts_table(self):
"""
Creates the table to store association info between blog posts and
tags.
:return:
"""
with self._engine.begin() as conn:
tag_posts_table_name = self._table_name("tag_posts")
if not conn.dialect.has_table(conn, tag_posts_table_name):
tag_id_key = self._table_name("tag") + ".id"
post_id_key = self._table_name("post") + ".id"
self._tag_posts_table = sqla.Table(
tag_posts_table_name, self._metadata,
sqla.Column('tag_id', sqla.Integer,
sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.Column('post_id', sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
tag_posts_table_name)
else:
self._tag_posts_table = \
self._metadata.tables[tag_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
tag_posts_table_name)
def _create_user_posts_table(self):
"""
Creates the table to store association info between user and blog
posts.
:return:
"""
with self._engine.begin() as conn:
user_posts_table_name = self._table_name("user_posts")
if not conn.dialect.has_table(conn, user_posts_table_name):
post_id_key = self._table_name("post") + ".id"
self._user_posts_table = sqla.Table(
user_posts_table_name, self._metadata,
sqla.Column("user_id", sqla.String(128), index=True),
sqla.Column("post_id", sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
user_posts_table_name)
else:
self._user_posts_table = \
self._metadata.tables[user_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
user_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage.delete_post
|
python
|
def delete_post(self, post_id):
status = False
success = 0
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_del_statement = self._post_table.delete().where(
self._post_table.c.id == post_id)
conn.execute(post_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
user_posts_del_statement = self._user_posts_table.delete(). \
where(self._user_posts_table.c.post_id == post_id)
conn.execute(user_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
tag_posts_del_statement = self._tag_posts_table.delete(). \
where(self._tag_posts_table.c.post_id == post_id)
conn.execute(tag_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
status = success == 3
return status
|
Delete the post defined by ``post_id``
:param post_id: The identifier corresponding to a post
:type post_id: int
:return: Returns True if the post was successfully deleted and False
otherwise.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L362-L397
|
[
"def _as_int(s):\n try:\n n = int(s) if s is not None else None\n return n\n except ValueError:\n return None\n"
] |
class SQLAStorage(Storage):
"""
The ``SQLAStorage`` implements the interface specified by the ``Storage``
class. This class uses SQLAlchemy to implement storage and retrieval of
data from any of the databases supported by SQLAlchemy.
"""
_db = None
_logger = logging.getLogger("flask-blogging")
def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
bind=None):
"""
The constructor for the ``SQLAStorage`` class.
:param engine: The ``SQLAlchemy`` engine instance created by calling
``create_engine``. One can also use Flask-SQLAlchemy, and pass the
engine property.
:type engine: object
:param table_prefix: (Optional) Prefix to use for the tables created
(default ``""``).
:type table_prefix: str
:param metadata: (Optional) The SQLAlchemy MetaData object
:type metadata: object
:param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
:type db: object
:param bind: (Optional) Reference the database to bind for multiple
database scenario with binds
:type bind: str
"""
self._bind = bind
if db:
self._engine = db.get_engine(db.get_app(), bind=self._bind)
self._metadata = db.metadata
else:
if engine is None:
raise ValueError("Both db and engine args cannot be None")
self._engine = engine
self._metadata = metadata or sqla.MetaData()
self._info = {} if self._bind is None else {"bind_key": self._bind}
self._table_prefix = table_prefix
self._metadata.reflect(bind=self._engine)
self._create_all_tables()
# automap base and restrict to the required tables here.
table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
table_names = [self._table_name(t) for t in table_suffix]
self._metadata.create_all(bind=self._engine, tables=self.all_tables)
meta = sqla.MetaData()
meta.reflect(bind=self._engine, only=table_names)
self._Base = automap_base(metadata=meta)
self._Base.prepare()
self._inject_models()
sqla_initialized.send(self, engine=self._engine,
table_prefix=self._table_prefix,
meta=self.metadata,
bind=self._bind)
def _inject_models(self):
global this
this.Post = getattr(self._Base.classes, self._table_name("post"))
this.Post.__name__ = 'Post'
this.Tag = getattr(self._Base.classes, self._table_name("tag"))
this.Tag.__name__ = 'Tag'
@property
def metadata(self):
return self._metadata
@property
def post_table(self):
return self._post_table
@property
def post_model(self):
return getattr(self._Base.classes, self._table_name("post"))
@property
def tag_model(self):
return getattr(self._Base.classes, self._table_name("tag"))
@property
def tag_table(self):
return self._tag_table
@property
def tag_posts_table(self):
return self._tag_posts_table
@property
def user_posts_table(self):
return self._user_posts_table
@property
def all_tables(self):
return [self._post_table, self._tag_table,
self._user_posts_table, self._tag_posts_table]
@property
def engine(self):
return self._engine
def save_post(self, title, text, user_id, tags, draft=False,
post_date=None, last_modified_date=None, meta_data=None,
post_id=None):
"""
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
is invalid, the post must be inserted into the storage. If ``post_id``
is a valid id, then the data must be updated.
:param title: The title of the blog post
:type title: str
:param text: The text of the blog post
:type text: str
:param user_id: The user identifier
:type user_id: str
:param tags: A list of tags
:type tags: list
:param draft: (Optional) If the post is a draft of if needs to be
published. (default ``False``)
:type draft: bool
:param post_date: (Optional) The date the blog was posted (default
datetime.datetime.utcnow() )
:type post_date: datetime.datetime
:param last_modified_date: (Optional) The date when blog was last
modified (default datetime.datetime.utcnow() )
:type last_modified_date: datetime.datetime
:param post_id: (Optional) The post identifier. This should be ``None``
for an insert call,
and a valid value for update. (default ``None``)
:type post_id: str
:return: The post_id value, in case of a successful insert or update.
Return ``None`` if there were errors.
"""
new_post = post_id is None
post_id = _as_int(post_id)
current_datetime = datetime.datetime.utcnow()
draft = 1 if draft is True else 0
post_date = post_date if post_date is not None else current_datetime
last_modified_date = last_modified_date if last_modified_date is not \
None else current_datetime
with self._engine.begin() as conn:
try:
if post_id is not None: # validate post_id
exists_statement = sqla.select([self._post_table]).where(
self._post_table.c.id == post_id)
exists = \
conn.execute(exists_statement).fetchone() is not None
post_id = post_id if exists else None
post_statement = \
self._post_table.insert() if post_id is None else \
self._post_table.update().where(
self._post_table.c.id == post_id)
post_statement = post_statement.values(
title=title, text=text, post_date=post_date,
last_modified_date=last_modified_date, draft=draft
)
post_result = conn.execute(post_statement)
post_id = post_result.inserted_primary_key[0] \
if post_id is None else post_id
self._save_tags(tags, post_id, conn)
self._save_user_post(user_id, post_id, conn)
except Exception as e:
self._logger.exception(str(e))
post_id = None
return post_id
@classmethod
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
    """
    Translates multiple rows of joined post and tag information
    into the dictionary format expected by flask-blogging.
    There will be one row per post/tag pairing, so each post is
    collapsed to a single dict carrying a ``tags`` list.
    """
    # OrderedDict preserves the SQL result ordering of the posts.
    posts_by_id = OrderedDict()
    tags_by_post_id = defaultdict(list)
    for joined_row in joined_rows:
        post_id = joined_row.post_id
        post = cls._serialise_post_from_joined_row(joined_row)
        posts_by_id[post_id] = post
        tags_by_post_id[post_id].append(joined_row.tag_text)
    # Renamed loop variable: the original shadowed the builtin ``id``.
    for pid, post in posts_by_id.items():
        tags = tags_by_post_id.get(pid)
        if tags:
            post["tags"] = tags
    # list(...) instead of a redundant copy comprehension.
    return list(posts_by_id.values())
@staticmethod
def _serialise_post_from_joined_row(joined_row):
    """Map one joined result row onto the post dict used by
    flask-blogging; tags are attached separately by the caller."""
    row = joined_row
    return {
        "post_id": row.post_id,
        "title": row.post_title,
        "text": row.post_text,
        "post_date": row.post_post_date,
        "last_modified_date": row.post_last_modified_date,
        "draft": row.post_draft,
        "user_id": row.user_posts_user_id,
    }
def get_post_by_id(self, post_id):
    """
    Fetch the blog post given by ``post_id``

    :param post_id: The post identifier for the blog post
    :type post_id: str
    :return: If the ``post_id`` is valid, the post data is retrieved, else
        returns ``None``.
    """
    r = None
    post_id = _as_int(post_id)  # module-level helper (defined outside this view)
    with self._engine.begin() as conn:
        try:
            # Select the one post row, then join through tag_posts -> tag
            # and user_posts so tags and author arrive in the same result.
            post_statement = sqla.select([self._post_table]) \
                .where(self._post_table.c.id == post_id) \
                .alias('post')
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # Note this will retrieve one row per tag
            all_rows = conn.execute(
                sqla.select([joined_statement])
            ).fetchall()
            # [0]: at most one post matches; an empty result raises
            # IndexError, which the except below converts to None.
            r = self._serialise_posts_and_tags_from_joined_rows(
                all_rows
            )[0]
        except Exception as e:
            # Broad catch by design: any failure logs and yields None.
            self._logger.exception(str(e))
            r = None
    return r
def get_posts(self, count=10, offset=0, recent=True, tag=None,
              user_id=None, include_draft=False):
    """
    Get posts given by filter criteria

    :param count: The number of posts to retrieve (default 10)
    :type count: int
    :param offset: The number of posts to offset (default 0)
    :type offset: int
    :param recent: Order by recent posts or not
    :type recent: bool
    :param tag: Filter by a specific tag
    :type tag: str
    :param user_id: Filter by a specific user
    :type user_id: str
    :param include_draft: Whether to include posts marked as draft or not
    :type include_draft: bool
    :return: A list of posts, with each element a dict containing values
        for the following keys: (title, text, draft, post_date,
        last_modified_date). If count is ``None``, then all the posts are
        returned.
    """
    user_id = str(user_id) if user_id else user_id  # user ids stored as strings
    with self._engine.begin() as conn:
        try:
            # post_statement ensures the correct posts are selected
            # in the correct order. limit/offset are applied here, on the
            # post table alone, so pagination counts posts rather than the
            # one-row-per-tag output of the join below.
            post_statement = sqla.select([self._post_table])
            post_filter = self._get_filter(
                tag, user_id, include_draft, conn
            )
            if post_filter is not None:
                post_statement = post_statement.where(post_filter)
            if count:
                post_statement = post_statement.limit(count)
            if offset:
                post_statement = post_statement.offset(offset)
            post_ordering = \
                sqla.desc(self._post_table.c.post_date) if recent \
                else self._post_table.c.post_date
            post_statement = post_statement.order_by(post_ordering)
            post_statement = post_statement.alias('post')
            # joined_statement ensures other data is retrieved
            # (tags and author via the association tables).
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # Ordering is re-applied on the outer select; the join does
            # not guarantee that the inner ordering survives.
            joined_ordering = \
                sqla.desc(joined_statement.c.post_post_date) if recent \
                else joined_statement.c.post_post_date
            joined_statement = sqla.select([joined_statement]) \
                .order_by(joined_ordering)
            all_rows = conn.execute(joined_statement).fetchall()
            result = \
                self._serialise_posts_and_tags_from_joined_rows(all_rows)
        except Exception as e:
            # Any failure logs and returns an empty list.
            self._logger.exception(str(e))
            result = []
    return result
def count_posts(self, tag=None, user_id=None, include_draft=False):
    """
    Returns the total number of posts for the given filter

    :param tag: Filter by a specific tag
    :type tag: str
    :param user_id: Filter by a specific user
    :type user_id: str
    :param include_draft: Whether to include posts marked as draft or not
    :type include_draft: bool
    :return: The number of posts for the given filter.
    """
    result = 0
    with self._engine.begin() as conn:
        try:
            # COUNT(*) over the post table, restricted by the same
            # filter clause used by get_posts.
            # NOTE(review): unlike get_posts, user_id is not coerced to
            # str here -- confirm callers always pass a string.
            count_statement = sqla.select([sqla.func.count()]). \
                select_from(self._post_table)
            sql_filter = self._get_filter(tag, user_id, include_draft,
                                          conn)
            count_statement = count_statement.where(sql_filter)
            result = conn.execute(count_statement).scalar()
        except Exception as e:
            self._logger.exception(str(e))
            result = 0
    return result
def _get_filter(self, tag, user_id, include_draft, conn):
    """Build the AND-combined filter clause shared by post queries.

    Optionally restricts by tag and author, and always restricts by
    draft status (drafts only when ``include_draft``, else published).

    NOTE(review): when ``tag`` is given but not present in the tag
    table, no tag filter is appended, so the query falls back to
    matching all posts instead of none -- confirm this is intended.
    """
    filters = []
    if tag:
        # Tags are compared upper-cased; presumably normalize_tags
        # stores them upper-cased on save -- confirm.
        tag = tag.upper()
        tag_statement = sqla.select([self._tag_table.c.id]).where(
            self._tag_table.c.text == tag)
        tag_result = conn.execute(tag_statement).fetchone()
        if tag_result is not None:
            tag_id = tag_result[0]
            # Post must be linked to this tag through tag_posts.
            tag_filter = sqla.and_(
                self._tag_posts_table.c.tag_id == tag_id,
                self._post_table.c.id == self._tag_posts_table.c.post_id
            )
            filters.append(tag_filter)
    if user_id:
        # Post must be authored by user_id through user_posts.
        user_filter = sqla.and_(
            self._user_posts_table.c.user_id == user_id,
            self._post_table.c.id == self._user_posts_table.c.post_id
        )
        filters.append(user_filter)
    draft_filter = self._post_table.c.draft == 1 if include_draft else \
        self._post_table.c.draft == 0
    filters.append(draft_filter)
    sql_filter = sqla.and_(*filters)
    return sql_filter
def _save_tags(self, tags, post_id, conn):
    """Synchronise tag data so ``post_id`` is linked to exactly ``tags``.

    New tags are inserted into the tag table, missing post/tag links are
    added, and links to tags no longer present are removed. Failures on
    individual tags are logged and skipped so one bad tag does not abort
    the whole save.
    """
    tags = self.normalize_tags(tags)  # helper defined outside this view
    tag_ids = []  # ids of tags that should remain linked to the post
    for tag in tags:  # iterate over given tags
        try:
            # check if the tag exists
            statement = self._tag_table.select().where(
                self._tag_table.c.text == tag)
            tag_result = conn.execute(statement).fetchone()
            if tag_result is None:
                # insert if it is a new tag
                tag_insert_statement = self._tag_table.insert().\
                    values(text=tag)
                result = conn.execute(tag_insert_statement)
                tag_id = result.inserted_primary_key[0]
            else:
                # tag already exists
                tag_id = tag_result[0]
        except sqla.exc.IntegrityError as e:
            # some database error occurred;
            tag_id = None
            self._logger.exception(str(e))
        except Exception as e:
            # unknown exception occurred
            tag_id = None
            self._logger.exception(str(e))
        if tag_id is not None:
            # for a valid tag_id
            tag_ids.append(tag_id)
            try:
                # check if given post has tag given by tag_id
                statement = self._tag_posts_table.select().where(
                    sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
                              self._tag_posts_table.c.post_id == post_id))
                tag_post_result = conn.execute(statement).fetchone()
                if tag_post_result is None:
                    # if tag_id not present for the post given by post_id
                    tag_post_statement = self._tag_posts_table.insert().\
                        values(tag_id=tag_id, post_id=post_id)
                    conn.execute(tag_post_statement)
            except sqla.exc.IntegrityError as e:
                self._logger.exception(str(e))
            except Exception as e:
                self._logger.exception(str(e))
    try:
        # remove tags that have been deleted
        statement = self._tag_posts_table.delete().where(
            sqla.and_(sqla.not_(
                self._tag_posts_table.c.tag_id.in_(tag_ids)),
                self._tag_posts_table.c.post_id == post_id
            )
        )
        conn.execute(statement)
    except Exception as e:
        self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
    """Insert or update the author (user -> post) mapping for a post."""
    user_id = str(user_id)  # user ids are stored as strings
    statement = sqla.select([self._user_posts_table]).where(
        self._user_posts_table.c.post_id == post_id)
    result = conn.execute(statement).fetchone()
    if result is None:
        # No author recorded yet for this post: insert the mapping.
        try:
            statement = self._user_posts_table.insert().values(
                user_id=user_id, post_id=post_id)
            conn.execute(statement)
        except Exception as e:
            self._logger.exception(str(e))
    else:
        # Mapping exists; rewrite only if the author actually changed.
        if result[0] != user_id:
            try:
                statement = self._user_posts_table.update().where(
                    self._user_posts_table.c.post_id == post_id). \
                    values(user_id=user_id)
                conn.execute(statement)
            except Exception as e:
                self._logger.exception(str(e))
def _table_name(self, table_name):
    """Return *table_name* with the configured table prefix applied."""
    return "%s%s" % (self._table_prefix, table_name)
def _create_all_tables(self):
    """
    Creates all the required tables by calling the required functions.
    :return:
    """
    # One helper per table: post, tag, and the two association tables.
    table_builders = (
        self._create_post_table,
        self._create_tag_table,
        self._create_tag_posts_table,
        self._create_user_posts_table,
    )
    for build in table_builders:
        build()
def _create_post_table(self):
    """
    Creates the table to store the blog posts.

    Defines the table on first use, or reuses the already-reflected
    definition when the table exists in the database.

    :return:
    """
    with self._engine.begin() as conn:
        post_table_name = self._table_name("post")
        if not conn.dialect.has_table(conn, post_table_name):
            self._post_table = sqla.Table(
                post_table_name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                sqla.Column("title", sqla.String(256)),
                sqla.Column("text", sqla.Text),
                sqla.Column("post_date", sqla.DateTime),
                sqla.Column("last_modified_date", sqla.DateTime),
                # if 1 then make it a draft
                sqla.Column("draft", sqla.SmallInteger, default=0),
                info=self._info  # carries the optional bind_key
            )
            self._logger.debug("Created table with table name %s" %
                               post_table_name)
        else:
            # Table already exists: reuse the definition loaded by
            # metadata.reflect() during construction.
            self._post_table = self._metadata.tables[post_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               post_table_name)
def _create_tag_table(self):
    """
    Creates the table to store blog post tags.

    Defines the table on first use, or reuses the already-reflected
    definition when the table exists in the database.

    :return:
    """
    with self._engine.begin() as conn:
        tag_table_name = self._table_name("tag")
        if not conn.dialect.has_table(conn, tag_table_name):
            self._tag_table = sqla.Table(
                tag_table_name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                # unique + indexed: the tag text is the lookup key
                sqla.Column("text", sqla.String(128), unique=True,
                            index=True),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_table_name)
        else:
            self._tag_table = self._metadata.tables[tag_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_table_name)
def _create_tag_posts_table(self):
    """
    Creates the table to store association info between blog posts and
    tags.
    :return:
    """
    with self._engine.begin() as conn:
        tag_posts_table_name = self._table_name("tag_posts")
        if not conn.dialect.has_table(conn, tag_posts_table_name):
            # Foreign keys use the prefixed table names so constraints
            # target the right (possibly prefixed) tables.
            tag_id_key = self._table_name("tag") + ".id"
            post_id_key = self._table_name("post") + ".id"
            self._tag_posts_table = sqla.Table(
                tag_posts_table_name, self._metadata,
                sqla.Column('tag_id', sqla.Integer,
                            sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                sqla.Column('post_id', sqla.Integer,
                            sqla.ForeignKey(post_id_key,
                                            onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                # A post may carry a given tag at most once.
                sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_posts_table_name)
        else:
            self._tag_posts_table = \
                self._metadata.tables[tag_posts_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_posts_table_name)
def _create_user_posts_table(self):
    """
    Creates the table to store association info between user and blog
    posts.
    :return:
    """
    with self._engine.begin() as conn:
        user_posts_table_name = self._table_name("user_posts")
        if not conn.dialect.has_table(conn, user_posts_table_name):
            post_id_key = self._table_name("post") + ".id"
            self._user_posts_table = sqla.Table(
                user_posts_table_name, self._metadata,
                sqla.Column("user_id", sqla.String(128), index=True),
                sqla.Column("post_id", sqla.Integer,
                            sqla.ForeignKey(post_id_key,
                                            onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                # Each (user, post) pairing appears at most once.
                sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               user_posts_table_name)
        else:
            self._user_posts_table = \
                self._metadata.tables[user_posts_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               user_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage._create_all_tables
|
python
|
def _create_all_tables(self):
self._create_post_table()
self._create_tag_table()
self._create_tag_posts_table()
self._create_user_posts_table()
|
Creates all the required tables by calling the required functions.
:return:
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L516-L524
|
[
"def _create_post_table(self):\n \"\"\"\n Creates the table to store the blog posts.\n :return:\n \"\"\"\n with self._engine.begin() as conn:\n post_table_name = self._table_name(\"post\")\n if not conn.dialect.has_table(conn, post_table_name):\n\n self._post_table = sqla.Table(\n post_table_name, self._metadata,\n sqla.Column(\"id\", sqla.Integer, primary_key=True),\n sqla.Column(\"title\", sqla.String(256)),\n sqla.Column(\"text\", sqla.Text),\n sqla.Column(\"post_date\", sqla.DateTime),\n sqla.Column(\"last_modified_date\", sqla.DateTime),\n # if 1 then make it a draft\n sqla.Column(\"draft\", sqla.SmallInteger, default=0),\n info=self._info\n\n )\n self._logger.debug(\"Created table with table name %s\" %\n post_table_name)\n else:\n self._post_table = self._metadata.tables[post_table_name]\n self._logger.debug(\"Reflecting to table with table name %s\" %\n post_table_name)\n",
"def _create_tag_table(self):\n \"\"\"\n Creates the table to store blog post tags.\n :return:\n \"\"\"\n with self._engine.begin() as conn:\n tag_table_name = self._table_name(\"tag\")\n if not conn.dialect.has_table(conn, tag_table_name):\n self._tag_table = sqla.Table(\n tag_table_name, self._metadata,\n sqla.Column(\"id\", sqla.Integer, primary_key=True),\n sqla.Column(\"text\", sqla.String(128), unique=True,\n index=True),\n info=self._info\n )\n self._logger.debug(\"Created table with table name %s\" %\n tag_table_name)\n else:\n self._tag_table = self._metadata.tables[tag_table_name]\n self._logger.debug(\"Reflecting to table with table name %s\" %\n tag_table_name)\n",
"def _create_tag_posts_table(self):\n \"\"\"\n Creates the table to store association info between blog posts and\n tags.\n :return:\n \"\"\"\n with self._engine.begin() as conn:\n tag_posts_table_name = self._table_name(\"tag_posts\")\n if not conn.dialect.has_table(conn, tag_posts_table_name):\n tag_id_key = self._table_name(\"tag\") + \".id\"\n post_id_key = self._table_name(\"post\") + \".id\"\n self._tag_posts_table = sqla.Table(\n tag_posts_table_name, self._metadata,\n sqla.Column('tag_id', sqla.Integer,\n sqla.ForeignKey(tag_id_key, onupdate=\"CASCADE\",\n ondelete=\"CASCADE\"),\n index=True),\n sqla.Column('post_id', sqla.Integer,\n sqla.ForeignKey(post_id_key,\n onupdate=\"CASCADE\",\n ondelete=\"CASCADE\"),\n index=True),\n sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),\n info=self._info\n )\n self._logger.debug(\"Created table with table name %s\" %\n tag_posts_table_name)\n else:\n self._tag_posts_table = \\\n self._metadata.tables[tag_posts_table_name]\n self._logger.debug(\"Reflecting to table with table name %s\" %\n tag_posts_table_name)\n",
"def _create_user_posts_table(self):\n \"\"\"\n Creates the table to store association info between user and blog\n posts.\n :return:\n \"\"\"\n with self._engine.begin() as conn:\n user_posts_table_name = self._table_name(\"user_posts\")\n if not conn.dialect.has_table(conn, user_posts_table_name):\n post_id_key = self._table_name(\"post\") + \".id\"\n self._user_posts_table = sqla.Table(\n user_posts_table_name, self._metadata,\n sqla.Column(\"user_id\", sqla.String(128), index=True),\n sqla.Column(\"post_id\", sqla.Integer,\n sqla.ForeignKey(post_id_key,\n onupdate=\"CASCADE\",\n ondelete=\"CASCADE\"),\n index=True),\n sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),\n info=self._info\n )\n self._logger.debug(\"Created table with table name %s\" %\n user_posts_table_name)\n else:\n self._user_posts_table = \\\n self._metadata.tables[user_posts_table_name]\n self._logger.debug(\"Reflecting to table with table name %s\" %\n user_posts_table_name)\n"
] |
class SQLAStorage(Storage):
    """
    The ``SQLAStorage`` implements the interface specified by the ``Storage``
    class. This class uses SQLAlchemy to implement storage and retrieval of
    data from any of the databases supported by SQLAlchemy.
    """
    _db = None  # slot for a Flask-SQLAlchemy handle; not assigned in this view
    _logger = logging.getLogger("flask-blogging")  # shared class-level logger

    def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
                 bind=None):
        """
        The constructor for the ``SQLAStorage`` class.

        :param engine: The ``SQLAlchemy`` engine instance created by calling
            ``create_engine``. One can also use Flask-SQLAlchemy, and pass the
            engine property.
        :type engine: object
        :param table_prefix: (Optional) Prefix to use for the tables created
            (default ``""``).
        :type table_prefix: str
        :param metadata: (Optional) The SQLAlchemy MetaData object
        :type metadata: object
        :param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
        :type db: object
        :param bind: (Optional) Reference the database to bind for multiple
            database scenario with binds
        :type bind: str
        """
        self._bind = bind
        if db:
            # Flask-SQLAlchemy path: derive engine and metadata from ``db``.
            self._engine = db.get_engine(db.get_app(), bind=self._bind)
            self._metadata = db.metadata
        else:
            if engine is None:
                raise ValueError("Both db and engine args cannot be None")
            self._engine = engine
            self._metadata = metadata or sqla.MetaData()
        # "bind_key" routes the tables to the right database when
        # Flask-SQLAlchemy binds are in use.
        self._info = {} if self._bind is None else {"bind_key": self._bind}
        self._table_prefix = table_prefix
        self._metadata.reflect(bind=self._engine)
        self._create_all_tables()
        # automap base and restrict to the required tables here.
        table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
        table_names = [self._table_name(t) for t in table_suffix]
        self._metadata.create_all(bind=self._engine, tables=self.all_tables)
        # Reflect only the four storage tables into a fresh MetaData and
        # automap them into ORM classes (published by _inject_models).
        meta = sqla.MetaData()
        meta.reflect(bind=self._engine, only=table_names)
        self._Base = automap_base(metadata=meta)
        self._Base.prepare()
        self._inject_models()
        # Announce readiness so listeners can hook in.
        sqla_initialized.send(self, engine=self._engine,
                              table_prefix=self._table_prefix,
                              meta=self.metadata,
                              bind=self._bind)
def _inject_models(self):
    # Publish the automapped ORM classes at module scope; ``this`` is
    # presumably a module-level self-reference set outside this view --
    # confirm where it is assigned.
    global this
    this.Post = getattr(self._Base.classes, self._table_name("post"))
    this.Post.__name__ = 'Post'  # friendlier than the prefixed table name
    this.Tag = getattr(self._Base.classes, self._table_name("tag"))
    this.Tag.__name__ = 'Tag'
@property
def metadata(self):
    """The SQLAlchemy ``MetaData`` holding the storage tables."""
    return self._metadata

@property
def post_table(self):
    """The blog post table."""
    return self._post_table

@property
def post_model(self):
    """Automapped ORM class for the (prefixed) post table."""
    return getattr(self._Base.classes, self._table_name("post"))

@property
def tag_model(self):
    """Automapped ORM class for the (prefixed) tag table."""
    return getattr(self._Base.classes, self._table_name("tag"))

@property
def tag_table(self):
    """The tag table."""
    return self._tag_table

@property
def tag_posts_table(self):
    """Association table linking tags to posts."""
    return self._tag_posts_table

@property
def user_posts_table(self):
    """Association table linking users (authors) to posts."""
    return self._user_posts_table

@property
def all_tables(self):
    """All four storage tables as a list."""
    return [self._post_table, self._tag_table,
            self._user_posts_table, self._tag_posts_table]

@property
def engine(self):
    """The SQLAlchemy engine used for all statements."""
    return self._engine
def save_post(self, title, text, user_id, tags, draft=False,
              post_date=None, last_modified_date=None, meta_data=None,
              post_id=None):
    """
    Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
    is invalid, the post must be inserted into the storage. If ``post_id``
    is a valid id, then the data must be updated.

    :param title: The title of the blog post
    :type title: str
    :param text: The text of the blog post
    :type text: str
    :param user_id: The user identifier
    :type user_id: str
    :param tags: A list of tags
    :type tags: list
    :param draft: (Optional) If the post is a draft of if needs to be
        published. (default ``False``)
    :type draft: bool
    :param post_date: (Optional) The date the blog was posted (default
        datetime.datetime.utcnow() )
    :type post_date: datetime.datetime
    :param last_modified_date: (Optional) The date when blog was last
        modified (default datetime.datetime.utcnow() )
    :type last_modified_date: datetime.datetime
    :param meta_data: (Optional) Accepted for interface compatibility;
        not persisted by this storage implementation.
    :param post_id: (Optional) The post identifier. This should be ``None``
        for an insert call, and a valid value for update. (default ``None``)
    :type post_id: str
    :return: The post_id value, in case of a successful insert or update.
        Return ``None`` if there were errors.
    """
    # (Removed unused local ``new_post``; post_id alone decides the path.)
    post_id = _as_int(post_id)
    current_datetime = datetime.datetime.utcnow()
    # Only an exact boolean True marks a draft; any other truthy value
    # is stored as published (0) -- matches existing behaviour.
    draft = 1 if draft is True else 0
    post_date = post_date if post_date is not None else current_datetime
    last_modified_date = last_modified_date if last_modified_date is not \
        None else current_datetime
    with self._engine.begin() as conn:
        try:
            if post_id is not None:  # validate post_id; unknown ids insert
                exists_statement = sqla.select([self._post_table]).where(
                    self._post_table.c.id == post_id)
                exists = \
                    conn.execute(exists_statement).fetchone() is not None
                post_id = post_id if exists else None
            post_statement = \
                self._post_table.insert() if post_id is None else \
                self._post_table.update().where(
                    self._post_table.c.id == post_id)
            post_statement = post_statement.values(
                title=title, text=text, post_date=post_date,
                last_modified_date=last_modified_date, draft=draft
            )
            post_result = conn.execute(post_statement)
            post_id = post_result.inserted_primary_key[0] \
                if post_id is None else post_id
            # Keep association tables in sync in the same transaction.
            self._save_tags(tags, post_id, conn)
            self._save_user_post(user_id, post_id, conn)
        except Exception as e:
            self._logger.exception(str(e))
            post_id = None
    return post_id
@classmethod
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
    """
    Translates multiple rows of joined post and tag information
    into the dictionary format expected by flask-blogging.
    There will be one row per post/tag pairing, so each post is
    collapsed to a single dict carrying a ``tags`` list.
    """
    # OrderedDict preserves the SQL result ordering of the posts.
    posts_by_id = OrderedDict()
    tags_by_post_id = defaultdict(list)
    for joined_row in joined_rows:
        post_id = joined_row.post_id
        post = cls._serialise_post_from_joined_row(joined_row)
        posts_by_id[post_id] = post
        tags_by_post_id[post_id].append(joined_row.tag_text)
    # Renamed loop variable: the original shadowed the builtin ``id``.
    for pid, post in posts_by_id.items():
        tags = tags_by_post_id.get(pid)
        if tags:
            post["tags"] = tags
    # list(...) instead of a redundant copy comprehension.
    return list(posts_by_id.values())
@staticmethod
def _serialise_post_from_joined_row(joined_row):
    """Map one joined result row onto the post dict used by
    flask-blogging; tags are attached separately by the caller."""
    row = joined_row
    return {
        "post_id": row.post_id,
        "title": row.post_title,
        "text": row.post_text,
        "post_date": row.post_post_date,
        "last_modified_date": row.post_last_modified_date,
        "draft": row.post_draft,
        "user_id": row.user_posts_user_id,
    }
def get_post_by_id(self, post_id):
    """
    Fetch the blog post given by ``post_id``

    :param post_id: The post identifier for the blog post
    :type post_id: str
    :return: If the ``post_id`` is valid, the post data is retrieved, else
        returns ``None``.
    """
    r = None
    post_id = _as_int(post_id)  # module-level helper (defined outside this view)
    with self._engine.begin() as conn:
        try:
            # Select the one post row, then join through tag_posts -> tag
            # and user_posts so tags and author arrive in the same result.
            post_statement = sqla.select([self._post_table]) \
                .where(self._post_table.c.id == post_id) \
                .alias('post')
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # Note this will retrieve one row per tag
            all_rows = conn.execute(
                sqla.select([joined_statement])
            ).fetchall()
            # [0]: at most one post matches; an empty result raises
            # IndexError, which the except below converts to None.
            r = self._serialise_posts_and_tags_from_joined_rows(
                all_rows
            )[0]
        except Exception as e:
            # Broad catch by design: any failure logs and yields None.
            self._logger.exception(str(e))
            r = None
    return r
def get_posts(self, count=10, offset=0, recent=True, tag=None,
              user_id=None, include_draft=False):
    """
    Get posts given by filter criteria

    :param count: The number of posts to retrieve (default 10)
    :type count: int
    :param offset: The number of posts to offset (default 0)
    :type offset: int
    :param recent: Order by recent posts or not
    :type recent: bool
    :param tag: Filter by a specific tag
    :type tag: str
    :param user_id: Filter by a specific user
    :type user_id: str
    :param include_draft: Whether to include posts marked as draft or not
    :type include_draft: bool
    :return: A list of posts, with each element a dict containing values
        for the following keys: (title, text, draft, post_date,
        last_modified_date). If count is ``None``, then all the posts are
        returned.
    """
    user_id = str(user_id) if user_id else user_id  # user ids stored as strings
    with self._engine.begin() as conn:
        try:
            # post_statement ensures the correct posts are selected
            # in the correct order. limit/offset are applied here, on the
            # post table alone, so pagination counts posts rather than the
            # one-row-per-tag output of the join below.
            post_statement = sqla.select([self._post_table])
            post_filter = self._get_filter(
                tag, user_id, include_draft, conn
            )
            if post_filter is not None:
                post_statement = post_statement.where(post_filter)
            if count:
                post_statement = post_statement.limit(count)
            if offset:
                post_statement = post_statement.offset(offset)
            post_ordering = \
                sqla.desc(self._post_table.c.post_date) if recent \
                else self._post_table.c.post_date
            post_statement = post_statement.order_by(post_ordering)
            post_statement = post_statement.alias('post')
            # joined_statement ensures other data is retrieved
            # (tags and author via the association tables).
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # Ordering is re-applied on the outer select; the join does
            # not guarantee that the inner ordering survives.
            joined_ordering = \
                sqla.desc(joined_statement.c.post_post_date) if recent \
                else joined_statement.c.post_post_date
            joined_statement = sqla.select([joined_statement]) \
                .order_by(joined_ordering)
            all_rows = conn.execute(joined_statement).fetchall()
            result = \
                self._serialise_posts_and_tags_from_joined_rows(all_rows)
        except Exception as e:
            # Any failure logs and returns an empty list.
            self._logger.exception(str(e))
            result = []
    return result
def count_posts(self, tag=None, user_id=None, include_draft=False):
    """
    Returns the total number of posts for the given filter

    :param tag: Filter by a specific tag
    :type tag: str
    :param user_id: Filter by a specific user
    :type user_id: str
    :param include_draft: Whether to include posts marked as draft or not
    :type include_draft: bool
    :return: The number of posts for the given filter.
    """
    result = 0
    with self._engine.begin() as conn:
        try:
            # COUNT(*) over the post table, restricted by the same
            # filter clause used by get_posts.
            # NOTE(review): unlike get_posts, user_id is not coerced to
            # str here -- confirm callers always pass a string.
            count_statement = sqla.select([sqla.func.count()]). \
                select_from(self._post_table)
            sql_filter = self._get_filter(tag, user_id, include_draft,
                                          conn)
            count_statement = count_statement.where(sql_filter)
            result = conn.execute(count_statement).scalar()
        except Exception as e:
            self._logger.exception(str(e))
            result = 0
    return result
def delete_post(self, post_id):
    """
    Delete the post defined by ``post_id``

    :param post_id: The identifier corresponding to a post
    :type post_id: int
    :return: Returns True if the post was successfully deleted and False
        otherwise.
    """
    status = False
    success = 0  # number of delete statements that ran without raising
    post_id = _as_int(post_id)
    with self._engine.begin() as conn:
        # Each delete is attempted independently so one failure does not
        # stop cleanup of the other tables.
        try:
            post_del_statement = self._post_table.delete().where(
                self._post_table.c.id == post_id)
            conn.execute(post_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        try:
            user_posts_del_statement = self._user_posts_table.delete(). \
                where(self._user_posts_table.c.post_id == post_id)
            conn.execute(user_posts_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        try:
            tag_posts_del_statement = self._tag_posts_table.delete(). \
                where(self._tag_posts_table.c.post_id == post_id)
            conn.execute(tag_posts_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        # NOTE(review): deleting a nonexistent post_id affects zero rows
        # but raises nothing, so this still returns True -- confirm.
        status = success == 3
    return status
def _get_filter(self, tag, user_id, include_draft, conn):
    """Build the AND-combined filter clause shared by post queries.

    Optionally restricts by tag and author, and always restricts by
    draft status (drafts only when ``include_draft``, else published).

    NOTE(review): when ``tag`` is given but not present in the tag
    table, no tag filter is appended, so the query falls back to
    matching all posts instead of none -- confirm this is intended.
    """
    filters = []
    if tag:
        # Tags are compared upper-cased; presumably normalize_tags
        # stores them upper-cased on save -- confirm.
        tag = tag.upper()
        tag_statement = sqla.select([self._tag_table.c.id]).where(
            self._tag_table.c.text == tag)
        tag_result = conn.execute(tag_statement).fetchone()
        if tag_result is not None:
            tag_id = tag_result[0]
            # Post must be linked to this tag through tag_posts.
            tag_filter = sqla.and_(
                self._tag_posts_table.c.tag_id == tag_id,
                self._post_table.c.id == self._tag_posts_table.c.post_id
            )
            filters.append(tag_filter)
    if user_id:
        # Post must be authored by user_id through user_posts.
        user_filter = sqla.and_(
            self._user_posts_table.c.user_id == user_id,
            self._post_table.c.id == self._user_posts_table.c.post_id
        )
        filters.append(user_filter)
    draft_filter = self._post_table.c.draft == 1 if include_draft else \
        self._post_table.c.draft == 0
    filters.append(draft_filter)
    sql_filter = sqla.and_(*filters)
    return sql_filter
def _save_tags(self, tags, post_id, conn):
    """Synchronise tag data so ``post_id`` is linked to exactly ``tags``.

    New tags are inserted into the tag table, missing post/tag links are
    added, and links to tags no longer present are removed. Failures on
    individual tags are logged and skipped so one bad tag does not abort
    the whole save.
    """
    tags = self.normalize_tags(tags)  # helper defined outside this view
    tag_ids = []  # ids of tags that should remain linked to the post
    for tag in tags:  # iterate over given tags
        try:
            # check if the tag exists
            statement = self._tag_table.select().where(
                self._tag_table.c.text == tag)
            tag_result = conn.execute(statement).fetchone()
            if tag_result is None:
                # insert if it is a new tag
                tag_insert_statement = self._tag_table.insert().\
                    values(text=tag)
                result = conn.execute(tag_insert_statement)
                tag_id = result.inserted_primary_key[0]
            else:
                # tag already exists
                tag_id = tag_result[0]
        except sqla.exc.IntegrityError as e:
            # some database error occurred;
            tag_id = None
            self._logger.exception(str(e))
        except Exception as e:
            # unknown exception occurred
            tag_id = None
            self._logger.exception(str(e))
        if tag_id is not None:
            # for a valid tag_id
            tag_ids.append(tag_id)
            try:
                # check if given post has tag given by tag_id
                statement = self._tag_posts_table.select().where(
                    sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
                              self._tag_posts_table.c.post_id == post_id))
                tag_post_result = conn.execute(statement).fetchone()
                if tag_post_result is None:
                    # if tag_id not present for the post given by post_id
                    tag_post_statement = self._tag_posts_table.insert().\
                        values(tag_id=tag_id, post_id=post_id)
                    conn.execute(tag_post_statement)
            except sqla.exc.IntegrityError as e:
                self._logger.exception(str(e))
            except Exception as e:
                self._logger.exception(str(e))
    try:
        # remove tags that have been deleted
        statement = self._tag_posts_table.delete().where(
            sqla.and_(sqla.not_(
                self._tag_posts_table.c.tag_id.in_(tag_ids)),
                self._tag_posts_table.c.post_id == post_id
            )
        )
        conn.execute(statement)
    except Exception as e:
        self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
    """Insert or update the author (user -> post) mapping for a post."""
    user_id = str(user_id)  # user ids are stored as strings
    statement = sqla.select([self._user_posts_table]).where(
        self._user_posts_table.c.post_id == post_id)
    result = conn.execute(statement).fetchone()
    if result is None:
        # No author recorded yet for this post: insert the mapping.
        try:
            statement = self._user_posts_table.insert().values(
                user_id=user_id, post_id=post_id)
            conn.execute(statement)
        except Exception as e:
            self._logger.exception(str(e))
    else:
        # Mapping exists; rewrite only if the author actually changed.
        if result[0] != user_id:
            try:
                statement = self._user_posts_table.update().where(
                    self._user_posts_table.c.post_id == post_id). \
                    values(user_id=user_id)
                conn.execute(statement)
            except Exception as e:
                self._logger.exception(str(e))
def _table_name(self, table_name):
    """Return *table_name* with the configured table prefix applied."""
    return "%s%s" % (self._table_prefix, table_name)
def _create_post_table(self):
    """
    Creates the table to store the blog posts.

    Defines the table on first use, or reuses the already-reflected
    definition when the table exists in the database.

    :return:
    """
    with self._engine.begin() as conn:
        post_table_name = self._table_name("post")
        if not conn.dialect.has_table(conn, post_table_name):
            self._post_table = sqla.Table(
                post_table_name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                sqla.Column("title", sqla.String(256)),
                sqla.Column("text", sqla.Text),
                sqla.Column("post_date", sqla.DateTime),
                sqla.Column("last_modified_date", sqla.DateTime),
                # if 1 then make it a draft
                sqla.Column("draft", sqla.SmallInteger, default=0),
                info=self._info  # carries the optional bind_key
            )
            self._logger.debug("Created table with table name %s" %
                               post_table_name)
        else:
            # Table already exists: reuse the definition loaded by
            # metadata.reflect() during construction.
            self._post_table = self._metadata.tables[post_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               post_table_name)
def _create_tag_table(self):
    """
    Creates the table to store blog post tags.

    Defines the table on first use, or reuses the already-reflected
    definition when the table exists in the database.

    :return:
    """
    with self._engine.begin() as conn:
        tag_table_name = self._table_name("tag")
        if not conn.dialect.has_table(conn, tag_table_name):
            self._tag_table = sqla.Table(
                tag_table_name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                # unique + indexed: the tag text is the lookup key
                sqla.Column("text", sqla.String(128), unique=True,
                            index=True),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_table_name)
        else:
            self._tag_table = self._metadata.tables[tag_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_table_name)
def _create_tag_posts_table(self):
    """
    Creates the table to store association info between blog posts and
    tags.
    :return:
    """
    with self._engine.begin() as conn:
        tag_posts_table_name = self._table_name("tag_posts")
        if not conn.dialect.has_table(conn, tag_posts_table_name):
            # Foreign keys use the prefixed table names so constraints
            # target the right (possibly prefixed) tables.
            tag_id_key = self._table_name("tag") + ".id"
            post_id_key = self._table_name("post") + ".id"
            self._tag_posts_table = sqla.Table(
                tag_posts_table_name, self._metadata,
                sqla.Column('tag_id', sqla.Integer,
                            sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                sqla.Column('post_id', sqla.Integer,
                            sqla.ForeignKey(post_id_key,
                                            onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                # A post may carry a given tag at most once.
                sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_posts_table_name)
        else:
            self._tag_posts_table = \
                self._metadata.tables[tag_posts_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_posts_table_name)
def _create_user_posts_table(self):
    """
    Creates the table to store association info between user and blog
    posts.

    The ``<prefix>user_posts`` table maps a free-form user id string to a
    post id (FK into the post table, cascading), with a unique constraint
    on the pair.
    :return:
    """
    with self._engine.begin() as conn:
        user_posts_table_name = self._table_name("user_posts")
        if not conn.dialect.has_table(conn, user_posts_table_name):
            post_id_key = self._table_name("post") + ".id"
            self._user_posts_table = sqla.Table(
                user_posts_table_name, self._metadata,
                # user_id is an opaque string supplied by the caller; no
                # FK because users live outside this storage layer.
                sqla.Column("user_id", sqla.String(128), index=True),
                sqla.Column("post_id", sqla.Integer,
                            sqla.ForeignKey(post_id_key,
                                            onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               user_posts_table_name)
        else:
            # Reflect the already-existing association table.
            self._user_posts_table = \
                self._metadata.tables[user_posts_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               user_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage._create_post_table
|
python
|
def _create_post_table(self):
    """Create the blog-post table, or reflect it if it already exists.

    The resulting ``sqlalchemy.Table`` is stored on ``self._post_table``.
    """
    with self._engine.begin() as conn:
        post_table_name = self._table_name("post")
        if conn.dialect.has_table(conn, post_table_name):
            # Table already present in the database: just reflect it.
            self._post_table = self._metadata.tables[post_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               post_table_name)
        else:
            columns = [
                sqla.Column("id", sqla.Integer, primary_key=True),
                sqla.Column("title", sqla.String(256)),
                sqla.Column("text", sqla.Text),
                sqla.Column("post_date", sqla.DateTime),
                sqla.Column("last_modified_date", sqla.DateTime),
                # if 1 then make it a draft
                sqla.Column("draft", sqla.SmallInteger, default=0),
            ]
            self._post_table = sqla.Table(
                post_table_name, self._metadata, *columns,
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               post_table_name)
|
Creates the table to store the blog posts.
:return:
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L526-L552
| null |
class SQLAStorage(Storage):
    """
    The ``SQLAStorage`` implements the interface specified by the ``Storage``
    class. This class uses SQLAlchemy to implement storage and retrieval of
    data from any of the databases supported by SQLAlchemy.
    """
    # Class-level default; never assigned elsewhere in this chunk --
    # presumably a Flask-SQLAlchemy handle slot. TODO confirm.
    _db = None
    # Shared logger for all storage instances.
    _logger = logging.getLogger("flask-blogging")
def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
             bind=None):
    """
    The constructor for the ``SQLAStorage`` class.

    :param engine: The ``SQLAlchemy`` engine instance created by calling
        ``create_engine``. One can also use Flask-SQLAlchemy, and pass the
        engine property.
    :type engine: object
    :param table_prefix: (Optional) Prefix to use for the tables created
        (default ``""``).
    :type table_prefix: str
    :param metadata: (Optional) The SQLAlchemy MetaData object
    :type metadata: object
    :param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
    :type db: object
    :param bind: (Optional) Reference the database to bind for multiple
        database scenario with binds
    :type bind: str
    """
    self._bind = bind
    # Prefer the Flask-SQLAlchemy object when given; otherwise a raw
    # engine is mandatory.
    if db:
        self._engine = db.get_engine(db.get_app(), bind=self._bind)
        self._metadata = db.metadata
    else:
        if engine is None:
            raise ValueError("Both db and engine args cannot be None")
        self._engine = engine
        self._metadata = metadata or sqla.MetaData()
    # Table ``info`` dict carries the bind key for multi-database setups.
    self._info = {} if self._bind is None else {"bind_key": self._bind}
    self._table_prefix = table_prefix
    self._metadata.reflect(bind=self._engine)
    # Define (or reflect) the four blog tables before emitting DDL.
    self._create_all_tables()
    # automap base and restrict to the required tables here.
    table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
    table_names = [self._table_name(t) for t in table_suffix]
    self._metadata.create_all(bind=self._engine, tables=self.all_tables)
    # Re-reflect into a fresh MetaData so automap only maps our tables.
    meta = sqla.MetaData()
    meta.reflect(bind=self._engine, only=table_names)
    self._Base = automap_base(metadata=meta)
    self._Base.prepare()
    self._inject_models()
    # Notify listeners (blinker signal) that the storage is ready.
    sqla_initialized.send(self, engine=self._engine,
                          table_prefix=self._table_prefix,
                          meta=self.metadata,
                          bind=self._bind)
def _inject_models(self):
    """Expose the automapped Post/Tag classes on this module object."""
    global this
    for public_name, suffix in (("Post", "post"), ("Tag", "tag")):
        model = getattr(self._Base.classes, self._table_name(suffix))
        setattr(this, public_name, model)
        model.__name__ = public_name
@property
def metadata(self):
    # SQLAlchemy MetaData holding the blog table definitions.
    return self._metadata
@property
def post_table(self):
    # Table storing the blog posts themselves.
    return self._post_table
@property
def post_model(self):
    # Automapped ORM class for the (prefixed) post table.
    return getattr(self._Base.classes, self._table_name("post"))
@property
def tag_model(self):
    # Automapped ORM class for the (prefixed) tag table.
    return getattr(self._Base.classes, self._table_name("tag"))
@property
def tag_table(self):
    # Table storing unique tag strings.
    return self._tag_table
@property
def tag_posts_table(self):
    # Join table associating tags with posts.
    return self._tag_posts_table
@property
def user_posts_table(self):
    # Association table mapping user ids to their posts.
    return self._user_posts_table
@property
def all_tables(self):
    # All four blog tables as sqlalchemy.Table objects (used by
    # MetaData.create_all in __init__).
    return [self._post_table, self._tag_table,
            self._user_posts_table, self._tag_posts_table]
@property
def engine(self):
    # The SQLAlchemy engine this storage writes through.
    return self._engine
def save_post(self, title, text, user_id, tags, draft=False,
              post_date=None, last_modified_date=None, meta_data=None,
              post_id=None):
    """
    Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
    is invalid, the post must be inserted into the storage. If ``post_id``
    is a valid id, then the data must be updated.

    :param title: The title of the blog post
    :type title: str
    :param text: The text of the blog post
    :type text: str
    :param user_id: The user identifier
    :type user_id: str
    :param tags: A list of tags
    :type tags: list
    :param draft: (Optional) If the post is a draft of if needs to be
        published. (default ``False``)
    :type draft: bool
    :param post_date: (Optional) The date the blog was posted (default
        datetime.datetime.utcnow() )
    :type post_date: datetime.datetime
    :param last_modified_date: (Optional) The date when blog was last
        modified (default datetime.datetime.utcnow() )
    :type last_modified_date: datetime.datetime
    :param post_id: (Optional) The post identifier. This should be ``None``
        for an insert call, and a valid value for update. (default ``None``)
    :type post_id: str
    :return: The post_id value, in case of a successful insert or update.
        Return ``None`` if there were errors.
    """
    # NOTE(review): meta_data is accepted for interface compatibility but
    # is not persisted by this implementation.
    post_id = _as_int(post_id)
    current_datetime = datetime.datetime.utcnow()
    # The draft column is a SmallInteger; normalise the flag to 0/1.
    draft = 1 if draft is True else 0
    post_date = post_date if post_date is not None else current_datetime
    last_modified_date = last_modified_date if last_modified_date is not \
        None else current_datetime
    with self._engine.begin() as conn:
        try:
            if post_id is not None:  # validate post_id
                exists_statement = sqla.select([self._post_table]).where(
                    self._post_table.c.id == post_id)
                exists = \
                    conn.execute(exists_statement).fetchone() is not None
                # An unknown id falls back to an insert.
                post_id = post_id if exists else None
            post_statement = \
                self._post_table.insert() if post_id is None else \
                self._post_table.update().where(
                    self._post_table.c.id == post_id)
            post_statement = post_statement.values(
                title=title, text=text, post_date=post_date,
                last_modified_date=last_modified_date, draft=draft
            )
            post_result = conn.execute(post_statement)
            post_id = post_result.inserted_primary_key[0] \
                if post_id is None else post_id
            self._save_tags(tags, post_id, conn)
            self._save_user_post(user_id, post_id, conn)
        except Exception as e:
            # Per the contract, any DB failure is logged and reported as
            # a None return rather than raised.
            self._logger.exception(str(e))
            post_id = None
    return post_id
@classmethod
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
    """
    Translates multiple rows of joined post and tag information
    into the dictionary format expected by flask-blogging.
    There will be one row per post/tag pairing.

    :param joined_rows: rows from the post/tag_posts/tag/user_posts join
    :return: list of post dicts (insertion order preserved), each with a
        ``tags`` key when the post has tags
    """
    posts_by_id = OrderedDict()  # keeps first-seen (query) order
    tags_by_post_id = defaultdict(list)
    for joined_row in joined_rows:
        post_id = joined_row.post_id
        post = cls._serialise_post_from_joined_row(joined_row)
        posts_by_id[post_id] = post
        tags_by_post_id[post_id].append(joined_row.tag_text)
    # Attach the collected tags to each post. Use ``key`` rather than
    # ``id`` so the builtin is not shadowed.
    for key, post in posts_by_id.items():
        tags = tags_by_post_id.get(key)
        if tags:
            post["tags"] = tags
    return list(posts_by_id.values())
@staticmethod
def _serialise_post_from_joined_row(joined_row):
    """Map one joined row onto the flat post dict used by flask-blogging."""
    return {
        "post_id": joined_row.post_id,
        "title": joined_row.post_title,
        "text": joined_row.post_text,
        "post_date": joined_row.post_post_date,
        "last_modified_date": joined_row.post_last_modified_date,
        "draft": joined_row.post_draft,
        "user_id": joined_row.user_posts_user_id,
    }
def get_post_by_id(self, post_id):
    """
    Fetch the blog post given by ``post_id``

    :param post_id: The post identifier for the blog post
    :type post_id: str
    :return: If the ``post_id`` is valid, the post data is retrieved, else
        returns ``None``.
    """
    r = None
    post_id = _as_int(post_id)
    with self._engine.begin() as conn:
        try:
            post_statement = sqla.select([self._post_table]) \
                .where(self._post_table.c.id == post_id) \
                .alias('post')
            # Inner joins: a post with no tag or no user row would yield
            # zero rows here -- presumably every post has both. TODO confirm.
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # Note this will retrieve one row per tag
            all_rows = conn.execute(
                sqla.select([joined_statement])
            ).fetchall()
            # [0] is safe only when rows exist; an empty result raises
            # IndexError, which the except below converts to None.
            r = self._serialise_posts_and_tags_from_joined_rows(
                all_rows
            )[0]
        except Exception as e:
            self._logger.exception(str(e))
            r = None
    return r
def get_posts(self, count=10, offset=0, recent=True, tag=None,
              user_id=None, include_draft=False):
    """
    Get posts given by filter criteria

    :param count: The number of posts to retrieve (default 10)
    :type count: int
    :param offset: The number of posts to offset (default 0)
    :type offset: int
    :param recent: Order by recent posts or not
    :type recent: bool
    :param tag: Filter by a specific tag
    :type tag: str
    :param user_id: Filter by a specific user
    :type user_id: str
    :param include_draft: Whether to include posts marked as draft or not
    :type include_draft: bool
    :return: A list of posts, with each element a dict containing values
        for the following keys: (title, text, draft, post_date,
        last_modified_date). If count is ``None``, then all the posts are
        returned.
    """
    user_id = str(user_id) if user_id else user_id
    with self._engine.begin() as conn:
        try:
            # post_statement ensures the correct posts are selected
            # in the correct order
            post_statement = sqla.select([self._post_table])
            post_filter = self._get_filter(
                tag, user_id, include_draft, conn
            )
            if post_filter is not None:
                post_statement = post_statement.where(post_filter)
            # limit/offset are applied to the post subquery so paging
            # counts posts, not post/tag join rows.
            if count:
                post_statement = post_statement.limit(count)
            if offset:
                post_statement = post_statement.offset(offset)
            post_ordering = \
                sqla.desc(self._post_table.c.post_date) if recent \
                else self._post_table.c.post_date
            post_statement = post_statement.order_by(post_ordering)
            post_statement = post_statement.alias('post')
            # joined_statement ensures other data is retrieved
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # Re-apply the ordering on the outer query: the join may not
            # preserve the subquery's row order.
            joined_ordering = \
                sqla.desc(joined_statement.c.post_post_date) if recent \
                else joined_statement.c.post_post_date
            joined_statement = sqla.select([joined_statement]) \
                .order_by(joined_ordering)
            all_rows = conn.execute(joined_statement).fetchall()
            result = \
                self._serialise_posts_and_tags_from_joined_rows(all_rows)
        except Exception as e:
            # On any failure, log and return an empty result set.
            self._logger.exception(str(e))
            result = []
    return result
def count_posts(self, tag=None, user_id=None, include_draft=False):
    """
    Returns the total number of posts for the given filter

    :param tag: Filter by a specific tag
    :type tag: str
    :param user_id: Filter by a specific user
    :type user_id: str
    :param include_draft: Whether to include posts marked as draft or not
    :type include_draft: bool
    :return: The number of posts for the given filter.
    """
    result = 0
    with self._engine.begin() as conn:
        try:
            # SELECT COUNT(*) over the post table with the same filter
            # used by get_posts.
            count_statement = sqla.select([sqla.func.count()]). \
                select_from(self._post_table)
            sql_filter = self._get_filter(tag, user_id, include_draft,
                                          conn)
            count_statement = count_statement.where(sql_filter)
            result = conn.execute(count_statement).scalar()
        except Exception as e:
            # Errors are logged and reported as a zero count.
            self._logger.exception(str(e))
            result = 0
    return result
def delete_post(self, post_id):
    """
    Delete the post defined by ``post_id``

    :param post_id: The identifier corresponding to a post
    :type post_id: int
    :return: Returns True if the post was successfully deleted and False
        otherwise.
    """
    # Each delete is attempted independently; success counts how many of
    # the three statements executed without error.
    success = 0
    post_id = _as_int(post_id)
    with self._engine.begin() as conn:
        try:
            post_del_statement = self._post_table.delete().where(
                self._post_table.c.id == post_id)
            conn.execute(post_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        try:
            user_posts_del_statement = self._user_posts_table.delete(). \
                where(self._user_posts_table.c.post_id == post_id)
            conn.execute(user_posts_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        try:
            tag_posts_del_statement = self._tag_posts_table.delete(). \
                where(self._tag_posts_table.c.post_id == post_id)
            conn.execute(tag_posts_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
    # All three statements must have run cleanly for the deletion to be
    # reported as successful.
    return success == 3
def _get_filter(self, tag, user_id, include_draft, conn):
    """Build the SQLAlchemy filter clause shared by get_posts/count_posts.

    :param tag: optional tag text (matched case-insensitively by
        uppercasing; tag texts are stored uppercased by normalize_tags --
        TODO confirm against Storage.normalize_tags)
    :param user_id: optional user id string
    :param include_draft: True selects drafts only, False published only
    :param conn: open connection used to resolve the tag id
    :return: an ``sqla.and_`` clause combining the active filters
    """
    filters = []
    if tag:
        tag = tag.upper()
        tag_statement = sqla.select([self._tag_table.c.id]).where(
            self._tag_table.c.text == tag)
        tag_result = conn.execute(tag_statement).fetchone()
        # NOTE(review): if the tag does not exist, no tag filter is
        # added, so the query falls back to *all* posts rather than
        # none -- verify this is the intended behavior.
        if tag_result is not None:
            tag_id = tag_result[0]
            tag_filter = sqla.and_(
                self._tag_posts_table.c.tag_id == tag_id,
                self._post_table.c.id == self._tag_posts_table.c.post_id
            )
            filters.append(tag_filter)
    if user_id:
        user_filter = sqla.and_(
            self._user_posts_table.c.user_id == user_id,
            self._post_table.c.id == self._user_posts_table.c.post_id
        )
        filters.append(user_filter)
    # Note: include_draft=True selects only drafts (draft == 1), it does
    # not mix drafts with published posts.
    draft_filter = self._post_table.c.draft == 1 if include_draft else \
        self._post_table.c.draft == 0
    filters.append(draft_filter)
    sql_filter = sqla.and_(*filters)
    return sql_filter
def _save_tags(self, tags, post_id, conn):
    """Synchronise the tag and tag_posts tables for a post.

    Inserts unknown tags, links each tag to ``post_id``, and removes
    associations for tags no longer on the post. Errors on individual
    tags are logged and skipped.

    :param tags: raw tag list (normalised via ``self.normalize_tags``)
    :param post_id: id of the post being saved
    :param conn: open connection within the caller's transaction
    """
    tags = self.normalize_tags(tags)
    tag_ids = []
    for tag in tags:  # iterate over given tags
        try:
            # check if the tag exists
            statement = self._tag_table.select().where(
                self._tag_table.c.text == tag)
            tag_result = conn.execute(statement).fetchone()
            if tag_result is None:
                # insert if it is a new tag
                tag_insert_statement = self._tag_table.insert().\
                    values(text=tag)
                result = conn.execute(tag_insert_statement)
                tag_id = result.inserted_primary_key[0]
            else:
                # tag already exists
                tag_id = tag_result[0]
        except sqla.exc.IntegrityError as e:
            # some database error occurred;
            tag_id = None
            self._logger.exception(str(e))
        except Exception as e:
            # unknown exception occurred
            tag_id = None
            self._logger.exception(str(e))
        if tag_id is not None:
            # for a valid tag_id
            tag_ids.append(tag_id)
            try:
                # check if given post has tag given by tag_id
                statement = self._tag_posts_table.select().where(
                    sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
                              self._tag_posts_table.c.post_id == post_id))
                tag_post_result = conn.execute(statement).fetchone()
                if tag_post_result is None:
                    # if tag_id not present for the post given by post_id
                    tag_post_statement = self._tag_posts_table.insert().\
                        values(tag_id=tag_id, post_id=post_id)
                    conn.execute(tag_post_statement)
            except sqla.exc.IntegrityError as e:
                self._logger.exception(str(e))
            except Exception as e:
                self._logger.exception(str(e))
    try:
        # remove tags that have been deleted
        statement = self._tag_posts_table.delete().where(
            sqla.and_(sqla.not_(
                self._tag_posts_table.c.tag_id.in_(tag_ids)),
                self._tag_posts_table.c.post_id == post_id
            )
        )
        conn.execute(statement)
    except Exception as e:
        self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
    """Insert or update the user_posts row linking ``user_id`` to a post.

    A post is linked to exactly one user: a missing row is inserted, a
    row with a different user is updated in place. Errors are logged and
    swallowed.

    :param user_id: owner of the post (stringified before storage)
    :param post_id: id of the post being saved
    :param conn: open connection within the caller's transaction
    """
    user_id = str(user_id)
    statement = sqla.select([self._user_posts_table]).where(
        self._user_posts_table.c.post_id == post_id)
    result = conn.execute(statement).fetchone()
    if result is None:
        try:
            statement = self._user_posts_table.insert().values(
                user_id=user_id, post_id=post_id)
            conn.execute(statement)
        except Exception as e:
            self._logger.exception(str(e))
    else:
        # result[0] is the stored user_id column of the existing row.
        if result[0] != user_id:
            try:
                statement = self._user_posts_table.update().where(
                    self._user_posts_table.c.post_id == post_id). \
                    values(user_id=user_id)
                conn.execute(statement)
            except Exception as e:
                self._logger.exception(str(e))
def _table_name(self, table_name):
    """Return *table_name* with the configured table prefix prepended."""
    return "".join((self._table_prefix, table_name))
def _create_all_tables(self):
    """
    Creates all the required tables by calling the required functions.
    :return:
    """
    # Parent tables (post, tag) are defined before the join tables so
    # their foreign-key targets exist.
    builders = (
        self._create_post_table,
        self._create_tag_table,
        self._create_tag_posts_table,
        self._create_user_posts_table,
    )
    for build in builders:
        build()
def _create_tag_table(self):
    """
    Creates the table to store blog post tags.

    Creates the ``<prefix>tag`` table when it does not yet exist, or
    reflects the existing table from ``self._metadata`` otherwise. The
    resulting ``sqlalchemy.Table`` is stored on ``self._tag_table``.
    :return:
    """
    with self._engine.begin() as conn:
        tag_table_name = self._table_name("tag")
        if not conn.dialect.has_table(conn, tag_table_name):
            self._tag_table = sqla.Table(
                tag_table_name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                # Tag text is unique + indexed: each tag is stored once
                # and looked up by text.
                sqla.Column("text", sqla.String(128), unique=True,
                            index=True),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_table_name)
        else:
            # Table already present in the database: reflect instead of
            # re-creating it.
            self._tag_table = self._metadata.tables[tag_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_table_name)
def _create_tag_posts_table(self):
    """
    Creates the table to store association info between blog posts and
    tags.

    The ``<prefix>tag_posts`` join table carries foreign keys into the
    tag and post tables with ON UPDATE / ON DELETE CASCADE, and a unique
    constraint so each (tag, post) pair is stored at most once.
    :return:
    """
    with self._engine.begin() as conn:
        tag_posts_table_name = self._table_name("tag_posts")
        if not conn.dialect.has_table(conn, tag_posts_table_name):
            # Foreign-key targets must use the prefixed table names.
            tag_id_key = self._table_name("tag") + ".id"
            post_id_key = self._table_name("post") + ".id"
            self._tag_posts_table = sqla.Table(
                tag_posts_table_name, self._metadata,
                sqla.Column('tag_id', sqla.Integer,
                            sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                sqla.Column('post_id', sqla.Integer,
                            sqla.ForeignKey(post_id_key,
                                            onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                # A post can carry a given tag only once.
                sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_posts_table_name)
        else:
            # Reflect the already-existing join table.
            self._tag_posts_table = \
                self._metadata.tables[tag_posts_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_posts_table_name)
def _create_user_posts_table(self):
    """
    Creates the table to store association info between user and blog
    posts.

    The ``<prefix>user_posts`` table maps a free-form user id string to a
    post id (FK into the post table, cascading), with a unique constraint
    on the pair.
    :return:
    """
    with self._engine.begin() as conn:
        user_posts_table_name = self._table_name("user_posts")
        if not conn.dialect.has_table(conn, user_posts_table_name):
            post_id_key = self._table_name("post") + ".id"
            self._user_posts_table = sqla.Table(
                user_posts_table_name, self._metadata,
                # user_id is an opaque string supplied by the caller; no
                # FK because users live outside this storage layer.
                sqla.Column("user_id", sqla.String(128), index=True),
                sqla.Column("post_id", sqla.Integer,
                            sqla.ForeignKey(post_id_key,
                                            onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               user_posts_table_name)
        else:
            # Reflect the already-existing association table.
            self._user_posts_table = \
                self._metadata.tables[user_posts_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               user_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage._create_tag_table
|
python
|
def _create_tag_table(self):
    """Create the tag table, or reflect it if it already exists.

    The resulting ``sqlalchemy.Table`` is stored on ``self._tag_table``.
    """
    with self._engine.begin() as conn:
        tag_table_name = self._table_name("tag")
        if conn.dialect.has_table(conn, tag_table_name):
            # Table already present in the database: just reflect it.
            self._tag_table = self._metadata.tables[tag_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_table_name)
        else:
            columns = [
                sqla.Column("id", sqla.Integer, primary_key=True),
                # Each tag text is stored exactly once.
                sqla.Column("text", sqla.String(128), unique=True,
                            index=True),
            ]
            self._tag_table = sqla.Table(
                tag_table_name, self._metadata, *columns,
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_table_name)
|
Creates the table to store blog post tags.
:return:
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L554-L574
| null |
class SQLAStorage(Storage):
"""
The ``SQLAStorage`` implements the interface specified by the ``Storage``
class. This class uses SQLAlchemy to implement storage and retrieval of
data from any of the databases supported by SQLAlchemy.
"""
_db = None
_logger = logging.getLogger("flask-blogging")
def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
bind=None):
"""
The constructor for the ``SQLAStorage`` class.
:param engine: The ``SQLAlchemy`` engine instance created by calling
``create_engine``. One can also use Flask-SQLAlchemy, and pass the
engine property.
:type engine: object
:param table_prefix: (Optional) Prefix to use for the tables created
(default ``""``).
:type table_prefix: str
:param metadata: (Optional) The SQLAlchemy MetaData object
:type metadata: object
:param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
:type db: object
:param bind: (Optional) Reference the database to bind for multiple
database scenario with binds
:type bind: str
"""
self._bind = bind
if db:
self._engine = db.get_engine(db.get_app(), bind=self._bind)
self._metadata = db.metadata
else:
if engine is None:
raise ValueError("Both db and engine args cannot be None")
self._engine = engine
self._metadata = metadata or sqla.MetaData()
self._info = {} if self._bind is None else {"bind_key": self._bind}
self._table_prefix = table_prefix
self._metadata.reflect(bind=self._engine)
self._create_all_tables()
# automap base and restrict to the required tables here.
table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
table_names = [self._table_name(t) for t in table_suffix]
self._metadata.create_all(bind=self._engine, tables=self.all_tables)
meta = sqla.MetaData()
meta.reflect(bind=self._engine, only=table_names)
self._Base = automap_base(metadata=meta)
self._Base.prepare()
self._inject_models()
sqla_initialized.send(self, engine=self._engine,
table_prefix=self._table_prefix,
meta=self.metadata,
bind=self._bind)
def _inject_models(self):
global this
this.Post = getattr(self._Base.classes, self._table_name("post"))
this.Post.__name__ = 'Post'
this.Tag = getattr(self._Base.classes, self._table_name("tag"))
this.Tag.__name__ = 'Tag'
@property
def metadata(self):
return self._metadata
@property
def post_table(self):
return self._post_table
@property
def post_model(self):
return getattr(self._Base.classes, self._table_name("post"))
@property
def tag_model(self):
return getattr(self._Base.classes, self._table_name("tag"))
@property
def tag_table(self):
return self._tag_table
@property
def tag_posts_table(self):
return self._tag_posts_table
@property
def user_posts_table(self):
return self._user_posts_table
@property
def all_tables(self):
return [self._post_table, self._tag_table,
self._user_posts_table, self._tag_posts_table]
@property
def engine(self):
return self._engine
def save_post(self, title, text, user_id, tags, draft=False,
post_date=None, last_modified_date=None, meta_data=None,
post_id=None):
"""
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
is invalid, the post must be inserted into the storage. If ``post_id``
is a valid id, then the data must be updated.
:param title: The title of the blog post
:type title: str
:param text: The text of the blog post
:type text: str
:param user_id: The user identifier
:type user_id: str
:param tags: A list of tags
:type tags: list
:param draft: (Optional) If the post is a draft of if needs to be
published. (default ``False``)
:type draft: bool
:param post_date: (Optional) The date the blog was posted (default
datetime.datetime.utcnow() )
:type post_date: datetime.datetime
:param last_modified_date: (Optional) The date when blog was last
modified (default datetime.datetime.utcnow() )
:type last_modified_date: datetime.datetime
:param post_id: (Optional) The post identifier. This should be ``None``
for an insert call,
and a valid value for update. (default ``None``)
:type post_id: str
:return: The post_id value, in case of a successful insert or update.
Return ``None`` if there were errors.
"""
new_post = post_id is None
post_id = _as_int(post_id)
current_datetime = datetime.datetime.utcnow()
draft = 1 if draft is True else 0
post_date = post_date if post_date is not None else current_datetime
last_modified_date = last_modified_date if last_modified_date is not \
None else current_datetime
with self._engine.begin() as conn:
try:
if post_id is not None: # validate post_id
exists_statement = sqla.select([self._post_table]).where(
self._post_table.c.id == post_id)
exists = \
conn.execute(exists_statement).fetchone() is not None
post_id = post_id if exists else None
post_statement = \
self._post_table.insert() if post_id is None else \
self._post_table.update().where(
self._post_table.c.id == post_id)
post_statement = post_statement.values(
title=title, text=text, post_date=post_date,
last_modified_date=last_modified_date, draft=draft
)
post_result = conn.execute(post_statement)
post_id = post_result.inserted_primary_key[0] \
if post_id is None else post_id
self._save_tags(tags, post_id, conn)
self._save_user_post(user_id, post_id, conn)
except Exception as e:
self._logger.exception(str(e))
post_id = None
return post_id
@classmethod
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
"""
Translates multiple rows of joined post and tag information
into the dictionary format expected by flask-blogging.
There will be one row per post/tag pairing.
"""
posts_by_id = OrderedDict()
tags_by_post_id = defaultdict(list)
for joined_row in joined_rows:
post_id = joined_row.post_id
post = cls._serialise_post_from_joined_row(joined_row)
posts_by_id[post_id] = post
tags_by_post_id[post_id].append(joined_row.tag_text)
for id, post in posts_by_id.items():
tags = tags_by_post_id.get(id)
if tags:
post["tags"] = tags
return [post for post in posts_by_id.values()]
@staticmethod
def _serialise_post_from_joined_row(joined_row):
return dict(
post_id=joined_row.post_id,
title=joined_row.post_title,
text=joined_row.post_text,
post_date=joined_row.post_post_date,
last_modified_date=joined_row.post_last_modified_date,
draft=joined_row.post_draft,
user_id=joined_row.user_posts_user_id
)
def get_post_by_id(self, post_id):
"""
Fetch the blog post given by ``post_id``
:param post_id: The post identifier for the blog post
:type post_id: str
:return: If the ``post_id`` is valid, the post data is retrieved, else
returns ``None``.
"""
r = None
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_statement = sqla.select([self._post_table]) \
.where(self._post_table.c.id == post_id) \
.alias('post')
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
# Note this will retrieve one row per tag
all_rows = conn.execute(
sqla.select([joined_statement])
).fetchall()
r = self._serialise_posts_and_tags_from_joined_rows(
all_rows
)[0]
except Exception as e:
self._logger.exception(str(e))
r = None
return r
def get_posts(self, count=10, offset=0, recent=True, tag=None,
user_id=None, include_draft=False):
"""
Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10)
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned.
"""
user_id = str(user_id) if user_id else user_id
with self._engine.begin() as conn:
try:
# post_statement ensures the correct posts are selected
# in the correct order
post_statement = sqla.select([self._post_table])
post_filter = self._get_filter(
tag, user_id, include_draft, conn
)
if post_filter is not None:
post_statement = post_statement.where(post_filter)
if count:
post_statement = post_statement.limit(count)
if offset:
post_statement = post_statement.offset(offset)
post_ordering = \
sqla.desc(self._post_table.c.post_date) if recent \
else self._post_table.c.post_date
post_statement = post_statement.order_by(post_ordering)
post_statement = post_statement.alias('post')
# joined_statement ensures other data is retrieved
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
joined_ordering = \
sqla.desc(joined_statement.c.post_post_date) if recent \
else joined_statement.c.post_post_date
joined_statement = sqla.select([joined_statement]) \
.order_by(joined_ordering)
all_rows = conn.execute(joined_statement).fetchall()
result = \
self._serialise_posts_and_tags_from_joined_rows(all_rows)
except Exception as e:
self._logger.exception(str(e))
result = []
return result
def count_posts(self, tag=None, user_id=None, include_draft=False):
"""
Returns the total number of posts for the give filter
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: The number of posts for the given filter.
"""
result = 0
with self._engine.begin() as conn:
try:
count_statement = sqla.select([sqla.func.count()]). \
select_from(self._post_table)
sql_filter = self._get_filter(tag, user_id, include_draft,
conn)
count_statement = count_statement.where(sql_filter)
result = conn.execute(count_statement).scalar()
except Exception as e:
self._logger.exception(str(e))
result = 0
return result
def delete_post(self, post_id):
"""
Delete the post defined by ``post_id``
:param post_id: The identifier corresponding to a post
:type post_id: int
:return: Returns True if the post was successfully deleted and False
otherwise.
"""
status = False
success = 0
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_del_statement = self._post_table.delete().where(
self._post_table.c.id == post_id)
conn.execute(post_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
user_posts_del_statement = self._user_posts_table.delete(). \
where(self._user_posts_table.c.post_id == post_id)
conn.execute(user_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
try:
tag_posts_del_statement = self._tag_posts_table.delete(). \
where(self._tag_posts_table.c.post_id == post_id)
conn.execute(tag_posts_del_statement)
success += 1
except Exception as e:
self._logger.exception(str(e))
status = success == 3
return status
def _get_filter(self, tag, user_id, include_draft, conn):
filters = []
if tag:
tag = tag.upper()
tag_statement = sqla.select([self._tag_table.c.id]).where(
self._tag_table.c.text == tag)
tag_result = conn.execute(tag_statement).fetchone()
if tag_result is not None:
tag_id = tag_result[0]
tag_filter = sqla.and_(
self._tag_posts_table.c.tag_id == tag_id,
self._post_table.c.id == self._tag_posts_table.c.post_id
)
filters.append(tag_filter)
if user_id:
user_filter = sqla.and_(
self._user_posts_table.c.user_id == user_id,
self._post_table.c.id == self._user_posts_table.c.post_id
)
filters.append(user_filter)
draft_filter = self._post_table.c.draft == 1 if include_draft else \
self._post_table.c.draft == 0
filters.append(draft_filter)
sql_filter = sqla.and_(*filters)
return sql_filter
def _save_tags(self, tags, post_id, conn):
tags = self.normalize_tags(tags)
tag_ids = []
for tag in tags: # iterate over given tags
try:
# check if the tag exists
statement = self._tag_table.select().where(
self._tag_table.c.text == tag)
tag_result = conn.execute(statement).fetchone()
if tag_result is None:
# insert if it is a new tag
tag_insert_statement = self._tag_table.insert().\
values(text=tag)
result = conn.execute(tag_insert_statement)
tag_id = result.inserted_primary_key[0]
else:
# tag already exists
tag_id = tag_result[0]
except sqla.exc.IntegrityError as e:
# some database error occurred;
tag_id = None
self._logger.exception(str(e))
except Exception as e:
# unknown exception occurred
tag_id = None
self._logger.exception(str(e))
if tag_id is not None:
# for a valid tag_id
tag_ids.append(tag_id)
try:
# check if given post has tag given by tag_id
statement = self._tag_posts_table.select().where(
sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
self._tag_posts_table.c.post_id == post_id))
tag_post_result = conn.execute(statement).fetchone()
if tag_post_result is None:
# if tag_id not present for the post given by post_id
tag_post_statement = self._tag_posts_table.insert().\
values(tag_id=tag_id, post_id=post_id)
conn.execute(tag_post_statement)
except sqla.exc.IntegrityError as e:
self._logger.exception(str(e))
except Exception as e:
self._logger.exception(str(e))
try:
# remove tags that have been deleted
statement = self._tag_posts_table.delete().where(
sqla.and_(sqla.not_(
self._tag_posts_table.c.tag_id.in_(tag_ids)),
self._tag_posts_table.c.post_id == post_id
)
)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
    """Record *user_id* as the author of the post *post_id*.

    Inserts a new association row when none exists, or rewrites the
    stored author when it differs from *user_id*. Errors are logged
    and swallowed so a failed author update does not propagate.
    """
    user_id = str(user_id)
    table = self._user_posts_table
    lookup = sqla.select([table]).where(table.c.post_id == post_id)
    existing = conn.execute(lookup).fetchone()
    if existing is None:
        # no author recorded yet for this post
        try:
            conn.execute(table.insert().values(
                user_id=user_id, post_id=post_id))
        except Exception as e:
            self._logger.exception(str(e))
    elif existing[0] != user_id:
        # author changed; rewrite the association row
        try:
            conn.execute(table.update().where(
                table.c.post_id == post_id).values(user_id=user_id))
        except Exception as e:
            self._logger.exception(str(e))
def _table_name(self, table_name):
return self._table_prefix + table_name
def _create_all_tables(self):
    """Create (or reflect) every table the storage backend needs."""
    builders = (
        self._create_post_table,
        self._create_tag_table,
        self._create_tag_posts_table,
        self._create_user_posts_table,
    )
    for build in builders:
        build()
def _create_post_table(self):
    """Create the ``post`` table if absent, else reflect the existing one.

    The table stores the blog entries themselves: title, body text,
    post/modification timestamps and the draft flag.
    """
    with self._engine.begin() as conn:
        name = self._table_name("post")
        if conn.dialect.has_table(conn, name):
            # table already present in the database: bind to it
            self._post_table = self._metadata.tables[name]
            self._logger.debug("Reflecting to table with table name %s" %
                               name)
            return
        self._post_table = sqla.Table(
            name, self._metadata,
            sqla.Column("id", sqla.Integer, primary_key=True),
            sqla.Column("title", sqla.String(256)),
            sqla.Column("text", sqla.Text),
            sqla.Column("post_date", sqla.DateTime),
            sqla.Column("last_modified_date", sqla.DateTime),
            # a value of 1 marks the post as an unpublished draft
            sqla.Column("draft", sqla.SmallInteger, default=0),
            info=self._info
        )
        self._logger.debug("Created table with table name %s" % name)
def _create_tag_posts_table(self):
    """
    Creates the table to store association info between blog posts and
    tags.
    :return:
    """
    with self._engine.begin() as conn:
        tag_posts_table_name = self._table_name("tag_posts")
        if not conn.dialect.has_table(conn, tag_posts_table_name):
            # foreign keys reference the (possibly prefixed) tag/post tables
            tag_id_key = self._table_name("tag") + ".id"
            post_id_key = self._table_name("post") + ".id"
            self._tag_posts_table = sqla.Table(
                tag_posts_table_name, self._metadata,
                sqla.Column('tag_id', sqla.Integer,
                            sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                sqla.Column('post_id', sqla.Integer,
                            sqla.ForeignKey(post_id_key,
                                            onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                # each (tag, post) pair may be linked at most once
                sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_posts_table_name)
        else:
            # table already exists: reflect the stored definition
            self._tag_posts_table = \
                self._metadata.tables[tag_posts_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_posts_table_name)
def _create_user_posts_table(self):
    """
    Creates the table to store association info between user and blog
    posts.
    :return:
    """
    with self._engine.begin() as conn:
        user_posts_table_name = self._table_name("user_posts")
        if not conn.dialect.has_table(conn, user_posts_table_name):
            # posts cascade deletes/updates into this association table
            post_id_key = self._table_name("post") + ".id"
            self._user_posts_table = sqla.Table(
                user_posts_table_name, self._metadata,
                sqla.Column("user_id", sqla.String(128), index=True),
                sqla.Column("post_id", sqla.Integer,
                            sqla.ForeignKey(post_id_key,
                                            onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                # each (user, post) pair may be linked at most once
                sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               user_posts_table_name)
        else:
            # table already exists: reflect the stored definition
            self._user_posts_table = \
                self._metadata.tables[user_posts_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               user_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage._create_tag_posts_table
|
python
|
def _create_tag_posts_table(self):
with self._engine.begin() as conn:
tag_posts_table_name = self._table_name("tag_posts")
if not conn.dialect.has_table(conn, tag_posts_table_name):
tag_id_key = self._table_name("tag") + ".id"
post_id_key = self._table_name("post") + ".id"
self._tag_posts_table = sqla.Table(
tag_posts_table_name, self._metadata,
sqla.Column('tag_id', sqla.Integer,
sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.Column('post_id', sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
tag_posts_table_name)
else:
self._tag_posts_table = \
self._metadata.tables[tag_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
tag_posts_table_name)
|
Creates the table to store association info between blog posts and
tags.
:return:
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L576-L607
| null |
class SQLAStorage(Storage):
"""
The ``SQLAStorage`` implements the interface specified by the ``Storage``
class. This class uses SQLAlchemy to implement storage and retrieval of
data from any of the databases supported by SQLAlchemy.
"""
_db = None
_logger = logging.getLogger("flask-blogging")
def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
             bind=None):
    """
    The constructor for the ``SQLAStorage`` class.
    :param engine: The ``SQLAlchemy`` engine instance created by calling
        ``create_engine``. One can also use Flask-SQLAlchemy, and pass the
        engine property.
    :type engine: object
    :param table_prefix: (Optional) Prefix to use for the tables created
        (default ``""``).
    :type table_prefix: str
    :param metadata: (Optional) The SQLAlchemy MetaData object
    :type metadata: object
    :param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
    :type db: object
    :param bind: (Optional) Reference the database to bind for multiple
        database scenario with binds
    :type bind: str
    """
    self._bind = bind
    if db:
        # Flask-SQLAlchemy path: borrow its engine and shared metadata
        self._engine = db.get_engine(db.get_app(), bind=self._bind)
        self._metadata = db.metadata
    else:
        if engine is None:
            raise ValueError("Both db and engine args cannot be None")
        self._engine = engine
        self._metadata = metadata or sqla.MetaData()
    # attached to every created table so the bind key is carried along
    self._info = {} if self._bind is None else {"bind_key": self._bind}
    self._table_prefix = table_prefix
    self._metadata.reflect(bind=self._engine)
    self._create_all_tables()
    # automap base and restrict to the required tables here.
    table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
    table_names = [self._table_name(t) for t in table_suffix]
    self._metadata.create_all(bind=self._engine, tables=self.all_tables)
    # reflect only the storage tables into a fresh MetaData for automap
    meta = sqla.MetaData()
    meta.reflect(bind=self._engine, only=table_names)
    self._Base = automap_base(metadata=meta)
    self._Base.prepare()
    self._inject_models()
    # broadcast that the storage layer has finished initialising
    sqla_initialized.send(self, engine=self._engine,
                          table_prefix=self._table_prefix,
                          meta=self.metadata,
                          bind=self._bind)
def _inject_models(self):
    """Publish the automapped ``Post``/``Tag`` ORM classes module-wide.

    NOTE(review): mutates module globals through ``this`` so other code
    can import the mapped classes; the ``__name__`` overrides strip the
    table prefix from the generated class names.
    """
    global this
    this.Post = getattr(self._Base.classes, self._table_name("post"))
    this.Post.__name__ = 'Post'
    this.Tag = getattr(self._Base.classes, self._table_name("tag"))
    this.Tag.__name__ = 'Tag'
@property
def metadata(self):
    """The SQLAlchemy ``MetaData`` instance backing this storage."""
    return self._metadata

@property
def post_table(self):
    """The blog-post table."""
    return self._post_table

@property
def post_model(self):
    """The automapped ORM class for the (prefixed) post table."""
    return getattr(self._Base.classes, self._table_name("post"))

@property
def tag_model(self):
    """The automapped ORM class for the (prefixed) tag table."""
    return getattr(self._Base.classes, self._table_name("tag"))

@property
def tag_table(self):
    """The tag table."""
    return self._tag_table

@property
def tag_posts_table(self):
    """The tag-to-post association table."""
    return self._tag_posts_table

@property
def user_posts_table(self):
    """The user-to-post association table."""
    return self._user_posts_table

@property
def all_tables(self):
    """All four storage tables as a list."""
    return [self._post_table, self._tag_table,
            self._user_posts_table, self._tag_posts_table]

@property
def engine(self):
    """The SQLAlchemy engine used for all connections."""
    return self._engine
def save_post(self, title, text, user_id, tags, draft=False,
              post_date=None, last_modified_date=None, meta_data=None,
              post_id=None):
    """
    Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
    is invalid, the post must be inserted into the storage. If ``post_id``
    is a valid id, then the data must be updated.

    :param title: The title of the blog post
    :type title: str
    :param text: The text of the blog post
    :type text: str
    :param user_id: The user identifier
    :type user_id: str
    :param tags: A list of tags
    :type tags: list
    :param draft: (Optional) If the post is a draft of if needs to be
        published. (default ``False``)
    :type draft: bool
    :param post_date: (Optional) The date the blog was posted (default
        datetime.datetime.utcnow() )
    :type post_date: datetime.datetime
    :param last_modified_date: (Optional) The date when blog was last
        modified (default datetime.datetime.utcnow() )
    :type last_modified_date: datetime.datetime
    :param meta_data: (Optional) Accepted for interface compatibility;
        not persisted by this storage backend.
    :param post_id: (Optional) The post identifier. This should be ``None``
        for an insert call,
        and a valid value for update. (default ``None``)
    :type post_id: str
    :return: The post_id value, in case of a successful insert or update.
        Return ``None`` if there were errors.
    """
    post_id = _as_int(post_id)
    current_datetime = datetime.datetime.utcnow()
    # accept any truthy value as "draft"; the previous identity check
    # (``draft is True``) silently published posts saved with draft=1
    draft = 1 if draft else 0
    post_date = post_date if post_date is not None else current_datetime
    last_modified_date = last_modified_date if last_modified_date is not \
        None else current_datetime
    with self._engine.begin() as conn:
        try:
            if post_id is not None:  # validate post_id
                exists_statement = sqla.select([self._post_table]).where(
                    self._post_table.c.id == post_id)
                exists = \
                    conn.execute(exists_statement).fetchone() is not None
                # fall back to an insert when the id is unknown
                post_id = post_id if exists else None
            post_statement = \
                self._post_table.insert() if post_id is None else \
                self._post_table.update().where(
                    self._post_table.c.id == post_id)
            post_statement = post_statement.values(
                title=title, text=text, post_date=post_date,
                last_modified_date=last_modified_date, draft=draft
            )
            post_result = conn.execute(post_statement)
            post_id = post_result.inserted_primary_key[0] \
                if post_id is None else post_id
            # keep tag and author associations in sync with the post row
            self._save_tags(tags, post_id, conn)
            self._save_user_post(user_id, post_id, conn)
        except Exception as e:
            self._logger.exception(str(e))
            post_id = None
    return post_id
@classmethod
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
    """Collapse joined post/tag rows into flask-blogging post dicts.

    The join yields one row per (post, tag) pairing; this regroups them
    into one dict per post (first-seen order preserved), attaching a
    ``tags`` list when any tags were present.
    """
    posts = OrderedDict()
    post_tags = defaultdict(list)
    for row in joined_rows:
        pid = row.post_id
        posts[pid] = cls._serialise_post_from_joined_row(row)
        post_tags[pid].append(row.tag_text)
    for pid, post in posts.items():
        tags = post_tags.get(pid)
        if tags:
            post["tags"] = tags
    return list(posts.values())
@staticmethod
def _serialise_post_from_joined_row(joined_row):
    """Map one joined result row onto the flask-blogging post-dict keys."""
    row = joined_row
    return {
        "post_id": row.post_id,
        "title": row.post_title,
        "text": row.post_text,
        "post_date": row.post_post_date,
        "last_modified_date": row.post_last_modified_date,
        "draft": row.post_draft,
        "user_id": row.user_posts_user_id,
    }
def get_post_by_id(self, post_id):
    """
    Fetch the blog post given by ``post_id``
    :param post_id: The post identifier for the blog post
    :type post_id: str
    :return: If the ``post_id`` is valid, the post data is retrieved, else
        returns ``None``.
    """
    r = None
    post_id = _as_int(post_id)
    with self._engine.begin() as conn:
        try:
            post_statement = sqla.select([self._post_table]) \
                .where(self._post_table.c.id == post_id) \
                .alias('post')
            # inner joins: a post lacking tag or author rows will yield no
            # rows here -- NOTE(review) confirm this is intended
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # Note this will retrieve one row per tag
            all_rows = conn.execute(
                sqla.select([joined_statement])
            ).fetchall()
            # [0]: at most one post matches the id filter; an empty result
            # raises IndexError, caught below to return None
            r = self._serialise_posts_and_tags_from_joined_rows(
                all_rows
            )[0]
        except Exception as e:
            self._logger.exception(str(e))
            r = None
    return r
def get_posts(self, count=10, offset=0, recent=True, tag=None,
              user_id=None, include_draft=False):
    """
    Get posts given by filter criteria
    :param count: The number of posts to retrieve (default 10)
    :type count: int
    :param offset: The number of posts to offset (default 0)
    :type offset: int
    :param recent: Order by recent posts or not
    :type recent: bool
    :param tag: Filter by a specific tag
    :type tag: str
    :param user_id: Filter by a specific user
    :type user_id: str
    :param include_draft: Whether to include posts marked as draft or not
    :type include_draft: bool
    :return: A list of posts, with each element a dict containing values
        for the following keys: (title, text, draft, post_date,
        last_modified_date). If count is ``None``, then all the posts are
        returned.
    """
    user_id = str(user_id) if user_id else user_id
    with self._engine.begin() as conn:
        try:
            # post_statement ensures the correct posts are selected
            # in the correct order
            post_statement = sqla.select([self._post_table])
            post_filter = self._get_filter(
                tag, user_id, include_draft, conn
            )
            if post_filter is not None:
                post_statement = post_statement.where(post_filter)
            # limit/offset are applied to the post query *before* the
            # joins, so pagination counts posts rather than joined rows
            if count:
                post_statement = post_statement.limit(count)
            if offset:
                post_statement = post_statement.offset(offset)
            post_ordering = \
                sqla.desc(self._post_table.c.post_date) if recent \
                else self._post_table.c.post_date
            post_statement = post_statement.order_by(post_ordering)
            post_statement = post_statement.alias('post')
            # joined_statement ensures other data is retrieved
            joined_statement = post_statement.join(self._tag_posts_table) \
                .join(self._tag_table) \
                .join(self._user_posts_table) \
                .alias('join')
            # re-apply the ordering: the outer select over the join does
            # not inherit the inner ORDER BY
            joined_ordering = \
                sqla.desc(joined_statement.c.post_post_date) if recent \
                else joined_statement.c.post_post_date
            joined_statement = sqla.select([joined_statement]) \
                .order_by(joined_ordering)
            all_rows = conn.execute(joined_statement).fetchall()
            result = \
                self._serialise_posts_and_tags_from_joined_rows(all_rows)
        except Exception as e:
            self._logger.exception(str(e))
            result = []
    return result
def count_posts(self, tag=None, user_id=None, include_draft=False):
    """
    Returns the total number of posts for the give filter
    :param tag: Filter by a specific tag
    :type tag: str
    :param user_id: Filter by a specific user
    :type user_id: str
    :param include_draft: Whether to include posts marked as draft or not
    :type include_draft: bool
    :return: The number of posts for the given filter.
    """
    total = 0
    with self._engine.begin() as conn:
        try:
            where_clause = self._get_filter(tag, user_id,
                                            include_draft, conn)
            statement = sqla.select([sqla.func.count()]). \
                select_from(self._post_table).where(where_clause)
            total = conn.execute(statement).scalar()
        except Exception as e:
            self._logger.exception(str(e))
            total = 0
    return total
def delete_post(self, post_id):
    """
    Delete the post defined by ``post_id``
    :param post_id: The identifier corresponding to a post
    :type post_id: int
    :return: Returns True if the post was successfully deleted and False
        otherwise.
    """
    post_id = _as_int(post_id)
    succeeded = 0
    with self._engine.begin() as conn:
        # delete the post row first, then both association tables;
        # each step is attempted independently and failures are logged
        cleanups = (
            self._post_table.delete().where(
                self._post_table.c.id == post_id),
            self._user_posts_table.delete().where(
                self._user_posts_table.c.post_id == post_id),
            self._tag_posts_table.delete().where(
                self._tag_posts_table.c.post_id == post_id),
        )
        for statement in cleanups:
            try:
                conn.execute(statement)
                succeeded += 1
            except Exception as e:
                self._logger.exception(str(e))
    return succeeded == 3
def _get_filter(self, tag, user_id, include_draft, conn):
    """Build the SQL filter clause for post queries.

    AND-combines an optional tag match, an optional author match, and
    the draft/published restriction. An unknown tag simply adds no
    clause (it does not make the filter empty-match).
    """
    clauses = []
    if tag:
        tag = tag.upper()
        lookup = sqla.select([self._tag_table.c.id]).where(
            self._tag_table.c.text == tag)
        row = conn.execute(lookup).fetchone()
        if row is not None:
            clauses.append(sqla.and_(
                self._tag_posts_table.c.tag_id == row[0],
                self._post_table.c.id == self._tag_posts_table.c.post_id
            ))
    if user_id:
        clauses.append(sqla.and_(
            self._user_posts_table.c.user_id == user_id,
            self._post_table.c.id == self._user_posts_table.c.post_id
        ))
    if include_draft:
        clauses.append(self._post_table.c.draft == 1)
    else:
        clauses.append(self._post_table.c.draft == 0)
    return sqla.and_(*clauses)
def _save_tags(self, tags, post_id, conn):
tags = self.normalize_tags(tags)
tag_ids = []
for tag in tags: # iterate over given tags
try:
# check if the tag exists
statement = self._tag_table.select().where(
self._tag_table.c.text == tag)
tag_result = conn.execute(statement).fetchone()
if tag_result is None:
# insert if it is a new tag
tag_insert_statement = self._tag_table.insert().\
values(text=tag)
result = conn.execute(tag_insert_statement)
tag_id = result.inserted_primary_key[0]
else:
# tag already exists
tag_id = tag_result[0]
except sqla.exc.IntegrityError as e:
# some database error occurred;
tag_id = None
self._logger.exception(str(e))
except Exception as e:
# unknown exception occurred
tag_id = None
self._logger.exception(str(e))
if tag_id is not None:
# for a valid tag_id
tag_ids.append(tag_id)
try:
# check if given post has tag given by tag_id
statement = self._tag_posts_table.select().where(
sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
self._tag_posts_table.c.post_id == post_id))
tag_post_result = conn.execute(statement).fetchone()
if tag_post_result is None:
# if tag_id not present for the post given by post_id
tag_post_statement = self._tag_posts_table.insert().\
values(tag_id=tag_id, post_id=post_id)
conn.execute(tag_post_statement)
except sqla.exc.IntegrityError as e:
self._logger.exception(str(e))
except Exception as e:
self._logger.exception(str(e))
try:
# remove tags that have been deleted
statement = self._tag_posts_table.delete().where(
sqla.and_(sqla.not_(
self._tag_posts_table.c.tag_id.in_(tag_ids)),
self._tag_posts_table.c.post_id == post_id
)
)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
user_id = str(user_id)
statement = sqla.select([self._user_posts_table]).where(
self._user_posts_table.c.post_id == post_id)
result = conn.execute(statement).fetchone()
if result is None:
try:
statement = self._user_posts_table.insert().values(
user_id=user_id, post_id=post_id)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
else:
if result[0] != user_id:
try:
statement = self._user_posts_table.update().where(
self._user_posts_table.c.post_id == post_id). \
values(user_id=user_id)
conn.execute(statement)
except Exception as e:
self._logger.exception(str(e))
def _table_name(self, table_name):
return self._table_prefix + table_name
def _create_all_tables(self):
"""
Creates all the required tables by calling the required functions.
:return:
"""
self._create_post_table()
self._create_tag_table()
self._create_tag_posts_table()
self._create_user_posts_table()
def _create_post_table(self):
"""
Creates the table to store the blog posts.
:return:
"""
with self._engine.begin() as conn:
post_table_name = self._table_name("post")
if not conn.dialect.has_table(conn, post_table_name):
self._post_table = sqla.Table(
post_table_name, self._metadata,
sqla.Column("id", sqla.Integer, primary_key=True),
sqla.Column("title", sqla.String(256)),
sqla.Column("text", sqla.Text),
sqla.Column("post_date", sqla.DateTime),
sqla.Column("last_modified_date", sqla.DateTime),
# if 1 then make it a draft
sqla.Column("draft", sqla.SmallInteger, default=0),
info=self._info
)
self._logger.debug("Created table with table name %s" %
post_table_name)
else:
self._post_table = self._metadata.tables[post_table_name]
self._logger.debug("Reflecting to table with table name %s" %
post_table_name)
def _create_tag_table(self):
    """
    Creates the table to store blog post tags.
    :return:
    """
    with self._engine.begin() as conn:
        tag_table_name = self._table_name("tag")
        if not conn.dialect.has_table(conn, tag_table_name):
            self._tag_table = sqla.Table(
                tag_table_name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                # tag text is unique and indexed for fast lookup
                sqla.Column("text", sqla.String(128), unique=True,
                            index=True),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_table_name)
        else:
            # table already exists: reflect the stored definition
            self._tag_table = self._metadata.tables[tag_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_table_name)
def _create_user_posts_table(self):
"""
Creates the table to store association info between user and blog
posts.
:return:
"""
with self._engine.begin() as conn:
user_posts_table_name = self._table_name("user_posts")
if not conn.dialect.has_table(conn, user_posts_table_name):
post_id_key = self._table_name("post") + ".id"
self._user_posts_table = sqla.Table(
user_posts_table_name, self._metadata,
sqla.Column("user_id", sqla.String(128), index=True),
sqla.Column("post_id", sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
user_posts_table_name)
else:
self._user_posts_table = \
self._metadata.tables[user_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
user_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/sqlastorage.py
|
SQLAStorage._create_user_posts_table
|
python
|
def _create_user_posts_table(self):
with self._engine.begin() as conn:
user_posts_table_name = self._table_name("user_posts")
if not conn.dialect.has_table(conn, user_posts_table_name):
post_id_key = self._table_name("post") + ".id"
self._user_posts_table = sqla.Table(
user_posts_table_name, self._metadata,
sqla.Column("user_id", sqla.String(128), index=True),
sqla.Column("post_id", sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
user_posts_table_name)
else:
self._user_posts_table = \
self._metadata.tables[user_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
user_posts_table_name)
|
Creates the table to store association info between user and blog
posts.
:return:
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/sqlastorage.py#L609-L636
| null |
class SQLAStorage(Storage):
"""
The ``SQLAStorage`` implements the interface specified by the ``Storage``
class. This class uses SQLAlchemy to implement storage and retrieval of
data from any of the databases supported by SQLAlchemy.
"""
_db = None
_logger = logging.getLogger("flask-blogging")
def __init__(self, engine=None, table_prefix="", metadata=None, db=None,
bind=None):
"""
The constructor for the ``SQLAStorage`` class.
:param engine: The ``SQLAlchemy`` engine instance created by calling
``create_engine``. One can also use Flask-SQLAlchemy, and pass the
engine property.
:type engine: object
:param table_prefix: (Optional) Prefix to use for the tables created
(default ``""``).
:type table_prefix: str
:param metadata: (Optional) The SQLAlchemy MetaData object
:type metadata: object
:param db: (Optional) The Flask-SQLAlchemy SQLAlchemy object
:type db: object
:param bind: (Optional) Reference the database to bind for multiple
database scenario with binds
:type bind: str
"""
self._bind = bind
if db:
self._engine = db.get_engine(db.get_app(), bind=self._bind)
self._metadata = db.metadata
else:
if engine is None:
raise ValueError("Both db and engine args cannot be None")
self._engine = engine
self._metadata = metadata or sqla.MetaData()
self._info = {} if self._bind is None else {"bind_key": self._bind}
self._table_prefix = table_prefix
self._metadata.reflect(bind=self._engine)
self._create_all_tables()
# automap base and restrict to the required tables here.
table_suffix = ['post', 'tag', 'user_posts', 'tag_posts']
table_names = [self._table_name(t) for t in table_suffix]
self._metadata.create_all(bind=self._engine, tables=self.all_tables)
meta = sqla.MetaData()
meta.reflect(bind=self._engine, only=table_names)
self._Base = automap_base(metadata=meta)
self._Base.prepare()
self._inject_models()
sqla_initialized.send(self, engine=self._engine,
table_prefix=self._table_prefix,
meta=self.metadata,
bind=self._bind)
def _inject_models(self):
global this
this.Post = getattr(self._Base.classes, self._table_name("post"))
this.Post.__name__ = 'Post'
this.Tag = getattr(self._Base.classes, self._table_name("tag"))
this.Tag.__name__ = 'Tag'
@property
def metadata(self):
return self._metadata
@property
def post_table(self):
return self._post_table
@property
def post_model(self):
return getattr(self._Base.classes, self._table_name("post"))
@property
def tag_model(self):
return getattr(self._Base.classes, self._table_name("tag"))
@property
def tag_table(self):
return self._tag_table
@property
def tag_posts_table(self):
return self._tag_posts_table
@property
def user_posts_table(self):
return self._user_posts_table
@property
def all_tables(self):
return [self._post_table, self._tag_table,
self._user_posts_table, self._tag_posts_table]
@property
def engine(self):
return self._engine
def save_post(self, title, text, user_id, tags, draft=False,
post_date=None, last_modified_date=None, meta_data=None,
post_id=None):
"""
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
is invalid, the post must be inserted into the storage. If ``post_id``
is a valid id, then the data must be updated.
:param title: The title of the blog post
:type title: str
:param text: The text of the blog post
:type text: str
:param user_id: The user identifier
:type user_id: str
:param tags: A list of tags
:type tags: list
:param draft: (Optional) If the post is a draft of if needs to be
published. (default ``False``)
:type draft: bool
:param post_date: (Optional) The date the blog was posted (default
datetime.datetime.utcnow() )
:type post_date: datetime.datetime
:param last_modified_date: (Optional) The date when blog was last
modified (default datetime.datetime.utcnow() )
:type last_modified_date: datetime.datetime
:param post_id: (Optional) The post identifier. This should be ``None``
for an insert call,
and a valid value for update. (default ``None``)
:type post_id: str
:return: The post_id value, in case of a successful insert or update.
Return ``None`` if there were errors.
"""
new_post = post_id is None
post_id = _as_int(post_id)
current_datetime = datetime.datetime.utcnow()
draft = 1 if draft is True else 0
post_date = post_date if post_date is not None else current_datetime
last_modified_date = last_modified_date if last_modified_date is not \
None else current_datetime
with self._engine.begin() as conn:
try:
if post_id is not None: # validate post_id
exists_statement = sqla.select([self._post_table]).where(
self._post_table.c.id == post_id)
exists = \
conn.execute(exists_statement).fetchone() is not None
post_id = post_id if exists else None
post_statement = \
self._post_table.insert() if post_id is None else \
self._post_table.update().where(
self._post_table.c.id == post_id)
post_statement = post_statement.values(
title=title, text=text, post_date=post_date,
last_modified_date=last_modified_date, draft=draft
)
post_result = conn.execute(post_statement)
post_id = post_result.inserted_primary_key[0] \
if post_id is None else post_id
self._save_tags(tags, post_id, conn)
self._save_user_post(user_id, post_id, conn)
except Exception as e:
self._logger.exception(str(e))
post_id = None
return post_id
@classmethod
def _serialise_posts_and_tags_from_joined_rows(cls, joined_rows):
"""
Translates multiple rows of joined post and tag information
into the dictionary format expected by flask-blogging.
There will be one row per post/tag pairing.
"""
posts_by_id = OrderedDict()
tags_by_post_id = defaultdict(list)
for joined_row in joined_rows:
post_id = joined_row.post_id
post = cls._serialise_post_from_joined_row(joined_row)
posts_by_id[post_id] = post
tags_by_post_id[post_id].append(joined_row.tag_text)
for id, post in posts_by_id.items():
tags = tags_by_post_id.get(id)
if tags:
post["tags"] = tags
return [post for post in posts_by_id.values()]
@staticmethod
def _serialise_post_from_joined_row(joined_row):
return dict(
post_id=joined_row.post_id,
title=joined_row.post_title,
text=joined_row.post_text,
post_date=joined_row.post_post_date,
last_modified_date=joined_row.post_last_modified_date,
draft=joined_row.post_draft,
user_id=joined_row.user_posts_user_id
)
def get_post_by_id(self, post_id):
"""
Fetch the blog post given by ``post_id``
:param post_id: The post identifier for the blog post
:type post_id: str
:return: If the ``post_id`` is valid, the post data is retrieved, else
returns ``None``.
"""
r = None
post_id = _as_int(post_id)
with self._engine.begin() as conn:
try:
post_statement = sqla.select([self._post_table]) \
.where(self._post_table.c.id == post_id) \
.alias('post')
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
# Note this will retrieve one row per tag
all_rows = conn.execute(
sqla.select([joined_statement])
).fetchall()
r = self._serialise_posts_and_tags_from_joined_rows(
all_rows
)[0]
except Exception as e:
self._logger.exception(str(e))
r = None
return r
def get_posts(self, count=10, offset=0, recent=True, tag=None,
user_id=None, include_draft=False):
"""
Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10)
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned.
"""
user_id = str(user_id) if user_id else user_id
with self._engine.begin() as conn:
try:
# post_statement ensures the correct posts are selected
# in the correct order
post_statement = sqla.select([self._post_table])
post_filter = self._get_filter(
tag, user_id, include_draft, conn
)
if post_filter is not None:
post_statement = post_statement.where(post_filter)
if count:
post_statement = post_statement.limit(count)
if offset:
post_statement = post_statement.offset(offset)
post_ordering = \
sqla.desc(self._post_table.c.post_date) if recent \
else self._post_table.c.post_date
post_statement = post_statement.order_by(post_ordering)
post_statement = post_statement.alias('post')
# joined_statement ensures other data is retrieved
joined_statement = post_statement.join(self._tag_posts_table) \
.join(self._tag_table) \
.join(self._user_posts_table) \
.alias('join')
joined_ordering = \
sqla.desc(joined_statement.c.post_post_date) if recent \
else joined_statement.c.post_post_date
joined_statement = sqla.select([joined_statement]) \
.order_by(joined_ordering)
all_rows = conn.execute(joined_statement).fetchall()
result = \
self._serialise_posts_and_tags_from_joined_rows(all_rows)
except Exception as e:
self._logger.exception(str(e))
result = []
return result
def count_posts(self, tag=None, user_id=None, include_draft=False):
"""
Returns the total number of posts for the give filter
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: The number of posts for the given filter.
"""
result = 0
with self._engine.begin() as conn:
try:
count_statement = sqla.select([sqla.func.count()]). \
select_from(self._post_table)
sql_filter = self._get_filter(tag, user_id, include_draft,
conn)
count_statement = count_statement.where(sql_filter)
result = conn.execute(count_statement).scalar()
except Exception as e:
self._logger.exception(str(e))
result = 0
return result
def delete_post(self, post_id):
    """
    Delete the post defined by ``post_id``

    :param post_id: The identifier corresponding to a post
    :type post_id: int
    :return: Returns True if the post was successfully deleted and False
     otherwise.
    """
    status = False
    success = 0
    post_id = _as_int(post_id)
    with self._engine.begin() as conn:
        # Three independent deletes: the post row itself plus its rows in
        # the user<->post and tag<->post association tables.  Each delete
        # gets its own try block so one failure does not skip the others.
        try:
            post_del_statement = self._post_table.delete().where(
                self._post_table.c.id == post_id)
            conn.execute(post_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        try:
            user_posts_del_statement = self._user_posts_table.delete(). \
                where(self._user_posts_table.c.post_id == post_id)
            conn.execute(user_posts_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        try:
            tag_posts_del_statement = self._tag_posts_table.delete(). \
                where(self._tag_posts_table.c.post_id == post_id)
            conn.execute(tag_posts_del_statement)
            success += 1
        except Exception as e:
            self._logger.exception(str(e))
        # True only when all three deletes executed without error.
        status = success == 3
    return status
def _get_filter(self, tag, user_id, include_draft, conn):
    """Build the combined SQLAlchemy filter clause for post queries.

    :param tag: optional tag text; upper-cased before matching (tags are
        stored upper-cased by ``_save_tags``)
    :param user_id: optional author id to filter on
    :param include_draft: when True select drafts, otherwise published
    :param conn: open connection used to resolve the tag id
    :return: an ``sqla.and_`` clause combining all applicable filters
    """
    filters = []
    if tag:
        tag = tag.upper()
        tag_statement = sqla.select([self._tag_table.c.id]).where(
            self._tag_table.c.text == tag)
        tag_result = conn.execute(tag_statement).fetchone()
        # NOTE(review): if the tag is unknown (tag_result is None) the tag
        # filter is silently omitted instead of matching zero posts -
        # confirm this is intended.
        if tag_result is not None:
            tag_id = tag_result[0]
            tag_filter = sqla.and_(
                self._tag_posts_table.c.tag_id == tag_id,
                self._post_table.c.id == self._tag_posts_table.c.post_id
            )
            filters.append(tag_filter)
    if user_id:
        user_filter = sqla.and_(
            self._user_posts_table.c.user_id == user_id,
            self._post_table.c.id == self._user_posts_table.c.post_id
        )
        filters.append(user_filter)
    # draft == 1 selects drafts only; draft == 0 selects published posts
    draft_filter = self._post_table.c.draft == 1 if include_draft else \
        self._post_table.c.draft == 0
    filters.append(draft_filter)
    sql_filter = sqla.and_(*filters)
    return sql_filter
def _save_tags(self, tags, post_id, conn):
    """Synchronise tag rows and tag<->post links for ``post_id``.

    New tags are inserted, each given tag is linked to the post, and
    links to tags no longer attached to the post are removed.  All
    database errors are logged and swallowed (best effort).

    :param tags: iterable of tag strings (normalised before use)
    :param post_id: id of the post being tagged
    :param conn: open connection to execute against
    """
    tags = self.normalize_tags(tags)
    tag_ids = []
    for tag in tags:  # iterate over given tags
        try:
            # check if the tag exists
            statement = self._tag_table.select().where(
                self._tag_table.c.text == tag)
            tag_result = conn.execute(statement).fetchone()
            if tag_result is None:
                # insert if it is a new tag
                tag_insert_statement = self._tag_table.insert().\
                    values(text=tag)
                result = conn.execute(tag_insert_statement)
                tag_id = result.inserted_primary_key[0]
            else:
                # tag already exists
                tag_id = tag_result[0]
        except sqla.exc.IntegrityError as e:
            # some database error occurred;
            tag_id = None
            self._logger.exception(str(e))
        except Exception as e:
            # unknown exception occurred
            tag_id = None
            self._logger.exception(str(e))
        if tag_id is not None:
            # for a valid tag_id
            tag_ids.append(tag_id)
            try:
                # check if given post has tag given by tag_id
                statement = self._tag_posts_table.select().where(
                    sqla.and_(self._tag_posts_table.c.tag_id == tag_id,
                              self._tag_posts_table.c.post_id == post_id))
                tag_post_result = conn.execute(statement).fetchone()
                if tag_post_result is None:
                    # if tag_id not present for the post given by post_id
                    tag_post_statement = self._tag_posts_table.insert().\
                        values(tag_id=tag_id, post_id=post_id)
                    conn.execute(tag_post_statement)
            except sqla.exc.IntegrityError as e:
                self._logger.exception(str(e))
            except Exception as e:
                self._logger.exception(str(e))
    try:
        # remove tags that have been deleted
        statement = self._tag_posts_table.delete().where(
            sqla.and_(sqla.not_(
                self._tag_posts_table.c.tag_id.in_(tag_ids)),
                self._tag_posts_table.c.post_id == post_id
            )
        )
        conn.execute(statement)
    except Exception as e:
        self._logger.exception(str(e))
def _save_user_post(self, user_id, post_id, conn):
    """Create or update the author association for ``post_id``.

    :param user_id: id of the authoring user (always stored as str)
    :param post_id: id of the post
    :param conn: open connection to execute against
    """
    user_id = str(user_id)
    statement = sqla.select([self._user_posts_table]).where(
        self._user_posts_table.c.post_id == post_id)
    result = conn.execute(statement).fetchone()
    if result is None:
        # no author recorded yet - insert the association row
        try:
            statement = self._user_posts_table.insert().values(
                user_id=user_id, post_id=post_id)
            conn.execute(statement)
        except Exception as e:
            self._logger.exception(str(e))
    else:
        # post already has an author; update only when it changed
        if result[0] != user_id:
            try:
                statement = self._user_posts_table.update().where(
                    self._user_posts_table.c.post_id == post_id). \
                    values(user_id=user_id)
                conn.execute(statement)
            except Exception as e:
                self._logger.exception(str(e))
def _table_name(self, table_name):
return self._table_prefix + table_name
def _create_all_tables(self):
"""
Creates all the required tables by calling the required functions.
:return:
"""
self._create_post_table()
self._create_tag_table()
self._create_tag_posts_table()
self._create_user_posts_table()
def _create_post_table(self):
    """
    Creates the table to store the blog posts.

    If a table with the prefixed name already exists it is reflected
    from the metadata instead of being re-created.

    :return:
    """
    with self._engine.begin() as conn:
        post_table_name = self._table_name("post")
        if not conn.dialect.has_table(conn, post_table_name):
            self._post_table = sqla.Table(
                post_table_name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                sqla.Column("title", sqla.String(256)),
                sqla.Column("text", sqla.Text),
                sqla.Column("post_date", sqla.DateTime),
                sqla.Column("last_modified_date", sqla.DateTime),
                # if 1 then make it a draft
                sqla.Column("draft", sqla.SmallInteger, default=0),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               post_table_name)
        else:
            self._post_table = self._metadata.tables[post_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               post_table_name)
def _create_tag_table(self):
    """
    Creates the table to store blog post tags.

    Tag text is unique and indexed; an existing table is reflected
    instead of being re-created.

    :return:
    """
    with self._engine.begin() as conn:
        tag_table_name = self._table_name("tag")
        if not conn.dialect.has_table(conn, tag_table_name):
            self._tag_table = sqla.Table(
                tag_table_name, self._metadata,
                sqla.Column("id", sqla.Integer, primary_key=True),
                sqla.Column("text", sqla.String(128), unique=True,
                            index=True),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_table_name)
        else:
            self._tag_table = self._metadata.tables[tag_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_table_name)
def _create_tag_posts_table(self):
    """
    Creates the table to store association info between blog posts and
    tags.

    Both foreign keys cascade on update/delete, and the (tag_id, post_id)
    pair is unique.  An existing table is reflected instead of created.

    :return:
    """
    with self._engine.begin() as conn:
        tag_posts_table_name = self._table_name("tag_posts")
        if not conn.dialect.has_table(conn, tag_posts_table_name):
            tag_id_key = self._table_name("tag") + ".id"
            post_id_key = self._table_name("post") + ".id"
            self._tag_posts_table = sqla.Table(
                tag_posts_table_name, self._metadata,
                sqla.Column('tag_id', sqla.Integer,
                            sqla.ForeignKey(tag_id_key, onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                sqla.Column('post_id', sqla.Integer,
                            sqla.ForeignKey(post_id_key,
                                            onupdate="CASCADE",
                                            ondelete="CASCADE"),
                            index=True),
                sqla.UniqueConstraint('tag_id', 'post_id', name='uix_1'),
                info=self._info
            )
            self._logger.debug("Created table with table name %s" %
                               tag_posts_table_name)
        else:
            self._tag_posts_table = \
                self._metadata.tables[tag_posts_table_name]
            self._logger.debug("Reflecting to table with table name %s" %
                               tag_posts_table_name)
|
gouthambs/Flask-Blogging
|
flask_blogging/utils.py
|
ensureUtf
|
python
|
def ensureUtf(s, encoding='utf8'):
    """Converts input to unicode if necessary.

    If `s` is bytes, it will be decoded using the `encoding` parameters.

    This function is used for preprocessing /source/ and /filename/
    arguments to the builtin function `compile`.

    :param s: text that may arrive as ``bytes`` or ``str``
    :param encoding: codec used to decode byte input (default ``utf8``)
    :return: a unicode string; undecodable bytes are dropped (``'ignore'``)
    """
    # In Python2, str == bytes.
    # In Python3, bytes remains unchanged, but str means unicode
    # while unicode is not defined anymore.
    # isinstance (rather than type(s) == bytes) also accepts bytes
    # subclasses and is the idiomatic type check.
    if isinstance(s, bytes):
        return s.decode(encoding, 'ignore')
    else:
        return s
|
Converts input to unicode if necessary.
If `s` is bytes, it will be decoded using the `encoding` parameters.
This function is used for preprocessing /source/ and /filename/ arguments
to the builtin function `compile`.
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/utils.py#L1-L13
| null | |
gouthambs/Flask-Blogging
|
flask_blogging/views.py
|
index
|
python
|
def index(count, page):
    """
    Serves the page with a list of blog posts

    :param count: number of posts per page (falls back to
        ``BLOGGING_POSTS_PER_PAGE``, default 10)
    :param page: 1-based page number
    :return: rendered ``blogging/index.html``
    """
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config
    count = count or config.get("BLOGGING_POSTS_PER_PAGE", 10)
    meta = _get_meta(storage, count, page)
    offset = meta["offset"]
    meta["is_user_blogger"] = _is_blogger(blogging_engine.blogger_permission)
    meta["count"] = count
    meta["page"] = page
    render = config.get("BLOGGING_RENDER_TEXT", True)
    posts = storage.get_posts(count=count, offset=offset, include_draft=False,
                              tag=None, user_id=None, recent=True)
    # signal subscribers may inspect/mutate posts and meta at each stage
    index_posts_fetched.send(blogging_engine.app, engine=blogging_engine,
                             posts=posts, meta=meta)
    for post in posts:
        blogging_engine.process_post(post, render=render)
    index_posts_processed.send(blogging_engine.app, engine=blogging_engine,
                               posts=posts, meta=meta)
    return render_template("blogging/index.html", posts=posts, meta=meta,
                           config=config)
|
Serves the page with a list of blog posts
:param count: number of posts to show per page
:param page: 1-based page number being requested
:return: rendered index page
|
train
|
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/views.py#L104-L133
|
[
"def _get_blogging_engine(app):\n return app.extensions[\"FLASK_BLOGGING_ENGINE\"]\n",
"def _get_meta(storage, count, page, tag=None, user_id=None):\n max_posts = storage.count_posts(tag=tag, user_id=user_id)\n max_pages = math.ceil(float(max_posts)/float(count))\n max_offset = (max_pages-1)*count\n offset = min(max(0, (page-1)*count), max_offset)\n offset = offset if offset >= 0 else 0\n if (tag is None) and (user_id is None):\n prev_page = None if page <= 1 else url_for(\n \"blogging.index\", count=count, page=page-1)\n next_page = None if page >= max_pages else url_for(\n \"blogging.index\", count=count, page=page+1)\n elif tag:\n prev_page = None if page <= 1 else url_for(\n \"blogging.posts_by_tag\", tag=tag, count=count, page=page-1)\n next_page = None if page >= max_pages else url_for(\n \"blogging.posts_by_tag\", tag=tag, count=count, page=page+1)\n elif user_id:\n prev_page = None if page <= 1 else url_for(\n \"blogging.posts_by_author\", user_id=user_id, count=count,\n page=page-1)\n next_page = None if page >= max_pages else url_for(\n \"blogging.posts_by_author\", user_id=user_id, count=count,\n page=page+1)\n else:\n prev_page = next_page = None\n\n pagination = dict(prev_page=prev_page, next_page=next_page)\n meta = dict(max_posts=max_posts, max_pages=max_pages, page=page,\n max_offset=max_offset, offset=offset, count=count,\n pagination=pagination)\n return meta\n",
"def _is_blogger(blogger_permission):\n authenticated = current_user.is_authenticated() if \\\n callable(current_user.is_authenticated) \\\n else current_user.is_authenticated\n is_blogger = authenticated and \\\n blogger_permission.require().can()\n return is_blogger\n"
] |
from __future__ import division
try:
from builtins import str
except ImportError:
pass
from flask import escape
from .processor import PostProcessor
from flask_login import login_required, current_user
from flask import Blueprint, current_app, render_template, request, redirect, \
url_for, flash, make_response
from flask_blogging.forms import BlogEditor
import math
from werkzeug.contrib.atom import AtomFeed
import datetime
from flask_principal import PermissionDenied
from .signals import page_by_id_fetched, page_by_id_processed, \
posts_by_tag_fetched, posts_by_tag_processed, \
posts_by_author_fetched, posts_by_author_processed, \
index_posts_fetched, index_posts_processed, \
feed_posts_fetched, feed_posts_processed, \
sitemap_posts_fetched, sitemap_posts_processed, editor_post_saved, \
post_deleted, editor_get_fetched
from .utils import ensureUtf
def _get_blogging_engine(app):
return app.extensions["FLASK_BLOGGING_ENGINE"]
def _get_user_name(user):
user_name = user.get_name() if hasattr(user, "get_name") else str(user)
return user_name
def _clear_cache(cache):
    """Invalidate every memoized blogging view on *cache*."""
    views = (index, page_by_id, posts_by_author, posts_by_tag, sitemap, feed)
    for view in views:
        cache.delete_memoized(view)
def _store_form_data(blog_form, storage, user, post, escape_text=True):
    """Persist the submitted editor form and return the stored post id.

    :param blog_form: validated ``BlogEditor`` form
    :param storage: the engine's storage backend
    :param user: the authoring user (its ``get_id()`` is stored)
    :param post: existing post dict (empty for a new post); its
        ``post_date``/``post_id`` are reused on edit
    :param escape_text: when True, HTML-escape the body before saving
    :return: the id assigned by ``storage.save_post``
    """
    title = blog_form.title.data
    text = escape(blog_form.text.data) if escape_text \
        else blog_form.text.data
    tags = blog_form.tags.data.split(",")
    draft = blog_form.draft.data
    user_id = user.get_id()
    current_datetime = datetime.datetime.utcnow()
    # keep the original publish date on edits; only last_modified moves
    post_date = post.get("post_date", current_datetime)
    last_modified_date = datetime.datetime.utcnow()
    post_id = post.get("post_id")
    pid = storage.save_post(title, text, user_id, tags, draft=draft,
                            post_date=post_date,
                            last_modified_date=last_modified_date,
                            post_id=post_id)
    return pid
def _get_meta(storage, count, page, tag=None, user_id=None):
    """Compute pagination metadata (offset, prev/next URLs) for a listing.

    :param storage: storage backend used to count matching posts
    :param count: posts per page
    :param page: 1-based requested page; the offset is clamped into range
    :param tag: optional tag filter (selects which endpoint URLs to build)
    :param user_id: optional author filter
    :return: dict with max_posts/max_pages/page/max_offset/offset/count
        and a ``pagination`` dict of prev/next URLs (None at the edges)
    """
    max_posts = storage.count_posts(tag=tag, user_id=user_id)
    max_pages = math.ceil(float(max_posts)/float(count))
    max_offset = (max_pages-1)*count
    # clamp so out-of-range pages fall back to valid bounds (and never < 0)
    offset = min(max(0, (page-1)*count), max_offset)
    offset = offset if offset >= 0 else 0
    if (tag is None) and (user_id is None):
        prev_page = None if page <= 1 else url_for(
            "blogging.index", count=count, page=page-1)
        next_page = None if page >= max_pages else url_for(
            "blogging.index", count=count, page=page+1)
    elif tag:
        prev_page = None if page <= 1 else url_for(
            "blogging.posts_by_tag", tag=tag, count=count, page=page-1)
        next_page = None if page >= max_pages else url_for(
            "blogging.posts_by_tag", tag=tag, count=count, page=page+1)
    elif user_id:
        prev_page = None if page <= 1 else url_for(
            "blogging.posts_by_author", user_id=user_id, count=count,
            page=page-1)
        next_page = None if page >= max_pages else url_for(
            "blogging.posts_by_author", user_id=user_id, count=count,
            page=page+1)
    else:
        prev_page = next_page = None

    pagination = dict(prev_page=prev_page, next_page=next_page)
    meta = dict(max_posts=max_posts, max_pages=max_pages, page=page,
                max_offset=max_offset, offset=offset, count=count,
                pagination=pagination)
    return meta
def _is_blogger(blogger_permission):
    """True when the current user is authenticated and holds *blogger_permission*."""
    # ``is_authenticated`` may be a method or a property (it varies across
    # Flask-Login versions); handle both forms.
    authenticated = current_user.is_authenticated() if \
        callable(current_user.is_authenticated) \
        else current_user.is_authenticated
    is_blogger = authenticated and \
        blogger_permission.require().can()
    return is_blogger
def page_by_id(post_id, slug):
    """
    Serves a single blog post looked up by id.

    :param post_id: id of the post to display
    :param slug: URL slug (cosmetic only; the lookup is by id)
    :return: rendered page, or a redirect to the index when the id is
        invalid
    """
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config
    post = storage.get_post_by_id(post_id)
    meta = {}
    meta["is_user_blogger"] = _is_blogger(blogging_engine.blogger_permission)
    render = config.get("BLOGGING_RENDER_TEXT", True)
    meta["post_id"] = post_id
    meta["slug"] = slug
    page_by_id_fetched.send(blogging_engine.app, engine=blogging_engine,
                            post=post, meta=meta)
    if post is not None:
        blogging_engine.process_post(post, render=render)
        page_by_id_processed.send(blogging_engine.app, engine=blogging_engine,
                                  post=post, meta=meta)
        return render_template("blogging/page.html", post=post, config=config,
                               meta=meta)
    else:
        flash("The page you are trying to access is not valid!", "warning")
        return redirect(url_for("blogging.index"))
def posts_by_tag(tag, count, page):
    """
    Serves the list of blog posts filtered by ``tag``.

    :param tag: tag text to filter on
    :param count: number of posts per page (falls back to
        ``BLOGGING_POSTS_PER_PAGE``, default 10)
    :param page: 1-based page number
    :return: rendered index template, or a redirect when nothing matches
    """
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config
    count = count or config.get("BLOGGING_POSTS_PER_PAGE", 10)
    meta = _get_meta(storage, count, page, tag=tag)
    offset = meta["offset"]
    meta["is_user_blogger"] = _is_blogger(blogging_engine.blogger_permission)
    meta["tag"] = tag
    meta["count"] = count
    meta["page"] = page
    render = config.get("BLOGGING_RENDER_TEXT", True)
    posts = storage.get_posts(count=count, offset=offset, tag=tag,
                              include_draft=False, user_id=None, recent=True)
    posts_by_tag_fetched.send(blogging_engine.app, engine=blogging_engine,
                              posts=posts, meta=meta)
    if len(posts):
        for post in posts:
            blogging_engine.process_post(post, render=render)
        posts_by_tag_processed.send(blogging_engine.app,
                                    engine=blogging_engine,
                                    posts=posts, meta=meta)
        return render_template("blogging/index.html", posts=posts, meta=meta,
                               config=config)
    else:
        flash("No posts found for this tag!", "warning")
        return redirect(url_for("blogging.index", post_id=None))
def posts_by_author(user_id, count, page):
    """
    Serves the list of blog posts written by ``user_id``.

    :param user_id: author id to filter on
    :param count: number of posts per page (falls back to
        ``BLOGGING_POSTS_PER_PAGE``, default 10)
    :param page: 1-based page number
    :return: rendered index template, or a redirect when nothing matches
    """
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config
    count = count or config.get("BLOGGING_POSTS_PER_PAGE", 10)
    meta = _get_meta(storage, count, page, user_id=user_id)
    offset = meta["offset"]
    meta["is_user_blogger"] = _is_blogger(blogging_engine.blogger_permission)
    meta["user_id"] = user_id
    meta["count"] = count
    meta["page"] = page
    posts = storage.get_posts(count=count, offset=offset, user_id=user_id,
                              include_draft=False, tag=None, recent=True)
    render = config.get("BLOGGING_RENDER_TEXT", True)
    posts_by_author_fetched.send(blogging_engine.app, engine=blogging_engine,
                                 posts=posts, meta=meta)
    if len(posts):
        for post in posts:
            blogging_engine.process_post(post, render=render)
        posts_by_author_processed.send(blogging_engine.app,
                                       engine=blogging_engine, posts=posts,
                                       meta=meta)
        return render_template("blogging/index.html", posts=posts, meta=meta,
                               config=config)
    else:
        flash("No posts found for this user!", "warning")
        return redirect(url_for("blogging.index", post_id=None))
@login_required
def editor(post_id):
    """
    Create or edit a blog post (GET renders the form, POST saves it).

    Requires login plus the blogger permission; the view cache is cleared
    up front because a save changes what cached pages should show.

    :param post_id: id of the post being edited, or None for a new post
    """
    blogging_engine = _get_blogging_engine(current_app)
    cache = blogging_engine.cache
    if cache:
        _clear_cache(cache)
    try:
        with blogging_engine.blogger_permission.require():
            post_processor = blogging_engine.post_processor
            config = blogging_engine.config
            storage = blogging_engine.storage
            if request.method == 'POST':
                form = BlogEditor(request.form)
                if form.validate():
                    post = storage.get_post_by_id(post_id)
                    # only reuse the stored post when it exists, belongs to
                    # the current user, and matches the requested id;
                    # otherwise treat the submission as a new post
                    if (post is not None) and \
                            (post_processor.is_author(post, current_user)) and \
                            (str(post["post_id"]) == post_id):
                        pass
                    else:
                        post = {}
                    escape_text = config.get("BLOGGING_ESCAPE_MARKDOWN", False)
                    pid = _store_form_data(form, storage, current_user, post,
                                           escape_text)
                    editor_post_saved.send(blogging_engine.app,
                                           engine=blogging_engine,
                                           post_id=pid,
                                           user=current_user,
                                           post=post)
                    flash("Blog posted successfully!", "info")
                    slug = post_processor.create_slug(form.title.data)
                    return redirect(url_for("blogging.page_by_id", post_id=pid,
                                            slug=slug))
                else:
                    flash("There were errors in blog submission", "warning")
                    return render_template("blogging/editor.html", form=form,
                                           post_id=post_id, config=config)
            else:
                if post_id is not None:
                    post = storage.get_post_by_id(post_id)
                    if (post is not None) and \
                            (post_processor.is_author(post, current_user)):
                        tags = ", ".join(post["tags"])
                        form = BlogEditor(title=post["title"],
                                          text=post["text"], tags=tags)
                        editor_get_fetched.send(blogging_engine.app,
                                                engine=blogging_engine,
                                                post_id=post_id,
                                                form=form)
                        return render_template("blogging/editor.html",
                                               form=form, post_id=post_id,
                                               config=config)
                    else:
                        flash("You do not have the rights to edit this post",
                              "warning")
                        return redirect(url_for("blogging.index",
                                                post_id=None))
                # new post: render an empty editor form
                form = BlogEditor()
                return render_template("blogging/editor.html", form=form,
                                       post_id=post_id, config=config)
    except PermissionDenied:
        flash("You do not have permissions to create or edit posts", "warning")
        return redirect(url_for("blogging.index", post_id=None))
@login_required
def delete(post_id):
    """
    Delete the post with the given id, then redirect to the index.

    Requires login, the blogger permission, and authorship of the post.
    The view cache is cleared up front because a delete changes what
    cached pages should show.

    :param post_id: id of the post to delete
    """
    blogging_engine = _get_blogging_engine(current_app)
    cache = blogging_engine.cache
    if cache:
        _clear_cache(cache)
    try:
        post_processor = blogging_engine.post_processor
        with blogging_engine.blogger_permission.require():
            storage = blogging_engine.storage
            post = storage.get_post_by_id(post_id)
            if (post is not None) and \
                    (post_processor.is_author(post, current_user)):
                success = storage.delete_post(post_id)
                if success:
                    flash("Your post was successfully deleted", "info")
                    post_deleted.send(blogging_engine.app,
                                      engine=blogging_engine,
                                      post_id=post_id,
                                      post=post)
                else:
                    flash("There were errors while deleting your post",
                          "warning")
            else:
                flash("You do not have the rights to delete this post",
                      "warning")
        return redirect(url_for("blogging.index"))
    except PermissionDenied:
        flash("You do not have permissions to delete posts", "warning")
        return redirect(url_for("blogging.index", post_id=None))
def sitemap():
    """Serve ``sitemap.xml`` listing all published (non-draft) posts."""
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config
    posts = storage.get_posts(count=None, offset=None, recent=True,
                              user_id=None, tag=None, include_draft=False)
    sitemap_posts_fetched.send(blogging_engine.app, engine=blogging_engine,
                               posts=posts)
    if len(posts):
        for post in posts:
            # render=False: the sitemap needs URLs/dates, not HTML bodies
            blogging_engine.process_post(post, render=False)
        sitemap_posts_processed.send(blogging_engine.app,
                                     engine=blogging_engine, posts=posts)
    sitemap_xml = render_template("blogging/sitemap.xml", posts=posts,
                                  config=config)
    response = make_response(sitemap_xml)
    response.headers["Content-Type"] = "application/xml"
    return response
def feed():
    """Serve the Atom feed (``/feeds/all.atom.xml``) of recent posts."""
    blogging_engine = _get_blogging_engine(current_app)
    storage = blogging_engine.storage
    config = blogging_engine.config
    # count may be None (no BLOGGING_FEED_LIMIT configured)
    count = config.get("BLOGGING_FEED_LIMIT")
    posts = storage.get_posts(count=count, offset=None, recent=True,
                              user_id=None, tag=None, include_draft=False)
    feed = AtomFeed(
        '%s - All Articles' % config.get("BLOGGING_SITENAME",
                                         "Flask-Blogging"),
        feed_url=request.url, url=request.url_root, generator=None)
    feed_posts_fetched.send(blogging_engine.app, engine=blogging_engine,
                            posts=posts)
    if len(posts):
        for post in posts:
            # render=True: the feed embeds the rendered HTML body
            blogging_engine.process_post(post, render=True)
            feed.add(post["title"], ensureUtf(post["rendered_text"]),
                     content_type='html',
                     author=post["user_name"],
                     url=config.get("BLOGGING_SITEURL", "")+post["url"],
                     updated=post["last_modified_date"],
                     published=post["post_date"])
        feed_posts_processed.send(blogging_engine.app, engine=blogging_engine,
                                  feed=feed)
    response = feed.get_response()
    response.headers["Content-Type"] = "application/xml"
    return response
def unless(blogging_engine):
    """Build the cache ``unless`` predicate.

    Caching is skipped for bloggers because they can change state.
    """
    # disable caching for bloggers. They can change state!
    return lambda: _is_blogger(blogging_engine.blogger_permission)
def cached_func(blogging_engine, func):
    """Wrap *func* with the engine's cache memoization.

    Returns *func* unchanged when caching is disabled; otherwise the
    memoized wrapper uses ``BLOGGING_CACHE_TIMEOUT`` (default 60 seconds)
    and skips caching for bloggers via :func:`unless`.
    """
    cache = blogging_engine.cache
    if cache is None:
        return func
    timeout = blogging_engine.config.get("BLOGGING_CACHE_TIMEOUT", 60)
    skip_predicate = unless(blogging_engine)
    return cache.memoize(timeout=timeout, unless=skip_predicate)(func)
def create_blueprint(import_name, blogging_engine):
    """Assemble the ``blogging`` Blueprint with all routes registered.

    Read-only views (index, page, tag/author listings, sitemap, feed) are
    wrapped with :func:`cached_func`; editor and delete are not cached.

    :param import_name: import name passed to the Blueprint
    :param blogging_engine: the configured blogging engine
    :return: the configured ``flask.Blueprint``
    """
    blog_app = Blueprint("blogging", import_name, template_folder='templates')

    # register index
    index_func = cached_func(blogging_engine, index)
    blog_app.add_url_rule("/", defaults={"count": None, "page": 1},
                          view_func=index_func)
    blog_app.add_url_rule("/<int:count>/", defaults={"page": 1},
                          view_func=index_func)
    blog_app.add_url_rule("/<int:count>/<int:page>/", view_func=index_func)

    # register page_by_id
    page_by_id_func = cached_func(blogging_engine, page_by_id)
    blog_app.add_url_rule("/page/<post_id>/", defaults={"slug": ""},
                          view_func=page_by_id_func)
    blog_app.add_url_rule("/page/<post_id>/<slug>/",
                          view_func=page_by_id_func)

    # register posts_by_tag
    posts_by_tag_func = cached_func(blogging_engine, posts_by_tag)
    blog_app.add_url_rule("/tag/<tag>/", defaults=dict(count=None, page=1),
                          view_func=posts_by_tag_func)
    blog_app.add_url_rule("/tag/<tag>/<int:count>/", defaults=dict(page=1),
                          view_func=posts_by_tag_func)
    blog_app.add_url_rule("/tag/<tag>/<int:count>/<int:page>/",
                          view_func=posts_by_tag_func)

    # register posts_by_author
    posts_by_author_func = cached_func(blogging_engine, posts_by_author)
    blog_app.add_url_rule("/author/<user_id>/",
                          defaults=dict(count=None, page=1),
                          view_func=posts_by_author_func)
    blog_app.add_url_rule("/author/<user_id>/<int:count>/",
                          defaults=dict(page=1),
                          view_func=posts_by_author_func)
    blog_app.add_url_rule("/author/<user_id>/<int:count>/<int:page>/",
                          view_func=posts_by_author_func)

    # register editor
    editor_func = editor  # For now lets not cache this
    blog_app.add_url_rule('/editor/', methods=["GET", "POST"],
                          defaults={"post_id": None},
                          view_func=editor_func)
    blog_app.add_url_rule('/editor/<post_id>/', methods=["GET", "POST"],
                          view_func=editor_func)

    # register delete
    delete_func = delete  # For now lets not cache this
    blog_app.add_url_rule("/delete/<post_id>/", methods=["POST"],
                          view_func=delete_func)

    # register sitemap
    sitemap_func = cached_func(blogging_engine, sitemap)
    blog_app.add_url_rule("/sitemap.xml", view_func=sitemap_func)

    # register feed
    feed_func = cached_func(blogging_engine, feed)
    blog_app.add_url_rule('/feeds/all.atom.xml', view_func=feed_func)
    return blog_app
|
pydata/pandas-gbq
|
pandas_gbq/auth.py
|
get_service_account_credentials
|
python
|
def get_service_account_credentials(private_key):
    """DEPRECATED: Load service account credentials from key data or key path.

    :param private_key: path to a service-account JSON key file, or the
        JSON contents themselves
    :return: ``(credentials, project_id)``; the token is refreshed before
        returning
    :raises pandas_gbq.exceptions.InvalidPrivateKeyFormat: when the key is
        missing, malformed, or not valid JSON
    """
    import google.auth.transport.requests
    from google.oauth2.service_account import Credentials

    is_path = os.path.isfile(private_key)

    try:
        if is_path:
            with open(private_key) as f:
                json_key = json.loads(f.read())
        else:
            # ugly hack: 'private_key' field has new lines inside,
            # they break json parser, but we need to preserve them
            json_key = json.loads(private_key.replace("\n", " "))
            json_key["private_key"] = json_key["private_key"].replace(
                " ", "\n"
            )

        json_key["private_key"] = bytes(json_key["private_key"], "UTF-8")

        credentials = Credentials.from_service_account_info(json_key)
        credentials = credentials.with_scopes(SCOPES)

        # Refresh the token before trying to use it.
        request = google.auth.transport.requests.Request()
        credentials.refresh(request)

        return credentials, json_key.get("project_id")
    except (KeyError, ValueError, TypeError, AttributeError):
        raise pandas_gbq.exceptions.InvalidPrivateKeyFormat(
            "Detected private_key as {}. ".format(
                "path" if is_path else "contents"
            )
            + "Private key is missing or invalid. It should be service "
            "account private key JSON (file path or string contents) "
            'with at least two keys: "client_email" and "private_key". '
            "Can be obtained from: https://console.developers.google."
            "com/permissions/serviceaccounts"
        )
|
DEPRECATED: Load service account credentials from key data or key path.
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/auth.py#L53-L92
| null |
"""Private module for fetching Google BigQuery credentials."""
import json
import logging
import os
import os.path
import pandas_gbq.exceptions
logger = logging.getLogger(__name__)
CREDENTIALS_CACHE_DIRNAME = "pandas_gbq"
CREDENTIALS_CACHE_FILENAME = "bigquery_credentials.dat"
SCOPES = ["https://www.googleapis.com/auth/bigquery"]
# The following constants are used for end-user authentication.
# It identifies (via credentials from the pandas-gbq-auth GCP project) the
# application that is requesting permission to access the BigQuery API on
# behalf of a G Suite or Gmail user.
#
# In a web application, the client secret would be kept secret, but this is not
# possible for applications that are installed locally on an end-user's
# machine.
#
# See: https://cloud.google.com/docs/authentication/end-user for details.
CLIENT_ID = (
"725825577420-unm2gnkiprugilg743tkbig250f4sfsj.apps.googleusercontent.com"
)
CLIENT_SECRET = "4hqze9yI8fxShls8eJWkeMdJ"
def get_credentials(
    private_key=None, project_id=None, reauth=False, auth_local_webserver=False
):
    """Obtain BigQuery credentials and the project id to use.

    A ``private_key`` (service-account JSON, path or contents) takes
    precedence; otherwise end-user credentials are fetched (and cached)
    via ``pydata_google_auth``.

    :param private_key: optional service-account key (path or contents)
    :param project_id: explicit project id; falls back to the one
        associated with the credentials
    :param reauth: force a new login instead of reusing cached credentials
    :param auth_local_webserver: use the local-webserver OAuth flow
    :return: ``(credentials, project_id)``
    """
    import pydata_google_auth

    if private_key:
        return get_service_account_credentials(private_key)

    credentials, default_project_id = pydata_google_auth.default(
        SCOPES,
        client_id=CLIENT_ID,
        client_secret=CLIENT_SECRET,
        credentials_cache=get_credentials_cache(reauth),
        auth_local_webserver=auth_local_webserver,
    )

    project_id = project_id or default_project_id
    return credentials, project_id
def get_credentials_cache(reauth,):
    """Return the on-disk credentials cache.

    When *reauth* is truthy the cache is write-only, forcing a fresh
    login while still persisting the new credentials afterwards.
    """
    import pydata_google_auth.cache

    cache_kwargs = dict(
        dirname=CREDENTIALS_CACHE_DIRNAME,
        filename=CREDENTIALS_CACHE_FILENAME,
    )
    if reauth:
        return pydata_google_auth.cache.WriteOnlyCredentialsCache(
            **cache_kwargs
        )
    return pydata_google_auth.cache.ReadWriteCredentialsCache(**cache_kwargs)
|
pydata/pandas-gbq
|
pandas_gbq/load.py
|
encode_chunk
|
python
|
def encode_chunk(dataframe):
    """Return a file-like object of CSV-encoded rows.

    Args:
        dataframe (pandas.DataFrame): A chunk of a dataframe to encode
    """
    buf = six.StringIO()
    dataframe.to_csv(
        buf,
        index=False,
        header=False,
        encoding="utf-8",
        float_format="%.15g",
        date_format="%Y-%m-%d %H:%M:%S.%f",
    )

    # Convert to a BytesIO buffer so that unicode text is properly handled.
    # See: https://github.com/pydata/pandas-gbq/issues/106
    text = buf.getvalue()
    if isinstance(text, bytes):
        text = text.decode("utf-8")
    return six.BytesIO(text.encode("utf-8"))
|
Return a file-like object of CSV-encoded rows.
Args:
dataframe (pandas.DataFrame): A chunk of a dataframe to encode
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/load.py#L9-L31
| null |
"""Helper methods for loading data into BigQuery"""
import six
from google.cloud import bigquery
import pandas_gbq.schema
def encode_chunks(dataframe, chunksize=None):
    """Yield ``(remaining_rows, csv_buffer)`` pairs for *dataframe*.

    With ``chunksize=None`` the whole frame is encoded as one chunk and
    ``remaining_rows`` is 0; otherwise the frame is sliced into
    ``chunksize``-row pieces.
    """
    dataframe = dataframe.reset_index(drop=True)
    if chunksize is None:
        yield 0, encode_chunk(dataframe)
        return

    total_rows = len(dataframe)
    remaining_rows = total_rows
    for start in range(0, total_rows, chunksize):
        buffer = encode_chunk(dataframe[start:start + chunksize])
        # rows still unsent AFTER this chunk (never negative)
        remaining_rows = max(0, remaining_rows - chunksize)
        yield remaining_rows, buffer
def load_chunks(
    client,
    dataframe,
    dataset_id,
    table_id,
    chunksize=None,
    schema=None,
    location=None,
):
    """Append *dataframe* to a BigQuery table in chunks, yielding progress.

    Yields the number of rows still remaining BEFORE each chunk upload,
    so callers can report progress while the upload blocks.

    :param client: ``google.cloud.bigquery.Client``
    :param dataframe: data to append to ``dataset_id.table_id``
    :param chunksize: rows per upload; None uploads everything at once
    :param schema: optional BigQuery schema dict; generated when None
    :param location: optional BigQuery job location
    """
    destination_table = client.dataset(dataset_id).table(table_id)
    job_config = bigquery.LoadJobConfig()
    job_config.write_disposition = "WRITE_APPEND"
    job_config.source_format = "CSV"
    job_config.allow_quoted_newlines = True

    if schema is None:
        schema = pandas_gbq.schema.generate_bq_schema(dataframe)

    # Manually create the schema objects, adding NULLABLE mode
    # as a workaround for
    # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
    for field in schema["fields"]:
        if "mode" not in field:
            field["mode"] = "NULLABLE"

    job_config.schema = [
        bigquery.SchemaField.from_api_repr(field) for field in schema["fields"]
    ]

    chunks = encode_chunks(dataframe, chunksize=chunksize)
    for remaining_rows, chunk_buffer in chunks:
        try:
            yield remaining_rows
            client.load_table_from_file(
                chunk_buffer,
                destination_table,
                job_config=job_config,
                location=location,
            ).result()
        finally:
            # always release the in-memory CSV buffer, even on failure
            chunk_buffer.close()
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
_bqschema_to_nullsafe_dtypes
|
python
|
def _bqschema_to_nullsafe_dtypes(schema_fields):
# If you update this mapping, also update the table at
# `docs/source/reading.rst`.
dtype_map = {
"FLOAT": np.dtype(float),
# pandas doesn't support timezone-aware dtype in DataFrame/Series
# constructors. It's more idiomatic to localize after construction.
# https://github.com/pandas-dev/pandas/issues/25843
"TIMESTAMP": "datetime64[ns]",
"TIME": "datetime64[ns]",
"DATE": "datetime64[ns]",
"DATETIME": "datetime64[ns]",
}
dtypes = {}
for field in schema_fields:
name = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
dtypes[name] = dtype
return dtypes
|
Specify explicit dtypes based on BigQuery schema.
This function only specifies a dtype when the dtype allows nulls.
Otherwise, use pandas's default dtype choice.
See: http://pandas.pydata.org/pandas-docs/dev/missing_data.html
#missing-data-casting-rules-and-indexing
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L662-L694
| null |
import logging
import time
import warnings
from datetime import datetime
import numpy as np
try:
# The BigQuery Storage API client is an optional dependency. It is only
# required when use_bqstorage_api=True.
from google.cloud import bigquery_storage_v1beta1
except ImportError: # pragma: NO COVER
bigquery_storage_v1beta1 = None
from pandas_gbq.exceptions import AccessDenied
logger = logging.getLogger(__name__)
BIGQUERY_INSTALLED_VERSION = None
SHOW_VERBOSE_DEPRECATION = False
SHOW_PRIVATE_KEY_DEPRECATION = False
# User-facing deprecation warning for the legacy ``private_key`` argument.
# NOTE: the trailing space after "version." is required - the adjacent
# string literals are concatenated, and without it the message reads
# "version.Use".
PRIVATE_KEY_DEPRECATION_MESSAGE = (
    "private_key is deprecated and will be removed in a future version. "
    "Use the credentials argument instead. See "
    "https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html "
    "for examples on using the credentials argument with service account keys."
)
try:
import tqdm # noqa
except ImportError:
tqdm = None
def _check_google_client_version():
    """Validate the installed google-cloud-bigquery and set deprecation flags.

    Raises ImportError when google-cloud-bigquery is older than the
    minimum supported version.  Also records (in module globals) whether
    the installed pandas is new enough that the ``verbose`` and
    ``private_key`` arguments should emit deprecation warnings.
    """
    global BIGQUERY_INSTALLED_VERSION, SHOW_VERBOSE_DEPRECATION, SHOW_PRIVATE_KEY_DEPRECATION

    try:
        import pkg_resources
    except ImportError:
        raise ImportError("Could not import pkg_resources (setuptools).")

    # https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/bigquery/CHANGELOG.md
    bigquery_minimum_version = pkg_resources.parse_version("1.9.0")
    BIGQUERY_INSTALLED_VERSION = pkg_resources.get_distribution(
        "google-cloud-bigquery"
    ).parsed_version

    if BIGQUERY_INSTALLED_VERSION < bigquery_minimum_version:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery >= {0}, "
            "current version {1}".format(
                bigquery_minimum_version, BIGQUERY_INSTALLED_VERSION
            )
        )

    # Add check for Pandas version before showing deprecation warning.
    # https://github.com/pydata/pandas-gbq/issues/157
    pandas_installed_version = pkg_resources.get_distribution(
        "pandas"
    ).parsed_version
    pandas_version_wo_verbosity = pkg_resources.parse_version("0.23.0")
    SHOW_VERBOSE_DEPRECATION = (
        pandas_installed_version >= pandas_version_wo_verbosity
    )

    pandas_version_with_credentials_arg = pkg_resources.parse_version("0.24.0")
    SHOW_PRIVATE_KEY_DEPRECATION = (
        pandas_installed_version >= pandas_version_with_credentials_arg
    )
def _test_google_api_imports():
    """Probe every required Google dependency, one at a time.

    Each required package is imported in its own try/except so that the
    resulting ImportError names the specific missing dependency. Finishes
    by running :func:`_check_google_client_version` to enforce the minimum
    google-cloud-bigquery version.
    """
    try:
        import pydata_google_auth  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires pydata-google-auth: {0}".format(ex)
        )

    try:
        from google_auth_oauthlib.flow import InstalledAppFlow  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires google-auth-oauthlib: {0}".format(ex)
        )

    try:
        import google.auth  # noqa
    except ImportError as ex:
        raise ImportError("pandas-gbq requires google-auth: {0}".format(ex))

    try:
        from google.cloud import bigquery  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery: {0}".format(ex)
        )

    _check_google_client_version()
class DatasetCreationError(ValueError):
    """
    Raised when the create dataset method fails
    (e.g. the dataset already exists).
    """

    pass
class GenericGBQException(ValueError):
    """
    Raised when an unrecognized Google API Error occurs.
    """

    pass
class InvalidColumnOrder(ValueError):
    """
    Raised when the provided column order for output
    results DataFrame does not match the schema
    returned by BigQuery.
    """

    pass
class InvalidIndexColumn(ValueError):
    """
    Raised when the provided index column for output
    results DataFrame does not match the schema
    returned by BigQuery.
    """

    pass
class InvalidPageToken(ValueError):
    """
    Raised when Google BigQuery fails to return,
    or returns a duplicate page token.
    """

    pass
class InvalidSchema(ValueError):
    """
    Raised when the provided DataFrame does
    not match the schema of the destination
    table in BigQuery.
    """

    pass
class NotFoundException(ValueError):
    """
    Raised when the project_id, table or dataset provided in the query could
    not be found.
    """

    pass
class QueryTimeout(ValueError):
    """
    Raised when the query request exceeds the timeoutMs value specified in the
    BigQuery configuration.
    """

    pass
class TableCreationError(ValueError):
    """
    Raised when the create table method fails
    (e.g. the table already exists).
    """

    pass
class Context(object):
    """Storage for objects to be used throughout a session.

    A Context object is initialized when the ``pandas_gbq`` module is
    imported, and can be found at :attr:`pandas_gbq.context`.
    """

    def __init__(self):
        # Cached google-auth credentials; None until the first auth flow.
        self._credentials = None
        # Cached default billing project; None until first resolved.
        self._project = None
        # dialect defaults to None so that read_gbq can stop warning if set.
        self._dialect = None

    @property
    def credentials(self):
        """
        Credentials to use for Google APIs.

        These credentials are automatically cached in memory by calls to
        :func:`pandas_gbq.read_gbq` and :func:`pandas_gbq.to_gbq`. To
        manually set the credentials, construct an
        :class:`google.auth.credentials.Credentials` object and set it as
        the context credentials as demonstrated in the example below. See
        `auth docs`_ for more information on obtaining credentials.

        .. _auth docs: http://google-auth.readthedocs.io
            /en/latest/user-guide.html#obtaining-credentials

        Returns
        -------
        google.auth.credentials.Credentials

        Examples
        --------
        Manually setting the context credentials:

        >>> import pandas_gbq
        >>> from google.oauth2 import service_account
        >>> credentials = service_account.Credentials.from_service_account_file(
        ...     '/path/to/key.json',
        ... )
        >>> pandas_gbq.context.credentials = credentials
        """
        return self._credentials

    @credentials.setter
    def credentials(self, value):
        self._credentials = value

    @property
    def project(self):
        """Default project to use for calls to Google APIs.

        Returns
        -------
        str

        Examples
        --------
        Manually setting the context project:

        >>> import pandas_gbq
        >>> pandas_gbq.context.project = 'my-project'
        """
        return self._project

    @project.setter
    def project(self, value):
        self._project = value

    @property
    def dialect(self):
        """
        Default dialect to use in :func:`pandas_gbq.read_gbq`.

        Allowed values for the BigQuery SQL syntax dialect:

        ``'legacy'``
            Use BigQuery's legacy SQL dialect. For more information see
            `BigQuery Legacy SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
        ``'standard'``
            Use BigQuery's standard SQL, which is
            compliant with the SQL 2011 standard. For more information
            see `BigQuery Standard SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.

        Returns
        -------
        str

        Examples
        --------
        Setting the default syntax to standard:

        >>> import pandas_gbq
        >>> pandas_gbq.context.dialect = 'standard'
        """
        return self._dialect

    @dialect.setter
    def dialect(self, value):
        self._dialect = value
# Create an empty context, used to cache credentials.
context = Context()
"""A :class:`pandas_gbq.Context` object used to cache credentials.
Credentials automatically are cached in-memory by :func:`pandas_gbq.read_gbq`
and :func:`pandas_gbq.to_gbq`.
"""
class GbqConnector(object):
    """Connection helper around a google-cloud-bigquery Client.

    Resolves credentials and the billing project (explicit arguments,
    module-level ``context`` cache, or the ``pandas_gbq.auth`` flow) and
    exposes query execution (:meth:`run_query`), data loading
    (:meth:`load_data`) and schema inspection helpers.
    """

    def __init__(
        self,
        project_id,
        reauth=False,
        private_key=None,
        auth_local_webserver=False,
        dialect="standard",
        location=None,
        credentials=None,
        use_bqstorage_api=False,
    ):
        global context
        from google.api_core.exceptions import GoogleAPIError
        from google.api_core.exceptions import ClientError
        from pandas_gbq import auth

        # Exception types treated as "HTTP errors" by the helper methods.
        self.http_error = (ClientError, GoogleAPIError)
        self.project_id = project_id
        self.location = location
        self.reauth = reauth
        self.private_key = private_key
        self.auth_local_webserver = auth_local_webserver
        self.dialect = dialect
        self.credentials = credentials
        default_project = None

        # Service account credentials have a project associated with them.
        # Prefer that project if none was supplied.
        if self.project_id is None and hasattr(self.credentials, "project_id"):
            self.project_id = credentials.project_id

        # Load credentials from cache.
        if not self.credentials:
            self.credentials = context.credentials
            default_project = context.project

        # Credentials were explicitly asked for, so don't use the cache.
        if private_key or reauth or not self.credentials:
            self.credentials, default_project = auth.get_credentials(
                private_key=private_key,
                project_id=project_id,
                reauth=reauth,
                auth_local_webserver=auth_local_webserver,
            )

        if self.project_id is None:
            self.project_id = default_project

        if self.project_id is None:
            raise ValueError(
                "Could not determine project ID and one was not supplied."
            )

        # Cache the credentials if they haven't been set yet.
        if context.credentials is None:
            context.credentials = self.credentials
        if context.project is None:
            context.project = self.project_id

        self.client = self.get_client()
        self.bqstorage_client = _make_bqstorage_client(
            use_bqstorage_api, self.credentials
        )

        # BQ Queries costs $5 per TB. First 1 TB per month is free
        # see here for more: https://cloud.google.com/bigquery/pricing
        self.query_price_for_TB = 5.0 / 2 ** 40  # USD/TB

    def _start_timer(self):
        # Record the wall-clock start time used by get_elapsed_seconds().
        self.start = time.time()

    def get_elapsed_seconds(self):
        """Return seconds elapsed since :meth:`_start_timer`, rounded to 2 dp."""
        return round(time.time() - self.start, 2)

    def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
        """Log the elapsed time, but only when it exceeds *overlong* seconds."""
        sec = self.get_elapsed_seconds()
        if sec > overlong:
            logger.info("{} {} {}".format(prefix, sec, postfix))

    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    @staticmethod
    def sizeof_fmt(num, suffix="B"):
        """Format a byte count as a human-readable string, e.g. ``'1.5 MB'``."""
        fmt = "%3.1f %s%s"
        for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
            if abs(num) < 1024.0:
                return fmt % (num, unit, suffix)
            num /= 1024.0
        return fmt % (num, "Y", suffix)

    def get_client(self):
        """Construct a :class:`google.cloud.bigquery.Client` for this connector."""
        from google.cloud import bigquery

        return bigquery.Client(
            project=self.project_id, credentials=self.credentials
        )

    @staticmethod
    def process_http_error(ex):
        # See `BigQuery Troubleshooting Errors
        # <https://cloud.google.com/bigquery/troubleshooting-errors>`__
        raise GenericGBQException("Reason: {0}".format(ex))

    def run_query(self, query, **kwargs):
        """Execute *query*, poll until done, and return results as a DataFrame.

        An optional ``configuration`` keyword may carry a BigQuery job
        configuration dict; supplying a query both there and as the
        *query* argument raises ValueError.
        """
        from concurrent.futures import TimeoutError
        from google.auth.exceptions import RefreshError
        from google.cloud import bigquery

        job_config = {
            "query": {
                "useLegacySql": self.dialect
                == "legacy"
                # 'allowLargeResults', 'createDisposition',
                # 'preserveNulls', destinationTable, useQueryCache
            }
        }
        config = kwargs.get("configuration")
        if config is not None:
            job_config.update(config)

            if "query" in config and "query" in config["query"]:
                if query is not None:
                    raise ValueError(
                        "Query statement can't be specified "
                        "inside config while it is specified "
                        "as parameter"
                    )
                query = config["query"].pop("query")

        self._start_timer()

        try:
            logger.debug("Requesting query... ")
            query_reply = self.client.query(
                query,
                job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
                location=self.location,
                project=self.project_id,
            )
            logger.debug("Query running...")
        except (RefreshError, ValueError):
            if self.private_key:
                raise AccessDenied(
                    "The service account credentials are not valid"
                )
            else:
                raise AccessDenied(
                    "The credentials have been revoked or expired, "
                    "please re-run the application to re-authorize"
                )
        except self.http_error as ex:
            self.process_http_error(ex)

        job_id = query_reply.job_id
        logger.debug("Job ID: %s" % job_id)

        # Poll until the job reaches the DONE state, enforcing any
        # user-supplied timeoutMs ourselves.
        while query_reply.state != "DONE":
            self.log_elapsed_seconds(" Elapsed", "s. Waiting...")

            timeout_ms = job_config["query"].get("timeoutMs")
            if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
                raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))

            timeout_sec = 1.0
            if timeout_ms:
                # Wait at most 1 second so we can show progress bar
                timeout_sec = min(1.0, timeout_ms / 1000.0)

            try:
                query_reply.result(timeout=timeout_sec)
            except TimeoutError:
                # Use our own timeout logic
                pass
            except self.http_error as ex:
                self.process_http_error(ex)

        if query_reply.cache_hit:
            logger.debug("Query done.\nCache hit.\n")
        else:
            bytes_processed = query_reply.total_bytes_processed or 0
            bytes_billed = query_reply.total_bytes_billed or 0
            logger.debug(
                "Query done.\nProcessed: {} Billed: {}".format(
                    self.sizeof_fmt(bytes_processed),
                    self.sizeof_fmt(bytes_billed),
                )
            )
            logger.debug(
                "Standard price: ${:,.2f} USD\n".format(
                    bytes_billed * self.query_price_for_TB
                )
            )

        try:
            rows_iter = query_reply.result()
        except self.http_error as ex:
            self.process_http_error(ex)

        schema_fields = [field.to_api_repr() for field in rows_iter.schema]
        nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
        df = rows_iter.to_dataframe(
            dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
        )

        if df.empty:
            df = _cast_empty_df_dtypes(schema_fields, df)

        # Ensure any TIMESTAMP columns are tz-aware.
        df = _localize_df(schema_fields, df)

        logger.debug("Got {} rows.\n".format(rows_iter.total_rows))
        return df

    def load_data(
        self,
        dataframe,
        dataset_id,
        table_id,
        chunksize=None,
        schema=None,
        progress_bar=True,
    ):
        """Load *dataframe* into ``dataset_id.table_id``, optionally in chunks,
        logging progress (with a tqdm bar when available and requested)."""
        from pandas_gbq import load

        total_rows = len(dataframe)

        try:
            chunks = load.load_chunks(
                self.client,
                dataframe,
                dataset_id,
                table_id,
                chunksize=chunksize,
                schema=schema,
                location=self.location,
            )
            if progress_bar and tqdm:
                chunks = tqdm.tqdm(chunks)
            for remaining_rows in chunks:
                logger.info(
                    "\r{} out of {} rows loaded.".format(
                        total_rows - remaining_rows, total_rows
                    )
                )
        except self.http_error as ex:
            self.process_http_error(ex)

    def schema(self, dataset_id, table_id):
        """Retrieve the schema of the table

        Obtain from BigQuery the field names and field types
        for the table defined by the parameters

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table

        Returns
        -------
        list of dicts
            Fields representing the schema
        """
        table_ref = self.client.dataset(dataset_id).table(table_id)

        try:
            table = self.client.get_table(table_ref)
            remote_schema = table.schema

            remote_fields = [
                field_remote.to_api_repr() for field_remote in remote_schema
            ]
            # Normalize case so comparisons against generated schemas work.
            for field in remote_fields:
                field["type"] = field["type"].upper()
                field["mode"] = field["mode"].upper()

            return remote_fields
        except self.http_error as ex:
            self.process_http_error(ex)

    def _clean_schema_fields(self, fields):
        """Return a sanitized version of the schema for comparisons."""
        fields_sorted = sorted(fields, key=lambda field: field["name"])
        # Ignore mode and description when comparing schemas.
        return [
            {"name": field["name"], "type": field["type"]}
            for field in fields_sorted
        ]

    def verify_schema(self, dataset_id, table_id, schema):
        """Indicate whether schemas match exactly

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether all fields in the former
        are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id :str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the schemas match
        """
        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return fields_remote == fields_local

    def schema_is_subset(self, dataset_id, table_id, schema):
        """Indicate whether the schema to be uploaded is a subset

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether a subset of the fields in
        the former are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the passed schema is a subset
        """
        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return all(field in fields_remote for field in fields_local)

    def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
        """Drop ``dataset_id.table_id`` and recreate it with *table_schema*."""
        table = _Table(
            self.project_id, dataset_id, credentials=self.credentials
        )
        table.delete(table_id)
        table.create(table_id, table_schema)
def _cast_empty_df_dtypes(schema_fields, df):
"""Cast any columns in an empty dataframe to correct type.
In an empty dataframe, pandas cannot choose a dtype unless one is
explicitly provided. The _bqschema_to_nullsafe_dtypes() function only
provides dtypes when the dtype safely handles null values. This means
that empty int64 and boolean columns are incorrectly classified as
``object``.
"""
if not df.empty:
raise ValueError(
"DataFrame must be empty in order to cast non-nullsafe dtypes"
)
dtype_map = {"BOOLEAN": bool, "INTEGER": np.int64}
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
df[column] = df[column].astype(dtype)
return df
def _localize_df(schema_fields, df):
"""Localize any TIMESTAMP columns to tz-aware type.
In pandas versions before 0.24.0, DatetimeTZDtype cannot be used as the
dtype in Series/DataFrame construction, so localize those columns after
the DataFrame is constructed.
"""
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
if field["type"].upper() == "TIMESTAMP" and df[column].dt.tz is None:
df[column] = df[column].dt.tz_localize("UTC")
return df
def _make_bqstorage_client(use_bqstorage_api, credentials):
if not use_bqstorage_api:
return None
if bigquery_storage_v1beta1 is None:
raise ImportError(
"Install the google-cloud-bigquery-storage and fastavro packages "
"to use the BigQuery Storage API."
)
return bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=credentials
)
def read_gbq(
    query,
    project_id=None,
    index_col=None,
    col_order=None,
    reauth=False,
    auth_local_webserver=False,
    dialect=None,
    location=None,
    configuration=None,
    credentials=None,
    use_bqstorage_api=False,
    verbose=None,
    private_key=None,
):
    r"""Load data from Google BigQuery using google-cloud-python

    The main method a user calls to execute a Query in Google BigQuery
    and read results into a pandas DataFrame.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    query : str
        SQL-Like Query to return data values.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    index_col : str, optional
        Name of result column to use for index in results DataFrame.
    col_order : list(str), optional
        List of BigQuery column names in the desired order for results
        DataFrame.
    reauth : boolean, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    auth_local_webserver : boolean, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    dialect : str, default 'standard'
        Note: The default value changed to 'standard' in version 0.10.0.

        SQL syntax dialect to use. Value can be one of:

        ``'legacy'``
            Use BigQuery's legacy SQL dialect. For more information see
            `BigQuery Legacy SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
        ``'standard'``
            Use BigQuery's standard SQL, which is
            compliant with the SQL 2011 standard. For more information
            see `BigQuery Standard SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
    location : str, optional
        Location where the query job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of any
        datasets used in the query.

        .. versionadded:: 0.5.0
    configuration : dict, optional
        Query config parameters for job processing.
        For example:

            configuration = {'query': {'useQueryCache': False}}

        For more information see `BigQuery REST API Reference
        <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        .. versionadded:: 0.8.0
    use_bqstorage_api : bool, default False
        Use the `BigQuery Storage API
        <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
        download query results quickly, but at an increased cost. To use this
        API, first `enable it in the Cloud Console
        <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
        You must also have the `bigquery.readsessions.create
        <https://cloud.google.com/bigquery/docs/access-control#roles>`__
        permission on the project you are billing queries to.

        **Note:** Due to a `known issue in the ``google-cloud-bigquery``
        package
        <https://github.com/googleapis/google-cloud-python/pull/7633>`__
        (fixed in version 1.11.0), you must write your query results to a
        destination table. To do this with ``read_gbq``, supply a
        ``configuration`` dictionary.

        This feature requires the ``google-cloud-bigquery-storage`` and
        ``fastavro`` packages.

        .. versionadded:: 0.10.0
    verbose : None, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    private_key : str, deprecated
        Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
        parameter and
        :func:`google.oauth2.service_account.Credentials.from_service_account_info`
        or
        :func:`google.oauth2.service_account.Credentials.from_service_account_file`
        instead.

        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).

    Returns
    -------
    df: DataFrame
        DataFrame representing results of query.
    """
    global context

    # Fall back to the session-wide default dialect, then to "standard".
    if dialect is None:
        dialect = context.dialect

    if dialect is None:
        dialect = "standard"

    _test_google_api_imports()

    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=2,
        )

    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )

    if dialect not in ("legacy", "standard"):
        raise ValueError("'{0}' is not valid for dialect".format(dialect))

    connector = GbqConnector(
        project_id,
        reauth=reauth,
        dialect=dialect,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
        use_bqstorage_api=use_bqstorage_api,
    )

    final_df = connector.run_query(query, configuration=configuration)

    # Reindex the DataFrame on the provided column
    if index_col is not None:
        if index_col in final_df.columns:
            final_df.set_index(index_col, inplace=True)
        else:
            raise InvalidIndexColumn(
                'Index column "{0}" does not exist in DataFrame.'.format(
                    index_col
                )
            )

    # Change the order of columns in the DataFrame based on provided list
    if col_order is not None:
        if sorted(col_order) == sorted(final_df.columns):
            final_df = final_df[col_order]
        else:
            raise InvalidColumnOrder(
                "Column order does not match this DataFrame."
            )

    connector.log_elapsed_seconds(
        "Total time taken",
        datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
    )

    return final_df
def to_gbq(
    dataframe,
    destination_table,
    project_id=None,
    chunksize=None,
    reauth=False,
    if_exists="fail",
    auth_local_webserver=False,
    table_schema=None,
    location=None,
    progress_bar=True,
    credentials=None,
    verbose=None,
    private_key=None,
):
    """Write a DataFrame to a Google BigQuery table.

    The main method a user calls to export pandas DataFrame contents to
    Google BigQuery table.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        DataFrame to be written to a Google BigQuery table.
    destination_table : str
        Name of table to be written, in the form ``dataset.tablename``.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    chunksize : int, optional
        Number of rows to be inserted in each chunk from the dataframe.
        Set to ``None`` to load the whole dataframe at once.
    reauth : bool, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    if_exists : str, default 'fail'
        Behavior when the destination table exists. Value can be one of:

        ``'fail'``
            If table exists, do nothing.
        ``'replace'``
            If table exists, drop it, recreate it, and insert data.
        ``'append'``
            If table exists, insert data. Create if does not exist.
    auth_local_webserver : bool, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    table_schema : list of dicts, optional
        List of BigQuery table fields to which according DataFrame
        columns conform to, e.g. ``[{'name': 'col1', 'type':
        'STRING'},...]``.

        - If ``table_schema`` is provided, it may contain all or a subset of
          DataFrame columns. If a subset is provided, the rest will be
          inferred from the DataFrame dtypes.
        - If ``table_schema`` is **not** provided, it will be
          generated according to dtypes of DataFrame columns. See
          `Inferring the Table Schema
          <https://pandas-gbq.readthedocs.io/en/latest/writing.html#writing-schema>`__.
          for a description of the schema inference.

        See `BigQuery API documentation on valid column names
        <https://cloud.google.com/bigquery/docs/schemas#column_names`>__.

        .. versionadded:: 0.3.1
    location : str, optional
        Location where the load job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of the
        target dataset.

        .. versionadded:: 0.5.0
    progress_bar : bool, default True
        Use the library `tqdm` to show the progress bar for the upload,
        chunk by chunk.

        .. versionadded:: 0.5.0
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        .. versionadded:: 0.8.0
    verbose : bool, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    private_key : str, deprecated
        Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
        parameter and
        :func:`google.oauth2.service_account.Credentials.from_service_account_info`
        or
        :func:`google.oauth2.service_account.Credentials.from_service_account_file`
        instead.

        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).
    """
    _test_google_api_imports()

    from pandas_gbq import schema

    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        # stacklevel=2 points the warning at the caller of to_gbq, matching
        # the equivalent deprecation warning in read_gbq. (Fixed: was
        # stacklevel=1, which attributed the warning to pandas_gbq itself.)
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=2,
        )

    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )

    if if_exists not in ("fail", "replace", "append"):
        raise ValueError("'{0}' is not valid for if_exists".format(if_exists))

    if "." not in destination_table:
        raise NotFoundException(
            "Invalid Table Name. Should be of the form 'datasetId.tableId' "
        )

    connector = GbqConnector(
        project_id,
        reauth=reauth,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
    )
    dataset_id, table_id = destination_table.rsplit(".", 1)

    # NOTE(review): passes the raw project_id (possibly None) rather than
    # connector.project_id; _Table re-resolves the default from the
    # credentials cache, but confirm this is intentional.
    table = _Table(
        project_id,
        dataset_id,
        location=location,
        credentials=connector.credentials,
    )

    default_schema = _generate_bq_schema(dataframe)
    if not table_schema:
        table_schema = default_schema
    else:
        # Merge: user-supplied fields override the inferred ones.
        table_schema = schema.update_schema(
            default_schema, dict(fields=table_schema)
        )

    # If table exists, check if_exists parameter
    if table.exists(table_id):
        if if_exists == "fail":
            raise TableCreationError(
                "Could not create the table because it "
                "already exists. "
                "Change the if_exists parameter to "
                "'append' or 'replace' data."
            )
        elif if_exists == "replace":
            connector.delete_and_recreate_table(
                dataset_id, table_id, table_schema
            )
        elif if_exists == "append":
            if not connector.schema_is_subset(
                dataset_id, table_id, table_schema
            ):
                raise InvalidSchema(
                    "Please verify that the structure and "
                    "data types in the DataFrame match the "
                    "schema of the destination table."
                )
    else:
        table.create(table_id, table_schema)

    if dataframe.empty:
        # Create the table (if needed), but don't try to run a load job with an
        # empty file. See: https://github.com/pydata/pandas-gbq/issues/237
        return

    connector.load_data(
        dataframe,
        dataset_id,
        table_id,
        chunksize=chunksize,
        schema=table_schema,
        progress_bar=progress_bar,
    )
def generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Given a passed df, generate the associated Google BigQuery
    schema.

    Emits a FutureWarning, then delegates to :func:`_generate_bq_schema`.

    Parameters
    ----------
    df : DataFrame
    default_type : string
        The default big query type in case the type of the column
        does not exist in the schema.
    """
    # deprecation TimeSeries, #11121
    warnings.warn(
        "generate_bq_schema is deprecated and will be removed in "
        "a future version",
        FutureWarning,
        stacklevel=2,
    )
    return _generate_bq_schema(df, default_type=default_type)
def _generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Given a dataframe, generate a Google BigQuery schema.

    Thin delegator to :func:`pandas_gbq.schema.generate_bq_schema`.
    This is a private method, but was used in external code to work around
    issues in the default schema generation. Now that individual columns can
    be overridden: https://github.com/pydata/pandas-gbq/issues/218, this
    method can be removed after there is time to migrate away from this
    method.
    """
    from pandas_gbq import schema

    return schema.generate_bq_schema(df, default_type=default_type)
class _Table(GbqConnector):
    """Connector scoped to one dataset, with table exists/create/delete ops."""

    def __init__(
        self,
        project_id,
        dataset_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        # Dataset that all table operations on this instance apply to.
        self.dataset_id = dataset_id
        super(_Table, self).__init__(
            project_id,
            reauth,
            location=location,
            credentials=credentials,
            private_key=private_key,
        )

    def exists(self, table_id):
        """ Check if a table exists in Google BigQuery

        Parameters
        ----------
        table : str
            Name of table to be verified

        Returns
        -------
        boolean
            true if table exists, otherwise false
        """
        from google.api_core.exceptions import NotFound

        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.get_table(table_ref)
            return True
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)

    def create(self, table_id, schema):
        """ Create a table in Google BigQuery given a table and schema

        Creates the enclosing dataset first if it does not exist yet.

        Parameters
        ----------
        table : str
            Name of table to be written
        schema : str
            Use the generate_bq_schema to generate your table schema from a
            dataframe.
        """
        from google.cloud.bigquery import SchemaField
        from google.cloud.bigquery import Table

        if self.exists(table_id):
            raise TableCreationError(
                "Table {0} already " "exists".format(table_id)
            )

        if not _Dataset(self.project_id, credentials=self.credentials).exists(
            self.dataset_id
        ):
            _Dataset(
                self.project_id,
                credentials=self.credentials,
                location=self.location,
            ).create(self.dataset_id)

        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        table = Table(table_ref)

        # Manually create the schema objects, adding NULLABLE mode
        # as a workaround for
        # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
        for field in schema["fields"]:
            if "mode" not in field:
                field["mode"] = "NULLABLE"

        table.schema = [
            SchemaField.from_api_repr(field) for field in schema["fields"]
        ]

        try:
            self.client.create_table(table)
        except self.http_error as ex:
            self.process_http_error(ex)

    def delete(self, table_id):
        """ Delete a table in Google BigQuery

        Parameters
        ----------
        table : str
            Name of table to be deleted
        """
        from google.api_core.exceptions import NotFound

        if not self.exists(table_id):
            raise NotFoundException("Table does not exist")

        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.delete_table(table_ref)
        except NotFound:
            # Ignore 404 error which may occur if table already deleted
            pass
        except self.http_error as ex:
            self.process_http_error(ex)
class _Dataset(GbqConnector):
    """Thin wrapper for dataset-level BigQuery operations.

    Reuses the credential and client plumbing from :class:`GbqConnector`.
    """

    def __init__(
        self,
        project_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        super(_Dataset, self).__init__(
            project_id,
            reauth,
            credentials=credentials,
            location=location,
            private_key=private_key,
        )

    def exists(self, dataset_id):
        """Check if a dataset exists in Google BigQuery.

        Parameters
        ----------
        dataset_id : str
            Name of dataset to be verified.

        Returns
        -------
        boolean
            true if dataset exists, otherwise false
        """
        from google.api_core.exceptions import NotFound

        try:
            self.client.get_dataset(self.client.dataset(dataset_id))
            return True
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)

    def create(self, dataset_id):
        """Create a dataset in Google BigQuery.

        Parameters
        ----------
        dataset_id : str
            Name of dataset to be written.

        Raises
        ------
        DatasetCreationError
            If a dataset with this name already exists.
        """
        from google.cloud.bigquery import Dataset

        if self.exists(dataset_id):
            raise DatasetCreationError(
                "Dataset {0} already " "exists".format(dataset_id)
            )
        dataset = Dataset(self.client.dataset(dataset_id))

        # Pin the dataset to the requested location when one was given.
        if self.location is not None:
            dataset.location = self.location

        try:
            self.client.create_dataset(dataset)
        except self.http_error as ex:
            self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
_cast_empty_df_dtypes
|
python
|
def _cast_empty_df_dtypes(schema_fields, df):
if not df.empty:
raise ValueError(
"DataFrame must be empty in order to cast non-nullsafe dtypes"
)
dtype_map = {"BOOLEAN": bool, "INTEGER": np.int64}
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
df[column] = df[column].astype(dtype)
return df
|
Cast any columns in an empty dataframe to correct type.
In an empty dataframe, pandas cannot choose a dtype unless one is
explicitly provided. The _bqschema_to_nullsafe_dtypes() function only
provides dtypes when the dtype safely handles null values. This means
that empty int64 and boolean columns are incorrectly classified as
``object``.
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L697-L722
| null |
import logging
import time
import warnings
from datetime import datetime
import numpy as np
try:
# The BigQuery Storage API client is an optional dependency. It is only
# required when use_bqstorage_api=True.
from google.cloud import bigquery_storage_v1beta1
except ImportError: # pragma: NO COVER
bigquery_storage_v1beta1 = None
from pandas_gbq.exceptions import AccessDenied
logger = logging.getLogger(__name__)
BIGQUERY_INSTALLED_VERSION = None
SHOW_VERBOSE_DEPRECATION = False
SHOW_PRIVATE_KEY_DEPRECATION = False
PRIVATE_KEY_DEPRECATION_MESSAGE = (
"private_key is deprecated and will be removed in a future version."
"Use the credentials argument instead. See "
"https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html "
"for examples on using the credentials argument with service account keys."
)
try:
import tqdm # noqa
except ImportError:
tqdm = None
def _check_google_client_version():
    """Verify the installed google-cloud-bigquery meets the minimum version.

    Also sets the module-level flags that control whether the ``verbose``
    and ``private_key`` deprecation warnings are shown, based on the
    installed pandas version.

    Raises
    ------
    ImportError
        If setuptools (pkg_resources) is unavailable or the installed
        google-cloud-bigquery is older than the minimum supported version.
    """
    global BIGQUERY_INSTALLED_VERSION, SHOW_VERBOSE_DEPRECATION, SHOW_PRIVATE_KEY_DEPRECATION

    try:
        import pkg_resources
    except ImportError:
        raise ImportError("Could not import pkg_resources (setuptools).")

    # https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/bigquery/CHANGELOG.md
    bigquery_minimum_version = pkg_resources.parse_version("1.9.0")
    BIGQUERY_INSTALLED_VERSION = pkg_resources.get_distribution(
        "google-cloud-bigquery"
    ).parsed_version

    if BIGQUERY_INSTALLED_VERSION < bigquery_minimum_version:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery >= {0}, "
            "current version {1}".format(
                bigquery_minimum_version, BIGQUERY_INSTALLED_VERSION
            )
        )

    # Add check for Pandas version before showing deprecation warning.
    # https://github.com/pydata/pandas-gbq/issues/157
    pandas_installed_version = pkg_resources.get_distribution(
        "pandas"
    ).parsed_version
    pandas_version_wo_verbosity = pkg_resources.parse_version("0.23.0")
    SHOW_VERBOSE_DEPRECATION = (
        pandas_installed_version >= pandas_version_wo_verbosity
    )
    pandas_version_with_credentials_arg = pkg_resources.parse_version("0.24.0")
    SHOW_PRIVATE_KEY_DEPRECATION = (
        pandas_installed_version >= pandas_version_with_credentials_arg
    )
def _test_google_api_imports():
    """Verify that all required Google API dependencies are importable.

    Raises
    ------
    ImportError
        Naming the missing package. Also re-runs the
        google-cloud-bigquery minimum-version check.
    """
    try:
        import pydata_google_auth  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires pydata-google-auth: {0}".format(ex)
        )

    try:
        from google_auth_oauthlib.flow import InstalledAppFlow  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires google-auth-oauthlib: {0}".format(ex)
        )

    try:
        import google.auth  # noqa
    except ImportError as ex:
        raise ImportError("pandas-gbq requires google-auth: {0}".format(ex))

    try:
        from google.cloud import bigquery  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery: {0}".format(ex)
        )

    _check_google_client_version()
class DatasetCreationError(ValueError):
    """Raised when the create dataset method fails."""
class GenericGBQException(ValueError):
    """Raised when an unrecognized Google API Error occurs."""
class InvalidColumnOrder(ValueError):
    """Raised when the provided column order for the output results
    DataFrame does not match the schema returned by BigQuery."""
class InvalidIndexColumn(ValueError):
    """Raised when the provided index column for the output results
    DataFrame does not match the schema returned by BigQuery."""
class InvalidPageToken(ValueError):
    """Raised when Google BigQuery fails to return, or returns a
    duplicate, page token."""
class InvalidSchema(ValueError):
    """Raised when the provided DataFrame does not match the schema of
    the destination table in BigQuery."""
class NotFoundException(ValueError):
    """Raised when the project_id, table, or dataset provided in the
    query could not be found."""
class QueryTimeout(ValueError):
    """Raised when the query request exceeds the timeoutMs value
    specified in the BigQuery configuration."""
class TableCreationError(ValueError):
    """Raised when the create table method fails."""
class Context(object):
    """Session-wide storage for pandas-gbq settings.

    A single instance is created when the ``pandas_gbq`` module is
    imported and is exposed as :attr:`pandas_gbq.context`. It caches the
    credentials, default project, and default SQL dialect between calls.
    """

    def __init__(self):
        # All settings start unset; read_gbq/to_gbq fill them in lazily.
        self._credentials = None
        self._project = None
        # dialect defaults to None so that read_gbq can stop warning if set.
        self._dialect = None

    @property
    def credentials(self):
        """google.auth.credentials.Credentials : Credentials for Google APIs.

        Cached automatically by :func:`pandas_gbq.read_gbq` and
        :func:`pandas_gbq.to_gbq`. May also be assigned directly, e.g.
        with credentials built via
        :meth:`google.oauth2.service_account.Credentials.from_service_account_file`.
        See http://google-auth.readthedocs.io/en/latest/user-guide.html
        for how to obtain credentials.
        """
        return self._credentials

    @credentials.setter
    def credentials(self, value):
        self._credentials = value

    @property
    def project(self):
        """str : Default project ID to use for calls to Google APIs.

        Example: ``pandas_gbq.context.project = 'my-project'``.
        """
        return self._project

    @project.setter
    def project(self, value):
        self._project = value

    @property
    def dialect(self):
        """str : Default SQL dialect for :func:`pandas_gbq.read_gbq`.

        Either ``'legacy'`` (BigQuery legacy SQL) or ``'standard'``
        (SQL 2011-compliant standard SQL). See the BigQuery SQL
        reference documentation for the differences.
        """
        return self._dialect

    @dialect.setter
    def dialect(self, value):
        self._dialect = value
# Create an empty context, used to cache credentials.
context = Context()
"""A :class:`pandas_gbq.Context` object used to cache credentials.
Credentials automatically are cached in-memory by :func:`pandas_gbq.read_gbq`
and :func:`pandas_gbq.to_gbq`.
"""
class GbqConnector(object):
    """Shared connection and query helper used by ``read_gbq`` / ``to_gbq``.

    Resolves credentials (explicit argument, module-level ``context``
    cache, or freshly obtained via ``pandas_gbq.auth``), constructs a
    ``google.cloud.bigquery.Client``, and wraps query execution and data
    loading with pandas-friendly error handling.
    """

    def __init__(
        self,
        project_id,
        reauth=False,
        private_key=None,
        auth_local_webserver=False,
        dialect="standard",
        location=None,
        credentials=None,
        use_bqstorage_api=False,
    ):
        global context
        from google.api_core.exceptions import GoogleAPIError
        from google.api_core.exceptions import ClientError
        from pandas_gbq import auth

        self.http_error = (ClientError, GoogleAPIError)
        self.project_id = project_id
        self.location = location
        self.reauth = reauth
        self.private_key = private_key
        self.auth_local_webserver = auth_local_webserver
        self.dialect = dialect
        self.credentials = credentials
        default_project = None

        # Service account credentials have a project associated with them.
        # Prefer that project if none was supplied.
        if self.project_id is None and hasattr(self.credentials, "project_id"):
            self.project_id = credentials.project_id

        # Load credentials from cache.
        if not self.credentials:
            self.credentials = context.credentials
            default_project = context.project

        # Credentials were explicitly asked for, so don't use the cache.
        if private_key or reauth or not self.credentials:
            self.credentials, default_project = auth.get_credentials(
                private_key=private_key,
                project_id=project_id,
                reauth=reauth,
                auth_local_webserver=auth_local_webserver,
            )

        if self.project_id is None:
            self.project_id = default_project

        if self.project_id is None:
            raise ValueError(
                "Could not determine project ID and one was not supplied."
            )

        # Cache the credentials if they haven't been set yet.
        if context.credentials is None:
            context.credentials = self.credentials
        if context.project is None:
            context.project = self.project_id

        self.client = self.get_client()
        # None unless use_bqstorage_api is truthy.
        self.bqstorage_client = _make_bqstorage_client(
            use_bqstorage_api, self.credentials
        )

        # BQ Queries costs $5 per TB. First 1 TB per month is free
        # see here for more: https://cloud.google.com/bigquery/pricing
        self.query_price_for_TB = 5.0 / 2 ** 40  # USD/TB

    def _start_timer(self):
        # Record the wall-clock start of the current operation.
        self.start = time.time()

    def get_elapsed_seconds(self):
        # Seconds since _start_timer(), rounded for display.
        return round(time.time() - self.start, 2)

    def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
        # Only log when the operation took longer than `overlong` seconds.
        sec = self.get_elapsed_seconds()
        if sec > overlong:
            logger.info("{} {} {}".format(prefix, sec, postfix))

    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    @staticmethod
    def sizeof_fmt(num, suffix="B"):
        """Format a byte count as a human-readable string, e.g. '1.5 MB'."""
        fmt = "%3.1f %s%s"
        for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
            if abs(num) < 1024.0:
                return fmt % (num, unit, suffix)
            num /= 1024.0
        return fmt % (num, "Y", suffix)

    def get_client(self):
        """Construct a BigQuery client bound to this project/credentials."""
        from google.cloud import bigquery

        return bigquery.Client(
            project=self.project_id, credentials=self.credentials
        )

    @staticmethod
    def process_http_error(ex):
        # See `BigQuery Troubleshooting Errors
        # <https://cloud.google.com/bigquery/troubleshooting-errors>`__
        raise GenericGBQException("Reason: {0}".format(ex))

    def run_query(self, query, **kwargs):
        """Execute ``query``, wait for completion, and return a DataFrame
        with null-safe dtypes and tz-aware TIMESTAMP columns.
        """
        from concurrent.futures import TimeoutError
        from google.auth.exceptions import RefreshError
        from google.cloud import bigquery

        job_config = {
            "query": {
                "useLegacySql": self.dialect
                == "legacy"
                # 'allowLargeResults', 'createDisposition',
                # 'preserveNulls', destinationTable, useQueryCache
            }
        }
        config = kwargs.get("configuration")
        if config is not None:
            job_config.update(config)

            if "query" in config and "query" in config["query"]:
                if query is not None:
                    raise ValueError(
                        "Query statement can't be specified "
                        "inside config while it is specified "
                        "as parameter"
                    )

                query = config["query"].pop("query")

        self._start_timer()

        try:
            logger.debug("Requesting query... ")
            query_reply = self.client.query(
                query,
                job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
                location=self.location,
                project=self.project_id,
            )
            logger.debug("Query running...")
        except (RefreshError, ValueError):
            if self.private_key:
                raise AccessDenied(
                    "The service account credentials are not valid"
                )
            else:
                raise AccessDenied(
                    "The credentials have been revoked or expired, "
                    "please re-run the application to re-authorize"
                )
        except self.http_error as ex:
            self.process_http_error(ex)

        job_id = query_reply.job_id
        logger.debug("Job ID: %s" % job_id)

        # Poll until the job finishes, enforcing the optional timeoutMs
        # from the job configuration ourselves.
        while query_reply.state != "DONE":
            self.log_elapsed_seconds(" Elapsed", "s. Waiting...")

            timeout_ms = job_config["query"].get("timeoutMs")
            # NOTE(review): a REST-style config may carry timeoutMs as a
            # string; the numeric comparison below assumes a number —
            # verify against callers.
            if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
                raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))

            timeout_sec = 1.0
            if timeout_ms:
                # Wait at most 1 second so we can show progress bar
                timeout_sec = min(1.0, timeout_ms / 1000.0)

            try:
                query_reply.result(timeout=timeout_sec)
            except TimeoutError:
                # Use our own timeout logic
                pass
            except self.http_error as ex:
                self.process_http_error(ex)

        if query_reply.cache_hit:
            logger.debug("Query done.\nCache hit.\n")
        else:
            bytes_processed = query_reply.total_bytes_processed or 0
            bytes_billed = query_reply.total_bytes_billed or 0
            logger.debug(
                "Query done.\nProcessed: {} Billed: {}".format(
                    self.sizeof_fmt(bytes_processed),
                    self.sizeof_fmt(bytes_billed),
                )
            )
            logger.debug(
                "Standard price: ${:,.2f} USD\n".format(
                    bytes_billed * self.query_price_for_TB
                )
            )

        try:
            rows_iter = query_reply.result()
        except self.http_error as ex:
            self.process_http_error(ex)
        schema_fields = [field.to_api_repr() for field in rows_iter.schema]
        # Explicit dtypes only where the dtype can represent nulls.
        nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
        df = rows_iter.to_dataframe(
            dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
        )

        if df.empty:
            # Empty frames get 'object' columns; fix int/bool explicitly.
            df = _cast_empty_df_dtypes(schema_fields, df)

        # Ensure any TIMESTAMP columns are tz-aware.
        df = _localize_df(schema_fields, df)

        logger.debug("Got {} rows.\n".format(rows_iter.total_rows))

        return df

    def load_data(
        self,
        dataframe,
        dataset_id,
        table_id,
        chunksize=None,
        schema=None,
        progress_bar=True,
    ):
        """Load ``dataframe`` into an existing BigQuery table, in chunks,
        logging progress (optionally via tqdm) as rows are uploaded.
        """
        from pandas_gbq import load

        total_rows = len(dataframe)

        try:
            chunks = load.load_chunks(
                self.client,
                dataframe,
                dataset_id,
                table_id,
                chunksize=chunksize,
                schema=schema,
                location=self.location,
            )
            if progress_bar and tqdm:
                chunks = tqdm.tqdm(chunks)
            for remaining_rows in chunks:
                logger.info(
                    "\r{} out of {} rows loaded.".format(
                        total_rows - remaining_rows, total_rows
                    )
                )
        except self.http_error as ex:
            self.process_http_error(ex)

    def schema(self, dataset_id, table_id):
        """Retrieve the schema of the table

        Obtain from BigQuery the field names and field types
        for the table defined by the parameters

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table

        Returns
        -------
        list of dicts
            Fields representing the schema
        """
        table_ref = self.client.dataset(dataset_id).table(table_id)

        try:
            table = self.client.get_table(table_ref)
            remote_schema = table.schema

            remote_fields = [
                field_remote.to_api_repr() for field_remote in remote_schema
            ]
            # Normalize case so comparisons against local schemas work.
            for field in remote_fields:
                field["type"] = field["type"].upper()
                field["mode"] = field["mode"].upper()

            return remote_fields
        except self.http_error as ex:
            self.process_http_error(ex)

    def _clean_schema_fields(self, fields):
        """Return a sanitized version of the schema for comparisons."""
        fields_sorted = sorted(fields, key=lambda field: field["name"])
        # Ignore mode and description when comparing schemas.
        return [
            {"name": field["name"], "type": field["type"]}
            for field in fields_sorted
        ]

    def verify_schema(self, dataset_id, table_id, schema):
        """Indicate whether schemas match exactly

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether all fields in the former
        are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the schemas match
        """

        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return fields_remote == fields_local

    def schema_is_subset(self, dataset_id, table_id, schema):
        """Indicate whether the schema to be uploaded is a subset

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether a subset of the fields in
        the former are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the passed schema is a subset
        """

        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return all(field in fields_remote for field in fields_local)

    def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
        # Used by to_gbq(if_exists='replace'): drop and recreate with the
        # new schema before loading.
        table = _Table(
            self.project_id, dataset_id, credentials=self.credentials
        )
        table.delete(table_id)
        table.create(table_id, table_schema)
def _bqschema_to_nullsafe_dtypes(schema_fields):
"""Specify explicit dtypes based on BigQuery schema.
This function only specifies a dtype when the dtype allows nulls.
Otherwise, use pandas's default dtype choice.
See: http://pandas.pydata.org/pandas-docs/dev/missing_data.html
#missing-data-casting-rules-and-indexing
"""
# If you update this mapping, also update the table at
# `docs/source/reading.rst`.
dtype_map = {
"FLOAT": np.dtype(float),
# pandas doesn't support timezone-aware dtype in DataFrame/Series
# constructors. It's more idiomatic to localize after construction.
# https://github.com/pandas-dev/pandas/issues/25843
"TIMESTAMP": "datetime64[ns]",
"TIME": "datetime64[ns]",
"DATE": "datetime64[ns]",
"DATETIME": "datetime64[ns]",
}
dtypes = {}
for field in schema_fields:
name = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
dtypes[name] = dtype
return dtypes
def _localize_df(schema_fields, df):
"""Localize any TIMESTAMP columns to tz-aware type.
In pandas versions before 0.24.0, DatetimeTZDtype cannot be used as the
dtype in Series/DataFrame construction, so localize those columns after
the DataFrame is constructed.
"""
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
if field["type"].upper() == "TIMESTAMP" and df[column].dt.tz is None:
df[column] = df[column].dt.tz_localize("UTC")
return df
def _make_bqstorage_client(use_bqstorage_api, credentials):
if not use_bqstorage_api:
return None
if bigquery_storage_v1beta1 is None:
raise ImportError(
"Install the google-cloud-bigquery-storage and fastavro packages "
"to use the BigQuery Storage API."
)
return bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=credentials
)
def read_gbq(
    query,
    project_id=None,
    index_col=None,
    col_order=None,
    reauth=False,
    auth_local_webserver=False,
    dialect=None,
    location=None,
    configuration=None,
    credentials=None,
    use_bqstorage_api=False,
    verbose=None,
    private_key=None,
):
    r"""Load data from Google BigQuery using google-cloud-python

    The main method a user calls to execute a Query in Google BigQuery
    and read results into a pandas DataFrame.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    query : str
        SQL-Like Query to return data values.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    index_col : str, optional
        Name of result column to use for index in results DataFrame.
    col_order : list(str), optional
        List of BigQuery column names in the desired order for results
        DataFrame.
    reauth : boolean, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    auth_local_webserver : boolean, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    dialect : str, default 'standard'
        Note: The default value changed to 'standard' in version 0.10.0.

        SQL syntax dialect to use. Value can be one of:

        ``'legacy'``
            Use BigQuery's legacy SQL dialect. For more information see
            `BigQuery Legacy SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
        ``'standard'``
            Use BigQuery's standard SQL, which is
            compliant with the SQL 2011 standard. For more information
            see `BigQuery Standard SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
    location : str, optional
        Location where the query job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of any
        datasets used in the query.

        .. versionadded:: 0.5.0
    configuration : dict, optional
        Query config parameters for job processing.
        For example:

            configuration = {'query': {'useQueryCache': False}}

        For more information see `BigQuery REST API Reference
        <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        .. versionadded:: 0.8.0
    use_bqstorage_api : bool, default False
        Use the `BigQuery Storage API
        <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
        download query results quickly, but at an increased cost. To use this
        API, first `enable it in the Cloud Console
        <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
        You must also have the `bigquery.readsessions.create
        <https://cloud.google.com/bigquery/docs/access-control#roles>`__
        permission on the project you are billing queries to.

        **Note:** Due to a `known issue in the ``google-cloud-bigquery``
        package
        <https://github.com/googleapis/google-cloud-python/pull/7633>`__
        (fixed in version 1.11.0), you must write your query results to a
        destination table. To do this with ``read_gbq``, supply a
        ``configuration`` dictionary.

        This feature requires the ``google-cloud-bigquery-storage`` and
        ``fastavro`` packages.

        .. versionadded:: 0.10.0
    verbose : None, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    private_key : str, deprecated
        Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
        parameter and
        :func:`google.oauth2.service_account.Credentials.from_service_account_info`
        or
        :func:`google.oauth2.service_account.Credentials.from_service_account_file`
        instead.

        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).

    Returns
    -------
    df: DataFrame
        DataFrame representing results of query.
    """
    global context

    # Fall back to the session default dialect, then to 'standard'.
    if dialect is None:
        dialect = context.dialect

    if dialect is None:
        dialect = "standard"

    _test_google_api_imports()

    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=2,
        )

    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )

    if dialect not in ("legacy", "standard"):
        raise ValueError("'{0}' is not valid for dialect".format(dialect))

    connector = GbqConnector(
        project_id,
        reauth=reauth,
        dialect=dialect,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
        use_bqstorage_api=use_bqstorage_api,
    )

    final_df = connector.run_query(query, configuration=configuration)

    # Reindex the DataFrame on the provided column
    if index_col is not None:
        if index_col in final_df.columns:
            final_df.set_index(index_col, inplace=True)
        else:
            raise InvalidIndexColumn(
                'Index column "{0}" does not exist in DataFrame.'.format(
                    index_col
                )
            )

    # Change the order of columns in the DataFrame based on provided list
    if col_order is not None:
        if sorted(col_order) == sorted(final_df.columns):
            final_df = final_df[col_order]
        else:
            raise InvalidColumnOrder(
                "Column order does not match this DataFrame."
            )

    connector.log_elapsed_seconds(
        "Total time taken",
        datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
    )

    return final_df
def to_gbq(
    dataframe,
    destination_table,
    project_id=None,
    chunksize=None,
    reauth=False,
    if_exists="fail",
    auth_local_webserver=False,
    table_schema=None,
    location=None,
    progress_bar=True,
    credentials=None,
    verbose=None,
    private_key=None,
):
    """Write a DataFrame to a Google BigQuery table.

    The main method a user calls to export pandas DataFrame contents to
    Google BigQuery table.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        DataFrame to be written to a Google BigQuery table.
    destination_table : str
        Name of table to be written, in the form ``dataset.tablename``.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    chunksize : int, optional
        Number of rows to be inserted in each chunk from the dataframe.
        Set to ``None`` to load the whole dataframe at once.
    reauth : bool, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    if_exists : str, default 'fail'
        Behavior when the destination table exists. Value can be one of:

        ``'fail'``
            If table exists, do nothing.
        ``'replace'``
            If table exists, drop it, recreate it, and insert data.
        ``'append'``
            If table exists, insert data. Create if does not exist.
    auth_local_webserver : bool, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    table_schema : list of dicts, optional
        List of BigQuery table fields to which according DataFrame
        columns conform to, e.g. ``[{'name': 'col1', 'type':
        'STRING'},...]``.

        - If ``table_schema`` is provided, it may contain all or a subset of
          DataFrame columns. If a subset is provided, the rest will be
          inferred from the DataFrame dtypes.
        - If ``table_schema`` is **not** provided, it will be
          generated according to dtypes of DataFrame columns. See
          `Inferring the Table Schema
          <https://pandas-gbq.readthedocs.io/en/latest/writing.html#writing-schema>`__.
          for a description of the schema inference.

        See `BigQuery API documentation on valid column names
        <https://cloud.google.com/bigquery/docs/schemas#column_names`>__.

        .. versionadded:: 0.3.1
    location : str, optional
        Location where the load job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of the
        target dataset.

        .. versionadded:: 0.5.0
    progress_bar : bool, default True
        Use the library `tqdm` to show the progress bar for the upload,
        chunk by chunk.

        .. versionadded:: 0.5.0
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        .. versionadded:: 0.8.0
    verbose : bool, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    private_key : str, deprecated
        Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
        parameter and
        :func:`google.oauth2.service_account.Credentials.from_service_account_info`
        or
        :func:`google.oauth2.service_account.Credentials.from_service_account_file`
        instead.

        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).
    """
    _test_google_api_imports()

    from pandas_gbq import schema

    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            # Fix: use stacklevel=2 (was 1) so the warning points at the
            # caller's line, consistent with the identical warning in
            # read_gbq.
            stacklevel=2,
        )

    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )

    if if_exists not in ("fail", "replace", "append"):
        raise ValueError("'{0}' is not valid for if_exists".format(if_exists))

    if "." not in destination_table:
        raise NotFoundException(
            "Invalid Table Name. Should be of the form 'datasetId.tableId' "
        )

    connector = GbqConnector(
        project_id,
        reauth=reauth,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
    )
    dataset_id, table_id = destination_table.rsplit(".", 1)

    table = _Table(
        project_id,
        dataset_id,
        location=location,
        credentials=connector.credentials,
    )

    # Merge a user-provided (possibly partial) schema into the one
    # inferred from the DataFrame dtypes.
    default_schema = _generate_bq_schema(dataframe)
    if not table_schema:
        table_schema = default_schema
    else:
        table_schema = schema.update_schema(
            default_schema, dict(fields=table_schema)
        )

    # If table exists, check if_exists parameter
    if table.exists(table_id):
        if if_exists == "fail":
            raise TableCreationError(
                "Could not create the table because it "
                "already exists. "
                "Change the if_exists parameter to "
                "'append' or 'replace' data."
            )
        elif if_exists == "replace":
            connector.delete_and_recreate_table(
                dataset_id, table_id, table_schema
            )
        elif if_exists == "append":
            if not connector.schema_is_subset(
                dataset_id, table_id, table_schema
            ):
                raise InvalidSchema(
                    "Please verify that the structure and "
                    "data types in the DataFrame match the "
                    "schema of the destination table."
                )
    else:
        table.create(table_id, table_schema)

    if dataframe.empty:
        # Create the table (if needed), but don't try to run a load job with an
        # empty file. See: https://github.com/pydata/pandas-gbq/issues/237
        return

    connector.load_data(
        dataframe,
        dataset_id,
        table_id,
        chunksize=chunksize,
        schema=table_schema,
        progress_bar=progress_bar,
    )
def generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Build the Google BigQuery schema matching *df*.

    Parameters
    ----------
    df : DataFrame
        Frame whose columns are mapped to BigQuery fields.
    default_type : str
        BigQuery type used when a column's dtype has no known mapping.

    Returns
    -------
    dict
        Schema dictionary with a ``fields`` list.
    """
    # Public alias kept only for backward compatibility (pandas GH#11121).
    deprecation_msg = (
        "generate_bq_schema is deprecated and will be removed in "
        "a future version"
    )
    warnings.warn(deprecation_msg, FutureWarning, stacklevel=2)
    return _generate_bq_schema(df, default_type=default_type)
def _generate_bq_schema(df, default_type="STRING"):
    """Infer a BigQuery schema for *df* via :mod:`pandas_gbq.schema`.

    Kept around (despite the leading underscore) because external code used
    it to work around default schema generation; see
    https://github.com/pydata/pandas-gbq/issues/218. Remove once callers
    have had time to migrate.
    """
    from pandas_gbq import schema

    return schema.generate_bq_schema(df, default_type=default_type)
class _Table(GbqConnector):
    """Table-level helper: existence checks, creation and deletion."""

    def __init__(
        self,
        project_id,
        dataset_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        self.dataset_id = dataset_id
        super(_Table, self).__init__(
            project_id,
            reauth,
            location=location,
            credentials=credentials,
            private_key=private_key,
        )

    def _ref(self, table_id):
        """Build a table reference for *table_id* inside this dataset."""
        return self.client.dataset(self.dataset_id).table(table_id)

    def exists(self, table_id):
        """Check if a table exists in Google BigQuery.

        Parameters
        ----------
        table_id : str
            Name of the table to look up.

        Returns
        -------
        boolean
            True if the table exists, otherwise False.
        """
        from google.api_core.exceptions import NotFound

        try:
            self.client.get_table(self._ref(table_id))
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)
        else:
            return True

    def create(self, table_id, schema):
        """Create *table_id* with *schema*, creating the dataset if needed.

        Parameters
        ----------
        table_id : str
            Name of the table to be written.
        schema : dict
            Schema dict, e.g. as produced by ``generate_bq_schema``.
        """
        from google.cloud.bigquery import SchemaField
        from google.cloud.bigquery import Table

        if self.exists(table_id):
            raise TableCreationError(
                "Table {0} already exists".format(table_id)
            )

        dataset_helper = _Dataset(
            self.project_id,
            credentials=self.credentials,
            location=self.location,
        )
        if not dataset_helper.exists(self.dataset_id):
            dataset_helper.create(self.dataset_id)

        table = Table(self._ref(table_id))
        # Default missing modes to NULLABLE as a workaround for
        # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
        for field in schema["fields"]:
            field.setdefault("mode", "NULLABLE")
        table.schema = [
            SchemaField.from_api_repr(field) for field in schema["fields"]
        ]

        try:
            self.client.create_table(table)
        except self.http_error as ex:
            self.process_http_error(ex)

    def delete(self, table_id):
        """Delete *table_id*; raise NotFoundException when it is absent.

        Parameters
        ----------
        table_id : str
            Name of the table to be deleted.
        """
        from google.api_core.exceptions import NotFound

        if not self.exists(table_id):
            raise NotFoundException("Table does not exist")

        try:
            self.client.delete_table(self._ref(table_id))
        except NotFound:
            # A racing delete already removed it; treat the 404 as success.
            pass
        except self.http_error as ex:
            self.process_http_error(ex)
class _Dataset(GbqConnector):
    """Dataset-level helper: existence checks and creation."""

    def __init__(
        self,
        project_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        super(_Dataset, self).__init__(
            project_id,
            reauth,
            credentials=credentials,
            location=location,
            private_key=private_key,
        )

    def exists(self, dataset_id):
        """Check if a dataset exists in Google BigQuery.

        Parameters
        ----------
        dataset_id : str
            Name of the dataset to look up.

        Returns
        -------
        boolean
            True if the dataset exists, otherwise False.
        """
        from google.api_core.exceptions import NotFound

        try:
            self.client.get_dataset(self.client.dataset(dataset_id))
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)
        else:
            return True

    def create(self, dataset_id):
        """Create *dataset_id*, honoring the connector's location if set.

        Parameters
        ----------
        dataset_id : str
            Name of the dataset to be created.
        """
        from google.cloud.bigquery import Dataset

        if self.exists(dataset_id):
            raise DatasetCreationError(
                "Dataset {0} already exists".format(dataset_id)
            )

        new_dataset = Dataset(self.client.dataset(dataset_id))
        if self.location is not None:
            new_dataset.location = self.location

        try:
            self.client.create_dataset(new_dataset)
        except self.http_error as ex:
            self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
_localize_df
|
python
|
def _localize_df(schema_fields, df):
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
if field["type"].upper() == "TIMESTAMP" and df[column].dt.tz is None:
df[column] = df[column].dt.tz_localize("UTC")
return df
|
Localize any TIMESTAMP columns to tz-aware type.
In pandas versions before 0.24.0, DatetimeTZDtype cannot be used as the
dtype in Series/DataFrame construction, so localize those columns after
the DataFrame is constructed.
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L725-L740
| null |
import logging
import time
import warnings
from datetime import datetime
import numpy as np
try:
# The BigQuery Storage API client is an optional dependency. It is only
# required when use_bqstorage_api=True.
from google.cloud import bigquery_storage_v1beta1
except ImportError: # pragma: NO COVER
bigquery_storage_v1beta1 = None
from pandas_gbq.exceptions import AccessDenied
logger = logging.getLogger(__name__)
BIGQUERY_INSTALLED_VERSION = None
SHOW_VERBOSE_DEPRECATION = False
SHOW_PRIVATE_KEY_DEPRECATION = False
PRIVATE_KEY_DEPRECATION_MESSAGE = (
"private_key is deprecated and will be removed in a future version."
"Use the credentials argument instead. See "
"https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html "
"for examples on using the credentials argument with service account keys."
)
try:
import tqdm # noqa
except ImportError:
tqdm = None
def _check_google_client_version():
    """Validate google-cloud-bigquery's version and set deprecation flags.

    Records the installed google-cloud-bigquery version in
    ``BIGQUERY_INSTALLED_VERSION`` and, from the installed pandas version,
    decides whether the deprecated ``verbose`` and ``private_key``
    arguments should emit warnings (see GH#157).

    Raises
    ------
    ImportError
        When setuptools is missing or google-cloud-bigquery is too old.
    """
    global BIGQUERY_INSTALLED_VERSION, SHOW_VERBOSE_DEPRECATION, SHOW_PRIVATE_KEY_DEPRECATION

    try:
        import pkg_resources
    except ImportError:
        raise ImportError("Could not import pkg_resources (setuptools).")

    def installed_version(distribution):
        return pkg_resources.get_distribution(distribution).parsed_version

    # https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/bigquery/CHANGELOG.md
    bigquery_minimum_version = pkg_resources.parse_version("1.9.0")
    BIGQUERY_INSTALLED_VERSION = installed_version("google-cloud-bigquery")

    if BIGQUERY_INSTALLED_VERSION < bigquery_minimum_version:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery >= {0}, "
            "current version {1}".format(
                bigquery_minimum_version, BIGQUERY_INSTALLED_VERSION
            )
        )

    # Only show the deprecation warnings once pandas itself changed the
    # corresponding arguments (https://github.com/pydata/pandas-gbq/issues/157).
    pandas_installed_version = installed_version("pandas")
    SHOW_VERBOSE_DEPRECATION = (
        pandas_installed_version >= pkg_resources.parse_version("0.23.0")
    )
    SHOW_PRIVATE_KEY_DEPRECATION = (
        pandas_installed_version >= pkg_resources.parse_version("0.24.0")
    )
def _test_google_api_imports():
    """Verify that every Google API dependency is importable.

    Raises
    ------
    ImportError
        Naming the first missing dependency, or (via
        ``_check_google_client_version``) an unsupported version.
    """

    def _missing(package, err):
        # Same message format for every dependency probe below.
        return ImportError(
            "pandas-gbq requires {0}: {1}".format(package, err)
        )

    try:
        import pydata_google_auth  # noqa
    except ImportError as ex:
        raise _missing("pydata-google-auth", ex)

    try:
        from google_auth_oauthlib.flow import InstalledAppFlow  # noqa
    except ImportError as ex:
        raise _missing("google-auth-oauthlib", ex)

    try:
        import google.auth  # noqa
    except ImportError as ex:
        raise _missing("google-auth", ex)

    try:
        from google.cloud import bigquery  # noqa
    except ImportError as ex:
        raise _missing("google-cloud-bigquery", ex)

    _check_google_client_version()
class DatasetCreationError(ValueError):
    """Raised when creating a BigQuery dataset fails."""
class GenericGBQException(ValueError):
    """Raised when an unrecognized Google API error occurs."""
class InvalidColumnOrder(ValueError):
    """Raised when the requested column order for the results DataFrame
    does not match the schema returned by BigQuery."""
class InvalidIndexColumn(ValueError):
    """Raised when the requested index column for the results DataFrame
    does not match the schema returned by BigQuery."""
class InvalidPageToken(ValueError):
    """Raised when Google BigQuery fails to return a page token, or
    returns a duplicate one."""
class InvalidSchema(ValueError):
    """Raised when the DataFrame being uploaded does not match the schema
    of the destination table in BigQuery."""
class NotFoundException(ValueError):
    """Raised when the project, dataset or table named in a request could
    not be found."""
class QueryTimeout(ValueError):
    """Raised when a query exceeds the ``timeoutMs`` value specified in
    the BigQuery job configuration."""
class TableCreationError(ValueError):
    """Raised when creating a BigQuery table fails."""
class Context(object):
    """Session-wide storage for credentials, project and SQL dialect.

    A single instance is created when the ``pandas_gbq`` module is
    imported and is available as :attr:`pandas_gbq.context`.
    """

    def __init__(self):
        # Everything starts unset. read_gbq/to_gbq cache credentials and
        # project here; dialect stays None so read_gbq can detect that the
        # user never chose one.
        self._credentials = None
        self._project = None
        self._dialect = None

    @property
    def credentials(self):
        """google.auth.credentials.Credentials : Credentials for Google APIs.

        Cached automatically by :func:`pandas_gbq.read_gbq` and
        :func:`pandas_gbq.to_gbq`. To set them manually, build a
        :class:`google.auth.credentials.Credentials` object (see the
        `auth docs
        <http://google-auth.readthedocs.io/en/latest/user-guide.html#obtaining-credentials>`__
        for how to obtain one) and assign it:

        >>> import pandas_gbq
        >>> from google.oauth2 import service_account
        >>> credentials = service_account.Credentials.from_service_account_file(
        ...     '/path/to/key.json',
        ... )
        >>> pandas_gbq.context.credentials = credentials
        """
        return self._credentials

    @credentials.setter
    def credentials(self, value):
        self._credentials = value

    @property
    def project(self):
        """str : Default project used for calls to Google APIs.

        >>> import pandas_gbq
        >>> pandas_gbq.context.project = 'my-project'
        """
        return self._project

    @project.setter
    def project(self, value):
        self._project = value

    @property
    def dialect(self):
        """str : Default SQL dialect for :func:`pandas_gbq.read_gbq`.

        Either ``'legacy'`` (`BigQuery Legacy SQL
        <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__)
        or ``'standard'`` (SQL 2011-compliant `Standard SQL
        <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__).

        >>> import pandas_gbq
        >>> pandas_gbq.context.dialect = 'standard'
        """
        return self._dialect

    @dialect.setter
    def dialect(self, value):
        self._dialect = value
# Module-level singleton: caches credentials/project across read_gbq/to_gbq
# calls within one Python session (starts empty; filled on first use).
context = Context()
"""A :class:`pandas_gbq.Context` object used to cache credentials.
Credentials automatically are cached in-memory by :func:`pandas_gbq.read_gbq`
and :func:`pandas_gbq.to_gbq`.
"""
class GbqConnector(object):
    """Shared connection/credential handling for BigQuery operations.

    Resolves credentials and the billing project (explicit arguments win,
    then the module-level ``context`` cache, then pydata-google-auth),
    builds a ``google.cloud.bigquery.Client`` and exposes the query/load
    helpers used by :func:`read_gbq` and :func:`to_gbq`.
    """

    def __init__(
        self,
        project_id,
        reauth=False,
        private_key=None,
        auth_local_webserver=False,
        dialect="standard",
        location=None,
        credentials=None,
        use_bqstorage_api=False,
    ):
        """Resolve credentials/project and construct the BigQuery client.

        Raises
        ------
        ValueError
            When no project ID is supplied and none can be derived from
            the credentials or the cached context.
        """
        global context
        from google.api_core.exceptions import GoogleAPIError
        from google.api_core.exceptions import ClientError
        from pandas_gbq import auth

        # Exception classes that the helpers below treat as "HTTP errors"
        # and route through process_http_error().
        self.http_error = (ClientError, GoogleAPIError)
        self.project_id = project_id
        self.location = location
        self.reauth = reauth
        self.private_key = private_key
        self.auth_local_webserver = auth_local_webserver
        self.dialect = dialect
        self.credentials = credentials
        default_project = None

        # Service account credentials have a project associated with them.
        # Prefer that project if none was supplied.
        if self.project_id is None and hasattr(self.credentials, "project_id"):
            self.project_id = credentials.project_id

        # Load credentials from cache.
        if not self.credentials:
            self.credentials = context.credentials
            default_project = context.project

        # Credentials were explicitly asked for, so don't use the cache.
        if private_key or reauth or not self.credentials:
            self.credentials, default_project = auth.get_credentials(
                private_key=private_key,
                project_id=project_id,
                reauth=reauth,
                auth_local_webserver=auth_local_webserver,
            )

        if self.project_id is None:
            self.project_id = default_project

        if self.project_id is None:
            raise ValueError(
                "Could not determine project ID and one was not supplied."
            )

        # Cache the credentials if they haven't been set yet.
        if context.credentials is None:
            context.credentials = self.credentials
        if context.project is None:
            context.project = self.project_id

        self.client = self.get_client()
        # None unless use_bqstorage_api was requested (optional dependency).
        self.bqstorage_client = _make_bqstorage_client(
            use_bqstorage_api, self.credentials
        )

        # BQ Queries costs $5 per TB. First 1 TB per month is free
        # see here for more: https://cloud.google.com/bigquery/pricing
        self.query_price_for_TB = 5.0 / 2 ** 40  # USD/TB

    def _start_timer(self):
        # Record the wall-clock start of the current operation.
        self.start = time.time()

    def get_elapsed_seconds(self):
        """Return seconds elapsed since ``_start_timer``, rounded to 2 dp."""
        return round(time.time() - self.start, 2)

    def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
        """Log the elapsed time, but only when it exceeds *overlong* seconds."""
        sec = self.get_elapsed_seconds()
        if sec > overlong:
            logger.info("{} {} {}".format(prefix, sec, postfix))

    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    @staticmethod
    def sizeof_fmt(num, suffix="B"):
        """Format *num* bytes as a human-readable string, e.g. '1.5 MB'."""
        fmt = "%3.1f %s%s"
        for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
            if abs(num) < 1024.0:
                return fmt % (num, unit, suffix)
            num /= 1024.0
        return fmt % (num, "Y", suffix)

    def get_client(self):
        """Build a ``google.cloud.bigquery.Client`` for this connector."""
        from google.cloud import bigquery

        return bigquery.Client(
            project=self.project_id, credentials=self.credentials
        )

    @staticmethod
    def process_http_error(ex):
        """Re-raise a Google API error as :class:`GenericGBQException`."""
        # See `BigQuery Troubleshooting Errors
        # <https://cloud.google.com/bigquery/troubleshooting-errors>`__
        raise GenericGBQException("Reason: {0}".format(ex))

    def run_query(self, query, **kwargs):
        """Execute *query* and return its results as a DataFrame.

        A query statement may alternatively be supplied via
        ``kwargs["configuration"]["query"]["query"]`` — but not both at
        once.

        Raises
        ------
        AccessDenied
            When the credentials are invalid, revoked or expired.
        QueryTimeout
            When the job exceeds the configured ``timeoutMs``.
        """
        from concurrent.futures import TimeoutError
        from google.auth.exceptions import RefreshError
        from google.cloud import bigquery

        job_config = {
            "query": {
                "useLegacySql": self.dialect
                == "legacy"
                # 'allowLargeResults', 'createDisposition',
                # 'preserveNulls', destinationTable, useQueryCache
            }
        }
        config = kwargs.get("configuration")
        if config is not None:
            job_config.update(config)

            if "query" in config and "query" in config["query"]:
                if query is not None:
                    raise ValueError(
                        "Query statement can't be specified "
                        "inside config while it is specified "
                        "as parameter"
                    )
                query = config["query"].pop("query")

        self._start_timer()

        try:
            logger.debug("Requesting query... ")
            query_reply = self.client.query(
                query,
                job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
                location=self.location,
                project=self.project_id,
            )
            logger.debug("Query running...")
        except (RefreshError, ValueError):
            if self.private_key:
                raise AccessDenied(
                    "The service account credentials are not valid"
                )
            else:
                raise AccessDenied(
                    "The credentials have been revoked or expired, "
                    "please re-run the application to re-authorize"
                )
        except self.http_error as ex:
            self.process_http_error(ex)

        job_id = query_reply.job_id
        logger.debug("Job ID: %s" % job_id)

        # Poll until the job reports DONE, enforcing timeoutMs ourselves so
        # progress can be logged while waiting.
        while query_reply.state != "DONE":
            self.log_elapsed_seconds(" Elapsed", "s. Waiting...")

            timeout_ms = job_config["query"].get("timeoutMs")
            if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
                raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))

            timeout_sec = 1.0
            if timeout_ms:
                # Wait at most 1 second so we can show progress bar
                timeout_sec = min(1.0, timeout_ms / 1000.0)

            try:
                query_reply.result(timeout=timeout_sec)
            except TimeoutError:
                # Use our own timeout logic
                pass
            except self.http_error as ex:
                self.process_http_error(ex)

        if query_reply.cache_hit:
            logger.debug("Query done.\nCache hit.\n")
        else:
            bytes_processed = query_reply.total_bytes_processed or 0
            bytes_billed = query_reply.total_bytes_billed or 0
            logger.debug(
                "Query done.\nProcessed: {} Billed: {}".format(
                    self.sizeof_fmt(bytes_processed),
                    self.sizeof_fmt(bytes_billed),
                )
            )
            logger.debug(
                "Standard price: ${:,.2f} USD\n".format(
                    bytes_billed * self.query_price_for_TB
                )
            )

        try:
            rows_iter = query_reply.result()
        except self.http_error as ex:
            self.process_http_error(ex)

        schema_fields = [field.to_api_repr() for field in rows_iter.schema]
        # Null-safe dtypes so missing values survive the conversion.
        nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
        df = rows_iter.to_dataframe(
            dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
        )

        if df.empty:
            # With zero rows pandas cannot infer dtypes; cast explicitly.
            df = _cast_empty_df_dtypes(schema_fields, df)

        # Ensure any TIMESTAMP columns are tz-aware.
        df = _localize_df(schema_fields, df)

        logger.debug("Got {} rows.\n".format(rows_iter.total_rows))
        return df

    def load_data(
        self,
        dataframe,
        dataset_id,
        table_id,
        chunksize=None,
        schema=None,
        progress_bar=True,
    ):
        """Upload *dataframe* into ``dataset_id.table_id`` in chunks.

        Progress is logged per chunk and, when tqdm is installed and
        *progress_bar* is set, also shown as a progress bar.
        """
        from pandas_gbq import load

        total_rows = len(dataframe)

        try:
            chunks = load.load_chunks(
                self.client,
                dataframe,
                dataset_id,
                table_id,
                chunksize=chunksize,
                schema=schema,
                location=self.location,
            )
            if progress_bar and tqdm:
                chunks = tqdm.tqdm(chunks)
            for remaining_rows in chunks:
                logger.info(
                    "\r{} out of {} rows loaded.".format(
                        total_rows - remaining_rows, total_rows
                    )
                )
        except self.http_error as ex:
            self.process_http_error(ex)

    def schema(self, dataset_id, table_id):
        """Retrieve the schema of the table

        Obtain from BigQuery the field names and field types
        for the table defined by the parameters

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table

        Returns
        -------
        list of dicts
            Fields representing the schema
        """
        table_ref = self.client.dataset(dataset_id).table(table_id)

        try:
            table = self.client.get_table(table_ref)
            remote_schema = table.schema

            remote_fields = [
                field_remote.to_api_repr() for field_remote in remote_schema
            ]
            # Normalize case so schema comparisons are case-insensitive.
            for field in remote_fields:
                field["type"] = field["type"].upper()
                field["mode"] = field["mode"].upper()

            return remote_fields
        except self.http_error as ex:
            self.process_http_error(ex)

    def _clean_schema_fields(self, fields):
        """Return a sanitized version of the schema for comparisons."""
        fields_sorted = sorted(fields, key=lambda field: field["name"])
        # Ignore mode and description when comparing schemas.
        return [
            {"name": field["name"], "type": field["type"]}
            for field in fields_sorted
        ]

    def verify_schema(self, dataset_id, table_id, schema):
        """Indicate whether schemas match exactly

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether all fields in the former
        are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id :str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the schemas match
        """
        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return fields_remote == fields_local

    def schema_is_subset(self, dataset_id, table_id, schema):
        """Indicate whether the schema to be uploaded is a subset

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether a subset of the fields in
        the former are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the passed schema is a subset
        """
        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return all(field in fields_remote for field in fields_local)

    def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
        """Drop ``dataset_id.table_id`` and recreate it with *table_schema*."""
        table = _Table(
            self.project_id, dataset_id, credentials=self.credentials
        )
        table.delete(table_id)
        table.create(table_id, table_schema)
def _bqschema_to_nullsafe_dtypes(schema_fields):
"""Specify explicit dtypes based on BigQuery schema.
This function only specifies a dtype when the dtype allows nulls.
Otherwise, use pandas's default dtype choice.
See: http://pandas.pydata.org/pandas-docs/dev/missing_data.html
#missing-data-casting-rules-and-indexing
"""
# If you update this mapping, also update the table at
# `docs/source/reading.rst`.
dtype_map = {
"FLOAT": np.dtype(float),
# pandas doesn't support timezone-aware dtype in DataFrame/Series
# constructors. It's more idiomatic to localize after construction.
# https://github.com/pandas-dev/pandas/issues/25843
"TIMESTAMP": "datetime64[ns]",
"TIME": "datetime64[ns]",
"DATE": "datetime64[ns]",
"DATETIME": "datetime64[ns]",
}
dtypes = {}
for field in schema_fields:
name = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
dtypes[name] = dtype
return dtypes
def _cast_empty_df_dtypes(schema_fields, df):
"""Cast any columns in an empty dataframe to correct type.
In an empty dataframe, pandas cannot choose a dtype unless one is
explicitly provided. The _bqschema_to_nullsafe_dtypes() function only
provides dtypes when the dtype safely handles null values. This means
that empty int64 and boolean columns are incorrectly classified as
``object``.
"""
if not df.empty:
raise ValueError(
"DataFrame must be empty in order to cast non-nullsafe dtypes"
)
dtype_map = {"BOOLEAN": bool, "INTEGER": np.int64}
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
df[column] = df[column].astype(dtype)
return df
def _make_bqstorage_client(use_bqstorage_api, credentials):
if not use_bqstorage_api:
return None
if bigquery_storage_v1beta1 is None:
raise ImportError(
"Install the google-cloud-bigquery-storage and fastavro packages "
"to use the BigQuery Storage API."
)
return bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=credentials
)
def read_gbq(
    query,
    project_id=None,
    index_col=None,
    col_order=None,
    reauth=False,
    auth_local_webserver=False,
    dialect=None,
    location=None,
    configuration=None,
    credentials=None,
    use_bqstorage_api=False,
    verbose=None,
    private_key=None,
):
    r"""Load data from Google BigQuery using google-cloud-python

    The main method a user calls to execute a Query in Google BigQuery
    and read results into a pandas DataFrame.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    query : str
        SQL-Like Query to return data values.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    index_col : str, optional
        Name of result column to use for index in results DataFrame.
    col_order : list(str), optional
        List of BigQuery column names in the desired order for results
        DataFrame.
    reauth : boolean, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    auth_local_webserver : boolean, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    dialect : str, default 'standard'
        Note: The default value changed to 'standard' in version 0.10.0.

        SQL syntax dialect to use. Value can be one of:

        ``'legacy'``
            Use BigQuery's legacy SQL dialect. For more information see
            `BigQuery Legacy SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
        ``'standard'``
            Use BigQuery's standard SQL, which is
            compliant with the SQL 2011 standard. For more information
            see `BigQuery Standard SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
    location : str, optional
        Location where the query job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of any
        datasets used in the query.

        .. versionadded:: 0.5.0
    configuration : dict, optional
        Query config parameters for job processing.
        For example:

            configuration = {'query': {'useQueryCache': False}}

        For more information see `BigQuery REST API Reference
        <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        .. versionadded:: 0.8.0
    use_bqstorage_api : bool, default False
        Use the `BigQuery Storage API
        <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
        download query results quickly, but at an increased cost. To use this
        API, first `enable it in the Cloud Console
        <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
        You must also have the `bigquery.readsessions.create
        <https://cloud.google.com/bigquery/docs/access-control#roles>`__
        permission on the project you are billing queries to.

        **Note:** Due to a `known issue in the ``google-cloud-bigquery``
        package
        <https://github.com/googleapis/google-cloud-python/pull/7633>`__
        (fixed in version 1.11.0), you must write your query results to a
        destination table. To do this with ``read_gbq``, supply a
        ``configuration`` dictionary.

        This feature requires the ``google-cloud-bigquery-storage`` and
        ``fastavro`` packages.

        .. versionadded:: 0.10.0
    verbose : None, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    private_key : str, deprecated
        Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
        parameter and
        :func:`google.oauth2.service_account.Credentials.from_service_account_info`
        or
        :func:`google.oauth2.service_account.Credentials.from_service_account_file`
        instead.

        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).

    Returns
    -------
    df: DataFrame
        DataFrame representing results of query.
    """
    global context

    # Fall back to the session-wide dialect, then to 'standard'.
    if dialect is None:
        dialect = context.dialect

    if dialect is None:
        dialect = "standard"

    _test_google_api_imports()

    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=2,
        )

    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )

    if dialect not in ("legacy", "standard"):
        raise ValueError("'{0}' is not valid for dialect".format(dialect))

    connector = GbqConnector(
        project_id,
        reauth=reauth,
        dialect=dialect,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
        use_bqstorage_api=use_bqstorage_api,
    )

    final_df = connector.run_query(query, configuration=configuration)

    # Reindex the DataFrame on the provided column
    if index_col is not None:
        if index_col in final_df.columns:
            final_df.set_index(index_col, inplace=True)
        else:
            raise InvalidIndexColumn(
                'Index column "{0}" does not exist in DataFrame.'.format(
                    index_col
                )
            )

    # Change the order of columns in the DataFrame based on provided list
    # (col_order must be an exact permutation of the result's columns).
    if col_order is not None:
        if sorted(col_order) == sorted(final_df.columns):
            final_df = final_df[col_order]
        else:
            raise InvalidColumnOrder(
                "Column order does not match this DataFrame."
            )

    connector.log_elapsed_seconds(
        "Total time taken",
        datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
    )

    return final_df
def to_gbq(
dataframe,
destination_table,
project_id=None,
chunksize=None,
reauth=False,
if_exists="fail",
auth_local_webserver=False,
table_schema=None,
location=None,
progress_bar=True,
credentials=None,
verbose=None,
private_key=None,
):
"""Write a DataFrame to a Google BigQuery table.
The main method a user calls to export pandas DataFrame contents to
Google BigQuery table.
This method uses the Google Cloud client library to make requests to
Google BigQuery, documented `here
<https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.
See the :ref:`How to authenticate with Google BigQuery <authentication>`
guide for authentication instructions.
Parameters
----------
dataframe : pandas.DataFrame
DataFrame to be written to a Google BigQuery table.
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
.. versionadded:: 0.2.0
table_schema : list of dicts, optional
List of BigQuery table fields to which according DataFrame
columns conform to, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``.
- If ``table_schema`` is provided, it may contain all or a subset of
DataFrame columns. If a subset is provided, the rest will be
inferred from the DataFrame dtypes.
- If ``table_schema`` is **not** provided, it will be
generated according to dtypes of DataFrame columns. See
`Inferring the Table Schema
<https://pandas-gbq.readthedocs.io/en/latest/writing.html#writing-schema>`__.
for a description of the schema inference.
See `BigQuery API documentation on valid column names
<https://cloud.google.com/bigquery/docs/schemas#column_names`>__.
.. versionadded:: 0.3.1
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
.. versionadded:: 0.5.0
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
.. versionadded:: 0.5.0
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
.. versionadded:: 0.8.0
verbose : bool, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
"""
_test_google_api_imports()
from pandas_gbq import schema
if verbose is not None and SHOW_VERBOSE_DEPRECATION:
warnings.warn(
"verbose is deprecated and will be removed in "
"a future version. Set logging level in order to vary "
"verbosity",
FutureWarning,
stacklevel=1,
)
if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
warnings.warn(
PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
)
if if_exists not in ("fail", "replace", "append"):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
if "." not in destination_table:
raise NotFoundException(
"Invalid Table Name. Should be of the form 'datasetId.tableId' "
)
connector = GbqConnector(
project_id,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
location=location,
credentials=credentials,
private_key=private_key,
)
dataset_id, table_id = destination_table.rsplit(".", 1)
table = _Table(
project_id,
dataset_id,
location=location,
credentials=connector.credentials,
)
default_schema = _generate_bq_schema(dataframe)
if not table_schema:
table_schema = default_schema
else:
table_schema = schema.update_schema(
default_schema, dict(fields=table_schema)
)
# If table exists, check if_exists parameter
if table.exists(table_id):
if if_exists == "fail":
raise TableCreationError(
"Could not create the table because it "
"already exists. "
"Change the if_exists parameter to "
"'append' or 'replace' data."
)
elif if_exists == "replace":
connector.delete_and_recreate_table(
dataset_id, table_id, table_schema
)
elif if_exists == "append":
if not connector.schema_is_subset(
dataset_id, table_id, table_schema
):
raise InvalidSchema(
"Please verify that the structure and "
"data types in the DataFrame match the "
"schema of the destination table."
)
else:
table.create(table_id, table_schema)
if dataframe.empty:
# Create the table (if needed), but don't try to run a load job with an
# empty file. See: https://github.com/pydata/pandas-gbq/issues/237
return
connector.load_data(
dataframe,
dataset_id,
table_id,
chunksize=chunksize,
schema=table_schema,
progress_bar=progress_bar,
)
def generate_bq_schema(df, default_type="STRING"):
    """Generate a Google BigQuery schema from a DataFrame.

    .. deprecated::
        Kept only for backward compatibility; pass ``table_schema`` to
        :func:`to_gbq` instead.

    Parameters
    ----------
    df : DataFrame
        Frame whose dtypes drive the schema.
    default_type : str
        BigQuery type used for any column whose dtype has no mapping.
    """
    # Deprecation tracked alongside the pandas TimeSeries removal (#11121).
    warnings.warn(
        "generate_bq_schema is deprecated and will be removed in "
        "a future version",
        FutureWarning,
        stacklevel=2,
    )
    return _generate_bq_schema(df, default_type=default_type)
def _generate_bq_schema(df, default_type="STRING"):
    """Delegate schema generation to :mod:`pandas_gbq.schema`.

    Private shim retained because external code imported it to work around
    schema-inference issues (pandas-gbq issue #218); prefer the public
    ``table_schema`` argument of :func:`to_gbq`.
    """
    from pandas_gbq import schema as _schema

    return _schema.generate_bq_schema(df, default_type=default_type)
class _Table(GbqConnector):
    """Helper for table-level operations within one BigQuery dataset."""

    def __init__(
        self,
        project_id,
        dataset_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        # Remember the dataset first; the base class sets up clients.
        self.dataset_id = dataset_id
        super(_Table, self).__init__(
            project_id,
            reauth,
            location=location,
            credentials=credentials,
            private_key=private_key,
        )

    def exists(self, table_id):
        """Return ``True`` when ``table_id`` exists in this dataset.

        Parameters
        ----------
        table_id : str
            Name of the table to check.
        """
        from google.api_core.exceptions import NotFound

        ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.get_table(ref)
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)
        else:
            return True

    def create(self, table_id, schema):
        """Create ``table_id`` with ``schema``, creating the dataset if needed.

        Parameters
        ----------
        table_id : str
            Name of the table to create.
        schema : dict
            BigQuery table schema, e.g. from ``generate_bq_schema``.
        """
        from google.cloud.bigquery import SchemaField
        from google.cloud.bigquery import Table

        if self.exists(table_id):
            raise TableCreationError(
                "Table {0} already exists".format(table_id)
            )

        dataset_helper = _Dataset(self.project_id, credentials=self.credentials)
        if not dataset_helper.exists(self.dataset_id):
            _Dataset(
                self.project_id,
                credentials=self.credentials,
                location=self.location,
            ).create(self.dataset_id)

        table = Table(self.client.dataset(self.dataset_id).table(table_id))
        # Workaround for google-cloud-python#4456: the API representation
        # requires an explicit mode, so default missing ones to NULLABLE.
        for field in schema["fields"]:
            field.setdefault("mode", "NULLABLE")
        table.schema = [
            SchemaField.from_api_repr(field) for field in schema["fields"]
        ]

        try:
            self.client.create_table(table)
        except self.http_error as ex:
            self.process_http_error(ex)

    def delete(self, table_id):
        """Delete ``table_id``; a concurrent 404 from the API is ignored.

        Parameters
        ----------
        table_id : str
            Name of the table to delete.
        """
        from google.api_core.exceptions import NotFound

        if not self.exists(table_id):
            raise NotFoundException("Table does not exist")

        ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.delete_table(ref)
        except NotFound:
            # Deleted out from under us between the check and this call --
            # the end state is what the caller wanted, so treat as success.
            pass
        except self.http_error as ex:
            self.process_http_error(ex)
class _Dataset(GbqConnector):
    """Helper for dataset-level operations in Google BigQuery."""

    def __init__(
        self,
        project_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        super(_Dataset, self).__init__(
            project_id,
            reauth,
            credentials=credentials,
            location=location,
            private_key=private_key,
        )

    def exists(self, dataset_id):
        """Return ``True`` when ``dataset_id`` exists in the project.

        Parameters
        ----------
        dataset_id : str
            Name of the dataset to check.
        """
        from google.api_core.exceptions import NotFound

        try:
            self.client.get_dataset(self.client.dataset(dataset_id))
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)
        else:
            return True

    def create(self, dataset_id):
        """Create ``dataset_id``, pinning it to ``self.location`` when set.

        Parameters
        ----------
        dataset_id : str
            Name of the dataset to create.
        """
        from google.cloud.bigquery import Dataset

        if self.exists(dataset_id):
            raise DatasetCreationError(
                "Dataset {0} already exists".format(dataset_id)
            )

        dataset = Dataset(self.client.dataset(dataset_id))
        if self.location is not None:
            dataset.location = self.location

        try:
            self.client.create_dataset(dataset)
        except self.http_error as ex:
            self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
read_gbq
|
python
|
def read_gbq(
    query,
    project_id=None,
    index_col=None,
    col_order=None,
    reauth=False,
    auth_local_webserver=False,
    dialect=None,
    location=None,
    configuration=None,
    credentials=None,
    use_bqstorage_api=False,
    verbose=None,
    private_key=None,
):
    r"""Load data from Google BigQuery into a pandas DataFrame.

    Executes ``query`` through the google-cloud-bigquery client library and
    returns the result set. Credentials are resolved (and cached on the
    module-level ``context``) as described in the pandas-gbq authentication
    guide.

    Parameters
    ----------
    query : str
        SQL-like query to execute.
    project_id : str, optional
        Google BigQuery project ID. Optional when it can be inferred from
        the environment or the supplied credentials.
    index_col : str, optional
        Name of the result column to use as the DataFrame index.
    col_order : list(str), optional
        Desired column order; must contain exactly the result columns.
    reauth : bool, default False
        Force re-authentication (useful with multiple accounts).
    auth_local_webserver : bool, default False
        Use the local-webserver OAuth flow instead of the console flow
        when obtaining user credentials.
    dialect : str, optional
        SQL dialect, ``'legacy'`` or ``'standard'``. Defaults to
        ``pandas_gbq.context.dialect``, then to ``'standard'``.
    location : str, optional
        Location where the query job should run; must match the location
        of any datasets referenced by the query.
    configuration : dict, optional
        Raw query-job configuration, for example
        ``{'query': {'useQueryCache': False}}``.
    credentials : google.auth.credentials.Credentials, optional
        Explicit credentials, overriding default credential resolution.
    use_bqstorage_api : bool, default False
        Download results via the BigQuery Storage API (faster, extra
        cost). Requires the ``google-cloud-bigquery-storage`` and
        ``fastavro`` packages and the ``bigquery.readsessions.create``
        permission.
    verbose : None, deprecated
        Use the standard ``logging`` module to adjust verbosity instead.
    private_key : str, deprecated
        Use the ``credentials`` argument instead. Service-account key in
        JSON format (file path or contents).

    Returns
    -------
    df : DataFrame
        DataFrame representing results of query.
    """
    global context

    # Resolve dialect: explicit argument > session default > 'standard'.
    if dialect is None:
        dialect = context.dialect
    if dialect is None:
        dialect = "standard"

    _test_google_api_imports()

    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=2,
        )
    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )

    if dialect not in ("legacy", "standard"):
        raise ValueError("'{0}' is not valid for dialect".format(dialect))

    connector = GbqConnector(
        project_id,
        reauth=reauth,
        dialect=dialect,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
        use_bqstorage_api=use_bqstorage_api,
    )
    final_df = connector.run_query(query, configuration=configuration)

    # Re-index on the requested column, if any.
    if index_col is not None:
        if index_col not in final_df.columns:
            raise InvalidIndexColumn(
                'Index column "{0}" does not exist in DataFrame.'.format(
                    index_col
                )
            )
        final_df.set_index(index_col, inplace=True)

    # Reorder columns; col_order must match the result columns exactly.
    if col_order is not None:
        if sorted(col_order) != sorted(final_df.columns):
            raise InvalidColumnOrder(
                "Column order does not match this DataFrame."
            )
        final_df = final_df[col_order]

    connector.log_elapsed_seconds(
        "Total time taken",
        datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
    )

    return final_df
|
r"""Load data from Google BigQuery using google-cloud-python
The main method a user calls to execute a Query in Google BigQuery
and read results into a pandas DataFrame.
This method uses the Google Cloud client library to make requests to
Google BigQuery, documented `here
<https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.
See the :ref:`How to authenticate with Google BigQuery <authentication>`
guide for authentication instructions.
Parameters
----------
query : str
SQL-Like Query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : boolean, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
.. versionadded:: 0.2.0
dialect : str, default 'standard'
Note: The default value changed to 'standard' in version 0.10.0.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
.. versionadded:: 0.5.0
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
.. versionadded:: 0.8.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
**Note:** Due to a `known issue in the ``google-cloud-bigquery``
package
<https://github.com/googleapis/google-cloud-python/pull/7633>`__
(fixed in version 1.11.0), you must write your query results to a
destination table. To do this with ``read_gbq``, supply a
``configuration`` dictionary.
This feature requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.10.0
verbose : None, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
Returns
-------
df: DataFrame
DataFrame representing results of query.
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L758-L954
|
[
"def _test_google_api_imports():\n\n try:\n import pydata_google_auth # noqa\n except ImportError as ex:\n raise ImportError(\n \"pandas-gbq requires pydata-google-auth: {0}\".format(ex)\n )\n\n try:\n from google_auth_oauthlib.flow import InstalledAppFlow # noqa\n except ImportError as ex:\n raise ImportError(\n \"pandas-gbq requires google-auth-oauthlib: {0}\".format(ex)\n )\n\n try:\n import google.auth # noqa\n except ImportError as ex:\n raise ImportError(\"pandas-gbq requires google-auth: {0}\".format(ex))\n\n try:\n from google.cloud import bigquery # noqa\n except ImportError as ex:\n raise ImportError(\n \"pandas-gbq requires google-cloud-bigquery: {0}\".format(ex)\n )\n\n _check_google_client_version()\n",
"def log_elapsed_seconds(self, prefix=\"Elapsed\", postfix=\"s.\", overlong=6):\n sec = self.get_elapsed_seconds()\n if sec > overlong:\n logger.info(\"{} {} {}\".format(prefix, sec, postfix))\n",
"def run_query(self, query, **kwargs):\n from concurrent.futures import TimeoutError\n from google.auth.exceptions import RefreshError\n from google.cloud import bigquery\n\n job_config = {\n \"query\": {\n \"useLegacySql\": self.dialect\n == \"legacy\"\n # 'allowLargeResults', 'createDisposition',\n # 'preserveNulls', destinationTable, useQueryCache\n }\n }\n config = kwargs.get(\"configuration\")\n if config is not None:\n job_config.update(config)\n\n if \"query\" in config and \"query\" in config[\"query\"]:\n if query is not None:\n raise ValueError(\n \"Query statement can't be specified \"\n \"inside config while it is specified \"\n \"as parameter\"\n )\n query = config[\"query\"].pop(\"query\")\n\n self._start_timer()\n\n try:\n logger.debug(\"Requesting query... \")\n query_reply = self.client.query(\n query,\n job_config=bigquery.QueryJobConfig.from_api_repr(job_config),\n location=self.location,\n project=self.project_id,\n )\n logger.debug(\"Query running...\")\n except (RefreshError, ValueError):\n if self.private_key:\n raise AccessDenied(\n \"The service account credentials are not valid\"\n )\n else:\n raise AccessDenied(\n \"The credentials have been revoked or expired, \"\n \"please re-run the application to re-authorize\"\n )\n except self.http_error as ex:\n self.process_http_error(ex)\n\n job_id = query_reply.job_id\n logger.debug(\"Job ID: %s\" % job_id)\n\n while query_reply.state != \"DONE\":\n self.log_elapsed_seconds(\" Elapsed\", \"s. 
Waiting...\")\n\n timeout_ms = job_config[\"query\"].get(\"timeoutMs\")\n if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:\n raise QueryTimeout(\"Query timeout: {} ms\".format(timeout_ms))\n\n timeout_sec = 1.0\n if timeout_ms:\n # Wait at most 1 second so we can show progress bar\n timeout_sec = min(1.0, timeout_ms / 1000.0)\n\n try:\n query_reply.result(timeout=timeout_sec)\n except TimeoutError:\n # Use our own timeout logic\n pass\n except self.http_error as ex:\n self.process_http_error(ex)\n\n if query_reply.cache_hit:\n logger.debug(\"Query done.\\nCache hit.\\n\")\n else:\n bytes_processed = query_reply.total_bytes_processed or 0\n bytes_billed = query_reply.total_bytes_billed or 0\n logger.debug(\n \"Query done.\\nProcessed: {} Billed: {}\".format(\n self.sizeof_fmt(bytes_processed),\n self.sizeof_fmt(bytes_billed),\n )\n )\n logger.debug(\n \"Standard price: ${:,.2f} USD\\n\".format(\n bytes_billed * self.query_price_for_TB\n )\n )\n\n try:\n rows_iter = query_reply.result()\n except self.http_error as ex:\n self.process_http_error(ex)\n\n schema_fields = [field.to_api_repr() for field in rows_iter.schema]\n nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)\n df = rows_iter.to_dataframe(\n dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client\n )\n\n if df.empty:\n df = _cast_empty_df_dtypes(schema_fields, df)\n\n # Ensure any TIMESTAMP columns are tz-aware.\n df = _localize_df(schema_fields, df)\n\n logger.debug(\"Got {} rows.\\n\".format(rows_iter.total_rows))\n return df\n"
] |
import logging
import time
import warnings
from datetime import datetime
import numpy as np
try:
# The BigQuery Storage API client is an optional dependency. It is only
# required when use_bqstorage_api=True.
from google.cloud import bigquery_storage_v1beta1
except ImportError: # pragma: NO COVER
bigquery_storage_v1beta1 = None
from pandas_gbq.exceptions import AccessDenied
logger = logging.getLogger(__name__)
BIGQUERY_INSTALLED_VERSION = None
SHOW_VERBOSE_DEPRECATION = False
SHOW_PRIVATE_KEY_DEPRECATION = False
PRIVATE_KEY_DEPRECATION_MESSAGE = (
"private_key is deprecated and will be removed in a future version."
"Use the credentials argument instead. See "
"https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html "
"for examples on using the credentials argument with service account keys."
)
try:
import tqdm # noqa
except ImportError:
tqdm = None
def _check_google_client_version():
    """Validate installed dependency versions and set deprecation flags.

    Populates the module-level ``BIGQUERY_INSTALLED_VERSION``,
    ``SHOW_VERBOSE_DEPRECATION`` and ``SHOW_PRIVATE_KEY_DEPRECATION``
    globals.

    Raises
    ------
    ImportError
        If setuptools is missing or google-cloud-bigquery is too old.
    """
    global BIGQUERY_INSTALLED_VERSION, SHOW_VERBOSE_DEPRECATION, SHOW_PRIVATE_KEY_DEPRECATION

    try:
        import pkg_resources
    except ImportError:
        raise ImportError("Could not import pkg_resources (setuptools).")

    # Minimum supported client; see
    # https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/bigquery/CHANGELOG.md
    minimum_bigquery = pkg_resources.parse_version("1.9.0")
    BIGQUERY_INSTALLED_VERSION = pkg_resources.get_distribution(
        "google-cloud-bigquery"
    ).parsed_version
    if BIGQUERY_INSTALLED_VERSION < minimum_bigquery:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery >= {0}, "
            "current version {1}".format(
                minimum_bigquery, BIGQUERY_INSTALLED_VERSION
            )
        )

    # Only show the deprecation warnings on pandas versions that support the
    # replacement APIs (https://github.com/pydata/pandas-gbq/issues/157).
    pandas_version = pkg_resources.get_distribution("pandas").parsed_version
    SHOW_VERBOSE_DEPRECATION = (
        pandas_version >= pkg_resources.parse_version("0.23.0")
    )
    SHOW_PRIVATE_KEY_DEPRECATION = (
        pandas_version >= pkg_resources.parse_version("0.24.0")
    )
def _test_google_api_imports():
    """Fail fast with a descriptive ImportError if a Google dependency is missing.

    Checks each required package in turn so the error names the exact
    missing dependency, then delegates to ``_check_google_client_version``
    for minimum-version enforcement.
    """
    try:
        import pydata_google_auth  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires pydata-google-auth: {0}".format(ex)
        )

    try:
        from google_auth_oauthlib.flow import InstalledAppFlow  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires google-auth-oauthlib: {0}".format(ex)
        )

    try:
        import google.auth  # noqa
    except ImportError as ex:
        raise ImportError("pandas-gbq requires google-auth: {0}".format(ex))

    try:
        from google.cloud import bigquery  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery: {0}".format(ex)
        )

    _check_google_client_version()
class DatasetCreationError(ValueError):
    """Raised when creating a BigQuery dataset fails."""
class GenericGBQException(ValueError):
    """Raised when an unrecognized Google API error occurs."""
class InvalidColumnOrder(ValueError):
    """Raised when ``col_order`` does not match the schema returned by BigQuery."""
class InvalidIndexColumn(ValueError):
    """Raised when ``index_col`` is not a column of the query results."""
class InvalidPageToken(ValueError):
    """Raised when BigQuery fails to return, or returns a duplicate, page token."""
class InvalidSchema(ValueError):
    """Raised when the DataFrame does not match the destination table's schema."""
class NotFoundException(ValueError):
    """Raised when the requested project, dataset, or table cannot be found."""
class QueryTimeout(ValueError):
    """Raised when a query exceeds the ``timeoutMs`` set in its configuration."""
class TableCreationError(ValueError):
    """Raised when creating a BigQuery table fails."""
class Context(object):
    """Session-wide cache of credentials, project, and dialect defaults.

    One instance is created at import time and exposed as
    :attr:`pandas_gbq.context`; ``read_gbq`` / ``to_gbq`` consult and
    populate it so authentication survives across calls.
    """

    def __init__(self):
        self._credentials = None
        self._project = None
        # None (rather than 'standard') lets read_gbq detect that the
        # caller never chose a dialect explicitly.
        self._dialect = None

    @property
    def credentials(self):
        """google.auth.credentials.Credentials: credentials for Google APIs.

        Cached automatically by :func:`pandas_gbq.read_gbq` and
        :func:`pandas_gbq.to_gbq`; may also be assigned directly (see the
        `auth docs <http://google-auth.readthedocs.io
        /en/latest/user-guide.html#obtaining-credentials>`__), e.g.::

            >>> import pandas_gbq
            >>> from google.oauth2 import service_account
            >>> credentials = service_account.Credentials.from_service_account_file(
            ...     '/path/to/key.json',
            ... )
            >>> pandas_gbq.context.credentials = credentials
        """
        return self._credentials

    @credentials.setter
    def credentials(self, value):
        self._credentials = value

    @property
    def project(self):
        """str: default project used for calls to Google APIs.

        Example::

            >>> import pandas_gbq
            >>> pandas_gbq.context.project = 'my-project'
        """
        return self._project

    @project.setter
    def project(self, value):
        self._project = value

    @property
    def dialect(self):
        """str: default SQL dialect for :func:`pandas_gbq.read_gbq`.

        Allowed values are ``'legacy'`` (BigQuery legacy SQL) and
        ``'standard'`` (SQL 2011-compliant standard SQL). Example::

            >>> import pandas_gbq
            >>> pandas_gbq.context.dialect = 'standard'
        """
        return self._dialect

    @dialect.setter
    def dialect(self, value):
        self._dialect = value
# Module-level singleton: created empty at import time and shared by
# read_gbq / to_gbq so cached credentials persist for the session.
context = Context()
"""A :class:`pandas_gbq.Context` object used to cache credentials.
Credentials automatically are cached in-memory by :func:`pandas_gbq.read_gbq`
and :func:`pandas_gbq.to_gbq`.
"""
class GbqConnector(object):
def __init__(
    self,
    project_id,
    reauth=False,
    private_key=None,
    auth_local_webserver=False,
    dialect="standard",
    location=None,
    credentials=None,
    use_bqstorage_api=False,
):
    """Resolve credentials and project, then build the BigQuery clients."""
    global context
    from google.api_core.exceptions import GoogleAPIError
    from google.api_core.exceptions import ClientError
    from pandas_gbq import auth

    self.http_error = (ClientError, GoogleAPIError)
    self.project_id = project_id
    self.location = location
    self.reauth = reauth
    self.private_key = private_key
    self.auth_local_webserver = auth_local_webserver
    self.dialect = dialect
    self.credentials = credentials
    default_project = None

    # Service-account credentials carry their own project; prefer it when
    # the caller did not supply one explicitly.
    if self.project_id is None and hasattr(self.credentials, "project_id"):
        self.project_id = credentials.project_id

    # No explicit credentials: fall back to the session cache.
    if not self.credentials:
        self.credentials = context.credentials
        default_project = context.project

    # Credentials were explicitly requested (key / reauth) or still
    # unresolved -- obtain fresh ones instead of using the cache.
    if private_key or reauth or not self.credentials:
        self.credentials, default_project = auth.get_credentials(
            private_key=private_key,
            project_id=project_id,
            reauth=reauth,
            auth_local_webserver=auth_local_webserver,
        )

    if self.project_id is None:
        self.project_id = default_project
    if self.project_id is None:
        raise ValueError(
            "Could not determine project ID and one was not supplied."
        )

    # Seed the session cache the first time we connect successfully.
    if context.credentials is None:
        context.credentials = self.credentials
    if context.project is None:
        context.project = self.project_id

    self.client = self.get_client()
    self.bqstorage_client = _make_bqstorage_client(
        use_bqstorage_api, self.credentials
    )

    # BigQuery queries cost $5 per TB scanned (first 1 TB/month free);
    # see https://cloud.google.com/bigquery/pricing
    self.query_price_for_TB = 5.0 / 2 ** 40  # USD/TB
def _start_timer(self):
    """Record the wall-clock start time used for elapsed-time reporting."""
    self.start = time.time()
def get_elapsed_seconds(self):
    """Return seconds since ``_start_timer``, rounded to two decimals."""
    return round(time.time() - self.start, 2)
def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
    """Log the elapsed time, but only once it exceeds ``overlong`` seconds."""
    elapsed = self.get_elapsed_seconds()
    if elapsed > overlong:
        logger.info("{} {} {}".format(prefix, elapsed, postfix))
# Human-readable byte counts; adapted from
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
@staticmethod
def sizeof_fmt(num, suffix="B"):
    """Format ``num`` bytes as a human-readable string, e.g. ``'1.5 MB'``."""
    template = "%3.1f %s%s"
    for prefix in ["", "K", "M", "G", "T", "P", "E", "Z"]:
        if abs(num) < 1024.0:
            return template % (num, prefix, suffix)
        num /= 1024.0
    # Anything past zetta falls through to yotta.
    return template % (num, "Y", suffix)
def get_client(self):
    """Construct a ``bigquery.Client`` bound to this project and credentials."""
    from google.cloud import bigquery

    return bigquery.Client(
        project=self.project_id, credentials=self.credentials
    )
@staticmethod
def process_http_error(ex):
    """Re-raise a Google API error as :class:`GenericGBQException`.

    See `BigQuery Troubleshooting Errors
    <https://cloud.google.com/bigquery/troubleshooting-errors>`__.
    """
    raise GenericGBQException("Reason: {0}".format(ex))
    def run_query(self, query, **kwargs):
        """Execute ``query`` on BigQuery and return the result as a DataFrame.

        Accepts an optional ``configuration`` keyword argument: a BigQuery
        job-configuration dict merged over the defaults. The query statement
        may alternatively be supplied via ``configuration['query']['query']``
        (but not both at once). Polls the job until DONE, honoring a
        ``timeoutMs`` setting if present, then downloads the rows with
        null-safe dtypes and tz-aware TIMESTAMP columns.
        """
        from concurrent.futures import TimeoutError
        from google.auth.exceptions import RefreshError
        from google.cloud import bigquery
        job_config = {
            "query": {
                "useLegacySql": self.dialect
                == "legacy"
                # 'allowLargeResults', 'createDisposition',
                # 'preserveNulls', destinationTable, useQueryCache
            }
        }
        config = kwargs.get("configuration")
        if config is not None:
            # NOTE(review): dict.update is a shallow merge — a user-supplied
            # "query" sub-dict replaces the default one above (including
            # useLegacySql). Confirm this is the intended semantics.
            job_config.update(config)
            if "query" in config and "query" in config["query"]:
                if query is not None:
                    raise ValueError(
                        "Query statement can't be specified "
                        "inside config while it is specified "
                        "as parameter"
                    )
                # NOTE(review): pops from the caller's config dict, i.e.
                # mutates the caller's input in place.
                query = config["query"].pop("query")
        self._start_timer()
        try:
            logger.debug("Requesting query... ")
            query_reply = self.client.query(
                query,
                job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
                location=self.location,
                project=self.project_id,
            )
            logger.debug("Query running...")
        except (RefreshError, ValueError):
            # ValueError can also be raised by the client for credential
            # problems; both are surfaced as AccessDenied.
            if self.private_key:
                raise AccessDenied(
                    "The service account credentials are not valid"
                )
            else:
                raise AccessDenied(
                    "The credentials have been revoked or expired, "
                    "please re-run the application to re-authorize"
                )
        except self.http_error as ex:
            self.process_http_error(ex)
        job_id = query_reply.job_id
        logger.debug("Job ID: %s" % job_id)
        # Poll until the job reports DONE, enforcing our own timeoutMs check.
        while query_reply.state != "DONE":
            self.log_elapsed_seconds(" Elapsed", "s. Waiting...")
            timeout_ms = job_config["query"].get("timeoutMs")
            if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
                raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))
            timeout_sec = 1.0
            if timeout_ms:
                # Wait at most 1 second so we can show progress bar
                timeout_sec = min(1.0, timeout_ms / 1000.0)
            try:
                query_reply.result(timeout=timeout_sec)
            except TimeoutError:
                # Use our own timeout logic
                pass
            except self.http_error as ex:
                self.process_http_error(ex)
        if query_reply.cache_hit:
            logger.debug("Query done.\nCache hit.\n")
        else:
            bytes_processed = query_reply.total_bytes_processed or 0
            bytes_billed = query_reply.total_bytes_billed or 0
            logger.debug(
                "Query done.\nProcessed: {} Billed: {}".format(
                    self.sizeof_fmt(bytes_processed),
                    self.sizeof_fmt(bytes_billed),
                )
            )
            logger.debug(
                "Standard price: ${:,.2f} USD\n".format(
                    bytes_billed * self.query_price_for_TB
                )
            )
        try:
            rows_iter = query_reply.result()
        except self.http_error as ex:
            self.process_http_error(ex)
        schema_fields = [field.to_api_repr() for field in rows_iter.schema]
        # Force dtypes that can represent NULLs; leave the rest to pandas.
        nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
        df = rows_iter.to_dataframe(
            dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
        )
        if df.empty:
            df = _cast_empty_df_dtypes(schema_fields, df)
        # Ensure any TIMESTAMP columns are tz-aware.
        df = _localize_df(schema_fields, df)
        logger.debug("Got {} rows.\n".format(rows_iter.total_rows))
        return df
    def load_data(
        self,
        dataframe,
        dataset_id,
        table_id,
        chunksize=None,
        schema=None,
        progress_bar=True,
    ):
        """Upload ``dataframe`` to ``dataset_id.table_id`` in chunks.

        Delegates the chunked upload to ``pandas_gbq.load.load_chunks``; each
        value yielded by that generator is the number of rows still remaining,
        which is logged (and optionally shown via tqdm). HTTP errors are
        re-raised as GenericGBQException via process_http_error().
        """
        from pandas_gbq import load
        total_rows = len(dataframe)
        try:
            chunks = load.load_chunks(
                self.client,
                dataframe,
                dataset_id,
                table_id,
                chunksize=chunksize,
                schema=schema,
                location=self.location,
            )
            # tqdm is None when the optional dependency is not installed.
            if progress_bar and tqdm:
                chunks = tqdm.tqdm(chunks)
            for remaining_rows in chunks:
                logger.info(
                    "\r{} out of {} rows loaded.".format(
                        total_rows - remaining_rows, total_rows
                    )
                )
        except self.http_error as ex:
            self.process_http_error(ex)
def schema(self, dataset_id, table_id):
"""Retrieve the schema of the table
Obtain from BigQuery the field names and field types
for the table defined by the parameters
Parameters
----------
dataset_id : str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
Returns
-------
list of dicts
Fields representing the schema
"""
table_ref = self.client.dataset(dataset_id).table(table_id)
try:
table = self.client.get_table(table_ref)
remote_schema = table.schema
remote_fields = [
field_remote.to_api_repr() for field_remote in remote_schema
]
for field in remote_fields:
field["type"] = field["type"].upper()
field["mode"] = field["mode"].upper()
return remote_fields
except self.http_error as ex:
self.process_http_error(ex)
def _clean_schema_fields(self, fields):
"""Return a sanitized version of the schema for comparisons."""
fields_sorted = sorted(fields, key=lambda field: field["name"])
# Ignore mode and description when comparing schemas.
return [
{"name": field["name"], "type": field["type"]}
for field in fields_sorted
]
    def verify_schema(self, dataset_id, table_id, schema):
        """Indicate whether schemas match exactly

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether all fields in the former
        are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id :str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the schemas match
        """
        # Both sides are reduced to sorted (name, type) pairs, so field
        # order, mode and description never affect the comparison.
        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return fields_remote == fields_local
    def schema_is_subset(self, dataset_id, table_id, schema):
        """Indicate whether the schema to be uploaded is a subset

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether a subset of the fields in
        the former are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the passed schema is a subset
        """
        # Every local (name, type) pair must appear in the remote schema;
        # mode and description are stripped by _clean_schema_fields.
        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return all(field in fields_remote for field in fields_local)
    def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
        """Drop ``table_id`` and recreate it empty with ``table_schema``."""
        table = _Table(
            self.project_id, dataset_id, credentials=self.credentials
        )
        table.delete(table_id)
        table.create(table_id, table_schema)
def _bqschema_to_nullsafe_dtypes(schema_fields):
"""Specify explicit dtypes based on BigQuery schema.
This function only specifies a dtype when the dtype allows nulls.
Otherwise, use pandas's default dtype choice.
See: http://pandas.pydata.org/pandas-docs/dev/missing_data.html
#missing-data-casting-rules-and-indexing
"""
# If you update this mapping, also update the table at
# `docs/source/reading.rst`.
dtype_map = {
"FLOAT": np.dtype(float),
# pandas doesn't support timezone-aware dtype in DataFrame/Series
# constructors. It's more idiomatic to localize after construction.
# https://github.com/pandas-dev/pandas/issues/25843
"TIMESTAMP": "datetime64[ns]",
"TIME": "datetime64[ns]",
"DATE": "datetime64[ns]",
"DATETIME": "datetime64[ns]",
}
dtypes = {}
for field in schema_fields:
name = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
dtypes[name] = dtype
return dtypes
def _cast_empty_df_dtypes(schema_fields, df):
"""Cast any columns in an empty dataframe to correct type.
In an empty dataframe, pandas cannot choose a dtype unless one is
explicitly provided. The _bqschema_to_nullsafe_dtypes() function only
provides dtypes when the dtype safely handles null values. This means
that empty int64 and boolean columns are incorrectly classified as
``object``.
"""
if not df.empty:
raise ValueError(
"DataFrame must be empty in order to cast non-nullsafe dtypes"
)
dtype_map = {"BOOLEAN": bool, "INTEGER": np.int64}
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
df[column] = df[column].astype(dtype)
return df
def _localize_df(schema_fields, df):
"""Localize any TIMESTAMP columns to tz-aware type.
In pandas versions before 0.24.0, DatetimeTZDtype cannot be used as the
dtype in Series/DataFrame construction, so localize those columns after
the DataFrame is constructed.
"""
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
if field["type"].upper() == "TIMESTAMP" and df[column].dt.tz is None:
df[column] = df[column].dt.tz_localize("UTC")
return df
def _make_bqstorage_client(use_bqstorage_api, credentials):
if not use_bqstorage_api:
return None
if bigquery_storage_v1beta1 is None:
raise ImportError(
"Install the google-cloud-bigquery-storage and fastavro packages "
"to use the BigQuery Storage API."
)
return bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=credentials
)
def to_gbq(
    dataframe,
    destination_table,
    project_id=None,
    chunksize=None,
    reauth=False,
    if_exists="fail",
    auth_local_webserver=False,
    table_schema=None,
    location=None,
    progress_bar=True,
    credentials=None,
    verbose=None,
    private_key=None,
):
    """Write a DataFrame to a Google BigQuery table.

    The main method a user calls to export pandas DataFrame contents to
    Google BigQuery table.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        DataFrame to be written to a Google BigQuery table.
    destination_table : str
        Name of table to be written, in the form ``dataset.tablename``.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    chunksize : int, optional
        Number of rows to be inserted in each chunk from the dataframe.
        Set to ``None`` to load the whole dataframe at once.
    reauth : bool, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    if_exists : str, default 'fail'
        Behavior when the destination table exists. Value can be one of:

        ``'fail'``
            If table exists, do nothing.
        ``'replace'``
            If table exists, drop it, recreate it, and insert data.
        ``'append'``
            If table exists, insert data. Create if does not exist.
    auth_local_webserver : bool, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    table_schema : list of dicts, optional
        List of BigQuery table fields to which according DataFrame
        columns conform to, e.g. ``[{'name': 'col1', 'type':
        'STRING'},...]``.

        - If ``table_schema`` is provided, it may contain all or a subset of
          DataFrame columns. If a subset is provided, the rest will be
          inferred from the DataFrame dtypes.
        - If ``table_schema`` is **not** provided, it will be
          generated according to dtypes of DataFrame columns. See
          `Inferring the Table Schema
          <https://pandas-gbq.readthedocs.io/en/latest/writing.html#writing-schema>`__.
          for a description of the schema inference.

        See `BigQuery API documentation on valid column names
        <https://cloud.google.com/bigquery/docs/schemas#column_names`>__.

        .. versionadded:: 0.3.1
    location : str, optional
        Location where the load job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of the
        target dataset.

        .. versionadded:: 0.5.0
    progress_bar : bool, default True
        Use the library `tqdm` to show the progress bar for the upload,
        chunk by chunk.

        .. versionadded:: 0.5.0
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        .. versionadded:: 0.8.0
    verbose : bool, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    private_key : str, deprecated
        Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
        parameter and
        :func:`google.oauth2.service_account.Credentials.from_service_account_info`
        or
        :func:`google.oauth2.service_account.Credentials.from_service_account_file`
        instead.

        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).
    """
    # Fail fast with a readable error if the google client libraries are
    # missing or too old.
    _test_google_api_imports()
    from pandas_gbq import schema
    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=1,
        )
    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )
    if if_exists not in ("fail", "replace", "append"):
        raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
    if "." not in destination_table:
        raise NotFoundException(
            "Invalid Table Name. Should be of the form 'datasetId.tableId' "
        )
    connector = GbqConnector(
        project_id,
        reauth=reauth,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
    )
    # Split on the last "." so only the final component is the table name.
    dataset_id, table_id = destination_table.rsplit(".", 1)
    table = _Table(
        project_id,
        dataset_id,
        location=location,
        credentials=connector.credentials,
    )
    # Merge user-supplied fields over the schema inferred from dtypes; any
    # column not mentioned in table_schema keeps its inferred definition.
    default_schema = _generate_bq_schema(dataframe)
    if not table_schema:
        table_schema = default_schema
    else:
        table_schema = schema.update_schema(
            default_schema, dict(fields=table_schema)
        )
    # If table exists, check if_exists parameter
    if table.exists(table_id):
        if if_exists == "fail":
            raise TableCreationError(
                "Could not create the table because it "
                "already exists. "
                "Change the if_exists parameter to "
                "'append' or 'replace' data."
            )
        elif if_exists == "replace":
            connector.delete_and_recreate_table(
                dataset_id, table_id, table_schema
            )
        elif if_exists == "append":
            if not connector.schema_is_subset(
                dataset_id, table_id, table_schema
            ):
                raise InvalidSchema(
                    "Please verify that the structure and "
                    "data types in the DataFrame match the "
                    "schema of the destination table."
                )
    else:
        table.create(table_id, table_schema)
    if dataframe.empty:
        # Create the table (if needed), but don't try to run a load job with an
        # empty file. See: https://github.com/pydata/pandas-gbq/issues/237
        return
    connector.load_data(
        dataframe,
        dataset_id,
        table_id,
        chunksize=chunksize,
        schema=table_schema,
        progress_bar=progress_bar,
    )
def generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: derive a Google BigQuery schema from ``df``.

    Parameters
    ----------
    df : DataFrame
    default_type : string
        Fallback BigQuery type for columns whose dtype has no mapping.
    """
    # deprecation TimeSeries, #11121
    message = (
        "generate_bq_schema is deprecated and will be removed in "
        "a future version"
    )
    warnings.warn(message, FutureWarning, stacklevel=2)
    return _generate_bq_schema(df, default_type=default_type)
def _generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Given a dataframe, generate a Google BigQuery schema.

    This is a private method, but was used in external code to work around
    issues in the default schema generation. Now that individual columns can
    be overridden: https://github.com/pydata/pandas-gbq/issues/218, this
    method can be removed after there is time to migrate away from this
    method. """
    # Imported here rather than at module level — presumably to avoid a
    # circular import at load time; TODO confirm.
    from pandas_gbq import schema
    return schema.generate_bq_schema(df, default_type=default_type)
class _Table(GbqConnector):
    """Connector helper for table-level operations within one dataset."""
    def __init__(
        self,
        project_id,
        dataset_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        # Stored before super().__init__ since GbqConnector itself is
        # dataset-agnostic; every method below operates within this dataset.
        self.dataset_id = dataset_id
        super(_Table, self).__init__(
            project_id,
            reauth,
            location=location,
            credentials=credentials,
            private_key=private_key,
        )
    def exists(self, table_id):
        """ Check if a table exists in Google BigQuery

        Parameters
        ----------
        table : str
            Name of table to be verified

        Returns
        -------
        boolean
            true if table exists, otherwise false
        """
        from google.api_core.exceptions import NotFound
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.get_table(table_ref)
            return True
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)
    def create(self, table_id, schema):
        """ Create a table in Google BigQuery given a table and schema

        Also creates the enclosing dataset first if it does not exist yet.

        Parameters
        ----------
        table : str
            Name of table to be written
        schema : str
            Use the generate_bq_schema to generate your table schema from a
            dataframe.
        """
        from google.cloud.bigquery import SchemaField
        from google.cloud.bigquery import Table
        if self.exists(table_id):
            raise TableCreationError(
                "Table {0} already " "exists".format(table_id)
            )
        if not _Dataset(self.project_id, credentials=self.credentials).exists(
            self.dataset_id
        ):
            _Dataset(
                self.project_id,
                credentials=self.credentials,
                location=self.location,
            ).create(self.dataset_id)
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        table = Table(table_ref)
        # Manually create the schema objects, adding NULLABLE mode
        # as a workaround for
        # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
        for field in schema["fields"]:
            if "mode" not in field:
                field["mode"] = "NULLABLE"
        table.schema = [
            SchemaField.from_api_repr(field) for field in schema["fields"]
        ]
        try:
            self.client.create_table(table)
        except self.http_error as ex:
            self.process_http_error(ex)
    def delete(self, table_id):
        """ Delete a table in Google BigQuery

        Parameters
        ----------
        table : str
            Name of table to be deleted
        """
        from google.api_core.exceptions import NotFound
        if not self.exists(table_id):
            raise NotFoundException("Table does not exist")
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.delete_table(table_ref)
        except NotFound:
            # Ignore 404 error which may occur if table already deleted
            pass
        except self.http_error as ex:
            self.process_http_error(ex)
class _Dataset(GbqConnector):
    """Connector helper for dataset-level operations (exists/create)."""
    def __init__(
        self,
        project_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        super(_Dataset, self).__init__(
            project_id,
            reauth,
            credentials=credentials,
            location=location,
            private_key=private_key,
        )
    def exists(self, dataset_id):
        """ Check if a dataset exists in Google BigQuery

        Parameters
        ----------
        dataset_id : str
            Name of dataset to be verified

        Returns
        -------
        boolean
            true if dataset exists, otherwise false
        """
        from google.api_core.exceptions import NotFound
        try:
            self.client.get_dataset(self.client.dataset(dataset_id))
            return True
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)
    def create(self, dataset_id):
        """ Create a dataset in Google BigQuery

        Parameters
        ----------
        dataset : str
            Name of dataset to be written
        """
        from google.cloud.bigquery import Dataset
        if self.exists(dataset_id):
            raise DatasetCreationError(
                "Dataset {0} already " "exists".format(dataset_id)
            )
        dataset = Dataset(self.client.dataset(dataset_id))
        # The dataset location must match the target of later load jobs.
        if self.location is not None:
            dataset.location = self.location
        try:
            self.client.create_dataset(dataset)
        except self.http_error as ex:
            self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
to_gbq
|
python
|
def to_gbq(
dataframe,
destination_table,
project_id=None,
chunksize=None,
reauth=False,
if_exists="fail",
auth_local_webserver=False,
table_schema=None,
location=None,
progress_bar=True,
credentials=None,
verbose=None,
private_key=None,
):
_test_google_api_imports()
from pandas_gbq import schema
if verbose is not None and SHOW_VERBOSE_DEPRECATION:
warnings.warn(
"verbose is deprecated and will be removed in "
"a future version. Set logging level in order to vary "
"verbosity",
FutureWarning,
stacklevel=1,
)
if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
warnings.warn(
PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
)
if if_exists not in ("fail", "replace", "append"):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
if "." not in destination_table:
raise NotFoundException(
"Invalid Table Name. Should be of the form 'datasetId.tableId' "
)
connector = GbqConnector(
project_id,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
location=location,
credentials=credentials,
private_key=private_key,
)
dataset_id, table_id = destination_table.rsplit(".", 1)
table = _Table(
project_id,
dataset_id,
location=location,
credentials=connector.credentials,
)
default_schema = _generate_bq_schema(dataframe)
if not table_schema:
table_schema = default_schema
else:
table_schema = schema.update_schema(
default_schema, dict(fields=table_schema)
)
# If table exists, check if_exists parameter
if table.exists(table_id):
if if_exists == "fail":
raise TableCreationError(
"Could not create the table because it "
"already exists. "
"Change the if_exists parameter to "
"'append' or 'replace' data."
)
elif if_exists == "replace":
connector.delete_and_recreate_table(
dataset_id, table_id, table_schema
)
elif if_exists == "append":
if not connector.schema_is_subset(
dataset_id, table_id, table_schema
):
raise InvalidSchema(
"Please verify that the structure and "
"data types in the DataFrame match the "
"schema of the destination table."
)
else:
table.create(table_id, table_schema)
if dataframe.empty:
# Create the table (if needed), but don't try to run a load job with an
# empty file. See: https://github.com/pydata/pandas-gbq/issues/237
return
connector.load_data(
dataframe,
dataset_id,
table_id,
chunksize=chunksize,
schema=table_schema,
progress_bar=progress_bar,
)
|
Write a DataFrame to a Google BigQuery table.
The main method a user calls to export pandas DataFrame contents to
Google BigQuery table.
This method uses the Google Cloud client library to make requests to
Google BigQuery, documented `here
<https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.
See the :ref:`How to authenticate with Google BigQuery <authentication>`
guide for authentication instructions.
Parameters
----------
dataframe : pandas.DataFrame
DataFrame to be written to a Google BigQuery table.
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
.. versionadded:: 0.2.0
table_schema : list of dicts, optional
List of BigQuery table fields to which according DataFrame
columns conform to, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``.
- If ``table_schema`` is provided, it may contain all or a subset of
DataFrame columns. If a subset is provided, the rest will be
inferred from the DataFrame dtypes.
- If ``table_schema`` is **not** provided, it will be
generated according to dtypes of DataFrame columns. See
`Inferring the Table Schema
<https://pandas-gbq.readthedocs.io/en/latest/writing.html#writing-schema>`__.
for a description of the schema inference.
See `BigQuery API documentation on valid column names
<https://cloud.google.com/bigquery/docs/schemas#column_names`>__.
.. versionadded:: 0.3.1
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
.. versionadded:: 0.5.0
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
.. versionadded:: 0.5.0
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
.. versionadded:: 0.8.0
verbose : bool, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L957-L1160
|
[
"def _test_google_api_imports():\n\n try:\n import pydata_google_auth # noqa\n except ImportError as ex:\n raise ImportError(\n \"pandas-gbq requires pydata-google-auth: {0}\".format(ex)\n )\n\n try:\n from google_auth_oauthlib.flow import InstalledAppFlow # noqa\n except ImportError as ex:\n raise ImportError(\n \"pandas-gbq requires google-auth-oauthlib: {0}\".format(ex)\n )\n\n try:\n import google.auth # noqa\n except ImportError as ex:\n raise ImportError(\"pandas-gbq requires google-auth: {0}\".format(ex))\n\n try:\n from google.cloud import bigquery # noqa\n except ImportError as ex:\n raise ImportError(\n \"pandas-gbq requires google-cloud-bigquery: {0}\".format(ex)\n )\n\n _check_google_client_version()\n",
"def _generate_bq_schema(df, default_type=\"STRING\"):\n \"\"\"DEPRECATED: Given a dataframe, generate a Google BigQuery schema.\n\n This is a private method, but was used in external code to work around\n issues in the default schema generation. Now that individual columns can\n be overridden: https://github.com/pydata/pandas-gbq/issues/218, this\n method can be removed after there is time to migrate away from this\n method. \"\"\"\n from pandas_gbq import schema\n\n return schema.generate_bq_schema(df, default_type=default_type)\n",
"def update_schema(schema_old, schema_new):\n \"\"\"\n Given an old BigQuery schema, update it with a new one.\n\n Where a field name is the same, the new will replace the old. Any\n new fields not present in the old schema will be added.\n\n Arguments:\n schema_old: the old schema to update\n schema_new: the new schema which will overwrite/extend the old\n \"\"\"\n old_fields = schema_old[\"fields\"]\n new_fields = schema_new[\"fields\"]\n output_fields = list(old_fields)\n\n field_indices = {field[\"name\"]: i for i, field in enumerate(output_fields)}\n\n for field in new_fields:\n name = field[\"name\"]\n if name in field_indices:\n # replace old field with new field of same name\n output_fields[field_indices[name]] = field\n else:\n # add new field\n output_fields.append(field)\n\n return {\"fields\": output_fields}\n",
"def load_data(\n self,\n dataframe,\n dataset_id,\n table_id,\n chunksize=None,\n schema=None,\n progress_bar=True,\n):\n from pandas_gbq import load\n\n total_rows = len(dataframe)\n\n try:\n chunks = load.load_chunks(\n self.client,\n dataframe,\n dataset_id,\n table_id,\n chunksize=chunksize,\n schema=schema,\n location=self.location,\n )\n if progress_bar and tqdm:\n chunks = tqdm.tqdm(chunks)\n for remaining_rows in chunks:\n logger.info(\n \"\\r{} out of {} rows loaded.\".format(\n total_rows - remaining_rows, total_rows\n )\n )\n except self.http_error as ex:\n self.process_http_error(ex)\n",
"def schema_is_subset(self, dataset_id, table_id, schema):\n \"\"\"Indicate whether the schema to be uploaded is a subset\n\n Compare the BigQuery table identified in the parameters with\n the schema passed in and indicate whether a subset of the fields in\n the former are present in the latter. Order is not considered.\n\n Parameters\n ----------\n dataset_id : str\n Name of the BigQuery dataset for the table\n table_id : str\n Name of the BigQuery table\n schema : list(dict)\n Schema for comparison. Each item should have\n a 'name' and a 'type'\n\n Returns\n -------\n bool\n Whether the passed schema is a subset\n \"\"\"\n\n fields_remote = self._clean_schema_fields(\n self.schema(dataset_id, table_id)\n )\n fields_local = self._clean_schema_fields(schema[\"fields\"])\n\n return all(field in fields_remote for field in fields_local)\n",
"def delete_and_recreate_table(self, dataset_id, table_id, table_schema):\n table = _Table(\n self.project_id, dataset_id, credentials=self.credentials\n )\n table.delete(table_id)\n table.create(table_id, table_schema)\n",
"def exists(self, table_id):\n \"\"\" Check if a table exists in Google BigQuery\n\n Parameters\n ----------\n table : str\n Name of table to be verified\n\n Returns\n -------\n boolean\n true if table exists, otherwise false\n \"\"\"\n from google.api_core.exceptions import NotFound\n\n table_ref = self.client.dataset(self.dataset_id).table(table_id)\n try:\n self.client.get_table(table_ref)\n return True\n except NotFound:\n return False\n except self.http_error as ex:\n self.process_http_error(ex)\n"
] |
import logging
import time
import warnings
from datetime import datetime
import numpy as np
try:
# The BigQuery Storage API client is an optional dependency. It is only
# required when use_bqstorage_api=True.
from google.cloud import bigquery_storage_v1beta1
except ImportError: # pragma: NO COVER
bigquery_storage_v1beta1 = None
from pandas_gbq.exceptions import AccessDenied
logger = logging.getLogger(__name__)
# Populated/updated by _check_google_client_version() at import-check time.
BIGQUERY_INSTALLED_VERSION = None
SHOW_VERBOSE_DEPRECATION = False
SHOW_PRIVATE_KEY_DEPRECATION = False
# Fix: the original adjacent-string concatenation produced "version.Use"
# (no space between sentences) in the user-facing warning text.
PRIVATE_KEY_DEPRECATION_MESSAGE = (
    "private_key is deprecated and will be removed in a future version. "
    "Use the credentials argument instead. See "
    "https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html "
    "for examples on using the credentials argument with service account keys."
)
try:
    # tqdm is optional; progress bars are silently disabled when missing.
    import tqdm  # noqa
except ImportError:
    tqdm = None
def _check_google_client_version():
    """Validate the installed google-cloud-bigquery and set deprecation flags.

    Side effects: sets the module globals BIGQUERY_INSTALLED_VERSION,
    SHOW_VERBOSE_DEPRECATION and SHOW_PRIVATE_KEY_DEPRECATION.

    Raises
    ------
    ImportError
        If setuptools (pkg_resources) is unavailable or the installed
        google-cloud-bigquery is older than the minimum supported version.
    """
    global BIGQUERY_INSTALLED_VERSION, SHOW_VERBOSE_DEPRECATION, SHOW_PRIVATE_KEY_DEPRECATION
    try:
        import pkg_resources
    except ImportError:
        raise ImportError("Could not import pkg_resources (setuptools).")
    # https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/bigquery/CHANGELOG.md
    bigquery_minimum_version = pkg_resources.parse_version("1.9.0")
    BIGQUERY_INSTALLED_VERSION = pkg_resources.get_distribution(
        "google-cloud-bigquery"
    ).parsed_version
    if BIGQUERY_INSTALLED_VERSION < bigquery_minimum_version:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery >= {0}, "
            "current version {1}".format(
                bigquery_minimum_version, BIGQUERY_INSTALLED_VERSION
            )
        )
    # Add check for Pandas version before showing deprecation warning.
    # https://github.com/pydata/pandas-gbq/issues/157
    pandas_installed_version = pkg_resources.get_distribution(
        "pandas"
    ).parsed_version
    # The variable name suggests pandas 0.23.0 dropped the verbose
    # argument — TODO confirm against pandas changelog.
    pandas_version_wo_verbosity = pkg_resources.parse_version("0.23.0")
    SHOW_VERBOSE_DEPRECATION = (
        pandas_installed_version >= pandas_version_wo_verbosity
    )
    # pandas 0.24.0 introduced the credentials argument, superseding
    # private_key (per the variable name) — TODO confirm.
    pandas_version_with_credentials_arg = pkg_resources.parse_version("0.24.0")
    SHOW_PRIVATE_KEY_DEPRECATION = (
        pandas_installed_version >= pandas_version_with_credentials_arg
    )
def _test_google_api_imports():
    """Fail fast with an ImportError that names the missing Google API
    dependency, then validate installed client versions."""
    try:
        import pydata_google_auth  # noqa
    except ImportError as err:
        raise ImportError(
            "pandas-gbq requires pydata-google-auth: {0}".format(err)
        )

    try:
        from google_auth_oauthlib.flow import InstalledAppFlow  # noqa
    except ImportError as err:
        raise ImportError(
            "pandas-gbq requires google-auth-oauthlib: {0}".format(err)
        )

    try:
        import google.auth  # noqa
    except ImportError as err:
        raise ImportError("pandas-gbq requires google-auth: {0}".format(err))

    try:
        from google.cloud import bigquery  # noqa
    except ImportError as err:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery: {0}".format(err)
        )

    _check_google_client_version()
class DatasetCreationError(ValueError):
    """Raised when creating a BigQuery dataset fails."""
class GenericGBQException(ValueError):
    """Raised when an unrecognized Google API error occurs."""
class InvalidColumnOrder(ValueError):
    """Raised when the requested column order does not match the schema
    of the results DataFrame returned by BigQuery."""
class InvalidIndexColumn(ValueError):
    """Raised when the requested index column does not match the schema
    of the results DataFrame returned by BigQuery."""
class InvalidPageToken(ValueError):
    """Raised when Google BigQuery fails to return a page token, or
    returns a duplicate one."""
class InvalidSchema(ValueError):
    """Raised when a DataFrame does not match the schema of the
    destination table in BigQuery."""
class NotFoundException(ValueError):
    """Raised when the requested project, dataset, or table cannot be
    found."""
class QueryTimeout(ValueError):
    """Raised when a query exceeds the timeoutMs value specified in the
    BigQuery job configuration."""
class TableCreationError(ValueError):
    """Raised when creating a BigQuery table fails."""
class Context(object):
    """Session-level cache of defaults shared across pandas-gbq calls.

    One instance is created when ``pandas_gbq`` is imported and exposed
    as :attr:`pandas_gbq.context`. It holds cached credentials, a default
    project ID, and a default SQL dialect.
    """

    def __init__(self):
        self._credentials = None
        self._project = None
        # Left as None so read_gbq can distinguish "never configured"
        # from an explicit dialect choice.
        self._dialect = None

    @property
    def credentials(self):
        """google.auth.credentials.Credentials: Credentials used for
        Google API calls.

        Cached automatically by :func:`pandas_gbq.read_gbq` and
        :func:`pandas_gbq.to_gbq`; may also be set manually, e.g.::

            pandas_gbq.context.credentials = (
                service_account.Credentials.from_service_account_file(
                    '/path/to/key.json'))

        See http://google-auth.readthedocs.io
        /en/latest/user-guide.html#obtaining-credentials for how to
        obtain credentials.
        """
        return self._credentials

    @credentials.setter
    def credentials(self, value):
        self._credentials = value

    @property
    def project(self):
        """str: Default project used for Google API calls, e.g.::

            pandas_gbq.context.project = 'my-project'
        """
        return self._project

    @project.setter
    def project(self, value):
        self._project = value

    @property
    def dialect(self):
        """str: Default SQL dialect for :func:`pandas_gbq.read_gbq`.

        ``'legacy'`` selects BigQuery's legacy SQL; ``'standard'``
        selects standard (SQL 2011 compliant) SQL, e.g.::

            pandas_gbq.context.dialect = 'standard'
        """
        return self._dialect

    @dialect.setter
    def dialect(self, value):
        self._dialect = value
# Module-level singleton: an empty Context created at import time, used to
# cache credentials and the default project across calls.
context = Context()
"""A :class:`pandas_gbq.Context` object used to cache credentials.
Credentials automatically are cached in-memory by :func:`pandas_gbq.read_gbq`
and :func:`pandas_gbq.to_gbq`.
"""
class GbqConnector(object):
    """Wraps a ``google.cloud.bigquery.Client`` with credential resolution
    and caching, query execution, and DataFrame conversion helpers."""

    def __init__(
        self,
        project_id,
        reauth=False,
        private_key=None,
        auth_local_webserver=False,
        dialect="standard",
        location=None,
        credentials=None,
        use_bqstorage_api=False,
    ):
        """Resolve credentials and project (explicit arguments first, then
        the module-level ``context`` cache, then ``pandas_gbq.auth``),
        build the BigQuery client, and prime the shared context cache."""
        global context
        from google.api_core.exceptions import GoogleAPIError
        from google.api_core.exceptions import ClientError
        from pandas_gbq import auth

        # Tuple of exception types treated as "HTTP error" throughout.
        self.http_error = (ClientError, GoogleAPIError)
        self.project_id = project_id
        self.location = location
        self.reauth = reauth
        self.private_key = private_key
        self.auth_local_webserver = auth_local_webserver
        self.dialect = dialect
        self.credentials = credentials
        default_project = None

        # Service account credentials have a project associated with them.
        # Prefer that project if none was supplied.
        if self.project_id is None and hasattr(self.credentials, "project_id"):
            self.project_id = credentials.project_id

        # Load credentials from cache.
        if not self.credentials:
            self.credentials = context.credentials
            default_project = context.project

        # Credentials were explicitly asked for, so don't use the cache.
        if private_key or reauth or not self.credentials:
            self.credentials, default_project = auth.get_credentials(
                private_key=private_key,
                project_id=project_id,
                reauth=reauth,
                auth_local_webserver=auth_local_webserver,
            )

        if self.project_id is None:
            self.project_id = default_project

        if self.project_id is None:
            raise ValueError(
                "Could not determine project ID and one was not supplied."
            )

        # Cache the credentials if they haven't been set yet.
        if context.credentials is None:
            context.credentials = self.credentials
        if context.project is None:
            context.project = self.project_id

        self.client = self.get_client()
        self.bqstorage_client = _make_bqstorage_client(
            use_bqstorage_api, self.credentials
        )

        # BQ Queries costs $5 per TB. First 1 TB per month is free
        # see here for more: https://cloud.google.com/bigquery/pricing
        self.query_price_for_TB = 5.0 / 2 ** 40  # USD/TB

    def _start_timer(self):
        # Record the wall-clock start of the current operation.
        self.start = time.time()

    def get_elapsed_seconds(self):
        """Return seconds elapsed since the last ``_start_timer()`` call."""
        return round(time.time() - self.start, 2)

    def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
        """Log elapsed time, but only once it exceeds *overlong* seconds."""
        sec = self.get_elapsed_seconds()
        if sec > overlong:
            logger.info("{} {} {}".format(prefix, sec, postfix))

    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    @staticmethod
    def sizeof_fmt(num, suffix="B"):
        """Format *num* bytes as a human-readable string, e.g. ``'1.5 KB'``."""
        fmt = "%3.1f %s%s"
        for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
            if abs(num) < 1024.0:
                return fmt % (num, unit, suffix)
            num /= 1024.0
        return fmt % (num, "Y", suffix)

    def get_client(self):
        """Construct a ``bigquery.Client`` for this connector's project and
        credentials."""
        from google.cloud import bigquery

        return bigquery.Client(
            project=self.project_id, credentials=self.credentials
        )

    @staticmethod
    def process_http_error(ex):
        """Re-raise a Google API error as :class:`GenericGBQException`."""
        # See `BigQuery Troubleshooting Errors
        # <https://cloud.google.com/bigquery/troubleshooting-errors>`__
        raise GenericGBQException("Reason: {0}".format(ex))

    def run_query(self, query, **kwargs):
        """Execute *query* and return its results as a DataFrame.

        ``kwargs['configuration']`` may carry a raw BigQuery job
        configuration dict; a query statement inside that dict is mutually
        exclusive with the *query* argument.
        """
        from concurrent.futures import TimeoutError
        from google.auth.exceptions import RefreshError
        from google.cloud import bigquery

        job_config = {
            "query": {
                "useLegacySql": self.dialect == "legacy"
                # 'allowLargeResults', 'createDisposition',
                # 'preserveNulls', destinationTable, useQueryCache
            }
        }
        config = kwargs.get("configuration")
        if config is not None:
            job_config.update(config)
            if "query" in config and "query" in config["query"]:
                if query is not None:
                    raise ValueError(
                        "Query statement can't be specified "
                        "inside config while it is specified "
                        "as parameter"
                    )
                query = config["query"].pop("query")

        self._start_timer()

        try:
            logger.debug("Requesting query... ")
            query_reply = self.client.query(
                query,
                job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
                location=self.location,
                project=self.project_id,
            )
            logger.debug("Query running...")
        except (RefreshError, ValueError):
            if self.private_key:
                raise AccessDenied(
                    "The service account credentials are not valid"
                )
            else:
                raise AccessDenied(
                    "The credentials have been revoked or expired, "
                    "please re-run the application to re-authorize"
                )
        except self.http_error as ex:
            self.process_http_error(ex)

        job_id = query_reply.job_id
        logger.debug("Job ID: %s" % job_id)

        # Poll until the job finishes, enforcing any user-supplied timeoutMs.
        while query_reply.state != "DONE":
            self.log_elapsed_seconds(" Elapsed", "s. Waiting...")
            timeout_ms = job_config["query"].get("timeoutMs")
            if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
                raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))
            timeout_sec = 1.0
            if timeout_ms:
                # Wait at most 1 second so we can show progress bar
                timeout_sec = min(1.0, timeout_ms / 1000.0)
            try:
                query_reply.result(timeout=timeout_sec)
            except TimeoutError:
                # Use our own timeout logic
                pass
            except self.http_error as ex:
                self.process_http_error(ex)

        if query_reply.cache_hit:
            logger.debug("Query done.\nCache hit.\n")
        else:
            bytes_processed = query_reply.total_bytes_processed or 0
            bytes_billed = query_reply.total_bytes_billed or 0
            logger.debug(
                "Query done.\nProcessed: {} Billed: {}".format(
                    self.sizeof_fmt(bytes_processed),
                    self.sizeof_fmt(bytes_billed),
                )
            )
            logger.debug(
                "Standard price: ${:,.2f} USD\n".format(
                    bytes_billed * self.query_price_for_TB
                )
            )

        try:
            rows_iter = query_reply.result()
        except self.http_error as ex:
            self.process_http_error(ex)

        # Convert results to a DataFrame using null-safe dtypes derived
        # from the result schema.
        schema_fields = [field.to_api_repr() for field in rows_iter.schema]
        nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
        df = rows_iter.to_dataframe(
            dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
        )
        if df.empty:
            df = _cast_empty_df_dtypes(schema_fields, df)

        # Ensure any TIMESTAMP columns are tz-aware.
        df = _localize_df(schema_fields, df)

        logger.debug("Got {} rows.\n".format(rows_iter.total_rows))
        return df

    def load_data(
        self,
        dataframe,
        dataset_id,
        table_id,
        chunksize=None,
        schema=None,
        progress_bar=True,
    ):
        """Upload *dataframe* to ``dataset_id.table_id`` in chunks,
        optionally showing a tqdm progress bar when tqdm is installed."""
        from pandas_gbq import load

        total_rows = len(dataframe)
        try:
            chunks = load.load_chunks(
                self.client,
                dataframe,
                dataset_id,
                table_id,
                chunksize=chunksize,
                schema=schema,
                location=self.location,
            )
            if progress_bar and tqdm:
                chunks = tqdm.tqdm(chunks)
            for remaining_rows in chunks:
                logger.info(
                    "\r{} out of {} rows loaded.".format(
                        total_rows - remaining_rows, total_rows
                    )
                )
        except self.http_error as ex:
            self.process_http_error(ex)

    def schema(self, dataset_id, table_id):
        """Retrieve the schema of the table

        Obtain from BigQuery the field names and field types
        for the table defined by the parameters

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table

        Returns
        -------
        list of dicts
            Fields representing the schema
        """
        table_ref = self.client.dataset(dataset_id).table(table_id)
        try:
            table = self.client.get_table(table_ref)
            remote_schema = table.schema
            remote_fields = [
                field_remote.to_api_repr() for field_remote in remote_schema
            ]
            # Normalize type/mode casing so schema comparisons are
            # case-insensitive.
            for field in remote_fields:
                field["type"] = field["type"].upper()
                field["mode"] = field["mode"].upper()
            return remote_fields
        except self.http_error as ex:
            self.process_http_error(ex)

    def _clean_schema_fields(self, fields):
        """Return a sanitized version of the schema for comparisons."""
        fields_sorted = sorted(fields, key=lambda field: field["name"])
        # Ignore mode and description when comparing schemas.
        return [
            {"name": field["name"], "type": field["type"]}
            for field in fields_sorted
        ]

    def verify_schema(self, dataset_id, table_id, schema):
        """Indicate whether schemas match exactly

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether all fields in the former
        are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id :str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the schemas match
        """
        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return fields_remote == fields_local

    def schema_is_subset(self, dataset_id, table_id, schema):
        """Indicate whether the schema to be uploaded is a subset

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether a subset of the fields in
        the former are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the passed schema is a subset
        """
        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return all(field in fields_remote for field in fields_local)

    def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
        """Drop ``dataset_id.table_id`` and recreate it with *table_schema*."""
        table = _Table(
            self.project_id, dataset_id, credentials=self.credentials
        )
        table.delete(table_id)
        table.create(table_id, table_schema)
def _bqschema_to_nullsafe_dtypes(schema_fields):
"""Specify explicit dtypes based on BigQuery schema.
This function only specifies a dtype when the dtype allows nulls.
Otherwise, use pandas's default dtype choice.
See: http://pandas.pydata.org/pandas-docs/dev/missing_data.html
#missing-data-casting-rules-and-indexing
"""
# If you update this mapping, also update the table at
# `docs/source/reading.rst`.
dtype_map = {
"FLOAT": np.dtype(float),
# pandas doesn't support timezone-aware dtype in DataFrame/Series
# constructors. It's more idiomatic to localize after construction.
# https://github.com/pandas-dev/pandas/issues/25843
"TIMESTAMP": "datetime64[ns]",
"TIME": "datetime64[ns]",
"DATE": "datetime64[ns]",
"DATETIME": "datetime64[ns]",
}
dtypes = {}
for field in schema_fields:
name = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
dtypes[name] = dtype
return dtypes
def _cast_empty_df_dtypes(schema_fields, df):
"""Cast any columns in an empty dataframe to correct type.
In an empty dataframe, pandas cannot choose a dtype unless one is
explicitly provided. The _bqschema_to_nullsafe_dtypes() function only
provides dtypes when the dtype safely handles null values. This means
that empty int64 and boolean columns are incorrectly classified as
``object``.
"""
if not df.empty:
raise ValueError(
"DataFrame must be empty in order to cast non-nullsafe dtypes"
)
dtype_map = {"BOOLEAN": bool, "INTEGER": np.int64}
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
df[column] = df[column].astype(dtype)
return df
def _localize_df(schema_fields, df):
"""Localize any TIMESTAMP columns to tz-aware type.
In pandas versions before 0.24.0, DatetimeTZDtype cannot be used as the
dtype in Series/DataFrame construction, so localize those columns after
the DataFrame is constructed.
"""
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
if field["type"].upper() == "TIMESTAMP" and df[column].dt.tz is None:
df[column] = df[column].dt.tz_localize("UTC")
return df
def _make_bqstorage_client(use_bqstorage_api, credentials):
if not use_bqstorage_api:
return None
if bigquery_storage_v1beta1 is None:
raise ImportError(
"Install the google-cloud-bigquery-storage and fastavro packages "
"to use the BigQuery Storage API."
)
return bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=credentials
)
def read_gbq(
    query,
    project_id=None,
    index_col=None,
    col_order=None,
    reauth=False,
    auth_local_webserver=False,
    dialect=None,
    location=None,
    configuration=None,
    credentials=None,
    use_bqstorage_api=False,
    verbose=None,
    private_key=None,
):
    r"""Load data from Google BigQuery into a pandas DataFrame.

    Executes a BigQuery SQL query via the google-cloud-python client and
    reads the results into a DataFrame. See the pandas-gbq authentication
    guide for how credentials are obtained.

    Parameters
    ----------
    query : str
        SQL-like query to return data values.
    project_id : str, optional
        Google BigQuery project ID. Optional when available from the
        environment or from the supplied credentials.
    index_col : str, optional
        Name of the result column to use as the DataFrame index.
    col_order : list(str), optional
        Desired column order for the results DataFrame.
    reauth : boolean, default False
        Force re-authentication (useful when multiple accounts are used).
    auth_local_webserver : boolean, default False
        Use the local-webserver OAuth flow instead of the console flow
        when getting user credentials.

        .. versionadded:: 0.2.0
    dialect : str, default 'standard'
        SQL dialect: ``'legacy'`` or ``'standard'`` (SQL 2011 compliant).
        Falls back to ``pandas_gbq.context.dialect`` and then to
        ``'standard'`` when unset.
    location : str, optional
        Location where the query job should run; must match the location
        of any datasets used in the query.

        .. versionadded:: 0.5.0
    configuration : dict, optional
        Raw query-job configuration, for example
        ``{'query': {'useQueryCache': False}}``.
    credentials : google.auth.credentials.Credentials, optional
        Credentials overriding the defaults.

        .. versionadded:: 0.8.0
    use_bqstorage_api : bool, default False
        Download results through the BigQuery Storage API (requires the
        google-cloud-bigquery-storage and fastavro packages and the
        ``bigquery.readsessions.create`` permission).

        .. versionadded:: 0.10.0
    verbose : None, deprecated
        Deprecated in Pandas-GBQ 0.4.0; adjust verbosity via the logging
        module instead.
    private_key : str, deprecated
        Deprecated in pandas-gbq 0.8.0; use the ``credentials`` parameter
        with a service-account credential instead.

    Returns
    -------
    df: DataFrame
        DataFrame representing results of query.
    """
    global context

    # Resolve the dialect: explicit argument, then the context default,
    # then 'standard'.
    if dialect is None:
        dialect = context.dialect
    if dialect is None:
        dialect = "standard"

    _test_google_api_imports()

    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=2,
        )
    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )

    if dialect not in ("legacy", "standard"):
        raise ValueError("'{0}' is not valid for dialect".format(dialect))

    connector = GbqConnector(
        project_id,
        reauth=reauth,
        dialect=dialect,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
        use_bqstorage_api=use_bqstorage_api,
    )
    result_df = connector.run_query(query, configuration=configuration)

    # Reindex the DataFrame on the provided column.
    if index_col is not None:
        if index_col not in result_df.columns:
            raise InvalidIndexColumn(
                'Index column "{0}" does not exist in DataFrame.'.format(
                    index_col
                )
            )
        result_df.set_index(index_col, inplace=True)

    # Reorder columns per the provided list.
    if col_order is not None:
        if sorted(col_order) != sorted(result_df.columns):
            raise InvalidColumnOrder(
                "Column order does not match this DataFrame."
            )
        result_df = result_df[col_order]

    connector.log_elapsed_seconds(
        "Total time taken",
        datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
    )
    return result_df
def generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Given a passed df, generate the associated Google
    BigQuery schema.

    Parameters
    ----------
    df : DataFrame
    default_type : string
        The default BigQuery type used when a column's dtype has no
        mapping in the schema.
    """
    # deprecation TimeSeries, #11121
    message = (
        "generate_bq_schema is deprecated and will be removed in "
        "a future version"
    )
    warnings.warn(message, FutureWarning, stacklevel=2)
    return _generate_bq_schema(df, default_type=default_type)
def _generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Given a dataframe, generate a Google BigQuery schema.

    Kept only because external code used it to work around issues in the
    default schema generation; individual columns can now be overridden
    (https://github.com/pydata/pandas-gbq/issues/218), so this shim can be
    removed once callers have migrated.
    """
    from pandas_gbq import schema as schema_module

    return schema_module.generate_bq_schema(df, default_type=default_type)
class _Table(GbqConnector):
    """Table-level helper (existence check, create, delete) scoped to a
    single dataset."""

    def __init__(
        self,
        project_id,
        dataset_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        # Remember the target dataset before the base class connects.
        self.dataset_id = dataset_id
        super(_Table, self).__init__(
            project_id,
            reauth,
            location=location,
            credentials=credentials,
            private_key=private_key,
        )

    def exists(self, table_id):
        """ Check if a table exists in Google BigQuery

        Parameters
        ----------
        table : str
            Name of table to be verified

        Returns
        -------
        boolean
            true if table exists, otherwise false
        """
        from google.api_core.exceptions import NotFound

        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.get_table(table_ref)
            return True
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)

    def create(self, table_id, schema):
        """ Create a table in Google BigQuery given a table and schema

        Parameters
        ----------
        table : str
            Name of table to be written
        schema : str
            Use the generate_bq_schema to generate your table schema from a
            dataframe.
        """
        from google.cloud.bigquery import SchemaField
        from google.cloud.bigquery import Table

        if self.exists(table_id):
            raise TableCreationError(
                "Table {0} already " "exists".format(table_id)
            )
        # Create the enclosing dataset first if it does not exist yet.
        if not _Dataset(self.project_id, credentials=self.credentials).exists(
            self.dataset_id
        ):
            _Dataset(
                self.project_id,
                credentials=self.credentials,
                location=self.location,
            ).create(self.dataset_id)
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        table = Table(table_ref)
        # Manually create the schema objects, adding NULLABLE mode
        # as a workaround for
        # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
        for field in schema["fields"]:
            if "mode" not in field:
                field["mode"] = "NULLABLE"
        table.schema = [
            SchemaField.from_api_repr(field) for field in schema["fields"]
        ]
        try:
            self.client.create_table(table)
        except self.http_error as ex:
            self.process_http_error(ex)

    def delete(self, table_id):
        """ Delete a table in Google BigQuery

        Parameters
        ----------
        table : str
            Name of table to be deleted
        """
        from google.api_core.exceptions import NotFound

        if not self.exists(table_id):
            raise NotFoundException("Table does not exist")
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.delete_table(table_ref)
        except NotFound:
            # Ignore 404 error which may occur if table already deleted
            pass
        except self.http_error as ex:
            self.process_http_error(ex)
class _Dataset(GbqConnector):
    """Dataset-level helper (existence check and creation)."""

    def __init__(
        self,
        project_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        super(_Dataset, self).__init__(
            project_id,
            reauth,
            credentials=credentials,
            location=location,
            private_key=private_key,
        )

    def exists(self, dataset_id):
        """ Check if a dataset exists in Google BigQuery

        Parameters
        ----------
        dataset_id : str
            Name of dataset to be verified

        Returns
        -------
        boolean
            true if dataset exists, otherwise false
        """
        from google.api_core.exceptions import NotFound

        try:
            self.client.get_dataset(self.client.dataset(dataset_id))
            return True
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)

    def create(self, dataset_id):
        """ Create a dataset in Google BigQuery

        Parameters
        ----------
        dataset : str
            Name of dataset to be written
        """
        from google.cloud.bigquery import Dataset

        if self.exists(dataset_id):
            raise DatasetCreationError(
                "Dataset {0} already " "exists".format(dataset_id)
            )
        dataset = Dataset(self.client.dataset(dataset_id))
        # Pin the dataset to the connector's location when one was given.
        if self.location is not None:
            dataset.location = self.location
        try:
            self.client.create_dataset(dataset)
        except self.http_error as ex:
            self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
generate_bq_schema
|
python
|
def generate_bq_schema(df, default_type="STRING"):
# deprecation TimeSeries, #11121
warnings.warn(
"generate_bq_schema is deprecated and will be removed in "
"a future version",
FutureWarning,
stacklevel=2,
)
return _generate_bq_schema(df, default_type=default_type)
|
DEPRECATED: Given a passed df, generate the associated Google BigQuery
schema.
Parameters
----------
df : DataFrame
default_type : string
The default big query type in case the type of the column
does not exist in the schema.
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L1163-L1182
|
[
"def _generate_bq_schema(df, default_type=\"STRING\"):\n \"\"\"DEPRECATED: Given a dataframe, generate a Google BigQuery schema.\n\n This is a private method, but was used in external code to work around\n issues in the default schema generation. Now that individual columns can\n be overridden: https://github.com/pydata/pandas-gbq/issues/218, this\n method can be removed after there is time to migrate away from this\n method. \"\"\"\n from pandas_gbq import schema\n\n return schema.generate_bq_schema(df, default_type=default_type)\n"
] |
import logging
import time
import warnings
from datetime import datetime
import numpy as np
try:
# The BigQuery Storage API client is an optional dependency. It is only
# required when use_bqstorage_api=True.
from google.cloud import bigquery_storage_v1beta1
except ImportError: # pragma: NO COVER
bigquery_storage_v1beta1 = None
from pandas_gbq.exceptions import AccessDenied
logger = logging.getLogger(__name__)
# Populated by _check_google_client_version(): the parsed version of the
# installed google-cloud-bigquery distribution.
BIGQUERY_INSTALLED_VERSION = None
# Deprecation-warning switches, set from the installed pandas version by
# _check_google_client_version().
SHOW_VERBOSE_DEPRECATION = False
SHOW_PRIVATE_KEY_DEPRECATION = False
PRIVATE_KEY_DEPRECATION_MESSAGE = (
    "private_key is deprecated and will be removed in a future version."
    "Use the credentials argument instead. See "
    "https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html "
    "for examples on using the credentials argument with service account keys."
)
# tqdm is optional; load_data falls back to plain logging when it is absent.
try:
    import tqdm  # noqa
except ImportError:
    tqdm = None
def _check_google_client_version():
    """Validate the installed google-cloud-bigquery release and set the
    module-level deprecation flags from the installed pandas version.

    Raises
    ------
    ImportError
        If setuptools (pkg_resources) is unavailable, or if
        google-cloud-bigquery is older than the minimum supported version.
    """
    global BIGQUERY_INSTALLED_VERSION, SHOW_VERBOSE_DEPRECATION, SHOW_PRIVATE_KEY_DEPRECATION
    try:
        import pkg_resources
    except ImportError:
        raise ImportError("Could not import pkg_resources (setuptools).")

    # https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/bigquery/CHANGELOG.md
    minimum_bigquery = pkg_resources.parse_version("1.9.0")
    BIGQUERY_INSTALLED_VERSION = pkg_resources.get_distribution(
        "google-cloud-bigquery"
    ).parsed_version
    if BIGQUERY_INSTALLED_VERSION < minimum_bigquery:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery >= {0}, "
            "current version {1}".format(
                minimum_bigquery, BIGQUERY_INSTALLED_VERSION
            )
        )

    # Which deprecation warnings apply depends on the pandas in use.
    # https://github.com/pydata/pandas-gbq/issues/157
    installed_pandas = pkg_resources.get_distribution("pandas").parsed_version
    SHOW_VERBOSE_DEPRECATION = installed_pandas >= pkg_resources.parse_version(
        "0.23.0"
    )
    SHOW_PRIVATE_KEY_DEPRECATION = (
        installed_pandas >= pkg_resources.parse_version("0.24.0")
    )
def _test_google_api_imports():
    """Fail fast with an ImportError that names the missing Google API
    dependency, then validate installed client versions."""
    try:
        import pydata_google_auth  # noqa
    except ImportError as err:
        raise ImportError(
            "pandas-gbq requires pydata-google-auth: {0}".format(err)
        )

    try:
        from google_auth_oauthlib.flow import InstalledAppFlow  # noqa
    except ImportError as err:
        raise ImportError(
            "pandas-gbq requires google-auth-oauthlib: {0}".format(err)
        )

    try:
        import google.auth  # noqa
    except ImportError as err:
        raise ImportError("pandas-gbq requires google-auth: {0}".format(err))

    try:
        from google.cloud import bigquery  # noqa
    except ImportError as err:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery: {0}".format(err)
        )

    _check_google_client_version()
class DatasetCreationError(ValueError):
    """Raised when creating a BigQuery dataset fails."""
class GenericGBQException(ValueError):
    """Raised when an unrecognized Google API error occurs."""
class InvalidColumnOrder(ValueError):
    """Raised when the requested column order does not match the schema
    of the results DataFrame returned by BigQuery."""
class InvalidIndexColumn(ValueError):
    """Raised when the requested index column does not match the schema
    of the results DataFrame returned by BigQuery."""
class InvalidPageToken(ValueError):
    """Raised when Google BigQuery fails to return a page token, or
    returns a duplicate one."""
class InvalidSchema(ValueError):
    """Raised when a DataFrame does not match the schema of the
    destination table in BigQuery."""
class NotFoundException(ValueError):
    """Raised when the requested project, dataset, or table cannot be
    found."""
class QueryTimeout(ValueError):
    """Raised when a query exceeds the timeoutMs value specified in the
    BigQuery job configuration."""
class TableCreationError(ValueError):
    """Raised when creating a BigQuery table fails."""
class Context(object):
    """Session-level cache of objects shared across pandas-gbq calls.

    One instance is created when the ``pandas_gbq`` module is imported
    and exposed as :attr:`pandas_gbq.context`.
    """

    def __init__(self):
        self._credentials = None
        self._project = None
        # Left as None so read_gbq can detect "dialect never set".
        self._dialect = None

    @property
    def credentials(self):
        """Credentials to use for Google APIs.

        These credentials are cached in memory automatically by calls to
        :func:`pandas_gbq.read_gbq` and :func:`pandas_gbq.to_gbq`. To set
        them manually, construct a
        :class:`google.auth.credentials.Credentials` object and assign it
        as shown below. See the `auth docs
        <http://google-auth.readthedocs.io/en/latest/user-guide.html#obtaining-credentials>`__
        for more information on obtaining credentials.

        Returns
        -------
        google.auth.credentials.Credentials

        Examples
        --------
        Manually setting the context credentials:

        >>> import pandas_gbq
        >>> from google.oauth2 import service_account
        >>> credentials = service_account.Credentials.from_service_account_file(
        ...     '/path/to/key.json',
        ... )
        >>> pandas_gbq.context.credentials = credentials
        """
        return self._credentials

    @credentials.setter
    def credentials(self, value):
        self._credentials = value

    @property
    def project(self):
        """Default project used for calls to Google APIs.

        Returns
        -------
        str

        Examples
        --------
        Manually setting the context project:

        >>> import pandas_gbq
        >>> pandas_gbq.context.project = 'my-project'
        """
        return self._project

    @project.setter
    def project(self, value):
        self._project = value

    @property
    def dialect(self):
        """Default SQL dialect used by :func:`pandas_gbq.read_gbq`.

        Either ``'legacy'`` (`BigQuery Legacy SQL
        <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__)
        or ``'standard'`` (SQL 2011-compliant `BigQuery Standard SQL
        <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__).

        Returns
        -------
        str

        Examples
        --------
        Setting the default syntax to standard:

        >>> import pandas_gbq
        >>> pandas_gbq.context.dialect = 'standard'
        """
        return self._dialect

    @dialect.setter
    def dialect(self, value):
        self._dialect = value
# Create an empty context, used to cache credentials.
# Module-level singleton: GbqConnector both reads from and writes back to
# this object when resolving credentials and the default project.
context = Context()
"""A :class:`pandas_gbq.Context` object used to cache credentials.
Credentials automatically are cached in-memory by :func:`pandas_gbq.read_gbq`
and :func:`pandas_gbq.to_gbq`.
"""
class GbqConnector(object):
    """Shared connection state and BigQuery helpers used by
    :func:`read_gbq` and :func:`to_gbq`: credential/project resolution,
    query execution, data loading and schema comparison.
    """

    def __init__(
        self,
        project_id,
        reauth=False,
        private_key=None,
        auth_local_webserver=False,
        dialect="standard",
        location=None,
        credentials=None,
        use_bqstorage_api=False,
    ):
        """Resolve credentials and project ID, then build the client.

        Resolution order for credentials: explicit ``credentials`` arg,
        then the module-level ``context`` cache, then a fresh lookup via
        ``pandas_gbq.auth`` (forced when ``private_key`` or ``reauth``
        is given).
        """
        global context
        from google.api_core.exceptions import GoogleAPIError
        from google.api_core.exceptions import ClientError
        from pandas_gbq import auth

        # Exception types that methods below treat as reportable HTTP
        # errors (re-raised via process_http_error).
        self.http_error = (ClientError, GoogleAPIError)
        self.project_id = project_id
        self.location = location
        self.reauth = reauth
        self.private_key = private_key
        self.auth_local_webserver = auth_local_webserver
        self.dialect = dialect
        self.credentials = credentials
        default_project = None

        # Service account credentials have a project associated with them.
        # Prefer that project if none was supplied.
        if self.project_id is None and hasattr(self.credentials, "project_id"):
            self.project_id = credentials.project_id

        # Load credentials from cache.
        if not self.credentials:
            self.credentials = context.credentials
            default_project = context.project

        # Credentials were explicitly asked for, so don't use the cache.
        if private_key or reauth or not self.credentials:
            self.credentials, default_project = auth.get_credentials(
                private_key=private_key,
                project_id=project_id,
                reauth=reauth,
                auth_local_webserver=auth_local_webserver,
            )

        if self.project_id is None:
            self.project_id = default_project

        if self.project_id is None:
            raise ValueError(
                "Could not determine project ID and one was not supplied."
            )

        # Cache the credentials if they haven't been set yet.
        if context.credentials is None:
            context.credentials = self.credentials
        if context.project is None:
            context.project = self.project_id

        self.client = self.get_client()
        self.bqstorage_client = _make_bqstorage_client(
            use_bqstorage_api, self.credentials
        )

        # BQ Queries costs $5 per TB. First 1 TB per month is free
        # see here for more: https://cloud.google.com/bigquery/pricing
        self.query_price_for_TB = 5.0 / 2 ** 40  # USD/TB

    def _start_timer(self):
        # Record wall-clock start time for elapsed-time reporting.
        self.start = time.time()

    def get_elapsed_seconds(self):
        """Return seconds elapsed since :meth:`_start_timer`, rounded to 2 dp."""
        return round(time.time() - self.start, 2)

    def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
        """Log elapsed time, but only when it exceeds ``overlong`` seconds."""
        sec = self.get_elapsed_seconds()
        if sec > overlong:
            logger.info("{} {} {}".format(prefix, sec, postfix))

    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    @staticmethod
    def sizeof_fmt(num, suffix="B"):
        """Format a byte count as a human-readable string, e.g. ``'1.5 MB'``."""
        fmt = "%3.1f %s%s"
        for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
            if abs(num) < 1024.0:
                return fmt % (num, unit, suffix)
            num /= 1024.0
        return fmt % (num, "Y", suffix)

    def get_client(self):
        """Construct a ``google.cloud.bigquery.Client`` for this connector."""
        from google.cloud import bigquery

        return bigquery.Client(
            project=self.project_id, credentials=self.credentials
        )

    @staticmethod
    def process_http_error(ex):
        """Re-raise a Google API error as :class:`GenericGBQException`."""
        # See `BigQuery Troubleshooting Errors
        # <https://cloud.google.com/bigquery/troubleshooting-errors>`__

        raise GenericGBQException("Reason: {0}".format(ex))

    def run_query(self, query, **kwargs):
        """Execute ``query`` and return the results as a DataFrame.

        ``kwargs['configuration']`` may carry a BigQuery job-config dict;
        a query string inside it is mutually exclusive with the ``query``
        parameter. Raises :class:`AccessDenied` on credential failures,
        :class:`QueryTimeout` when a configured ``timeoutMs`` elapses,
        and :class:`GenericGBQException` for other HTTP errors.
        """
        from concurrent.futures import TimeoutError
        from google.auth.exceptions import RefreshError
        from google.cloud import bigquery

        job_config = {
            "query": {
                "useLegacySql": self.dialect
                == "legacy"
                # 'allowLargeResults', 'createDisposition',
                # 'preserveNulls', destinationTable, useQueryCache
            }
        }
        config = kwargs.get("configuration")
        if config is not None:
            job_config.update(config)

            if "query" in config and "query" in config["query"]:
                if query is not None:
                    raise ValueError(
                        "Query statement can't be specified "
                        "inside config while it is specified "
                        "as parameter"
                    )
                query = config["query"].pop("query")

        self._start_timer()

        try:
            logger.debug("Requesting query... ")
            query_reply = self.client.query(
                query,
                job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
                location=self.location,
                project=self.project_id,
            )
            logger.debug("Query running...")
        except (RefreshError, ValueError):
            if self.private_key:
                raise AccessDenied(
                    "The service account credentials are not valid"
                )
            else:
                raise AccessDenied(
                    "The credentials have been revoked or expired, "
                    "please re-run the application to re-authorize"
                )
        except self.http_error as ex:
            self.process_http_error(ex)

        job_id = query_reply.job_id
        logger.debug("Job ID: %s" % job_id)

        # Poll until the job finishes, enforcing the (optional) client-side
        # timeoutMs ourselves so a progress message can be shown each second.
        while query_reply.state != "DONE":
            self.log_elapsed_seconds("  Elapsed", "s. Waiting...")

            timeout_ms = job_config["query"].get("timeoutMs")
            if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
                raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))

            timeout_sec = 1.0
            if timeout_ms:
                # Wait at most 1 second so we can show progress bar
                timeout_sec = min(1.0, timeout_ms / 1000.0)

            try:
                query_reply.result(timeout=timeout_sec)
            except TimeoutError:
                # Use our own timeout logic
                pass
            except self.http_error as ex:
                self.process_http_error(ex)

        if query_reply.cache_hit:
            logger.debug("Query done.\nCache hit.\n")
        else:
            bytes_processed = query_reply.total_bytes_processed or 0
            bytes_billed = query_reply.total_bytes_billed or 0
            logger.debug(
                "Query done.\nProcessed: {} Billed: {}".format(
                    self.sizeof_fmt(bytes_processed),
                    self.sizeof_fmt(bytes_billed),
                )
            )
            logger.debug(
                "Standard price: ${:,.2f} USD\n".format(
                    bytes_billed * self.query_price_for_TB
                )
            )

        try:
            rows_iter = query_reply.result()
        except self.http_error as ex:
            self.process_http_error(ex)

        schema_fields = [field.to_api_repr() for field in rows_iter.schema]
        nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
        df = rows_iter.to_dataframe(
            dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
        )
        if df.empty:
            df = _cast_empty_df_dtypes(schema_fields, df)

        # Ensure any TIMESTAMP columns are tz-aware.
        df = _localize_df(schema_fields, df)

        logger.debug("Got {} rows.\n".format(rows_iter.total_rows))
        return df

    def load_data(
        self,
        dataframe,
        dataset_id,
        table_id,
        chunksize=None,
        schema=None,
        progress_bar=True,
    ):
        """Load ``dataframe`` into ``dataset_id.table_id``, optionally in
        chunks, logging progress (with tqdm when available)."""
        from pandas_gbq import load

        total_rows = len(dataframe)

        try:
            chunks = load.load_chunks(
                self.client,
                dataframe,
                dataset_id,
                table_id,
                chunksize=chunksize,
                schema=schema,
                location=self.location,
            )
            if progress_bar and tqdm:
                chunks = tqdm.tqdm(chunks)
            for remaining_rows in chunks:
                logger.info(
                    "\r{} out of {} rows loaded.".format(
                        total_rows - remaining_rows, total_rows
                    )
                )
        except self.http_error as ex:
            self.process_http_error(ex)

    def schema(self, dataset_id, table_id):
        """Retrieve the schema of the table

        Obtain from BigQuery the field names and field types
        for the table defined by the parameters

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table

        Returns
        -------
        list of dicts
            Fields representing the schema
        """
        table_ref = self.client.dataset(dataset_id).table(table_id)

        try:
            table = self.client.get_table(table_ref)
            remote_schema = table.schema

            remote_fields = [
                field_remote.to_api_repr() for field_remote in remote_schema
            ]
            # Normalize case so comparisons against generated schemas work.
            for field in remote_fields:
                field["type"] = field["type"].upper()
                field["mode"] = field["mode"].upper()

            return remote_fields
        except self.http_error as ex:
            self.process_http_error(ex)

    def _clean_schema_fields(self, fields):
        """Return a sanitized version of the schema for comparisons."""
        fields_sorted = sorted(fields, key=lambda field: field["name"])
        # Ignore mode and description when comparing schemas.
        return [
            {"name": field["name"], "type": field["type"]}
            for field in fields_sorted
        ]

    def verify_schema(self, dataset_id, table_id, schema):
        """Indicate whether schemas match exactly

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether all fields in the former
        are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id :str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the schemas match
        """

        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return fields_remote == fields_local

    def schema_is_subset(self, dataset_id, table_id, schema):
        """Indicate whether the schema to be uploaded is a subset

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether a subset of the fields in
        the former are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the passed schema is a subset
        """

        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return all(field in fields_remote for field in fields_local)

    def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
        """Drop the table and recreate it with ``table_schema`` (used by
        ``to_gbq(..., if_exists='replace')``)."""
        table = _Table(
            self.project_id, dataset_id, credentials=self.credentials
        )
        table.delete(table_id)
        table.create(table_id, table_schema)
def _bqschema_to_nullsafe_dtypes(schema_fields):
"""Specify explicit dtypes based on BigQuery schema.
This function only specifies a dtype when the dtype allows nulls.
Otherwise, use pandas's default dtype choice.
See: http://pandas.pydata.org/pandas-docs/dev/missing_data.html
#missing-data-casting-rules-and-indexing
"""
# If you update this mapping, also update the table at
# `docs/source/reading.rst`.
dtype_map = {
"FLOAT": np.dtype(float),
# pandas doesn't support timezone-aware dtype in DataFrame/Series
# constructors. It's more idiomatic to localize after construction.
# https://github.com/pandas-dev/pandas/issues/25843
"TIMESTAMP": "datetime64[ns]",
"TIME": "datetime64[ns]",
"DATE": "datetime64[ns]",
"DATETIME": "datetime64[ns]",
}
dtypes = {}
for field in schema_fields:
name = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
dtypes[name] = dtype
return dtypes
def _cast_empty_df_dtypes(schema_fields, df):
"""Cast any columns in an empty dataframe to correct type.
In an empty dataframe, pandas cannot choose a dtype unless one is
explicitly provided. The _bqschema_to_nullsafe_dtypes() function only
provides dtypes when the dtype safely handles null values. This means
that empty int64 and boolean columns are incorrectly classified as
``object``.
"""
if not df.empty:
raise ValueError(
"DataFrame must be empty in order to cast non-nullsafe dtypes"
)
dtype_map = {"BOOLEAN": bool, "INTEGER": np.int64}
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
df[column] = df[column].astype(dtype)
return df
def _localize_df(schema_fields, df):
"""Localize any TIMESTAMP columns to tz-aware type.
In pandas versions before 0.24.0, DatetimeTZDtype cannot be used as the
dtype in Series/DataFrame construction, so localize those columns after
the DataFrame is constructed.
"""
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
if field["type"].upper() == "TIMESTAMP" and df[column].dt.tz is None:
df[column] = df[column].dt.tz_localize("UTC")
return df
def _make_bqstorage_client(use_bqstorage_api, credentials):
if not use_bqstorage_api:
return None
if bigquery_storage_v1beta1 is None:
raise ImportError(
"Install the google-cloud-bigquery-storage and fastavro packages "
"to use the BigQuery Storage API."
)
return bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=credentials
)
def read_gbq(
    query,
    project_id=None,
    index_col=None,
    col_order=None,
    reauth=False,
    auth_local_webserver=False,
    dialect=None,
    location=None,
    configuration=None,
    credentials=None,
    use_bqstorage_api=False,
    verbose=None,
    private_key=None,
):
    r"""Load data from Google BigQuery using google-cloud-python

    The main method a user calls to execute a Query in Google BigQuery
    and read results into a pandas DataFrame.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    query : str
        SQL-Like Query to return data values.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    index_col : str, optional
        Name of result column to use for index in results DataFrame.
    col_order : list(str), optional
        List of BigQuery column names in the desired order for results
        DataFrame.
    reauth : boolean, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    auth_local_webserver : boolean, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    dialect : str, default 'standard'
        Note: The default value changed to 'standard' in version 0.10.0.

        SQL syntax dialect to use. Value can be one of:

        ``'legacy'``
            Use BigQuery's legacy SQL dialect. For more information see
            `BigQuery Legacy SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
        ``'standard'``
            Use BigQuery's standard SQL, which is
            compliant with the SQL 2011 standard. For more information
            see `BigQuery Standard SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
    location : str, optional
        Location where the query job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of any
        datasets used in the query.

        .. versionadded:: 0.5.0
    configuration : dict, optional
        Query config parameters for job processing.
        For example:

            configuration = {'query': {'useQueryCache': False}}

        For more information see `BigQuery REST API Reference
        <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        .. versionadded:: 0.8.0
    use_bqstorage_api : bool, default False
        Use the `BigQuery Storage API
        <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
        download query results quickly, but at an increased cost. To use this
        API, first `enable it in the Cloud Console
        <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
        You must also have the `bigquery.readsessions.create
        <https://cloud.google.com/bigquery/docs/access-control#roles>`__
        permission on the project you are billing queries to.

        **Note:** Due to a `known issue in the ``google-cloud-bigquery``
        package
        <https://github.com/googleapis/google-cloud-python/pull/7633>`__
        (fixed in version 1.11.0), you must write your query results to a
        destination table. To do this with ``read_gbq``, supply a
        ``configuration`` dictionary.

        This feature requires the ``google-cloud-bigquery-storage`` and
        ``fastavro`` packages.

        .. versionadded:: 0.10.0
    verbose : None, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    private_key : str, deprecated
        Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
        parameter and
        :func:`google.oauth2.service_account.Credentials.from_service_account_info`
        or
        :func:`google.oauth2.service_account.Credentials.from_service_account_file`
        instead.

        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).

    Returns
    -------
    df: DataFrame
        DataFrame representing results of query.
    """
    global context

    # Fall back to the session-wide default dialect, then to standard SQL.
    if dialect is None:
        dialect = context.dialect

    if dialect is None:
        dialect = "standard"

    _test_google_api_imports()

    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=2,
        )

    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )

    if dialect not in ("legacy", "standard"):
        raise ValueError("'{0}' is not valid for dialect".format(dialect))

    connector = GbqConnector(
        project_id,
        reauth=reauth,
        dialect=dialect,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
        use_bqstorage_api=use_bqstorage_api,
    )

    final_df = connector.run_query(query, configuration=configuration)

    # Reindex the DataFrame on the provided column
    if index_col is not None:
        if index_col in final_df.columns:
            final_df.set_index(index_col, inplace=True)
        else:
            raise InvalidIndexColumn(
                'Index column "{0}" does not exist in DataFrame.'.format(
                    index_col
                )
            )

    # Change the order of columns in the DataFrame based on provided list
    if col_order is not None:
        if sorted(col_order) == sorted(final_df.columns):
            final_df = final_df[col_order]
        else:
            raise InvalidColumnOrder(
                "Column order does not match this DataFrame."
            )

    connector.log_elapsed_seconds(
        "Total time taken",
        datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
    )

    return final_df
def to_gbq(
    dataframe,
    destination_table,
    project_id=None,
    chunksize=None,
    reauth=False,
    if_exists="fail",
    auth_local_webserver=False,
    table_schema=None,
    location=None,
    progress_bar=True,
    credentials=None,
    verbose=None,
    private_key=None,
):
    """Write a DataFrame to a Google BigQuery table.

    The main method a user calls to export pandas DataFrame contents to
    Google BigQuery table.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        DataFrame to be written to a Google BigQuery table.
    destination_table : str
        Name of table to be written, in the form ``dataset.tablename``.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    chunksize : int, optional
        Number of rows to be inserted in each chunk from the dataframe.
        Set to ``None`` to load the whole dataframe at once.
    reauth : bool, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    if_exists : str, default 'fail'
        Behavior when the destination table exists. Value can be one of:

        ``'fail'``
            If table exists, do nothing.
        ``'replace'``
            If table exists, drop it, recreate it, and insert data.
        ``'append'``
            If table exists, insert data. Create if does not exist.
    auth_local_webserver : bool, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    table_schema : list of dicts, optional
        List of BigQuery table fields to which according DataFrame
        columns conform to, e.g. ``[{'name': 'col1', 'type':
        'STRING'},...]``.

        - If ``table_schema`` is provided, it may contain all or a subset of
          DataFrame columns. If a subset is provided, the rest will be
          inferred from the DataFrame dtypes.
        - If ``table_schema`` is **not** provided, it will be
          generated according to dtypes of DataFrame columns. See
          `Inferring the Table Schema
          <https://pandas-gbq.readthedocs.io/en/latest/writing.html#writing-schema>`__.
          for a description of the schema inference.

        See `BigQuery API documentation on valid column names
        <https://cloud.google.com/bigquery/docs/schemas#column_names`>__.

        .. versionadded:: 0.3.1
    location : str, optional
        Location where the load job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of the
        target dataset.

        .. versionadded:: 0.5.0
    progress_bar : bool, default True
        Use the library `tqdm` to show the progress bar for the upload,
        chunk by chunk.

        .. versionadded:: 0.5.0
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        .. versionadded:: 0.8.0
    verbose : bool, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    private_key : str, deprecated
        Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
        parameter and
        :func:`google.oauth2.service_account.Credentials.from_service_account_info`
        or
        :func:`google.oauth2.service_account.Credentials.from_service_account_file`
        instead.

        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).
    """

    _test_google_api_imports()

    from pandas_gbq import schema

    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=1,
        )

    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )

    if if_exists not in ("fail", "replace", "append"):
        raise ValueError("'{0}' is not valid for if_exists".format(if_exists))

    if "." not in destination_table:
        raise NotFoundException(
            "Invalid Table Name. Should be of the form 'datasetId.tableId' "
        )

    connector = GbqConnector(
        project_id,
        reauth=reauth,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
    )
    dataset_id, table_id = destination_table.rsplit(".", 1)

    table = _Table(
        project_id,
        dataset_id,
        location=location,
        credentials=connector.credentials,
    )

    # Merge any user-supplied schema entries over the dtype-inferred one.
    default_schema = _generate_bq_schema(dataframe)
    if not table_schema:
        table_schema = default_schema
    else:
        table_schema = schema.update_schema(
            default_schema, dict(fields=table_schema)
        )

    # If table exists, check if_exists parameter
    if table.exists(table_id):
        if if_exists == "fail":
            raise TableCreationError(
                "Could not create the table because it "
                "already exists. "
                "Change the if_exists parameter to "
                "'append' or 'replace' data."
            )
        elif if_exists == "replace":
            connector.delete_and_recreate_table(
                dataset_id, table_id, table_schema
            )
        elif if_exists == "append":
            if not connector.schema_is_subset(
                dataset_id, table_id, table_schema
            ):
                raise InvalidSchema(
                    "Please verify that the structure and "
                    "data types in the DataFrame match the "
                    "schema of the destination table."
                )
    else:
        table.create(table_id, table_schema)

    if dataframe.empty:
        # Create the table (if needed), but don't try to run a load job with an
        # empty file. See: https://github.com/pydata/pandas-gbq/issues/237
        return

    connector.load_data(
        dataframe,
        dataset_id,
        table_id,
        chunksize=chunksize,
        schema=table_schema,
        progress_bar=progress_bar,
    )
def _generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Given a dataframe, generate a Google BigQuery schema.

    This is a private method, but was used in external code to work around
    issues in the default schema generation. Now that individual columns can
    be overridden: https://github.com/pydata/pandas-gbq/issues/218, this
    method can be removed after there is time to migrate away from this
    method. """
    from pandas_gbq import schema as pandas_gbq_schema

    return pandas_gbq_schema.generate_bq_schema(df, default_type=default_type)
class _Table(GbqConnector):
    """Connector scoped to one dataset, offering table-level
    exists/create/delete operations."""

    def __init__(
        self,
        project_id,
        dataset_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        # All table operations below act within this dataset.
        self.dataset_id = dataset_id
        super(_Table, self).__init__(
            project_id,
            reauth,
            location=location,
            credentials=credentials,
            private_key=private_key,
        )

    def exists(self, table_id):
        """ Check if a table exists in Google BigQuery

        Parameters
        ----------
        table : str
            Name of table to be verified

        Returns
        -------
        boolean
            true if table exists, otherwise false
        """
        from google.api_core.exceptions import NotFound

        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.get_table(table_ref)
            return True
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)

    def create(self, table_id, schema):
        """ Create a table in Google BigQuery given a table and schema

        Parameters
        ----------
        table : str
            Name of table to be written
        schema : str
            Use the generate_bq_schema to generate your table schema from a
            dataframe.
        """
        from google.cloud.bigquery import SchemaField
        from google.cloud.bigquery import Table

        if self.exists(table_id):
            raise TableCreationError(
                "Table {0} already " "exists".format(table_id)
            )

        # Lazily create the enclosing dataset on first table creation.
        if not _Dataset(self.project_id, credentials=self.credentials).exists(
            self.dataset_id
        ):
            _Dataset(
                self.project_id,
                credentials=self.credentials,
                location=self.location,
            ).create(self.dataset_id)

        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        table = Table(table_ref)

        # Manually create the schema objects, adding NULLABLE mode
        # as a workaround for
        # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
        # NOTE(review): this mutates the caller's ``schema`` dict in place --
        # confirm no caller relies on it remaining unmodified.
        for field in schema["fields"]:
            if "mode" not in field:
                field["mode"] = "NULLABLE"

        table.schema = [
            SchemaField.from_api_repr(field) for field in schema["fields"]
        ]

        try:
            self.client.create_table(table)
        except self.http_error as ex:
            self.process_http_error(ex)

    def delete(self, table_id):
        """ Delete a table in Google BigQuery

        Parameters
        ----------
        table : str
            Name of table to be deleted
        """
        from google.api_core.exceptions import NotFound

        if not self.exists(table_id):
            raise NotFoundException("Table does not exist")

        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.delete_table(table_ref)
        except NotFound:
            # Ignore 404 error which may occur if table already deleted
            pass
        except self.http_error as ex:
            self.process_http_error(ex)
class _Dataset(GbqConnector):
    """Connector offering dataset-level exists/create operations."""

    def __init__(
        self,
        project_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        super(_Dataset, self).__init__(
            project_id,
            reauth,
            credentials=credentials,
            location=location,
            private_key=private_key,
        )

    def exists(self, dataset_id):
        """ Check if a dataset exists in Google BigQuery

        Parameters
        ----------
        dataset_id : str
            Name of dataset to be verified

        Returns
        -------
        boolean
            true if dataset exists, otherwise false
        """
        from google.api_core.exceptions import NotFound

        try:
            self.client.get_dataset(self.client.dataset(dataset_id))
            return True
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)

    def create(self, dataset_id):
        """ Create a dataset in Google BigQuery

        Parameters
        ----------
        dataset : str
            Name of dataset to be written
        """
        from google.cloud.bigquery import Dataset

        if self.exists(dataset_id):
            raise DatasetCreationError(
                "Dataset {0} already " "exists".format(dataset_id)
            )
        dataset = Dataset(self.client.dataset(dataset_id))

        # Datasets are pinned to a location at creation time.
        if self.location is not None:
            dataset.location = self.location

        try:
            self.client.create_dataset(dataset)
        except self.http_error as ex:
            self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
_generate_bq_schema
|
python
|
def _generate_bq_schema(df, default_type="STRING"):
from pandas_gbq import schema
return schema.generate_bq_schema(df, default_type=default_type)
|
DEPRECATED: Given a dataframe, generate a Google BigQuery schema.
This is a private method, but was used in external code to work around
issues in the default schema generation. Now that individual columns can
be overridden: https://github.com/pydata/pandas-gbq/issues/218, this
method can be removed after there is time to migrate away from this
method.
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L1185-L1195
|
[
"def generate_bq_schema(dataframe, default_type=\"STRING\"):\n \"\"\"Given a passed dataframe, generate the associated Google BigQuery schema.\n\n Arguments:\n dataframe (pandas.DataFrame): D\n default_type : string\n The default big query type in case the type of the column\n does not exist in the schema.\n \"\"\"\n\n # If you update this mapping, also update the table at\n # `docs/source/writing.rst`.\n type_mapping = {\n \"i\": \"INTEGER\",\n \"b\": \"BOOLEAN\",\n \"f\": \"FLOAT\",\n \"O\": \"STRING\",\n \"S\": \"STRING\",\n \"U\": \"STRING\",\n \"M\": \"TIMESTAMP\",\n }\n\n fields = []\n for column_name, dtype in dataframe.dtypes.iteritems():\n fields.append(\n {\n \"name\": column_name,\n \"type\": type_mapping.get(dtype.kind, default_type),\n }\n )\n\n return {\"fields\": fields}\n"
] |
import logging
import time
import warnings
from datetime import datetime
import numpy as np
try:
# The BigQuery Storage API client is an optional dependency. It is only
# required when use_bqstorage_api=True.
from google.cloud import bigquery_storage_v1beta1
except ImportError: # pragma: NO COVER
bigquery_storage_v1beta1 = None
from pandas_gbq.exceptions import AccessDenied
logger = logging.getLogger(__name__)
BIGQUERY_INSTALLED_VERSION = None
SHOW_VERBOSE_DEPRECATION = False
SHOW_PRIVATE_KEY_DEPRECATION = False
PRIVATE_KEY_DEPRECATION_MESSAGE = (
"private_key is deprecated and will be removed in a future version."
"Use the credentials argument instead. See "
"https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html "
"for examples on using the credentials argument with service account keys."
)
try:
import tqdm # noqa
except ImportError:
tqdm = None
def _check_google_client_version():
    """Validate installed google-cloud-bigquery and pandas versions.

    Sets the module-level flags that control which deprecation warnings
    are shown to the user.

    Raises
    ------
    ImportError
        If pkg_resources is unavailable or google-cloud-bigquery is older
        than the minimum supported version.
    """
    global BIGQUERY_INSTALLED_VERSION, SHOW_VERBOSE_DEPRECATION, SHOW_PRIVATE_KEY_DEPRECATION
    try:
        import pkg_resources
    except ImportError:
        raise ImportError("Could not import pkg_resources (setuptools).")
    # https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/bigquery/CHANGELOG.md
    bigquery_minimum_version = pkg_resources.parse_version("1.9.0")
    BIGQUERY_INSTALLED_VERSION = pkg_resources.get_distribution(
        "google-cloud-bigquery"
    ).parsed_version
    if BIGQUERY_INSTALLED_VERSION < bigquery_minimum_version:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery >= {0}, "
            "current version {1}".format(
                bigquery_minimum_version, BIGQUERY_INSTALLED_VERSION
            )
        )
    # Add check for Pandas version before showing deprecation warning.
    # https://github.com/pydata/pandas-gbq/issues/157
    pandas_installed_version = pkg_resources.get_distribution(
        "pandas"
    ).parsed_version
    pandas_version_wo_verbosity = pkg_resources.parse_version("0.23.0")
    # pandas >= 0.23 stopped passing `verbose`, so warn users who still do.
    SHOW_VERBOSE_DEPRECATION = (
        pandas_installed_version >= pandas_version_wo_verbosity
    )
    pandas_version_with_credentials_arg = pkg_resources.parse_version("0.24.0")
    # pandas >= 0.24 supports `credentials`; `private_key` is deprecated then.
    SHOW_PRIVATE_KEY_DEPRECATION = (
        pandas_installed_version >= pandas_version_with_credentials_arg
    )
def _test_google_api_imports():
    """Verify that every required Google API dependency is importable.

    Raises
    ------
    ImportError
        Naming the first missing dependency, so the user gets an
        actionable message instead of a bare import failure deeper in
        the call stack.
    """
    try:
        import pydata_google_auth  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires pydata-google-auth: {0}".format(ex)
        )
    try:
        from google_auth_oauthlib.flow import InstalledAppFlow  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires google-auth-oauthlib: {0}".format(ex)
        )
    try:
        import google.auth  # noqa
    except ImportError as ex:
        raise ImportError("pandas-gbq requires google-auth: {0}".format(ex))
    try:
        from google.cloud import bigquery  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery: {0}".format(ex)
        )
    # Imports succeeded; also enforce the minimum supported versions.
    _check_google_client_version()
class DatasetCreationError(ValueError):
    """Raised when the create dataset method fails."""
class GenericGBQException(ValueError):
    """Raised when an unrecognized Google API Error occurs."""
class InvalidColumnOrder(ValueError):
    """Raised when the provided column order for the output results
    DataFrame does not match the schema returned by BigQuery."""
class InvalidIndexColumn(ValueError):
    """Raised when the provided index column for the output results
    DataFrame does not match the schema returned by BigQuery."""
class InvalidPageToken(ValueError):
    """Raised when Google BigQuery fails to return, or returns a
    duplicate, page token."""
class InvalidSchema(ValueError):
    """Raised when the provided DataFrame does not match the schema of
    the destination table in BigQuery."""
class NotFoundException(ValueError):
    """Raised when the project_id, table, or dataset provided in the
    query could not be found."""
class QueryTimeout(ValueError):
    """Raised when the query request exceeds the timeoutMs value
    specified in the BigQuery configuration."""
class TableCreationError(ValueError):
    """Raised when the create table method fails."""
class Context(object):
    """Session-wide storage for objects shared across calls.

    A single instance is created when the ``pandas_gbq`` module is
    imported and is available as :attr:`pandas_gbq.context`.
    """

    def __init__(self):
        # Backing fields for the properties below; nothing is set yet.
        self._credentials = None
        self._project = None
        # dialect defaults to None so that read_gbq can stop warning if set.
        self._dialect = None

    @property
    def credentials(self):
        """google.auth.credentials.Credentials : Credentials for Google APIs.

        These credentials are cached automatically by calls to
        :func:`pandas_gbq.read_gbq` and :func:`pandas_gbq.to_gbq`.
        To override, construct a
        :class:`google.auth.credentials.Credentials` object and assign
        it, e.g.::

            import pandas_gbq
            from google.oauth2 import service_account
            credentials = service_account.Credentials.from_service_account_file(
                '/path/to/key.json',
            )
            pandas_gbq.context.credentials = credentials

        See the google-auth user guide for details on obtaining
        credentials: http://google-auth.readthedocs.io
        /en/latest/user-guide.html#obtaining-credentials
        """
        return self._credentials

    @credentials.setter
    def credentials(self, value):
        self._credentials = value

    @property
    def project(self):
        """str : Default project to use for calls to Google APIs.

        Example::

            import pandas_gbq
            pandas_gbq.context.project = 'my-project'
        """
        return self._project

    @project.setter
    def project(self, value):
        self._project = value

    @property
    def dialect(self):
        """str : Default SQL dialect used by :func:`pandas_gbq.read_gbq`.

        Either ``'legacy'`` (BigQuery legacy SQL; see
        https://cloud.google.com/bigquery/docs/reference/legacy-sql) or
        ``'standard'`` (SQL 2011-compliant standard SQL; see
        https://cloud.google.com/bigquery/docs/reference/standard-sql/).

        Example::

            import pandas_gbq
            pandas_gbq.context.dialect = 'standard'
        """
        return self._dialect

    @dialect.setter
    def dialect(self, value):
        self._dialect = value
# Create an empty context, used to cache credentials.
context = Context()
"""A :class:`pandas_gbq.Context` object used to cache credentials.
Credentials automatically are cached in-memory by :func:`pandas_gbq.read_gbq`
and :func:`pandas_gbq.to_gbq`.
"""
class GbqConnector(object):
def __init__(
    self,
    project_id,
    reauth=False,
    private_key=None,
    auth_local_webserver=False,
    dialect="standard",
    location=None,
    credentials=None,
    use_bqstorage_api=False,
):
    """Resolve credentials/project and construct the BigQuery client.

    Credential resolution order: the explicit ``credentials`` argument,
    then the module-level cache in ``context``, then a fresh auth flow
    (always fresh when ``private_key`` or ``reauth`` is given).

    Raises
    ------
    ValueError
        If no project ID was supplied and none could be determined.
    """
    global context
    from google.api_core.exceptions import GoogleAPIError
    from google.api_core.exceptions import ClientError
    from pandas_gbq import auth
    # Exception types treated as "HTTP errors" by process_http_error().
    self.http_error = (ClientError, GoogleAPIError)
    self.project_id = project_id
    self.location = location
    self.reauth = reauth
    self.private_key = private_key
    self.auth_local_webserver = auth_local_webserver
    self.dialect = dialect
    self.credentials = credentials
    default_project = None
    # Service account credentials have a project associated with them.
    # Prefer that project if none was supplied.
    if self.project_id is None and hasattr(self.credentials, "project_id"):
        self.project_id = credentials.project_id
    # Load credentials from cache.
    if not self.credentials:
        self.credentials = context.credentials
        default_project = context.project
    # Credentials were explicitly asked for, so don't use the cache.
    if private_key or reauth or not self.credentials:
        self.credentials, default_project = auth.get_credentials(
            private_key=private_key,
            project_id=project_id,
            reauth=reauth,
            auth_local_webserver=auth_local_webserver,
        )
    if self.project_id is None:
        self.project_id = default_project
    if self.project_id is None:
        raise ValueError(
            "Could not determine project ID and one was not supplied."
        )
    # Cache the credentials if they haven't been set yet.
    if context.credentials is None:
        context.credentials = self.credentials
    if context.project is None:
        context.project = self.project_id
    self.client = self.get_client()
    self.bqstorage_client = _make_bqstorage_client(
        use_bqstorage_api, self.credentials
    )
    # BQ Queries costs $5 per TB. First 1 TB per month is free
    # see here for more: https://cloud.google.com/bigquery/pricing
    self.query_price_for_TB = 5.0 / 2 ** 40  # USD/TB
def _start_timer(self):
    # Record the wall-clock start time used by get_elapsed_seconds().
    self.start = time.time()
def get_elapsed_seconds(self):
    """Return seconds elapsed since _start_timer(), rounded to 2 places."""
    elapsed = time.time() - self.start
    return round(elapsed, 2)
def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
    """Log the elapsed time, but only when it exceeds *overlong* seconds."""
    sec = self.get_elapsed_seconds()
    if sec > overlong:
        logger.info("{} {} {}".format(prefix, sec, postfix))
# Adapted from: http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
@staticmethod
def sizeof_fmt(num, suffix="B"):
    """Format *num* bytes as a human-readable string, e.g. '1.5 MB'."""
    template = "%3.1f %s%s"
    for prefix in ("", "K", "M", "G", "T", "P", "E", "Z"):
        if abs(num) < 1024.0:
            return template % (num, prefix, suffix)
        num /= 1024.0
    # Anything >= 1024**8 falls through to the yotta prefix.
    return template % (num, "Y", suffix)
def get_client(self):
    """Construct a google.cloud.bigquery.Client for this connector."""
    from google.cloud import bigquery
    return bigquery.Client(
        project=self.project_id, credentials=self.credentials
    )
@staticmethod
def process_http_error(ex):
    """Re-raise a Google API error as a GenericGBQException."""
    # See `BigQuery Troubleshooting Errors
    # <https://cloud.google.com/bigquery/troubleshooting-errors>`__
    raise GenericGBQException("Reason: {0}".format(ex))
def run_query(self, query, **kwargs):
    """Execute *query* and return the results as a DataFrame.

    Parameters
    ----------
    query : str
        SQL query text. May instead be supplied inside
        ``kwargs['configuration']['query']['query']`` (but not both).
    **kwargs
        ``configuration`` : dict, optional
            Raw BigQuery job configuration merged over the defaults.

    Returns
    -------
    pandas.DataFrame

    Raises
    ------
    AccessDenied
        If credentials are invalid, revoked, or expired.
    QueryTimeout
        If the job exceeds the configured ``timeoutMs``.
    ValueError
        If the query is given both as a parameter and in the config.
    """
    from concurrent.futures import TimeoutError
    from google.auth.exceptions import RefreshError
    from google.cloud import bigquery
    job_config = {
        "query": {
            "useLegacySql": self.dialect
            == "legacy"
            # 'allowLargeResults', 'createDisposition',
            # 'preserveNulls', destinationTable, useQueryCache
        }
    }
    config = kwargs.get("configuration")
    if config is not None:
        job_config.update(config)
        if "query" in config and "query" in config["query"]:
            if query is not None:
                raise ValueError(
                    "Query statement can't be specified "
                    "inside config while it is specified "
                    "as parameter"
                )
            query = config["query"].pop("query")
    self._start_timer()
    try:
        logger.debug("Requesting query... ")
        query_reply = self.client.query(
            query,
            job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
            location=self.location,
            project=self.project_id,
        )
        logger.debug("Query running...")
    except (RefreshError, ValueError):
        if self.private_key:
            raise AccessDenied(
                "The service account credentials are not valid"
            )
        else:
            raise AccessDenied(
                "The credentials have been revoked or expired, "
                "please re-run the application to re-authorize"
            )
    except self.http_error as ex:
        self.process_http_error(ex)
    job_id = query_reply.job_id
    logger.debug("Job ID: %s" % job_id)
    # Poll until the job is DONE, enforcing the optional timeoutMs
    # ourselves so progress can be reported while waiting.
    while query_reply.state != "DONE":
        self.log_elapsed_seconds(" Elapsed", "s. Waiting...")
        timeout_ms = job_config["query"].get("timeoutMs")
        if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
            raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))
        timeout_sec = 1.0
        if timeout_ms:
            # Wait at most 1 second so we can show progress bar
            timeout_sec = min(1.0, timeout_ms / 1000.0)
        try:
            query_reply.result(timeout=timeout_sec)
        except TimeoutError:
            # Use our own timeout logic
            pass
        except self.http_error as ex:
            self.process_http_error(ex)
    if query_reply.cache_hit:
        logger.debug("Query done.\nCache hit.\n")
    else:
        bytes_processed = query_reply.total_bytes_processed or 0
        bytes_billed = query_reply.total_bytes_billed or 0
        logger.debug(
            "Query done.\nProcessed: {} Billed: {}".format(
                self.sizeof_fmt(bytes_processed),
                self.sizeof_fmt(bytes_billed),
            )
        )
        logger.debug(
            "Standard price: ${:,.2f} USD\n".format(
                bytes_billed * self.query_price_for_TB
            )
        )
    try:
        rows_iter = query_reply.result()
    except self.http_error as ex:
        self.process_http_error(ex)
    schema_fields = [field.to_api_repr() for field in rows_iter.schema]
    # Use null-safe dtypes so nullable columns keep correct types.
    nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
    df = rows_iter.to_dataframe(
        dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
    )
    if df.empty:
        df = _cast_empty_df_dtypes(schema_fields, df)
    # Ensure any TIMESTAMP columns are tz-aware.
    df = _localize_df(schema_fields, df)
    logger.debug("Got {} rows.\n".format(rows_iter.total_rows))
    return df
def load_data(
    self,
    dataframe,
    dataset_id,
    table_id,
    chunksize=None,
    schema=None,
    progress_bar=True,
):
    """Load *dataframe* into ``dataset_id.table_id``, chunk by chunk.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Data to upload.
    dataset_id : str
        Destination dataset.
    table_id : str
        Destination table.
    chunksize : int, optional
        Rows per load job; ``None`` loads everything at once.
    schema : dict, optional
        BigQuery schema to use for the load.
    progress_bar : bool, default True
        Show a tqdm progress bar when tqdm is installed.
    """
    from pandas_gbq import load
    total_rows = len(dataframe)
    try:
        chunks = load.load_chunks(
            self.client,
            dataframe,
            dataset_id,
            table_id,
            chunksize=chunksize,
            schema=schema,
            location=self.location,
        )
        if progress_bar and tqdm:
            chunks = tqdm.tqdm(chunks)
        # load_chunks yields the number of rows still left to load.
        for remaining_rows in chunks:
            logger.info(
                "\r{} out of {} rows loaded.".format(
                    total_rows - remaining_rows, total_rows
                )
            )
    except self.http_error as ex:
        self.process_http_error(ex)
def schema(self, dataset_id, table_id):
    """Retrieve the schema of the table.

    Obtain from BigQuery the field names and field types
    for the table defined by the parameters.

    Parameters
    ----------
    dataset_id : str
        Name of the BigQuery dataset for the table
    table_id : str
        Name of the BigQuery table

    Returns
    -------
    list of dicts
        Fields representing the schema
    """
    table_ref = self.client.dataset(dataset_id).table(table_id)
    try:
        table = self.client.get_table(table_ref)
        remote_schema = table.schema
        remote_fields = [
            field_remote.to_api_repr() for field_remote in remote_schema
        ]
        # Normalize case so downstream schema comparisons are
        # case-insensitive.
        for field in remote_fields:
            field["type"] = field["type"].upper()
            field["mode"] = field["mode"].upper()
        return remote_fields
    except self.http_error as ex:
        self.process_http_error(ex)
def _clean_schema_fields(self, fields):
    """Return a sanitized version of the schema for comparisons.

    Only name and type matter for schema equality; mode and description
    are dropped and fields are sorted by name.
    """
    return [
        {"name": entry["name"], "type": entry["type"]}
        for entry in sorted(fields, key=lambda entry: entry["name"])
    ]
def verify_schema(self, dataset_id, table_id, schema):
    """Indicate whether schemas match exactly.

    Compare the BigQuery table identified in the parameters with
    the schema passed in and indicate whether all fields in the former
    are present in the latter. Order is not considered.

    Parameters
    ----------
    dataset_id : str
        Name of the BigQuery dataset for the table
    table_id : str
        Name of the BigQuery table
    schema : list(dict)
        Schema for comparison. Each item should have
        a 'name' and a 'type'

    Returns
    -------
    bool
        Whether the schemas match
    """
    # Compare sanitized copies (name/type only, sorted by name) so field
    # order, mode, and description do not affect the result.
    fields_remote = self._clean_schema_fields(
        self.schema(dataset_id, table_id)
    )
    fields_local = self._clean_schema_fields(schema["fields"])
    return fields_remote == fields_local
def schema_is_subset(self, dataset_id, table_id, schema):
    """Indicate whether the schema to be uploaded is a subset.

    Compare the BigQuery table identified in the parameters with
    the schema passed in and indicate whether a subset of the fields in
    the former are present in the latter. Order is not considered.

    Parameters
    ----------
    dataset_id : str
        Name of the BigQuery dataset for the table
    table_id : str
        Name of the BigQuery table
    schema : list(dict)
        Schema for comparison. Each item should have
        a 'name' and a 'type'

    Returns
    -------
    bool
        Whether the passed schema is a subset
    """
    fields_remote = self._clean_schema_fields(
        self.schema(dataset_id, table_id)
    )
    fields_local = self._clean_schema_fields(schema["fields"])
    # Every local field must appear (same name and type) remotely.
    return all(field in fields_remote for field in fields_local)
def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
    """Drop ``dataset_id.table_id`` and recreate it with *table_schema*."""
    table = _Table(
        self.project_id, dataset_id, credentials=self.credentials
    )
    table.delete(table_id)
    table.create(table_id, table_schema)
def _bqschema_to_nullsafe_dtypes(schema_fields):
"""Specify explicit dtypes based on BigQuery schema.
This function only specifies a dtype when the dtype allows nulls.
Otherwise, use pandas's default dtype choice.
See: http://pandas.pydata.org/pandas-docs/dev/missing_data.html
#missing-data-casting-rules-and-indexing
"""
# If you update this mapping, also update the table at
# `docs/source/reading.rst`.
dtype_map = {
"FLOAT": np.dtype(float),
# pandas doesn't support timezone-aware dtype in DataFrame/Series
# constructors. It's more idiomatic to localize after construction.
# https://github.com/pandas-dev/pandas/issues/25843
"TIMESTAMP": "datetime64[ns]",
"TIME": "datetime64[ns]",
"DATE": "datetime64[ns]",
"DATETIME": "datetime64[ns]",
}
dtypes = {}
for field in schema_fields:
name = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
dtypes[name] = dtype
return dtypes
def _cast_empty_df_dtypes(schema_fields, df):
"""Cast any columns in an empty dataframe to correct type.
In an empty dataframe, pandas cannot choose a dtype unless one is
explicitly provided. The _bqschema_to_nullsafe_dtypes() function only
provides dtypes when the dtype safely handles null values. This means
that empty int64 and boolean columns are incorrectly classified as
``object``.
"""
if not df.empty:
raise ValueError(
"DataFrame must be empty in order to cast non-nullsafe dtypes"
)
dtype_map = {"BOOLEAN": bool, "INTEGER": np.int64}
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
dtype = dtype_map.get(field["type"].upper())
if dtype:
df[column] = df[column].astype(dtype)
return df
def _localize_df(schema_fields, df):
"""Localize any TIMESTAMP columns to tz-aware type.
In pandas versions before 0.24.0, DatetimeTZDtype cannot be used as the
dtype in Series/DataFrame construction, so localize those columns after
the DataFrame is constructed.
"""
for field in schema_fields:
column = str(field["name"])
if field["mode"].upper() == "REPEATED":
continue
if field["type"].upper() == "TIMESTAMP" and df[column].dt.tz is None:
df[column] = df[column].dt.tz_localize("UTC")
return df
def _make_bqstorage_client(use_bqstorage_api, credentials):
if not use_bqstorage_api:
return None
if bigquery_storage_v1beta1 is None:
raise ImportError(
"Install the google-cloud-bigquery-storage and fastavro packages "
"to use the BigQuery Storage API."
)
return bigquery_storage_v1beta1.BigQueryStorageClient(
credentials=credentials
)
def read_gbq(
    query,
    project_id=None,
    index_col=None,
    col_order=None,
    reauth=False,
    auth_local_webserver=False,
    dialect=None,
    location=None,
    configuration=None,
    credentials=None,
    use_bqstorage_api=False,
    verbose=None,
    private_key=None,
):
    r"""Load data from Google BigQuery using google-cloud-python.

    The main method a user calls to execute a Query in Google BigQuery
    and read results into a pandas DataFrame.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    query : str
        SQL-Like Query to return data values.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    index_col : str, optional
        Name of result column to use for index in results DataFrame.
    col_order : list(str), optional
        List of BigQuery column names in the desired order for results
        DataFrame.
    reauth : boolean, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    auth_local_webserver : boolean, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    dialect : str, default 'standard'
        Note: The default value changed to 'standard' in version 0.10.0.

        SQL syntax dialect to use. Value can be one of:

        ``'legacy'``
            Use BigQuery's legacy SQL dialect. For more information see
            `BigQuery Legacy SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
        ``'standard'``
            Use BigQuery's standard SQL, which is
            compliant with the SQL 2011 standard. For more information
            see `BigQuery Standard SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
    location : str, optional
        Location where the query job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of any
        datasets used in the query.

        .. versionadded:: 0.5.0
    configuration : dict, optional
        Query config parameters for job processing.
        For example:

            configuration = {'query': {'useQueryCache': False}}

        For more information see `BigQuery REST API Reference
        <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        .. versionadded:: 0.8.0
    use_bqstorage_api : bool, default False
        Use the `BigQuery Storage API
        <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
        download query results quickly, but at an increased cost. To use this
        API, first `enable it in the Cloud Console
        <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
        You must also have the `bigquery.readsessions.create
        <https://cloud.google.com/bigquery/docs/access-control#roles>`__
        permission on the project you are billing queries to.

        **Note:** Due to a `known issue in the ``google-cloud-bigquery``
        package
        <https://github.com/googleapis/google-cloud-python/pull/7633>`__
        (fixed in version 1.11.0), you must write your query results to a
        destination table. To do this with ``read_gbq``, supply a
        ``configuration`` dictionary.

        This feature requires the ``google-cloud-bigquery-storage`` and
        ``fastavro`` packages.

        .. versionadded:: 0.10.0
    verbose : None, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    private_key : str, deprecated
        Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
        parameter and
        :func:`google.oauth2.service_account.Credentials.from_service_account_info`
        or
        :func:`google.oauth2.service_account.Credentials.from_service_account_file`
        instead.

        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).

    Returns
    -------
    df: DataFrame
        DataFrame representing results of query.
    """
    global context
    # Fall back to the session default, then to 'standard'.
    if dialect is None:
        dialect = context.dialect
    if dialect is None:
        dialect = "standard"
    _test_google_api_imports()
    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=2,
        )
    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )
    if dialect not in ("legacy", "standard"):
        raise ValueError("'{0}' is not valid for dialect".format(dialect))
    connector = GbqConnector(
        project_id,
        reauth=reauth,
        dialect=dialect,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
        use_bqstorage_api=use_bqstorage_api,
    )
    final_df = connector.run_query(query, configuration=configuration)
    # Reindex the DataFrame on the provided column
    if index_col is not None:
        if index_col in final_df.columns:
            final_df.set_index(index_col, inplace=True)
        else:
            raise InvalidIndexColumn(
                'Index column "{0}" does not exist in DataFrame.'.format(
                    index_col
                )
            )
    # Change the order of columns in the DataFrame based on provided list
    if col_order is not None:
        if sorted(col_order) == sorted(final_df.columns):
            final_df = final_df[col_order]
        else:
            raise InvalidColumnOrder(
                "Column order does not match this DataFrame."
            )
    connector.log_elapsed_seconds(
        "Total time taken",
        datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
    )
    return final_df
def to_gbq(
    dataframe,
    destination_table,
    project_id=None,
    chunksize=None,
    reauth=False,
    if_exists="fail",
    auth_local_webserver=False,
    table_schema=None,
    location=None,
    progress_bar=True,
    credentials=None,
    verbose=None,
    private_key=None,
):
    """Write a DataFrame to a Google BigQuery table.

    The main method a user calls to export pandas DataFrame contents to
    Google BigQuery table.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        DataFrame to be written to a Google BigQuery table.
    destination_table : str
        Name of table to be written, in the form ``dataset.tablename``.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    chunksize : int, optional
        Number of rows to be inserted in each chunk from the dataframe.
        Set to ``None`` to load the whole dataframe at once.
    reauth : bool, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    if_exists : str, default 'fail'
        Behavior when the destination table exists. Value can be one of:

        ``'fail'``
            If table exists, do nothing.
        ``'replace'``
            If table exists, drop it, recreate it, and insert data.
        ``'append'``
            If table exists, insert data. Create if does not exist.
    auth_local_webserver : bool, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    table_schema : list of dicts, optional
        List of BigQuery table fields to which according DataFrame
        columns conform to, e.g. ``[{'name': 'col1', 'type':
        'STRING'},...]``.

        - If ``table_schema`` is provided, it may contain all or a subset of
          DataFrame columns. If a subset is provided, the rest will be
          inferred from the DataFrame dtypes.
        - If ``table_schema`` is **not** provided, it will be
          generated according to dtypes of DataFrame columns. See
          `Inferring the Table Schema
          <https://pandas-gbq.readthedocs.io/en/latest/writing.html#writing-schema>`__.
          for a description of the schema inference.

        See `BigQuery API documentation on valid column names
        <https://cloud.google.com/bigquery/docs/schemas#column_names`>__.

        .. versionadded:: 0.3.1
    location : str, optional
        Location where the load job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of the
        target dataset.

        .. versionadded:: 0.5.0
    progress_bar : bool, default True
        Use the library `tqdm` to show the progress bar for the upload,
        chunk by chunk.

        .. versionadded:: 0.5.0
    credentials : google.auth.credentials.Credentials, optional
        Credentials for accessing Google APIs. Use this parameter to override
        default credentials, such as to use Compute Engine
        :class:`google.auth.compute_engine.Credentials` or Service Account
        :class:`google.oauth2.service_account.Credentials` directly.

        .. versionadded:: 0.8.0
    verbose : bool, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    private_key : str, deprecated
        Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
        parameter and
        :func:`google.oauth2.service_account.Credentials.from_service_account_info`
        or
        :func:`google.oauth2.service_account.Credentials.from_service_account_file`
        instead.

        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).
    """
    _test_google_api_imports()
    from pandas_gbq import schema
    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            # stacklevel=2 so the warning points at the user's call site,
            # consistent with the same warning in read_gbq (was 1).
            stacklevel=2,
        )
    if private_key is not None and SHOW_PRIVATE_KEY_DEPRECATION:
        warnings.warn(
            PRIVATE_KEY_DEPRECATION_MESSAGE, FutureWarning, stacklevel=2
        )
    if if_exists not in ("fail", "replace", "append"):
        raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
    if "." not in destination_table:
        raise NotFoundException(
            "Invalid Table Name. Should be of the form 'datasetId.tableId' "
        )
    connector = GbqConnector(
        project_id,
        reauth=reauth,
        auth_local_webserver=auth_local_webserver,
        location=location,
        credentials=credentials,
        private_key=private_key,
    )
    # rsplit so table names may not contain dots, but dataset IDs may.
    dataset_id, table_id = destination_table.rsplit(".", 1)
    table = _Table(
        project_id,
        dataset_id,
        location=location,
        credentials=connector.credentials,
    )
    # Infer a schema from the dataframe, then overlay any user-supplied
    # fields on top of it.
    default_schema = _generate_bq_schema(dataframe)
    if not table_schema:
        table_schema = default_schema
    else:
        table_schema = schema.update_schema(
            default_schema, dict(fields=table_schema)
        )
    # If table exists, check if_exists parameter
    if table.exists(table_id):
        if if_exists == "fail":
            raise TableCreationError(
                "Could not create the table because it "
                "already exists. "
                "Change the if_exists parameter to "
                "'append' or 'replace' data."
            )
        elif if_exists == "replace":
            connector.delete_and_recreate_table(
                dataset_id, table_id, table_schema
            )
        elif if_exists == "append":
            if not connector.schema_is_subset(
                dataset_id, table_id, table_schema
            ):
                raise InvalidSchema(
                    "Please verify that the structure and "
                    "data types in the DataFrame match the "
                    "schema of the destination table."
                )
    else:
        table.create(table_id, table_schema)
    if dataframe.empty:
        # Create the table (if needed), but don't try to run a load job with an
        # empty file. See: https://github.com/pydata/pandas-gbq/issues/237
        return
    connector.load_data(
        dataframe,
        dataset_id,
        table_id,
        chunksize=chunksize,
        schema=table_schema,
        progress_bar=progress_bar,
    )
def generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Given a passed df, generate the associated Google
    BigQuery schema.

    Prefer the ``table_schema`` argument of :func:`to_gbq`; this wrapper
    only emits a FutureWarning and delegates to the private helper.

    Parameters
    ----------
    df : DataFrame
    default_type : string
        The default big query type in case the type of the column
        does not exist in the schema.
    """
    # deprecation TimeSeries, #11121
    deprecation_note = (
        "generate_bq_schema is deprecated and will be removed in "
        "a future version"
    )
    warnings.warn(deprecation_note, FutureWarning, stacklevel=2)
    return _generate_bq_schema(df, default_type=default_type)
class _Table(GbqConnector):
    """Internal helper for table-level operations, bound to one dataset."""

    def __init__(
        self,
        project_id,
        dataset_id,
        reauth=False,
        location=None,
        credentials=None,
        private_key=None,
    ):
        # Remember the dataset; all table operations are scoped to it.
        self.dataset_id = dataset_id
        super(_Table, self).__init__(
            project_id,
            reauth,
            location=location,
            credentials=credentials,
            private_key=private_key,
        )

    def exists(self, table_id):
        """Check if a table exists in Google BigQuery.

        Parameters
        ----------
        table_id : str
            Name of table to be verified

        Returns
        -------
        boolean
            true if table exists, otherwise false
        """
        from google.api_core.exceptions import NotFound
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.get_table(table_ref)
            return True
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)

    def create(self, table_id, schema):
        """Create a table in Google BigQuery given a table and schema.

        Parameters
        ----------
        table_id : str
            Name of table to be written
        schema : str
            Use the generate_bq_schema to generate your table schema from a
            dataframe.

        Raises
        ------
        TableCreationError
            If the table already exists.
        """
        from google.cloud.bigquery import SchemaField
        from google.cloud.bigquery import Table
        if self.exists(table_id):
            raise TableCreationError(
                "Table {0} already " "exists".format(table_id)
            )
        # Create the parent dataset first if it is missing.
        if not _Dataset(self.project_id, credentials=self.credentials).exists(
            self.dataset_id
        ):
            _Dataset(
                self.project_id,
                credentials=self.credentials,
                location=self.location,
            ).create(self.dataset_id)
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        table = Table(table_ref)
        # Manually create the schema objects, adding NULLABLE mode
        # as a workaround for
        # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
        for field in schema["fields"]:
            if "mode" not in field:
                field["mode"] = "NULLABLE"
        table.schema = [
            SchemaField.from_api_repr(field) for field in schema["fields"]
        ]
        try:
            self.client.create_table(table)
        except self.http_error as ex:
            self.process_http_error(ex)

    def delete(self, table_id):
        """Delete a table in Google BigQuery.

        Parameters
        ----------
        table_id : str
            Name of table to be deleted

        Raises
        ------
        NotFoundException
            If the table did not exist before this call.
        """
        from google.api_core.exceptions import NotFound
        if not self.exists(table_id):
            raise NotFoundException("Table does not exist")
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.delete_table(table_ref)
        except NotFound:
            # Ignore 404 error which may occur if table already deleted
            pass
        except self.http_error as ex:
            self.process_http_error(ex)
class _Dataset(GbqConnector):
def __init__(
self,
project_id,
reauth=False,
location=None,
credentials=None,
private_key=None,
):
super(_Dataset, self).__init__(
project_id,
reauth,
credentials=credentials,
location=location,
private_key=private_key,
)
def exists(self, dataset_id):
""" Check if a dataset exists in Google BigQuery
Parameters
----------
dataset_id : str
Name of dataset to be verified
Returns
-------
boolean
true if dataset exists, otherwise false
"""
from google.api_core.exceptions import NotFound
try:
self.client.get_dataset(self.client.dataset(dataset_id))
return True
except NotFound:
return False
except self.http_error as ex:
self.process_http_error(ex)
def create(self, dataset_id):
""" Create a dataset in Google BigQuery
Parameters
----------
dataset : str
Name of dataset to be written
"""
from google.cloud.bigquery import Dataset
if self.exists(dataset_id):
raise DatasetCreationError(
"Dataset {0} already " "exists".format(dataset_id)
)
dataset = Dataset(self.client.dataset(dataset_id))
if self.location is not None:
dataset.location = self.location
try:
self.client.create_dataset(dataset)
except self.http_error as ex:
self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
GbqConnector.schema
|
python
|
def schema(self, dataset_id, table_id):
table_ref = self.client.dataset(dataset_id).table(table_id)
try:
table = self.client.get_table(table_ref)
remote_schema = table.schema
remote_fields = [
field_remote.to_api_repr() for field_remote in remote_schema
]
for field in remote_fields:
field["type"] = field["type"].upper()
field["mode"] = field["mode"].upper()
return remote_fields
except self.http_error as ex:
self.process_http_error(ex)
|
Retrieve the schema of the table
Obtain from BigQuery the field names and field types
for the table defined by the parameters
Parameters
----------
dataset_id : str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
Returns
-------
list of dicts
Fields representing the schema
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L550-L583
|
[
"def process_http_error(ex):\n # See `BigQuery Troubleshooting Errors\n # <https://cloud.google.com/bigquery/troubleshooting-errors>`__\n\n raise GenericGBQException(\"Reason: {0}\".format(ex))\n"
] |
class GbqConnector(object):
def __init__(
self,
project_id,
reauth=False,
private_key=None,
auth_local_webserver=False,
dialect="standard",
location=None,
credentials=None,
use_bqstorage_api=False,
):
global context
from google.api_core.exceptions import GoogleAPIError
from google.api_core.exceptions import ClientError
from pandas_gbq import auth
self.http_error = (ClientError, GoogleAPIError)
self.project_id = project_id
self.location = location
self.reauth = reauth
self.private_key = private_key
self.auth_local_webserver = auth_local_webserver
self.dialect = dialect
self.credentials = credentials
default_project = None
# Service account credentials have a project associated with them.
# Prefer that project if none was supplied.
if self.project_id is None and hasattr(self.credentials, "project_id"):
self.project_id = credentials.project_id
# Load credentials from cache.
if not self.credentials:
self.credentials = context.credentials
default_project = context.project
# Credentials were explicitly asked for, so don't use the cache.
if private_key or reauth or not self.credentials:
self.credentials, default_project = auth.get_credentials(
private_key=private_key,
project_id=project_id,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
)
if self.project_id is None:
self.project_id = default_project
if self.project_id is None:
raise ValueError(
"Could not determine project ID and one was not supplied."
)
# Cache the credentials if they haven't been set yet.
if context.credentials is None:
context.credentials = self.credentials
if context.project is None:
context.project = self.project_id
self.client = self.get_client()
self.bqstorage_client = _make_bqstorage_client(
use_bqstorage_api, self.credentials
)
# BQ Queries costs $5 per TB. First 1 TB per month is free
# see here for more: https://cloud.google.com/bigquery/pricing
self.query_price_for_TB = 5.0 / 2 ** 40 # USD/TB
def _start_timer(self):
self.start = time.time()
def get_elapsed_seconds(self):
return round(time.time() - self.start, 2)
def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
sec = self.get_elapsed_seconds()
if sec > overlong:
logger.info("{} {} {}".format(prefix, sec, postfix))
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
@staticmethod
def sizeof_fmt(num, suffix="B"):
fmt = "%3.1f %s%s"
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(num) < 1024.0:
return fmt % (num, unit, suffix)
num /= 1024.0
return fmt % (num, "Y", suffix)
def get_client(self):
from google.cloud import bigquery
return bigquery.Client(
project=self.project_id, credentials=self.credentials
)
@staticmethod
def process_http_error(ex):
# See `BigQuery Troubleshooting Errors
# <https://cloud.google.com/bigquery/troubleshooting-errors>`__
raise GenericGBQException("Reason: {0}".format(ex))
def run_query(self, query, **kwargs):
from concurrent.futures import TimeoutError
from google.auth.exceptions import RefreshError
from google.cloud import bigquery
job_config = {
"query": {
"useLegacySql": self.dialect
== "legacy"
# 'allowLargeResults', 'createDisposition',
# 'preserveNulls', destinationTable, useQueryCache
}
}
config = kwargs.get("configuration")
if config is not None:
job_config.update(config)
if "query" in config and "query" in config["query"]:
if query is not None:
raise ValueError(
"Query statement can't be specified "
"inside config while it is specified "
"as parameter"
)
query = config["query"].pop("query")
self._start_timer()
try:
logger.debug("Requesting query... ")
query_reply = self.client.query(
query,
job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
location=self.location,
project=self.project_id,
)
logger.debug("Query running...")
except (RefreshError, ValueError):
if self.private_key:
raise AccessDenied(
"The service account credentials are not valid"
)
else:
raise AccessDenied(
"The credentials have been revoked or expired, "
"please re-run the application to re-authorize"
)
except self.http_error as ex:
self.process_http_error(ex)
job_id = query_reply.job_id
logger.debug("Job ID: %s" % job_id)
while query_reply.state != "DONE":
self.log_elapsed_seconds(" Elapsed", "s. Waiting...")
timeout_ms = job_config["query"].get("timeoutMs")
if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))
timeout_sec = 1.0
if timeout_ms:
# Wait at most 1 second so we can show progress bar
timeout_sec = min(1.0, timeout_ms / 1000.0)
try:
query_reply.result(timeout=timeout_sec)
except TimeoutError:
# Use our own timeout logic
pass
except self.http_error as ex:
self.process_http_error(ex)
if query_reply.cache_hit:
logger.debug("Query done.\nCache hit.\n")
else:
bytes_processed = query_reply.total_bytes_processed or 0
bytes_billed = query_reply.total_bytes_billed or 0
logger.debug(
"Query done.\nProcessed: {} Billed: {}".format(
self.sizeof_fmt(bytes_processed),
self.sizeof_fmt(bytes_billed),
)
)
logger.debug(
"Standard price: ${:,.2f} USD\n".format(
bytes_billed * self.query_price_for_TB
)
)
try:
rows_iter = query_reply.result()
except self.http_error as ex:
self.process_http_error(ex)
schema_fields = [field.to_api_repr() for field in rows_iter.schema]
nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
df = rows_iter.to_dataframe(
dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
)
if df.empty:
df = _cast_empty_df_dtypes(schema_fields, df)
# Ensure any TIMESTAMP columns are tz-aware.
df = _localize_df(schema_fields, df)
logger.debug("Got {} rows.\n".format(rows_iter.total_rows))
return df
def load_data(
self,
dataframe,
dataset_id,
table_id,
chunksize=None,
schema=None,
progress_bar=True,
):
from pandas_gbq import load
total_rows = len(dataframe)
try:
chunks = load.load_chunks(
self.client,
dataframe,
dataset_id,
table_id,
chunksize=chunksize,
schema=schema,
location=self.location,
)
if progress_bar and tqdm:
chunks = tqdm.tqdm(chunks)
for remaining_rows in chunks:
logger.info(
"\r{} out of {} rows loaded.".format(
total_rows - remaining_rows, total_rows
)
)
except self.http_error as ex:
self.process_http_error(ex)
def _clean_schema_fields(self, fields):
"""Return a sanitized version of the schema for comparisons."""
fields_sorted = sorted(fields, key=lambda field: field["name"])
# Ignore mode and description when comparing schemas.
return [
{"name": field["name"], "type": field["type"]}
for field in fields_sorted
]
def verify_schema(self, dataset_id, table_id, schema):
"""Indicate whether schemas match exactly
Compare the BigQuery table identified in the parameters with
the schema passed in and indicate whether all fields in the former
are present in the latter. Order is not considered.
Parameters
----------
dataset_id :str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
schema : list(dict)
Schema for comparison. Each item should have
a 'name' and a 'type'
Returns
-------
bool
Whether the schemas match
"""
fields_remote = self._clean_schema_fields(
self.schema(dataset_id, table_id)
)
fields_local = self._clean_schema_fields(schema["fields"])
return fields_remote == fields_local
def schema_is_subset(self, dataset_id, table_id, schema):
"""Indicate whether the schema to be uploaded is a subset
Compare the BigQuery table identified in the parameters with
the schema passed in and indicate whether a subset of the fields in
the former are present in the latter. Order is not considered.
Parameters
----------
dataset_id : str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
schema : list(dict)
Schema for comparison. Each item should have
a 'name' and a 'type'
Returns
-------
bool
Whether the passed schema is a subset
"""
fields_remote = self._clean_schema_fields(
self.schema(dataset_id, table_id)
)
fields_local = self._clean_schema_fields(schema["fields"])
return all(field in fields_remote for field in fields_local)
def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
table = _Table(
self.project_id, dataset_id, credentials=self.credentials
)
table.delete(table_id)
table.create(table_id, table_schema)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
GbqConnector._clean_schema_fields
|
python
|
def _clean_schema_fields(self, fields):
fields_sorted = sorted(fields, key=lambda field: field["name"])
# Ignore mode and description when comparing schemas.
return [
{"name": field["name"], "type": field["type"]}
for field in fields_sorted
]
|
Return a sanitized version of the schema for comparisons.
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L585-L592
| null |
class GbqConnector(object):
def __init__(
self,
project_id,
reauth=False,
private_key=None,
auth_local_webserver=False,
dialect="standard",
location=None,
credentials=None,
use_bqstorage_api=False,
):
global context
from google.api_core.exceptions import GoogleAPIError
from google.api_core.exceptions import ClientError
from pandas_gbq import auth
self.http_error = (ClientError, GoogleAPIError)
self.project_id = project_id
self.location = location
self.reauth = reauth
self.private_key = private_key
self.auth_local_webserver = auth_local_webserver
self.dialect = dialect
self.credentials = credentials
default_project = None
# Service account credentials have a project associated with them.
# Prefer that project if none was supplied.
if self.project_id is None and hasattr(self.credentials, "project_id"):
self.project_id = credentials.project_id
# Load credentials from cache.
if not self.credentials:
self.credentials = context.credentials
default_project = context.project
# Credentials were explicitly asked for, so don't use the cache.
if private_key or reauth or not self.credentials:
self.credentials, default_project = auth.get_credentials(
private_key=private_key,
project_id=project_id,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
)
if self.project_id is None:
self.project_id = default_project
if self.project_id is None:
raise ValueError(
"Could not determine project ID and one was not supplied."
)
# Cache the credentials if they haven't been set yet.
if context.credentials is None:
context.credentials = self.credentials
if context.project is None:
context.project = self.project_id
self.client = self.get_client()
self.bqstorage_client = _make_bqstorage_client(
use_bqstorage_api, self.credentials
)
# BQ Queries costs $5 per TB. First 1 TB per month is free
# see here for more: https://cloud.google.com/bigquery/pricing
self.query_price_for_TB = 5.0 / 2 ** 40 # USD/TB
def _start_timer(self):
self.start = time.time()
def get_elapsed_seconds(self):
return round(time.time() - self.start, 2)
def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
sec = self.get_elapsed_seconds()
if sec > overlong:
logger.info("{} {} {}".format(prefix, sec, postfix))
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
@staticmethod
def sizeof_fmt(num, suffix="B"):
fmt = "%3.1f %s%s"
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(num) < 1024.0:
return fmt % (num, unit, suffix)
num /= 1024.0
return fmt % (num, "Y", suffix)
def get_client(self):
from google.cloud import bigquery
return bigquery.Client(
project=self.project_id, credentials=self.credentials
)
@staticmethod
def process_http_error(ex):
# See `BigQuery Troubleshooting Errors
# <https://cloud.google.com/bigquery/troubleshooting-errors>`__
raise GenericGBQException("Reason: {0}".format(ex))
def run_query(self, query, **kwargs):
from concurrent.futures import TimeoutError
from google.auth.exceptions import RefreshError
from google.cloud import bigquery
job_config = {
"query": {
"useLegacySql": self.dialect
== "legacy"
# 'allowLargeResults', 'createDisposition',
# 'preserveNulls', destinationTable, useQueryCache
}
}
config = kwargs.get("configuration")
if config is not None:
job_config.update(config)
if "query" in config and "query" in config["query"]:
if query is not None:
raise ValueError(
"Query statement can't be specified "
"inside config while it is specified "
"as parameter"
)
query = config["query"].pop("query")
self._start_timer()
try:
logger.debug("Requesting query... ")
query_reply = self.client.query(
query,
job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
location=self.location,
project=self.project_id,
)
logger.debug("Query running...")
except (RefreshError, ValueError):
if self.private_key:
raise AccessDenied(
"The service account credentials are not valid"
)
else:
raise AccessDenied(
"The credentials have been revoked or expired, "
"please re-run the application to re-authorize"
)
except self.http_error as ex:
self.process_http_error(ex)
job_id = query_reply.job_id
logger.debug("Job ID: %s" % job_id)
while query_reply.state != "DONE":
self.log_elapsed_seconds(" Elapsed", "s. Waiting...")
timeout_ms = job_config["query"].get("timeoutMs")
if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))
timeout_sec = 1.0
if timeout_ms:
# Wait at most 1 second so we can show progress bar
timeout_sec = min(1.0, timeout_ms / 1000.0)
try:
query_reply.result(timeout=timeout_sec)
except TimeoutError:
# Use our own timeout logic
pass
except self.http_error as ex:
self.process_http_error(ex)
if query_reply.cache_hit:
logger.debug("Query done.\nCache hit.\n")
else:
bytes_processed = query_reply.total_bytes_processed or 0
bytes_billed = query_reply.total_bytes_billed or 0
logger.debug(
"Query done.\nProcessed: {} Billed: {}".format(
self.sizeof_fmt(bytes_processed),
self.sizeof_fmt(bytes_billed),
)
)
logger.debug(
"Standard price: ${:,.2f} USD\n".format(
bytes_billed * self.query_price_for_TB
)
)
try:
rows_iter = query_reply.result()
except self.http_error as ex:
self.process_http_error(ex)
schema_fields = [field.to_api_repr() for field in rows_iter.schema]
nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
df = rows_iter.to_dataframe(
dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
)
if df.empty:
df = _cast_empty_df_dtypes(schema_fields, df)
# Ensure any TIMESTAMP columns are tz-aware.
df = _localize_df(schema_fields, df)
logger.debug("Got {} rows.\n".format(rows_iter.total_rows))
return df
def load_data(
self,
dataframe,
dataset_id,
table_id,
chunksize=None,
schema=None,
progress_bar=True,
):
from pandas_gbq import load
total_rows = len(dataframe)
try:
chunks = load.load_chunks(
self.client,
dataframe,
dataset_id,
table_id,
chunksize=chunksize,
schema=schema,
location=self.location,
)
if progress_bar and tqdm:
chunks = tqdm.tqdm(chunks)
for remaining_rows in chunks:
logger.info(
"\r{} out of {} rows loaded.".format(
total_rows - remaining_rows, total_rows
)
)
except self.http_error as ex:
self.process_http_error(ex)
def schema(self, dataset_id, table_id):
"""Retrieve the schema of the table
Obtain from BigQuery the field names and field types
for the table defined by the parameters
Parameters
----------
dataset_id : str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
Returns
-------
list of dicts
Fields representing the schema
"""
table_ref = self.client.dataset(dataset_id).table(table_id)
try:
table = self.client.get_table(table_ref)
remote_schema = table.schema
remote_fields = [
field_remote.to_api_repr() for field_remote in remote_schema
]
for field in remote_fields:
field["type"] = field["type"].upper()
field["mode"] = field["mode"].upper()
return remote_fields
except self.http_error as ex:
self.process_http_error(ex)
def verify_schema(self, dataset_id, table_id, schema):
"""Indicate whether schemas match exactly
Compare the BigQuery table identified in the parameters with
the schema passed in and indicate whether all fields in the former
are present in the latter. Order is not considered.
Parameters
----------
dataset_id :str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
schema : list(dict)
Schema for comparison. Each item should have
a 'name' and a 'type'
Returns
-------
bool
Whether the schemas match
"""
fields_remote = self._clean_schema_fields(
self.schema(dataset_id, table_id)
)
fields_local = self._clean_schema_fields(schema["fields"])
return fields_remote == fields_local
def schema_is_subset(self, dataset_id, table_id, schema):
"""Indicate whether the schema to be uploaded is a subset
Compare the BigQuery table identified in the parameters with
the schema passed in and indicate whether a subset of the fields in
the former are present in the latter. Order is not considered.
Parameters
----------
dataset_id : str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
schema : list(dict)
Schema for comparison. Each item should have
a 'name' and a 'type'
Returns
-------
bool
Whether the passed schema is a subset
"""
fields_remote = self._clean_schema_fields(
self.schema(dataset_id, table_id)
)
fields_local = self._clean_schema_fields(schema["fields"])
return all(field in fields_remote for field in fields_local)
def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
table = _Table(
self.project_id, dataset_id, credentials=self.credentials
)
table.delete(table_id)
table.create(table_id, table_schema)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
GbqConnector.verify_schema
|
python
|
def verify_schema(self, dataset_id, table_id, schema):
fields_remote = self._clean_schema_fields(
self.schema(dataset_id, table_id)
)
fields_local = self._clean_schema_fields(schema["fields"])
return fields_remote == fields_local
|
Indicate whether schemas match exactly
Compare the BigQuery table identified in the parameters with
the schema passed in and indicate whether all fields in the former
are present in the latter. Order is not considered.
Parameters
----------
dataset_id :str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
schema : list(dict)
Schema for comparison. Each item should have
a 'name' and a 'type'
Returns
-------
bool
Whether the schemas match
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L594-L622
|
[
"def schema(self, dataset_id, table_id):\n \"\"\"Retrieve the schema of the table\n\n Obtain from BigQuery the field names and field types\n for the table defined by the parameters\n\n Parameters\n ----------\n dataset_id : str\n Name of the BigQuery dataset for the table\n table_id : str\n Name of the BigQuery table\n\n Returns\n -------\n list of dicts\n Fields representing the schema\n \"\"\"\n table_ref = self.client.dataset(dataset_id).table(table_id)\n\n try:\n table = self.client.get_table(table_ref)\n remote_schema = table.schema\n\n remote_fields = [\n field_remote.to_api_repr() for field_remote in remote_schema\n ]\n for field in remote_fields:\n field[\"type\"] = field[\"type\"].upper()\n field[\"mode\"] = field[\"mode\"].upper()\n\n return remote_fields\n except self.http_error as ex:\n self.process_http_error(ex)\n",
"def _clean_schema_fields(self, fields):\n \"\"\"Return a sanitized version of the schema for comparisons.\"\"\"\n fields_sorted = sorted(fields, key=lambda field: field[\"name\"])\n # Ignore mode and description when comparing schemas.\n return [\n {\"name\": field[\"name\"], \"type\": field[\"type\"]}\n for field in fields_sorted\n ]\n"
] |
class GbqConnector(object):
def __init__(
self,
project_id,
reauth=False,
private_key=None,
auth_local_webserver=False,
dialect="standard",
location=None,
credentials=None,
use_bqstorage_api=False,
):
global context
from google.api_core.exceptions import GoogleAPIError
from google.api_core.exceptions import ClientError
from pandas_gbq import auth
self.http_error = (ClientError, GoogleAPIError)
self.project_id = project_id
self.location = location
self.reauth = reauth
self.private_key = private_key
self.auth_local_webserver = auth_local_webserver
self.dialect = dialect
self.credentials = credentials
default_project = None
# Service account credentials have a project associated with them.
# Prefer that project if none was supplied.
if self.project_id is None and hasattr(self.credentials, "project_id"):
self.project_id = credentials.project_id
# Load credentials from cache.
if not self.credentials:
self.credentials = context.credentials
default_project = context.project
# Credentials were explicitly asked for, so don't use the cache.
if private_key or reauth or not self.credentials:
self.credentials, default_project = auth.get_credentials(
private_key=private_key,
project_id=project_id,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
)
if self.project_id is None:
self.project_id = default_project
if self.project_id is None:
raise ValueError(
"Could not determine project ID and one was not supplied."
)
# Cache the credentials if they haven't been set yet.
if context.credentials is None:
context.credentials = self.credentials
if context.project is None:
context.project = self.project_id
self.client = self.get_client()
self.bqstorage_client = _make_bqstorage_client(
use_bqstorage_api, self.credentials
)
# BQ Queries costs $5 per TB. First 1 TB per month is free
# see here for more: https://cloud.google.com/bigquery/pricing
self.query_price_for_TB = 5.0 / 2 ** 40 # USD/TB
def _start_timer(self):
self.start = time.time()
def get_elapsed_seconds(self):
return round(time.time() - self.start, 2)
def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
sec = self.get_elapsed_seconds()
if sec > overlong:
logger.info("{} {} {}".format(prefix, sec, postfix))
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
@staticmethod
def sizeof_fmt(num, suffix="B"):
fmt = "%3.1f %s%s"
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(num) < 1024.0:
return fmt % (num, unit, suffix)
num /= 1024.0
return fmt % (num, "Y", suffix)
def get_client(self):
from google.cloud import bigquery
return bigquery.Client(
project=self.project_id, credentials=self.credentials
)
@staticmethod
def process_http_error(ex):
# See `BigQuery Troubleshooting Errors
# <https://cloud.google.com/bigquery/troubleshooting-errors>`__
raise GenericGBQException("Reason: {0}".format(ex))
def run_query(self, query, **kwargs):
from concurrent.futures import TimeoutError
from google.auth.exceptions import RefreshError
from google.cloud import bigquery
job_config = {
"query": {
"useLegacySql": self.dialect
== "legacy"
# 'allowLargeResults', 'createDisposition',
# 'preserveNulls', destinationTable, useQueryCache
}
}
config = kwargs.get("configuration")
if config is not None:
job_config.update(config)
if "query" in config and "query" in config["query"]:
if query is not None:
raise ValueError(
"Query statement can't be specified "
"inside config while it is specified "
"as parameter"
)
query = config["query"].pop("query")
self._start_timer()
try:
logger.debug("Requesting query... ")
query_reply = self.client.query(
query,
job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
location=self.location,
project=self.project_id,
)
logger.debug("Query running...")
except (RefreshError, ValueError):
if self.private_key:
raise AccessDenied(
"The service account credentials are not valid"
)
else:
raise AccessDenied(
"The credentials have been revoked or expired, "
"please re-run the application to re-authorize"
)
except self.http_error as ex:
self.process_http_error(ex)
job_id = query_reply.job_id
logger.debug("Job ID: %s" % job_id)
while query_reply.state != "DONE":
self.log_elapsed_seconds(" Elapsed", "s. Waiting...")
timeout_ms = job_config["query"].get("timeoutMs")
if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))
timeout_sec = 1.0
if timeout_ms:
# Wait at most 1 second so we can show progress bar
timeout_sec = min(1.0, timeout_ms / 1000.0)
try:
query_reply.result(timeout=timeout_sec)
except TimeoutError:
# Use our own timeout logic
pass
except self.http_error as ex:
self.process_http_error(ex)
if query_reply.cache_hit:
logger.debug("Query done.\nCache hit.\n")
else:
bytes_processed = query_reply.total_bytes_processed or 0
bytes_billed = query_reply.total_bytes_billed or 0
logger.debug(
"Query done.\nProcessed: {} Billed: {}".format(
self.sizeof_fmt(bytes_processed),
self.sizeof_fmt(bytes_billed),
)
)
logger.debug(
"Standard price: ${:,.2f} USD\n".format(
bytes_billed * self.query_price_for_TB
)
)
try:
rows_iter = query_reply.result()
except self.http_error as ex:
self.process_http_error(ex)
schema_fields = [field.to_api_repr() for field in rows_iter.schema]
nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
df = rows_iter.to_dataframe(
dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
)
if df.empty:
df = _cast_empty_df_dtypes(schema_fields, df)
# Ensure any TIMESTAMP columns are tz-aware.
df = _localize_df(schema_fields, df)
logger.debug("Got {} rows.\n".format(rows_iter.total_rows))
return df
def load_data(
self,
dataframe,
dataset_id,
table_id,
chunksize=None,
schema=None,
progress_bar=True,
):
from pandas_gbq import load
total_rows = len(dataframe)
try:
chunks = load.load_chunks(
self.client,
dataframe,
dataset_id,
table_id,
chunksize=chunksize,
schema=schema,
location=self.location,
)
if progress_bar and tqdm:
chunks = tqdm.tqdm(chunks)
for remaining_rows in chunks:
logger.info(
"\r{} out of {} rows loaded.".format(
total_rows - remaining_rows, total_rows
)
)
except self.http_error as ex:
self.process_http_error(ex)
def schema(self, dataset_id, table_id):
"""Retrieve the schema of the table
Obtain from BigQuery the field names and field types
for the table defined by the parameters
Parameters
----------
dataset_id : str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
Returns
-------
list of dicts
Fields representing the schema
"""
table_ref = self.client.dataset(dataset_id).table(table_id)
try:
table = self.client.get_table(table_ref)
remote_schema = table.schema
remote_fields = [
field_remote.to_api_repr() for field_remote in remote_schema
]
for field in remote_fields:
field["type"] = field["type"].upper()
field["mode"] = field["mode"].upper()
return remote_fields
except self.http_error as ex:
self.process_http_error(ex)
def _clean_schema_fields(self, fields):
"""Return a sanitized version of the schema for comparisons."""
fields_sorted = sorted(fields, key=lambda field: field["name"])
# Ignore mode and description when comparing schemas.
return [
{"name": field["name"], "type": field["type"]}
for field in fields_sorted
]
def schema_is_subset(self, dataset_id, table_id, schema):
"""Indicate whether the schema to be uploaded is a subset
Compare the BigQuery table identified in the parameters with
the schema passed in and indicate whether a subset of the fields in
the former are present in the latter. Order is not considered.
Parameters
----------
dataset_id : str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
schema : list(dict)
Schema for comparison. Each item should have
a 'name' and a 'type'
Returns
-------
bool
Whether the passed schema is a subset
"""
fields_remote = self._clean_schema_fields(
self.schema(dataset_id, table_id)
)
fields_local = self._clean_schema_fields(schema["fields"])
return all(field in fields_remote for field in fields_local)
def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
table = _Table(
self.project_id, dataset_id, credentials=self.credentials
)
table.delete(table_id)
table.create(table_id, table_schema)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
GbqConnector.schema_is_subset
|
python
|
def schema_is_subset(self, dataset_id, table_id, schema):
fields_remote = self._clean_schema_fields(
self.schema(dataset_id, table_id)
)
fields_local = self._clean_schema_fields(schema["fields"])
return all(field in fields_remote for field in fields_local)
|
Indicate whether the schema to be uploaded is a subset
Compare the BigQuery table identified in the parameters with
the schema passed in and indicate whether a subset of the fields in
the former are present in the latter. Order is not considered.
Parameters
----------
dataset_id : str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
schema : list(dict)
Schema for comparison. Each item should have
a 'name' and a 'type'
Returns
-------
bool
Whether the passed schema is a subset
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L624-L652
|
[
"def schema(self, dataset_id, table_id):\n \"\"\"Retrieve the schema of the table\n\n Obtain from BigQuery the field names and field types\n for the table defined by the parameters\n\n Parameters\n ----------\n dataset_id : str\n Name of the BigQuery dataset for the table\n table_id : str\n Name of the BigQuery table\n\n Returns\n -------\n list of dicts\n Fields representing the schema\n \"\"\"\n table_ref = self.client.dataset(dataset_id).table(table_id)\n\n try:\n table = self.client.get_table(table_ref)\n remote_schema = table.schema\n\n remote_fields = [\n field_remote.to_api_repr() for field_remote in remote_schema\n ]\n for field in remote_fields:\n field[\"type\"] = field[\"type\"].upper()\n field[\"mode\"] = field[\"mode\"].upper()\n\n return remote_fields\n except self.http_error as ex:\n self.process_http_error(ex)\n",
"def _clean_schema_fields(self, fields):\n \"\"\"Return a sanitized version of the schema for comparisons.\"\"\"\n fields_sorted = sorted(fields, key=lambda field: field[\"name\"])\n # Ignore mode and description when comparing schemas.\n return [\n {\"name\": field[\"name\"], \"type\": field[\"type\"]}\n for field in fields_sorted\n ]\n"
] |
class GbqConnector(object):
def __init__(
self,
project_id,
reauth=False,
private_key=None,
auth_local_webserver=False,
dialect="standard",
location=None,
credentials=None,
use_bqstorage_api=False,
):
global context
from google.api_core.exceptions import GoogleAPIError
from google.api_core.exceptions import ClientError
from pandas_gbq import auth
self.http_error = (ClientError, GoogleAPIError)
self.project_id = project_id
self.location = location
self.reauth = reauth
self.private_key = private_key
self.auth_local_webserver = auth_local_webserver
self.dialect = dialect
self.credentials = credentials
default_project = None
# Service account credentials have a project associated with them.
# Prefer that project if none was supplied.
if self.project_id is None and hasattr(self.credentials, "project_id"):
self.project_id = credentials.project_id
# Load credentials from cache.
if not self.credentials:
self.credentials = context.credentials
default_project = context.project
# Credentials were explicitly asked for, so don't use the cache.
if private_key or reauth or not self.credentials:
self.credentials, default_project = auth.get_credentials(
private_key=private_key,
project_id=project_id,
reauth=reauth,
auth_local_webserver=auth_local_webserver,
)
if self.project_id is None:
self.project_id = default_project
if self.project_id is None:
raise ValueError(
"Could not determine project ID and one was not supplied."
)
# Cache the credentials if they haven't been set yet.
if context.credentials is None:
context.credentials = self.credentials
if context.project is None:
context.project = self.project_id
self.client = self.get_client()
self.bqstorage_client = _make_bqstorage_client(
use_bqstorage_api, self.credentials
)
# BQ Queries costs $5 per TB. First 1 TB per month is free
# see here for more: https://cloud.google.com/bigquery/pricing
self.query_price_for_TB = 5.0 / 2 ** 40 # USD/TB
def _start_timer(self):
self.start = time.time()
def get_elapsed_seconds(self):
return round(time.time() - self.start, 2)
def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
sec = self.get_elapsed_seconds()
if sec > overlong:
logger.info("{} {} {}".format(prefix, sec, postfix))
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
@staticmethod
def sizeof_fmt(num, suffix="B"):
fmt = "%3.1f %s%s"
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if abs(num) < 1024.0:
return fmt % (num, unit, suffix)
num /= 1024.0
return fmt % (num, "Y", suffix)
def get_client(self):
from google.cloud import bigquery
return bigquery.Client(
project=self.project_id, credentials=self.credentials
)
@staticmethod
def process_http_error(ex):
# See `BigQuery Troubleshooting Errors
# <https://cloud.google.com/bigquery/troubleshooting-errors>`__
raise GenericGBQException("Reason: {0}".format(ex))
def run_query(self, query, **kwargs):
from concurrent.futures import TimeoutError
from google.auth.exceptions import RefreshError
from google.cloud import bigquery
job_config = {
"query": {
"useLegacySql": self.dialect
== "legacy"
# 'allowLargeResults', 'createDisposition',
# 'preserveNulls', destinationTable, useQueryCache
}
}
config = kwargs.get("configuration")
if config is not None:
job_config.update(config)
if "query" in config and "query" in config["query"]:
if query is not None:
raise ValueError(
"Query statement can't be specified "
"inside config while it is specified "
"as parameter"
)
query = config["query"].pop("query")
self._start_timer()
try:
logger.debug("Requesting query... ")
query_reply = self.client.query(
query,
job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
location=self.location,
project=self.project_id,
)
logger.debug("Query running...")
except (RefreshError, ValueError):
if self.private_key:
raise AccessDenied(
"The service account credentials are not valid"
)
else:
raise AccessDenied(
"The credentials have been revoked or expired, "
"please re-run the application to re-authorize"
)
except self.http_error as ex:
self.process_http_error(ex)
job_id = query_reply.job_id
logger.debug("Job ID: %s" % job_id)
while query_reply.state != "DONE":
self.log_elapsed_seconds(" Elapsed", "s. Waiting...")
timeout_ms = job_config["query"].get("timeoutMs")
if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))
timeout_sec = 1.0
if timeout_ms:
# Wait at most 1 second so we can show progress bar
timeout_sec = min(1.0, timeout_ms / 1000.0)
try:
query_reply.result(timeout=timeout_sec)
except TimeoutError:
# Use our own timeout logic
pass
except self.http_error as ex:
self.process_http_error(ex)
if query_reply.cache_hit:
logger.debug("Query done.\nCache hit.\n")
else:
bytes_processed = query_reply.total_bytes_processed or 0
bytes_billed = query_reply.total_bytes_billed or 0
logger.debug(
"Query done.\nProcessed: {} Billed: {}".format(
self.sizeof_fmt(bytes_processed),
self.sizeof_fmt(bytes_billed),
)
)
logger.debug(
"Standard price: ${:,.2f} USD\n".format(
bytes_billed * self.query_price_for_TB
)
)
try:
rows_iter = query_reply.result()
except self.http_error as ex:
self.process_http_error(ex)
schema_fields = [field.to_api_repr() for field in rows_iter.schema]
nullsafe_dtypes = _bqschema_to_nullsafe_dtypes(schema_fields)
df = rows_iter.to_dataframe(
dtypes=nullsafe_dtypes, bqstorage_client=self.bqstorage_client
)
if df.empty:
df = _cast_empty_df_dtypes(schema_fields, df)
# Ensure any TIMESTAMP columns are tz-aware.
df = _localize_df(schema_fields, df)
logger.debug("Got {} rows.\n".format(rows_iter.total_rows))
return df
def load_data(
self,
dataframe,
dataset_id,
table_id,
chunksize=None,
schema=None,
progress_bar=True,
):
from pandas_gbq import load
total_rows = len(dataframe)
try:
chunks = load.load_chunks(
self.client,
dataframe,
dataset_id,
table_id,
chunksize=chunksize,
schema=schema,
location=self.location,
)
if progress_bar and tqdm:
chunks = tqdm.tqdm(chunks)
for remaining_rows in chunks:
logger.info(
"\r{} out of {} rows loaded.".format(
total_rows - remaining_rows, total_rows
)
)
except self.http_error as ex:
self.process_http_error(ex)
def schema(self, dataset_id, table_id):
"""Retrieve the schema of the table
Obtain from BigQuery the field names and field types
for the table defined by the parameters
Parameters
----------
dataset_id : str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
Returns
-------
list of dicts
Fields representing the schema
"""
table_ref = self.client.dataset(dataset_id).table(table_id)
try:
table = self.client.get_table(table_ref)
remote_schema = table.schema
remote_fields = [
field_remote.to_api_repr() for field_remote in remote_schema
]
for field in remote_fields:
field["type"] = field["type"].upper()
field["mode"] = field["mode"].upper()
return remote_fields
except self.http_error as ex:
self.process_http_error(ex)
def _clean_schema_fields(self, fields):
"""Return a sanitized version of the schema for comparisons."""
fields_sorted = sorted(fields, key=lambda field: field["name"])
# Ignore mode and description when comparing schemas.
return [
{"name": field["name"], "type": field["type"]}
for field in fields_sorted
]
def verify_schema(self, dataset_id, table_id, schema):
"""Indicate whether schemas match exactly
Compare the BigQuery table identified in the parameters with
the schema passed in and indicate whether all fields in the former
are present in the latter. Order is not considered.
Parameters
----------
dataset_id :str
Name of the BigQuery dataset for the table
table_id : str
Name of the BigQuery table
schema : list(dict)
Schema for comparison. Each item should have
a 'name' and a 'type'
Returns
-------
bool
Whether the schemas match
"""
fields_remote = self._clean_schema_fields(
self.schema(dataset_id, table_id)
)
fields_local = self._clean_schema_fields(schema["fields"])
return fields_remote == fields_local
def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
table = _Table(
self.project_id, dataset_id, credentials=self.credentials
)
table.delete(table_id)
table.create(table_id, table_schema)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
_Table.exists
|
python
|
def exists(self, table_id):
from google.api_core.exceptions import NotFound
table_ref = self.client.dataset(self.dataset_id).table(table_id)
try:
self.client.get_table(table_ref)
return True
except NotFound:
return False
except self.http_error as ex:
self.process_http_error(ex)
|
Check if a table exists in Google BigQuery
Parameters
----------
table : str
Name of table to be verified
Returns
-------
boolean
true if table exists, otherwise false
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L1217-L1239
|
[
"def process_http_error(ex):\n # See `BigQuery Troubleshooting Errors\n # <https://cloud.google.com/bigquery/troubleshooting-errors>`__\n\n raise GenericGBQException(\"Reason: {0}\".format(ex))\n"
] |
class _Table(GbqConnector):
def __init__(
self,
project_id,
dataset_id,
reauth=False,
location=None,
credentials=None,
private_key=None,
):
self.dataset_id = dataset_id
super(_Table, self).__init__(
project_id,
reauth,
location=location,
credentials=credentials,
private_key=private_key,
)
def create(self, table_id, schema):
""" Create a table in Google BigQuery given a table and schema
Parameters
----------
table : str
Name of table to be written
schema : str
Use the generate_bq_schema to generate your table schema from a
dataframe.
"""
from google.cloud.bigquery import SchemaField
from google.cloud.bigquery import Table
if self.exists(table_id):
raise TableCreationError(
"Table {0} already " "exists".format(table_id)
)
if not _Dataset(self.project_id, credentials=self.credentials).exists(
self.dataset_id
):
_Dataset(
self.project_id,
credentials=self.credentials,
location=self.location,
).create(self.dataset_id)
table_ref = self.client.dataset(self.dataset_id).table(table_id)
table = Table(table_ref)
# Manually create the schema objects, adding NULLABLE mode
# as a workaround for
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
for field in schema["fields"]:
if "mode" not in field:
field["mode"] = "NULLABLE"
table.schema = [
SchemaField.from_api_repr(field) for field in schema["fields"]
]
try:
self.client.create_table(table)
except self.http_error as ex:
self.process_http_error(ex)
def delete(self, table_id):
""" Delete a table in Google BigQuery
Parameters
----------
table : str
Name of table to be deleted
"""
from google.api_core.exceptions import NotFound
if not self.exists(table_id):
raise NotFoundException("Table does not exist")
table_ref = self.client.dataset(self.dataset_id).table(table_id)
try:
self.client.delete_table(table_ref)
except NotFound:
# Ignore 404 error which may occur if table already deleted
pass
except self.http_error as ex:
self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
_Table.create
|
python
|
def create(self, table_id, schema):
from google.cloud.bigquery import SchemaField
from google.cloud.bigquery import Table
if self.exists(table_id):
raise TableCreationError(
"Table {0} already " "exists".format(table_id)
)
if not _Dataset(self.project_id, credentials=self.credentials).exists(
self.dataset_id
):
_Dataset(
self.project_id,
credentials=self.credentials,
location=self.location,
).create(self.dataset_id)
table_ref = self.client.dataset(self.dataset_id).table(table_id)
table = Table(table_ref)
# Manually create the schema objects, adding NULLABLE mode
# as a workaround for
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
for field in schema["fields"]:
if "mode" not in field:
field["mode"] = "NULLABLE"
table.schema = [
SchemaField.from_api_repr(field) for field in schema["fields"]
]
try:
self.client.create_table(table)
except self.http_error as ex:
self.process_http_error(ex)
|
Create a table in Google BigQuery given a table and schema
Parameters
----------
table : str
Name of table to be written
schema : str
Use the generate_bq_schema to generate your table schema from a
dataframe.
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L1241-L1286
|
[
"def exists(self, table_id):\n \"\"\" Check if a table exists in Google BigQuery\n\n Parameters\n ----------\n table : str\n Name of table to be verified\n\n Returns\n -------\n boolean\n true if table exists, otherwise false\n \"\"\"\n from google.api_core.exceptions import NotFound\n\n table_ref = self.client.dataset(self.dataset_id).table(table_id)\n try:\n self.client.get_table(table_ref)\n return True\n except NotFound:\n return False\n except self.http_error as ex:\n self.process_http_error(ex)\n",
"def exists(self, dataset_id):\n \"\"\" Check if a dataset exists in Google BigQuery\n\n Parameters\n ----------\n dataset_id : str\n Name of dataset to be verified\n\n Returns\n -------\n boolean\n true if dataset exists, otherwise false\n \"\"\"\n from google.api_core.exceptions import NotFound\n\n try:\n self.client.get_dataset(self.client.dataset(dataset_id))\n return True\n except NotFound:\n return False\n except self.http_error as ex:\n self.process_http_error(ex)\n"
] |
class _Table(GbqConnector):
def __init__(
self,
project_id,
dataset_id,
reauth=False,
location=None,
credentials=None,
private_key=None,
):
self.dataset_id = dataset_id
super(_Table, self).__init__(
project_id,
reauth,
location=location,
credentials=credentials,
private_key=private_key,
)
def exists(self, table_id):
""" Check if a table exists in Google BigQuery
Parameters
----------
table : str
Name of table to be verified
Returns
-------
boolean
true if table exists, otherwise false
"""
from google.api_core.exceptions import NotFound
table_ref = self.client.dataset(self.dataset_id).table(table_id)
try:
self.client.get_table(table_ref)
return True
except NotFound:
return False
except self.http_error as ex:
self.process_http_error(ex)
def delete(self, table_id):
""" Delete a table in Google BigQuery
Parameters
----------
table : str
Name of table to be deleted
"""
from google.api_core.exceptions import NotFound
if not self.exists(table_id):
raise NotFoundException("Table does not exist")
table_ref = self.client.dataset(self.dataset_id).table(table_id)
try:
self.client.delete_table(table_ref)
except NotFound:
# Ignore 404 error which may occur if table already deleted
pass
except self.http_error as ex:
self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
_Table.delete
|
python
|
def delete(self, table_id):
from google.api_core.exceptions import NotFound
if not self.exists(table_id):
raise NotFoundException("Table does not exist")
table_ref = self.client.dataset(self.dataset_id).table(table_id)
try:
self.client.delete_table(table_ref)
except NotFound:
# Ignore 404 error which may occur if table already deleted
pass
except self.http_error as ex:
self.process_http_error(ex)
|
Delete a table in Google BigQuery
Parameters
----------
table : str
Name of table to be deleted
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L1288-L1308
|
[
"def exists(self, table_id):\n \"\"\" Check if a table exists in Google BigQuery\n\n Parameters\n ----------\n table : str\n Name of table to be verified\n\n Returns\n -------\n boolean\n true if table exists, otherwise false\n \"\"\"\n from google.api_core.exceptions import NotFound\n\n table_ref = self.client.dataset(self.dataset_id).table(table_id)\n try:\n self.client.get_table(table_ref)\n return True\n except NotFound:\n return False\n except self.http_error as ex:\n self.process_http_error(ex)\n"
] |
class _Table(GbqConnector):
def __init__(
self,
project_id,
dataset_id,
reauth=False,
location=None,
credentials=None,
private_key=None,
):
self.dataset_id = dataset_id
super(_Table, self).__init__(
project_id,
reauth,
location=location,
credentials=credentials,
private_key=private_key,
)
def exists(self, table_id):
""" Check if a table exists in Google BigQuery
Parameters
----------
table : str
Name of table to be verified
Returns
-------
boolean
true if table exists, otherwise false
"""
from google.api_core.exceptions import NotFound
table_ref = self.client.dataset(self.dataset_id).table(table_id)
try:
self.client.get_table(table_ref)
return True
except NotFound:
return False
except self.http_error as ex:
self.process_http_error(ex)
def create(self, table_id, schema):
""" Create a table in Google BigQuery given a table and schema
Parameters
----------
table : str
Name of table to be written
schema : str
Use the generate_bq_schema to generate your table schema from a
dataframe.
"""
from google.cloud.bigquery import SchemaField
from google.cloud.bigquery import Table
if self.exists(table_id):
raise TableCreationError(
"Table {0} already " "exists".format(table_id)
)
if not _Dataset(self.project_id, credentials=self.credentials).exists(
self.dataset_id
):
_Dataset(
self.project_id,
credentials=self.credentials,
location=self.location,
).create(self.dataset_id)
table_ref = self.client.dataset(self.dataset_id).table(table_id)
table = Table(table_ref)
# Manually create the schema objects, adding NULLABLE mode
# as a workaround for
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
for field in schema["fields"]:
if "mode" not in field:
field["mode"] = "NULLABLE"
table.schema = [
SchemaField.from_api_repr(field) for field in schema["fields"]
]
try:
self.client.create_table(table)
except self.http_error as ex:
self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
_Dataset.exists
|
python
|
def exists(self, dataset_id):
from google.api_core.exceptions import NotFound
try:
self.client.get_dataset(self.client.dataset(dataset_id))
return True
except NotFound:
return False
except self.http_error as ex:
self.process_http_error(ex)
|
Check if a dataset exists in Google BigQuery
Parameters
----------
dataset_id : str
Name of dataset to be verified
Returns
-------
boolean
true if dataset exists, otherwise false
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L1328-L1349
|
[
"def process_http_error(ex):\n # See `BigQuery Troubleshooting Errors\n # <https://cloud.google.com/bigquery/troubleshooting-errors>`__\n\n raise GenericGBQException(\"Reason: {0}\".format(ex))\n"
] |
class _Dataset(GbqConnector):
def __init__(
self,
project_id,
reauth=False,
location=None,
credentials=None,
private_key=None,
):
super(_Dataset, self).__init__(
project_id,
reauth,
credentials=credentials,
location=location,
private_key=private_key,
)
def create(self, dataset_id):
""" Create a dataset in Google BigQuery
Parameters
----------
dataset : str
Name of dataset to be written
"""
from google.cloud.bigquery import Dataset
if self.exists(dataset_id):
raise DatasetCreationError(
"Dataset {0} already " "exists".format(dataset_id)
)
dataset = Dataset(self.client.dataset(dataset_id))
if self.location is not None:
dataset.location = self.location
try:
self.client.create_dataset(dataset)
except self.http_error as ex:
self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/gbq.py
|
_Dataset.create
|
python
|
def create(self, dataset_id):
from google.cloud.bigquery import Dataset
if self.exists(dataset_id):
raise DatasetCreationError(
"Dataset {0} already " "exists".format(dataset_id)
)
dataset = Dataset(self.client.dataset(dataset_id))
if self.location is not None:
dataset.location = self.location
try:
self.client.create_dataset(dataset)
except self.http_error as ex:
self.process_http_error(ex)
|
Create a dataset in Google BigQuery
Parameters
----------
dataset : str
Name of dataset to be written
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/gbq.py#L1351-L1374
|
[
"def exists(self, dataset_id):\n \"\"\" Check if a dataset exists in Google BigQuery\n\n Parameters\n ----------\n dataset_id : str\n Name of dataset to be verified\n\n Returns\n -------\n boolean\n true if dataset exists, otherwise false\n \"\"\"\n from google.api_core.exceptions import NotFound\n\n try:\n self.client.get_dataset(self.client.dataset(dataset_id))\n return True\n except NotFound:\n return False\n except self.http_error as ex:\n self.process_http_error(ex)\n"
] |
class _Dataset(GbqConnector):
def __init__(
self,
project_id,
reauth=False,
location=None,
credentials=None,
private_key=None,
):
super(_Dataset, self).__init__(
project_id,
reauth,
credentials=credentials,
location=location,
private_key=private_key,
)
def exists(self, dataset_id):
""" Check if a dataset exists in Google BigQuery
Parameters
----------
dataset_id : str
Name of dataset to be verified
Returns
-------
boolean
true if dataset exists, otherwise false
"""
from google.api_core.exceptions import NotFound
try:
self.client.get_dataset(self.client.dataset(dataset_id))
return True
except NotFound:
return False
except self.http_error as ex:
self.process_http_error(ex)
|
pydata/pandas-gbq
|
pandas_gbq/schema.py
|
update_schema
|
python
|
def update_schema(schema_old, schema_new):
old_fields = schema_old["fields"]
new_fields = schema_new["fields"]
output_fields = list(old_fields)
field_indices = {field["name"]: i for i, field in enumerate(output_fields)}
for field in new_fields:
name = field["name"]
if name in field_indices:
# replace old field with new field of same name
output_fields[field_indices[name]] = field
else:
# add new field
output_fields.append(field)
return {"fields": output_fields}
|
Given an old BigQuery schema, update it with a new one.
Where a field name is the same, the new will replace the old. Any
new fields not present in the old schema will be added.
Arguments:
schema_old: the old schema to update
schema_new: the new schema which will overwrite/extend the old
|
train
|
https://github.com/pydata/pandas-gbq/blob/e590317b3325939ede7563f49aa6b163bb803b77/pandas_gbq/schema.py#L38-L64
| null |
"""Helper methods for BigQuery schemas"""
def generate_bq_schema(dataframe, default_type="STRING"):
"""Given a passed dataframe, generate the associated Google BigQuery schema.
Arguments:
dataframe (pandas.DataFrame): D
default_type : string
The default big query type in case the type of the column
does not exist in the schema.
"""
# If you update this mapping, also update the table at
# `docs/source/writing.rst`.
type_mapping = {
"i": "INTEGER",
"b": "BOOLEAN",
"f": "FLOAT",
"O": "STRING",
"S": "STRING",
"U": "STRING",
"M": "TIMESTAMP",
}
fields = []
for column_name, dtype in dataframe.dtypes.iteritems():
fields.append(
{
"name": column_name,
"type": type_mapping.get(dtype.kind, default_type),
}
)
return {"fields": fields}
|
IS-ENES-Data/esgf-pid
|
esgfpid/rabbit/asynchronous/thread_builder.py
|
ConnectionBuilder.__start_waiting_for_events
|
python
|
def __start_waiting_for_events(self):
'''
This waits until the whole chain of callback methods triggered by
"trigger_connection_to_rabbit_etc()" has finished, and then starts
waiting for publications.
This is done by starting the ioloop.
Note: In the pika usage example, these things are both called inside the run()
method, so I wonder if this check-and-wait here is necessary. Maybe not.
But the usage example does not implement a Thread, so it probably blocks during
the opening of the connection. Here, as it is a different thread, the run()
might get called before the __init__ has finished? I'd rather stay on the
safe side, as my experience of threading in Python is limited.
'''
# Start ioloop if connection object ready:
if self.thread._connection is not None:
try:
logdebug(LOGGER, 'Starting ioloop...')
logtrace(LOGGER, 'ioloop is owned by connection %s...', self.thread._connection)
# Tell the main thread that we're now open for events.
# As soon as the thread._connection object is not None anymore, it
# can receive events.
self.thread.tell_publisher_to_stop_waiting_for_thread_to_accept_events()
self.thread.continue_gently_closing_if_applicable()
self.thread._connection.ioloop.start()
except PIDServerException as e:
raise e
# It seems that some connection problems do not cause
# RabbitMQ to call any callback (on_connection_closed
# or on_connection_error) - it just silently swallows the
# problem.
# So we need to manually trigger reconnection to the next
# host here, which we do by manually calling the callback.
# We start the ioloop, so it can handle the reconnection events,
# or also receive events from the publisher in the meantime.
except Exception as e:
# This catches any error during connection startup and during the entire
# time the ioloop runs, blocks and waits for events.
time_passed = datetime.datetime.now() - self.__start_connect_time
time_passed_seconds = time_passed.total_seconds()
# Some pika errors:
if isinstance(e, pika.exceptions.ProbableAuthenticationError):
errorname = self.__make_error_name(e, 'e.g. wrong user or password')
elif isinstance(e, pika.exceptions.ProbableAccessDeniedError):
errorname = self.__make_error_name(e, 'e.g. wrong virtual host name')
elif isinstance(e, pika.exceptions.IncompatibleProtocolError):
errorname = self.__make_error_name(e, 'e.g. trying TLS/SSL on wrong port')
# Other errors:
else:
errorname = self.__make_error_name(e)
logdebug(LOGGER, 'Unexpected error during event listener\'s lifetime (after %s seconds): %s', time_passed_seconds, errorname)
# Now trigger reconnection:
self.statemachine.set_to_waiting_to_be_available()
self.on_connection_error(self.thread._connection, errorname)
self.thread._connection.ioloop.start()
else:
# I'm quite sure that this cannot happen, as the connection object
# is created in "trigger_connection_...()" and thus exists, no matter
# if the actual connection to RabbitMQ succeeded (yet) or not.
logdebug(LOGGER, 'This cannot happen: Connection object is not ready.')
logerror(LOGGER, 'Cannot happen. Cannot properly start the thread. Connection object is not ready.')
|
This waits until the whole chain of callback methods triggered by
"trigger_connection_to_rabbit_etc()" has finished, and then starts
waiting for publications.
This is done by starting the ioloop.
Note: In the pika usage example, these things are both called inside the run()
method, so I wonder if this check-and-wait here is necessary. Maybe not.
But the usage example does not implement a Thread, so it probably blocks during
the opening of the connection. Here, as it is a different thread, the run()
might get called before the __init__ has finished? I'd rather stay on the
safe side, as my experience of threading in Python is limited.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/rabbit/asynchronous/thread_builder.py#L111-L184
|
[
"def logdebug(logger, msg, *args, **kwargs):\n '''\n Logs messages as DEBUG,\n unless show=True and esgfpid.defaults.LOG_SHOW_TO_INFO=True,\n (then it logs messages as INFO).\n '''\n if esgfpid.defaults.LOG_DEBUG_TO_INFO:\n logger.info('DEBUG %s ' % msg, *args, **kwargs)\n else:\n logger.debug(msg, *args, **kwargs)\n",
"def logtrace(logger, msg, *args, **kwargs):\n '''\n If esgfpid.defaults.LOG_TRACE_TO_DEBUG, messages are treated\n like debug messages (with an added [trace]).\n Otherwise, they are ignored.\n '''\n if esgfpid.defaults.LOG_TRACE_TO_DEBUG:\n logdebug(logger, '[trace] %s' % msg, *args, **kwargs)\n else:\n pass\n",
"def logerror(logger, msg, *args, **kwargs):\n logger.error(msg, *args, **kwargs)\n"
] |
class ConnectionBuilder(object):
def __init__(self, thread, statemachine, confirmer, returnhandler, shutter, nodemanager):
self.thread = thread
self.statemachine = statemachine
'''
We need to pass the "confirmer.on_delivery_confirmation()" callback to
RabbitMQ's channel.'''
self.confirmer = confirmer
'''
We need to pass the "returnhandler.on_message_not_accepted()"" callback
to RabbitMQ's channel as "on_return_callback" '''
self.returnhandler = returnhandler
'''
We need this to be able to trigger all the closing mechanisms
in case the module should close down as soon it was opened, i.e.
if the close-command was issued while the connection was still
building up.
'''
self.shutter = shutter
'''
The node manager keeps all the info about the RabbitMQ nodes,
e.g. URLs, usernames, passwords.
'''
self.__node_manager = nodemanager
'''
To count how many times we have tried to reconnect the set of
RabbitMQ hosts.
'''
self.__reconnect_counter = 0
self.__backup_reconnect_counter = 0
'''
To see how many times we should try reconnecting to the set
of RabbitMQ hosts. Note that if there is 3 hosts, and we try 2
times, this means 6 connection tries in total.
'''
self.__max_reconnection_tries = defaults.RABBIT_RECONNECTION_MAX_TRIES
'''
How many seconds to wait before reconnecting after having tried
all hosts. (There is no waiting time trying to connect to a different
host after one fails).
'''
self.__wait_seconds_before_reconnect = defaults.RABBIT_RECONNECTION_SECONDS
'''
To see how much time it takes to connect. Once a connection is
established or failed, we print the time delta to logs.
'''
self.__start_connect_time = None
'''
Name of the fallback exchange to try if the normal exchange
is not found.
'''
self.__fallback_exchange_name = defaults.RABBIT_FALLBACK_EXCHANGE_NAME
'''
Collect the connection errors for the hosts for telling the user.
'''
self.__connection_errors = {}
####################
### Start ioloop ###
####################
'''
Entry point. Called once to trigger the whole
(re) connection process. Called from run method of the rabbit thread.
'''
def first_connection(self):
logdebug(LOGGER, 'Trigger connection to rabbit...')
self.__trigger_connection_to_rabbit_etc()
logdebug(LOGGER, 'Trigger connection to rabbit... done.')
logdebug(LOGGER, 'Start waiting for events...')
self.__start_waiting_for_events()
logtrace(LOGGER, 'Had started waiting for events, but stopped.')
def __start_waiting_for_events(self):
    '''
    This waits until the whole chain of callback methods triggered by
    "trigger_connection_to_rabbit_etc()" has finished, and then starts
    waiting for publications.
    This is done by starting the ioloop.
    Note: In the pika usage example, these things are both called inside the run()
    method, so I wonder if this check-and-wait here is necessary. Maybe not.
    But the usage example does not implement a Thread, so it probably blocks during
    the opening of the connection. Here, as it is a different thread, the run()
    might get called before the __init__ has finished? I'd rather stay on the
    safe side, as my experience of threading in Python is limited.
    '''
    # Start ioloop if connection object ready:
    if self.thread._connection is not None:
        try:
            logdebug(LOGGER, 'Starting ioloop...')
            logtrace(LOGGER, 'ioloop is owned by connection %s...', self.thread._connection)
            # Tell the main thread that we're now open for events.
            # As soon as the thread._connection object is not None anymore, it
            # can receive events.
            self.thread.tell_publisher_to_stop_waiting_for_thread_to_accept_events()
            self.thread.continue_gently_closing_if_applicable()
            self.thread._connection.ioloop.start()
        except PIDServerException as e:
            # Re-raise unchanged so the caller (run()) sees the permanent failure.
            raise e
        # It seems that some connection problems do not cause
        # RabbitMQ to call any callback (on_connection_closed
        # or on_connection_error) - it just silently swallows the
        # problem.
        # So we need to manually trigger reconnection to the next
        # host here, which we do by manually calling the callback.
        # We start the ioloop, so it can handle the reconnection events,
        # or also receive events from the publisher in the meantime.
        except Exception as e:
            # This catches any error during connection startup and during the entire
            # time the ioloop runs, blocks and waits for events.
            time_passed = datetime.datetime.now() - self.__start_connect_time
            time_passed_seconds = time_passed.total_seconds()
            # Some pika errors (mapped to human-readable hints):
            if isinstance(e, pika.exceptions.ProbableAuthenticationError):
                errorname = self.__make_error_name(e, 'e.g. wrong user or password')
            elif isinstance(e, pika.exceptions.ProbableAccessDeniedError):
                errorname = self.__make_error_name(e, 'e.g. wrong virtual host name')
            elif isinstance(e, pika.exceptions.IncompatibleProtocolError):
                errorname = self.__make_error_name(e, 'e.g. trying TLS/SSL on wrong port')
            # Other errors:
            else:
                errorname = self.__make_error_name(e)
            logdebug(LOGGER, 'Unexpected error during event listener\'s lifetime (after %s seconds): %s', time_passed_seconds, errorname)
            # Now trigger reconnection:
            self.statemachine.set_to_waiting_to_be_available()
            self.on_connection_error(self.thread._connection, errorname)
            self.thread._connection.ioloop.start()
    else:
        # I'm quite sure that this cannot happen, as the connection object
        # is created in "trigger_connection_...()" and thus exists, no matter
        # if the actual connection to RabbitMQ succeeded (yet) or not.
        logdebug(LOGGER, 'This cannot happen: Connection object is not ready.')
        logerror(LOGGER, 'Cannot happen. Cannot properly start the thread. Connection object is not ready.')
def __make_error_name(self, ex, custom_text=None):
    '''
    Build a human-readable label for an exception, e.g.
    "ProbableAuthenticationError: ... (e.g. wrong user or password)".
    Note: relies on the Python 2 "ex.message" attribute.
    '''
    label = ex.__class__.__name__
    if ex.message != '':
        label = label + ': ' + ex.message
    if custom_text is not None:
        label = label + ' (' + custom_text + ')'
    return label
########################################
### Chain of callback functions that ###
### connect to rabbit ###
########################################
def __trigger_connection_to_rabbit_etc(self):
    # Kick off the asynchronous connect chain. State goes to WAITING first
    # so that messages arriving meanwhile are buffered, not rejected.
    self.statemachine.set_to_waiting_to_be_available()
    self.__please_open_connection()
''' Asynchronous, waits for answer from RabbitMQ.'''
def __please_open_connection(self):
    # Create the pika SelectConnection for the node manager's *current*
    # host. Asynchronous: success/failure arrive via the callbacks below.
    params = self.__node_manager.get_connection_parameters()
    self.__start_connect_time = datetime.datetime.now()  # for timing in logs
    logdebug(LOGGER, 'Connecting to RabbitMQ at %s... (%s)',
        params.host, get_now_utc_as_formatted_string())
    loginfo(LOGGER, 'Opening connection to RabbitMQ...')
    self.thread._connection = pika.SelectConnection(
        parameters=params,
        on_open_callback=self.on_connection_open,
        on_open_error_callback=self.on_connection_error,
        on_close_callback=self.on_connection_closed,
        stop_ioloop_on_close=False # why? see below.
    )
    # Don't stop ioloop on connection close, because
    # otherwise the thread would not accept more events/
    # messages (and might end) after a connection is
    # closed. We still want to accept messages and try
    # to reconnect and send them then.
''' Callback, called by RabbitMQ.'''
def on_connection_open(self, unused_connection):
    # Callback from pika once the TCP/AMQP connection is open.
    logdebug(LOGGER, 'Opening connection... done.')
    loginfo(LOGGER, 'Connection to RabbitMQ at %s opened... (%s)',
        self.__node_manager.get_connection_parameters().host,
        get_now_utc_as_formatted_string())
    # Tell the main thread we're open for events now:
    # When the connection is open, the thread is ready to accept events.
    # Note: It was already ready when the connection object was created,
    # not just now that it's actually open. There was already a call to
    # "...stop_waiting..." in start_waiting_for_events(), which quite
    # certainly was carried out before this callback. So this call to
    # "...stop_waiting..." is likely redundant!
    self.thread.tell_publisher_to_stop_waiting_for_thread_to_accept_events()
    self.__please_open_rabbit_channel()
''' Asynchronous, waits for answer from RabbitMQ.'''
def __please_open_rabbit_channel(self):
    # Asynchronous: on_channel_open fires once RabbitMQ answers.
    logdebug(LOGGER, 'Opening channel...')
    self.thread._connection.channel(on_open_callback=self.on_channel_open)
''' Callback, called by RabbitMQ. '''
def on_channel_open(self, channel):
    # Callback from pika once the channel exists. Wires up close/return
    # callbacks and publisher confirms, then opens the gate for publishing.
    time_passed = datetime.datetime.now() - self.__start_connect_time
    logdebug(LOGGER, 'Opening channel... done. Took %s seconds.' % time_passed.total_seconds())
    logtrace(LOGGER, 'Channel has number: %s.', channel.channel_number)
    self.thread._channel = channel
    self.__reset_reconnect_counter()
    self.__add_on_channel_close_callback()
    self.__add_on_return_callback()
    self.__make_channel_confirm_delivery()
    self.__make_ready_for_publishing()
'''
Once we have succeeded in building a connection, we reset the
reconnection counter, so that after a connection is interrupted,
we can make the same number of reconnection attempts again.
This is called when a connection AND channel were successfully
built, i.e. in the on_channel_open workflow.
'''
def __reset_reconnect_counter(self):
    # Keep a backup so __undo_resetting_reconnect_counter() can restore the
    # count if this "successful" channel later turns out not to work.
    logdebug(LOGGER, 'Resetting reconnection counter, because a channel was successfully opened.')
    self.__backup_reconnect_counter = self.__reconnect_counter # we may need to undo this later...
    self.__reconnect_counter = 0
'''
Sometimes we assume we were successful in connecting and set the
reconnection counter to zero.
But the connection was not successful, so we reconnect, and as the
reconnection counter was reset, we do so infinitely.
This occurs if we did succeed in opening a channel, but the
host we connected to lacks the required exchange. This is only
noticed when we try to send the first message. Then, the channel
is closed. So we call this in the on_channel_closed, but only
in specific situations.
'''
def __undo_resetting_reconnect_counter(self):
    # Restore the counter backed up in __reset_reconnect_counter(), so a
    # channel that opened but then failed does not allow infinite retries.
    logdebug(LOGGER, 'Undo resetting reconnection counter, because the channel that was opened did not actually function.')
    self.__reconnect_counter = self.__backup_reconnect_counter
def __make_channel_confirm_delivery(self):
    # Enable publisher confirms; delivery acks go to the confirmer module.
    logtrace(LOGGER, 'Set confirm delivery... (Issue Confirm.Select RPC command)')
    self.thread._channel.confirm_delivery(callback=self.confirmer.on_delivery_confirmation)
    logdebug(LOGGER, 'Set confirm delivery... done.')
def __make_ready_for_publishing(self):
    # Final step of the connect chain: flip the state machine to AVAILABLE
    # (unless a close was requested meanwhile) and flush queued messages.
    logdebug(LOGGER, '(Re)connection established, making ready for publication...')
    # Check for unexpected errors:
    if self.thread._channel is None:
        logerror(LOGGER, 'Channel is None after connecting to server. This should not happen.')
        self.statemachine.set_to_permanently_unavailable()
    if self.thread._connection is None:
        logerror(LOGGER, 'Connection is None after connecting to server. This should not happen.')
        self.statemachine.set_to_permanently_unavailable()
    # Normally, it should already be waiting to be available:
    if self.statemachine.is_WAITING_TO_BE_AVAILABLE():
        logdebug(LOGGER, 'Setup is finished. Publishing may start.')
        logtrace(LOGGER, 'Publishing will use channel no. %s!', self.thread._channel.channel_number)
        self.statemachine.set_to_available()
        self.__check_for_already_arrived_messages_and_publish_them()
    # It was asked to close in the meantime (but might be able to publish the last messages):
    elif self.statemachine.is_AVAILABLE_BUT_WANTS_TO_STOP():
        logdebug(LOGGER, 'Setup is finished, but the module was already asked to be closed in the meantime.')
        self.__check_for_already_arrived_messages_and_publish_them()
    # It was force-closed in the meantime:
    elif self.statemachine.is_PERMANENTLY_UNAVAILABLE() or self.statemachine.is_FORCE_FINISHED():
        if self.statemachine.get_detail_closed_by_publisher():
            logdebug(LOGGER, 'Setup is finished now, but the module was already force-closed in the meantime.')
            self.shutter.safety_finish('closed before connection was ready. reclosing.')
        elif self.statemachine.detail_could_not_connect:
            logerror(LOGGER, 'This is not supposed to happen. If the connection failed, this part of the code should not be reached.')
        else:
            logerror(LOGGER, 'This is not supposed to happen. An unknown event set this module to be unavailable. When was this set to unavailable?')
    else:
        logdebug(LOGGER, 'Unexpected state.')
def __check_for_already_arrived_messages_and_publish_them(self):
    '''
    Schedule one publish event per message that was queued while the
    connection was (re)building, so they get sent now.
    '''
    logdebug(LOGGER, 'Checking if messages have arrived in the meantime...')
    num = self.thread.get_num_unpublished()
    if num <= 0:
        loginfo(LOGGER, 'Ready to publish messages to RabbitMQ.')
        logdebug(LOGGER, 'Ready to publish messages to RabbitMQ. No messages waiting yet.')
        return
    loginfo(LOGGER, 'Ready to publish messages to RabbitMQ. %s messages are already waiting to be published.', num)
    # Schedules ~10% extra events — presumably a safety margin for messages
    # racing in while we schedule; TODO confirm intent.
    for _ in xrange(int(num * 1.1)):
        self.thread.add_event_publish_message()
########################
### Connection error ###
########################
'''
If the connection to RabbitMQ failed, various things
may happen:
(1) If there are other RabbitMQ URLs, it will try to connect
to one of these.
(2) If there are no other URLs, it will try to reconnect to this
one after a short waiting time.
(3) If the maximum number of reconnection tries is reached, it
gives up.
'''
def on_connection_error(self, connection, msg):
    # Callback for any failed connection attempt. Policy:
    # (1) more URLs left -> try the next host immediately;
    # (2) all hosts tried -> wait, reset node list, start over;
    # (3) max rounds reached or force-finished -> give up (raises).
    oldhost = self.__get_whole_host_name()
    time_passed = datetime.datetime.now() - self.__start_connect_time
    time_passed_seconds = time_passed.total_seconds()
    logerror(LOGGER, 'Could not connect to %s: "%s" (connection failure after %s seconds)', oldhost, msg, time_passed_seconds)
    self.__store_connection_error_info(msg, oldhost)
    # If there was a force-finish, we do not reconnect.
    if self.statemachine.is_FORCE_FINISHED():
        errormsg = 'Permanently failed to connect to RabbitMQ.'
        if self.statemachine.detail_asked_to_gently_close_by_publisher:
            errormsg += ' Tried all hosts until was force-closed by user.'
        elif self.statemachine.detail_asked_to_force_close_by_publisher:
            errormsg += ' Tried all hosts until a user close-down forced us to give up (e.g. the maximum waiting time was reached).'
        errormsg += ' Giving up. No PID requests will be sent.'
        # NOTE(review): this raises (except in test runs, where it only logs
        # and the code below still executes) — confirm that is intended.
        self.__give_up_reconnecting_and_raise_exception(errormsg)
    # If there are alternative URLs, try one of them:
    if self.__node_manager.has_more_urls():
        logdebug(LOGGER, 'Connection failure: %s fallback URLs left to try.', self.__node_manager.get_num_left_urls())
        self.__node_manager.set_next_host()
        newhost = self.__get_whole_host_name()
        loginfo(LOGGER, 'Connection failure: Trying to connect (now) to %s.', newhost)
        reopen_seconds = 0
        self.__wait_and_trigger_reconnection(connection, reopen_seconds)
    # If there are no URLs left, reset the node manager to
    # start at the first nodes again...
    else:
        self.__reconnect_counter += 1;
        if self.__reconnect_counter <= self.__max_reconnection_tries:
            reopen_seconds = self.__wait_seconds_before_reconnect
            logdebug(LOGGER, 'Connection failure: Failed connecting to all hosts. Waiting %s seconds and starting over.', reopen_seconds)
            self.__node_manager.reset_nodes()
            newhost = self.__node_manager.get_connection_parameters().host
            loginfo(LOGGER, 'Connection failure: Trying to connect (in %s seconds) to %s.', reopen_seconds, newhost)
            self.__wait_and_trigger_reconnection(connection, reopen_seconds)
        # Give up after so many tries...
        else:
            errormsg = ('Permanently failed to connect to RabbitMQ. Tried all hosts %s times. Giving up. No PID requests will be sent.' % (self.__max_reconnection_tries+1))
            self.__give_up_reconnecting_and_raise_exception(errormsg)
def __give_up_reconnecting_and_raise_exception(self, error_message):
    # Final failure path: mark permanently unavailable, stop the ioloop,
    # then raise PIDServerException with the collected per-host errors
    # (suppressed — only logged — when defaults.IS_TEST_RUN is set).
    self.statemachine.set_to_permanently_unavailable()
    self.statemachine.detail_could_not_connect = True
    problem_message = self.__connection_errors_to_string()
    logerror(LOGGER, error_message)
    logdebug(LOGGER, problem_message)
    self.__make_permanently_closed_by_error(None, self.thread.ERROR_TEXT_CONNECTION_PERMANENT_ERROR) # Stops ioloop, so thread may stop!
    if not (hasattr(defaults, 'IS_TEST_RUN') and defaults.IS_TEST_RUN==True):
        raise PIDServerException(error_message+'\nProblems:\n'+problem_message)
    else:
        msg = 'PIDServerException would have been raised in real life.'
        logerror(LOGGER, msg)
def __store_connection_error_info(self, errorname, host):
    '''
    Count how often each error message occurred per host, for the
    final error report built by __connection_errors_to_string().
    '''
    key = str(errorname)
    per_host = self.__connection_errors.setdefault(host, {})
    per_host[key] = per_host.get(key, 0) + 1
def __connection_errors_to_string(self, sep='\n'):
    '''
    Render the collected per-host error counters as one summary line
    per host, joined by *sep*.
    '''
    host_summaries = []
    for host, errors in self.__connection_errors.items():
        counted = ['%ix "%s"' % (count, errtype) for errtype, count in errors.items()]
        host_summaries.append('Server "%s": %s' % (host, ', '.join(counted)))
    return sep.join(host_summaries)
def __get_whole_host_name(self):
    '''
    Assemble "host[/vhost]:port" from the node manager's current
    connection parameters (the default vhost "/" is omitted).
    '''
    params = self.__node_manager.get_connection_parameters()
    vhost = params.virtual_host
    if vhost == '/':
        suffix = ''
    elif vhost.startswith('/'):
        suffix = vhost
    else:
        suffix = '/' + vhost
    return params.host + suffix + ':' + str(params.port)
#############################
### React to channel and ###
### connection close ###
#############################
''' This tells RabbitMQ what to do if it receives
a message it cannot accept, e.g. if it cannot
route it. '''
def __add_on_return_callback(self):
    # Unroutable (returned) messages are handed to the return handler.
    self.thread._channel.add_on_return_callback(self.returnhandler.on_message_not_accepted)
'''
This tells RabbitMQ what to do if the channel
was closed.
Note: Every connection close includes a channel close.
However, as far as I know, this callback is only
called if the channel is closed without the underlying
connection being closed. I am not 100 percent sure though.
'''
def __add_on_channel_close_callback(self):
    # Channel-only closes (connection still up) are handled in on_channel_closed.
    self.thread._channel.add_on_close_callback(self.on_channel_closed)
'''
Callback, called by RabbitMQ.
"on_channel_closed" can be called in three situations:
(1) The user asked to close the connection.
In this case, we want to clean up everything and leave it closed.
(2) The connection was closed because we tried to publish to a non-
existent exchange.
In this case, the connection is still open, and we want to reopen
a new channel and publish to a different exchange.
We also want to republish the ones that had failed.
(3) There was some problem that closed the connection, which causes
the channel to close.
In this case, we want to reopen a connection.
'''
def on_channel_closed(self, channel, reply_code, reply_text):
    # Callback from pika when only the channel closed. Distinguishes:
    # user-requested close, missing FALLBACK exchange, missing normal
    # exchange (404 -> retry with fallback exchange), and everything else
    # (-> close the connection, which triggers reconnection).
    logdebug(LOGGER, 'Channel was closed: %s (code %s)', reply_text, reply_code)
    # Channel closed because user wants to close:
    if self.statemachine.is_PERMANENTLY_UNAVAILABLE() or self.statemachine.is_FORCE_FINISHED():
        if self.statemachine.get_detail_closed_by_publisher():
            logdebug(LOGGER,'Channel close event due to close command by user. This is expected.')
    # Channel closed because even fallback exchange did not exist:
    elif reply_code == 404 and "NOT_FOUND - no exchange 'FALLBACK'" in reply_text:
        logerror(LOGGER,'Channel closed because FALLBACK exchange does not exist. Need to close connection to trigger all the necessary close down steps.')
        self.__undo_resetting_reconnect_counter()
        self.thread.reset_exchange_name() # So next host is tried with normal exchange
        self.thread._connection.close() # This will reconnect!
        # TODO: Put a different reply_code and text, so we won't treat this as a Normal Shutdown!
    # Channel closed because exchange did not exist:
    elif reply_code == 404:
        logdebug(LOGGER, 'Channel closed because the exchange "%s" did not exist.', self.__node_manager.get_exchange_name())
        self.__use_different_exchange_and_reopen_channel()
    # Other unexpected channel close:
    else:
        logerror(LOGGER,'Unexpected channel shutdown. Need to close connection to trigger all the necessary close down steps.')
        self.__undo_resetting_reconnect_counter()
        self.thread._connection.close() # This will reconnect!
        # TODO: Put a different reply_code and text, so we won't treat this as a Normal Shutdown!
'''
An attempt to publish to a nonexistent exchange will close
the channel. In this case, we use a different exchange name
and reopen the channel. The underlying connection was kept
open.
'''
def __use_different_exchange_and_reopen_channel(self):
    # Recovery for "exchange not found": switch to the FALLBACK exchange
    # name and reopen a channel over the still-open connection.
    # Set to waiting to be available, so that incoming
    # messages are stored:
    self.statemachine.set_to_waiting_to_be_available()
    # New exchange name
    logdebug(LOGGER, 'Setting exchange name to fallback exchange "%s"', self.__fallback_exchange_name)
    self.thread.change_exchange_name(self.__fallback_exchange_name)
    # If this happened while sending message to the wrong exchange, we
    # have to trigger their resending...
    self.__prepare_channel_reopen('Channel reopen')
    # Reopen channel
    logdebug(LOGGER, 'Reopening channel...')
    # NOTE(review): state was already set to waiting above — this second
    # call looks redundant; confirm before removing.
    self.statemachine.set_to_waiting_to_be_available()
    self.__please_open_rabbit_channel()
'''
Callback, called by RabbitMQ.
"on_connection_closed" can be called in two situations:
(1) The user asked to close the connection.
In this case, we want to clean up everything and leave it closed.
(2) There was some other problem that closed the connection.
'''
def on_connection_closed(self, connection, reply_code, reply_text):
    # Callback from pika when the whole connection closed. User-initiated
    # and permanent-error closes stay closed; anything else reconnects.
    loginfo(LOGGER, 'Connection to RabbitMQ was closed. Reason: %s.', reply_text)
    self.thread._channel = None
    if self.__was_user_shutdown(reply_code, reply_text):
        loginfo(LOGGER, 'Connection to %s closed.', self.__node_manager.get_connection_parameters().host)
        self.make_permanently_closed_by_user()
    elif self.__was_permanent_error(reply_code, reply_text):
        loginfo(LOGGER, 'Connection to %s closed.', self.__node_manager.get_connection_parameters().host)
        self.__make_permanently_closed_by_error(connection, reply_text)
    else:
        # This reconnects to next host_
        self.on_connection_error(connection, reply_text)
def __was_permanent_error(self, reply_code, reply_text):
    # True iff the close text carries the thread's permanent-error marker.
    return self.thread.ERROR_TEXT_CONNECTION_PERMANENT_ERROR in reply_text
def __was_user_shutdown(self, reply_code, reply_text):
    # A user shutdown is either a forced or a gentle close by the publisher.
    return (self.__was_forced_user_shutdown(reply_code, reply_text)
            or self.__was_gentle_user_shutdown(reply_code, reply_text))
def __was_forced_user_shutdown(self, reply_code, reply_text):
    # Forced close: user's close code AND the force-close marker text.
    code_matches = (reply_code == self.thread.ERROR_CODE_CONNECTION_CLOSED_BY_USER)
    text_matches = (self.thread.ERROR_TEXT_CONNECTION_FORCE_CLOSED in reply_text)
    return code_matches and text_matches
def __was_gentle_user_shutdown(self, reply_code, reply_text):
    # Gentle close: user's close code AND the normal-shutdown marker text.
    code_matches = (reply_code == self.thread.ERROR_CODE_CONNECTION_CLOSED_BY_USER)
    text_matches = (self.thread.ERROR_TEXT_CONNECTION_NORMAL_SHUTDOWN in reply_text)
    return code_matches and text_matches
''' Called by thread, by shutter module.'''
def make_permanently_closed_by_user(self):
    # This changes the state of the state machine!
    # This needs to be called from the shutter module
    # in case there is a force_finish while the connection
    # is already closed (as the callback on_connection_closed
    # is not called then).
    self.statemachine.set_to_permanently_unavailable()
    logtrace(LOGGER, 'Stop waiting for events due to user interrupt!')
    logtrace(LOGGER, 'Permanent close: Stopping ioloop of connection %s...', self.thread._connection)
    # Stopping the ioloop unblocks the rabbit thread so it may end.
    self.thread._connection.ioloop.stop()
    loginfo(LOGGER, 'Stopped listening for RabbitMQ events (%s).', get_now_utc_as_formatted_string())
    logdebug(LOGGER, 'Connection to messaging service closed by user. Will not reopen.')
def __make_permanently_closed_by_error(self, connection, reply_text):
    # This changes the state of the state machine!
    # This needs to be called if there is a permanent
    # error and we don't want the library to reconnect,
    # and we also don't want to pretend it was closed
    # by the user.
    # This is really rarely needed.
    self.statemachine.set_to_permanently_unavailable()
    logtrace(LOGGER, 'Stop waiting for events due to permanent error!')
    # In case the main thread was waiting for any synchronization event.
    self.thread.unblock_events()
    # Close ioloop, which blocks the thread.
    logdebug(LOGGER, 'Permanent close: Stopping ioloop of connection %s...', self.thread._connection)
    self.thread._connection.ioloop.stop()
    loginfo(LOGGER, 'Stopped listening for RabbitMQ events (%s).', get_now_utc_as_formatted_string())
    logdebug(LOGGER, 'Connection to messaging service closed because of error. Will not reopen. Reason: %s', reply_text)
'''
This triggers a reconnection to whatever host is stored in
self.__node_manager.get_connection_parameters().host at the moment of reconnection.
If it is called to reconnect to the same host, it is better
to wait some seconds.
If it is used to connect to the next host, there is no point
in waiting.
'''
def __wait_and_trigger_reconnection(self, connection, wait_seconds):
    # Schedule self.reconnect() on the (old) connection's ioloop after
    # wait_seconds (0 when switching to the next host). Gives up instead
    # if a force-finish arrived in the meantime.
    if self.statemachine.is_FORCE_FINISHED():
        errormsg = 'Permanently failed to connect to RabbitMQ. Tried all hosts until received a force-finish. Giving up. No PID requests will be sent.'
        self.__give_up_reconnecting_and_raise_exception(errormsg)
    else:
        self.statemachine.set_to_waiting_to_be_available()
        loginfo(LOGGER, 'Trying to reconnect to RabbitMQ in %s seconds.', wait_seconds)
        connection.add_timeout(wait_seconds, self.reconnect)
        logtrace(LOGGER, 'Reconnect event added to connection %s (not to %s)', connection, self.thread._connection)
###########################
### Reconnect after ###
### unexpected shutdown ###
###########################
'''
Reconnecting creates a completely new connection.
If we reconnect, we need to reset message number,
delivery tag etc.
We need to prepare to republish the yet-unconfirmed
messages.
Then we need to stop the old connection's ioloop.
The reconnection will create a new connection object
and this will have its own ioloop.
'''
def reconnect(self):
    # Builds a completely new connection (with its own ioloop) after
    # resetting per-channel state and rescuing unconfirmed messages.
    logdebug(LOGGER, 'Reconnecting...')
    # We need to reset delivery tags, unconfirmed messages,
    # republish the unconfirmed, ...
    self.__prepare_channel_reopen('Reconnect')
    # This is the old connection ioloop instance, stop its ioloop
    logdebug(LOGGER, 'Reconnect: Stopping ioloop of connection %s...', self.thread._connection)
    self.thread._connection.ioloop.stop()
    # Note: All events still waiting on the ioloop are lost.
    # Messages are kept track of in the Queue.Queue or in the confirmer
    # module. Closing events are kept track on in shutter module.
    # Now we trigger the actual reconnection, which
    # works just like the first connection to RabbitMQ.
    self.first_connection()
'''
This is called during reconnection and during channel reopen.
Both implies that a new channel is opened.
'''
def __prepare_channel_reopen(self, operation_string):
    # Per-channel bookkeeping reset, shared by reconnect and channel
    # reopen. operation_string is only a log prefix.
    # We need to reset the message number, as
    # it works by channel:
    logdebug(LOGGER, operation_string+': Resetting delivery number (for publishing messages).')
    self.thread.reset_delivery_number()
    # Furthermore, as we'd like to re-publish messages
    # that had not been confirmed yet, we remove them
    # from the stack of unconfirmed messages, and put them
    # back to the stack of unpublished messages.
    logdebug(LOGGER, operation_string+': Sending all messages that have not been confirmed yet...')
    self.__prepare_republication_of_unconfirmed()
    # Reset the unconfirmed delivery tags, as they also work by channel:
    logdebug(LOGGER, operation_string+': Resetting delivery tags (for confirming messages).')
    self.thread.reset_unconfirmed_messages_and_delivery_tags()
def __prepare_republication_of_unconfirmed(self):
    '''
    Re-queue all messages whose broker confirms can no longer arrive.
    Must run BEFORE the confirmer's delivery tags are reset, because
    that reset discards the collection of unconfirmed messages.
    '''
    pending = self.thread.get_unconfirmed_messages_as_list_copy_during_lifetime()
    if pending:
        logdebug(LOGGER, '%s unconfirmed messages were saved and are sent now.', len(pending))
        self.thread.send_many_messages(pending)
|
IS-ENES-Data/esgf-pid
|
esgfpid/utils/logutils.py
|
logtrace
|
python
|
def logtrace(logger, msg, *args, **kwargs):
    '''
    Trace-level logging: forwarded to logdebug (with a "[trace]" prefix)
    when esgfpid.defaults.LOG_TRACE_TO_DEBUG is set; dropped otherwise.
    '''
    if not esgfpid.defaults.LOG_TRACE_TO_DEBUG:
        return
    logdebug(logger, '[trace] %s' % msg, *args, **kwargs)
|
If esgfpid.defaults.LOG_TRACE_TO_DEBUG, messages are treated
like debug messages (with an added [trace]).
Otherwise, they are ignored.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/utils/logutils.py#L7-L16
| null |
import esgfpid.defaults
#
# Logging helpers
#
def logdebug(logger, msg, *args, **kwargs):
    '''
    Log at DEBUG level, or at INFO (with a "DEBUG" prefix) when
    esgfpid.defaults.LOG_DEBUG_TO_INFO is enabled.
    '''
    if not esgfpid.defaults.LOG_DEBUG_TO_INFO:
        logger.debug(msg, *args, **kwargs)
    else:
        logger.info('DEBUG %s ' % msg, *args, **kwargs)
def loginfo(logger, msg, *args, **kwargs):
    '''
    Log at INFO level, or demoted to DEBUG when
    esgfpid.defaults.LOG_INFO_TO_DEBUG is enabled.
    '''
    target = logger.debug if esgfpid.defaults.LOG_INFO_TO_DEBUG else logger.info
    target(msg, *args, **kwargs)
def logwarn(logger, msg, *args, **kwargs):
    '''
    Log at WARNING level on the supplied logger.

    Uses Logger.warning(): logger.warn() is a deprecated alias and is
    removed in recent Python 3 versions.
    '''
    logger.warning(msg, *args, **kwargs)
def logerror(logger, msg, *args, **kwargs):
    '''Log at ERROR level (plain pass-through to the supplied logger).'''
    logger.error(msg, *args, **kwargs)
def log_every_x_times(logger, counter, x, msg, *args, **kwargs):
    '''
    Like logdebug, but only emits the first message (counter == 1)
    and then every x-th one (counter % x == 0).

    :param counter: 1-based call counter maintained by the caller.
    :param x: logging interval; values < 1 now disable the periodic
        part (previously x == 0 raised ZeroDivisionError).
    '''
    if counter == 1 or (x >= 1 and counter % x == 0):
        logdebug(logger, msg, *args, **kwargs)
|
IS-ENES-Data/esgf-pid
|
esgfpid/utils/logutils.py
|
logdebug
|
python
|
def logdebug(logger, msg, *args, **kwargs):
'''
Logs messages as DEBUG,
unless show=True and esgfpid.defaults.LOG_SHOW_TO_INFO=True,
(then it logs messages as INFO).
'''
if esgfpid.defaults.LOG_DEBUG_TO_INFO:
logger.info('DEBUG %s ' % msg, *args, **kwargs)
else:
logger.debug(msg, *args, **kwargs)
|
Logs messages as DEBUG,
unless show=True and esgfpid.defaults.LOG_SHOW_TO_INFO=True,
(then it logs messages as INFO).
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/utils/logutils.py#L18-L27
|
[
"def debug(self, msg, *args, **kwargs):\n logging.warn('DEBUG! '+str(msg))\n msgf = msg % args\n self.debug_messages.append(msgf)\n"
] |
import esgfpid.defaults
#
# Logging helpers
#
def logtrace(logger, msg, *args, **kwargs):
'''
If esgfpid.defaults.LOG_TRACE_TO_DEBUG, messages are treated
like debug messages (with an added [trace]).
Otherwise, they are ignored.
'''
if esgfpid.defaults.LOG_TRACE_TO_DEBUG:
logdebug(logger, '[trace] %s' % msg, *args, **kwargs)
else:
pass
def loginfo(logger, msg, *args, **kwargs):
'''
Logs messages as INFO,
unless esgfpid.defaults.LOG_INFO_TO_DEBUG,
(then it logs messages as DEBUG).
'''
if esgfpid.defaults.LOG_INFO_TO_DEBUG:
logger.debug(msg, *args, **kwargs)
else:
logger.info(msg, *args, **kwargs)
def logwarn(logger, msg, *args, **kwargs):
logger.warn(msg, *args, **kwargs)
def logerror(logger, msg, *args, **kwargs):
logger.error(msg, *args, **kwargs)
def log_every_x_times(logger, counter, x, msg, *args, **kwargs):
'''
Works like logdebug, but only prints first and
and every xth message.
'''
if counter==1 or counter % x == 0:
#msg = msg + (' (counter %i)' % counter)
logdebug(logger, msg, *args, **kwargs)
|
IS-ENES-Data/esgf-pid
|
esgfpid/utils/logutils.py
|
loginfo
|
python
|
def loginfo(logger, msg, *args, **kwargs):
'''
Logs messages as INFO,
unless esgfpid.defaults.LOG_INFO_TO_DEBUG,
(then it logs messages as DEBUG).
'''
if esgfpid.defaults.LOG_INFO_TO_DEBUG:
logger.debug(msg, *args, **kwargs)
else:
logger.info(msg, *args, **kwargs)
|
Logs messages as INFO,
unless esgfpid.defaults.LOG_INFO_TO_DEBUG,
(then it logs messages as DEBUG).
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/utils/logutils.py#L29-L38
|
[
"def info(self, msg, *args, **kwargs):\n msgf = msg % args\n self.info_messages.append(msgf)\n"
] |
import esgfpid.defaults
#
# Logging helpers
#
def logtrace(logger, msg, *args, **kwargs):
'''
If esgfpid.defaults.LOG_TRACE_TO_DEBUG, messages are treated
like debug messages (with an added [trace]).
Otherwise, they are ignored.
'''
if esgfpid.defaults.LOG_TRACE_TO_DEBUG:
logdebug(logger, '[trace] %s' % msg, *args, **kwargs)
else:
pass
def logdebug(logger, msg, *args, **kwargs):
'''
Logs messages as DEBUG,
unless show=True and esgfpid.defaults.LOG_SHOW_TO_INFO=True,
(then it logs messages as INFO).
'''
if esgfpid.defaults.LOG_DEBUG_TO_INFO:
logger.info('DEBUG %s ' % msg, *args, **kwargs)
else:
logger.debug(msg, *args, **kwargs)
def logwarn(logger, msg, *args, **kwargs):
logger.warn(msg, *args, **kwargs)
def logerror(logger, msg, *args, **kwargs):
logger.error(msg, *args, **kwargs)
def log_every_x_times(logger, counter, x, msg, *args, **kwargs):
'''
Works like logdebug, but only prints first and
and every xth message.
'''
if counter==1 or counter % x == 0:
#msg = msg + (' (counter %i)' % counter)
logdebug(logger, msg, *args, **kwargs)
|
IS-ENES-Data/esgf-pid
|
esgfpid/utils/logutils.py
|
log_every_x_times
|
python
|
def log_every_x_times(logger, counter, x, msg, *args, **kwargs):
'''
Works like logdebug, but only prints first and
and every xth message.
'''
if counter==1 or counter % x == 0:
#msg = msg + (' (counter %i)' % counter)
logdebug(logger, msg, *args, **kwargs)
|
Works like logdebug, but only prints first and
and every xth message.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/utils/logutils.py#L47-L54
|
[
"def logdebug(logger, msg, *args, **kwargs):\n '''\n Logs messages as DEBUG,\n unless show=True and esgfpid.defaults.LOG_SHOW_TO_INFO=True,\n (then it logs messages as INFO).\n '''\n if esgfpid.defaults.LOG_DEBUG_TO_INFO:\n logger.info('DEBUG %s ' % msg, *args, **kwargs)\n else:\n logger.debug(msg, *args, **kwargs)\n"
] |
import esgfpid.defaults
#
# Logging helpers
#
def logtrace(logger, msg, *args, **kwargs):
'''
If esgfpid.defaults.LOG_TRACE_TO_DEBUG, messages are treated
like debug messages (with an added [trace]).
Otherwise, they are ignored.
'''
if esgfpid.defaults.LOG_TRACE_TO_DEBUG:
logdebug(logger, '[trace] %s' % msg, *args, **kwargs)
else:
pass
def logdebug(logger, msg, *args, **kwargs):
'''
Logs messages as DEBUG,
unless show=True and esgfpid.defaults.LOG_SHOW_TO_INFO=True,
(then it logs messages as INFO).
'''
if esgfpid.defaults.LOG_DEBUG_TO_INFO:
logger.info('DEBUG %s ' % msg, *args, **kwargs)
else:
logger.debug(msg, *args, **kwargs)
def loginfo(logger, msg, *args, **kwargs):
'''
Logs messages as INFO,
unless esgfpid.defaults.LOG_INFO_TO_DEBUG,
(then it logs messages as DEBUG).
'''
if esgfpid.defaults.LOG_INFO_TO_DEBUG:
logger.debug(msg, *args, **kwargs)
else:
logger.info(msg, *args, **kwargs)
def logwarn(logger, msg, *args, **kwargs):
    '''
    Log a message at WARNING level on the given logger.

    Uses Logger.warning(); Logger.warn() is a deprecated alias in the
    standard logging module and may be removed in future versions.
    '''
    logger.warning(msg, *args, **kwargs)
def logerror(logger, msg, *args, **kwargs):
    '''Log a message at ERROR level on the given logger (thin wrapper
    kept for symmetry with logtrace/logdebug/loginfo/logwarn).'''
    logger.error(msg, *args, **kwargs)
|
IS-ENES-Data/esgf-pid
|
esgfpid/solr/tasks/filehandles_same_dataset.py
|
FindFilesOfSameDatasetVersion.retrieve_file_handles_of_same_dataset
|
python
|
def retrieve_file_handles_of_same_dataset(self, **args):
    '''
    Retrieve from solr the handles of all files that belong to one
    dataset version. Two lookup strategies are tried in order:

    1. Query by full dataset id (drs_id + version number + data node).
    2. Query by drs_id and version number only (any data node).

    :param drs_id: Mandatory (keyword). Dataset id of the dataset.
    :param version_number: Mandatory (keyword). Version number of the dataset.
    :param data_node: Mandatory (keyword). Data node of the publication.
    :param prefix: Mandatory (keyword). Handle prefix used for filtering.
    :return: List of handles, or empty list. Should never return None.
    :raise: SolrSwitchedOff
    :raise SolrError: If both strategies to find file handles failed.
    '''
    mandatory_args = ['drs_id', 'version_number', 'data_node', 'prefix']
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
    self.__reset_error_messages()

    # Try plan A: lookup by the full dataset id (includes the data node).
    file_handles = None
    try:
        file_handles = self.__strategy1(args) # can raise SolrError or SolrSwitchedOff, but can't return None
    except esgfpid.exceptions.SolrError as e:
        self.__error_messages.append('Error during first query: '+e.message)
    if file_handles is not None and len(file_handles)>0:
        LOGGER.debug('Retrieved file handles from solr in first query.')
        return file_handles

    # Try plan B: lookup by drs_id and version only. If this also fails,
    # raise a SolrError carrying the messages of both failed attempts.
    try:
        file_handles = self.__strategy2(args) # can raise SolrError or SolrSwitchedOff, but can't return None
    except esgfpid.exceptions.SolrError as e:
        self.__error_messages.append('Error during second query: '+e.message)
        # Bugfix: was '/n'.join(...), which joined the collected messages
        # with a literal "/n" instead of separating them with newlines.
        msg = '\n'.join(self.__error_messages)
        raise esgfpid.exceptions.SolrError('Failure in both queries. Messages:\n'+msg)
    return file_handles
|
:return: List of handles, or empty list. Should never return None.
:raise: SolrSwitchedOff
:raise SolrError: If both strategies to find file handles failed.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/solr/tasks/filehandles_same_dataset.py#L21-L50
|
[
"def check_presence_of_mandatory_args(args, mandatory_args):\n missing_args = []\n for name in mandatory_args:\n if name not in args.keys():\n missing_args.append(name)\n if len(missing_args)>0:\n raise esgfpid.exceptions.ArgumentError('Missing mandatory arguments: '+', '.join(missing_args))\n else:\n return True\n",
"def __reset_error_messages(self):\n self.__error_messages = []\n",
"def __strategy1(self, args):\n return self.__retrieve_file_handles_of_same_dataset_if_same_datanode(\n args['drs_id'],\n args['version_number'],\n args['data_node'],\n args['prefix'])\n",
"def __strategy2(self, args):\n return self.__retrieve_file_handles_of_same_dataset_if_different_datanode(\n args['drs_id'],\n args['version_number'],\n args['prefix'])\n"
] |
class FindFilesOfSameDatasetVersion(object):
    # Task object that looks up, in solr, the handles of all files
    # belonging to a given dataset version. Two strategies exist: by
    # full dataset id (drs_id + version + data node), or by drs_id and
    # version only.

    def __init__(self, solr_interactor):
        # solr_interactor: object that builds base queries and sends them
        # to solr (provides make_solr_base_query() and send_query()).
        self.__solr_interactor = solr_interactor
        self.__error_messages = None

    def __reset_error_messages(self):
        # Start with a fresh error list for every retrieval attempt.
        self.__error_messages = []

    # General methods:

    def __strategy1(self, args):
        # Plan A: query by the full dataset id (includes the data node).
        return self.__retrieve_file_handles_of_same_dataset_if_same_datanode(
            args['drs_id'],
            args['version_number'],
            args['data_node'],
            args['prefix'])

    def __strategy2(self, args):
        # Plan B: query by drs_id and version number only (any data node).
        return self.__retrieve_file_handles_of_same_dataset_if_different_datanode(
            args['drs_id'],
            args['version_number'],
            args['prefix'])

    def __retrieve_file_handles_of_same_dataset_if_same_datanode(self, drs_id, version_number, data_node, prefix):
        # Build the exact dataset id, query solr, and extract file handles.
        # Returns an empty list (and records an error message) if nothing found.
        dataset_id = self.__make_dataset_id_from_drsid_and_versnum(drs_id, version_number, data_node)
        response_json = self.__ask_solr_for_handles_of_files_with_same_dataset_id(dataset_id) # can raise SolrError or SolrSwitchedOff, but can't be None
        file_handles = solrutils.extract_file_handles_from_response_json(response_json, prefix) # can raise SolrReponseError
        if len(file_handles)==0:
            msg = 'First query returned an empty list.'
            LOGGER.debug(msg)
            self.__error_messages.append(msg)
        return file_handles

    def __make_dataset_id_from_drsid_and_versnum(self, drs_id, version_number, data_node):
        # Dataset id format used by solr: "<drs_id>.v<version>|<data_node>".
        dataset_id = drs_id + '.v' + str(version_number) + '|' + data_node
        return dataset_id

    def __retrieve_file_handles_of_same_dataset_if_different_datanode(self, drs_id, version_number, prefix):
        # Like strategy 1, but ignores the data node (wildcard query).
        response_json = self.__ask_solr_for_handles_of_files_with_same_drsid_and_version(drs_id, version_number) # can raise SolrError or SolrSwitchedOff, but can't be None
        file_handles = solrutils.extract_file_handles_from_response_json(response_json, prefix) # can raise SolrReponseError
        if len(file_handles)==0:
            self.__error_messages.append('Second query returned an empty list.')
        return file_handles

    # Querying solr for handles of files with the same dataset id:

    def __ask_solr_for_handles_of_files_with_same_dataset_id(self, dataset_id):
        LOGGER.debug('Asking solr for all handles or files with the dataset_id "%s".', dataset_id)
        query = self.__make_query_handles_of_files_with_same_dataset_id(dataset_id)
        LOGGER.debug('Query: %s', query)
        response_json = self.__solr_interactor.send_query(query) # can raise SolrError or SolrSwitchedOff, but can't be None
        return response_json

    def __make_query_handles_of_files_with_same_dataset_id(self, dataset_id):
        # Faceting on 'tracking_id' yields the file handles in the response.
        query_dict = self.__solr_interactor.make_solr_base_query()
        query_dict['type'] = 'File'
        query_dict['facets'] = 'tracking_id'
        query_dict['dataset_id'] = dataset_id
        return query_dict

    # Querying solr for the dataset_ids of all versions with the same drs_id:

    def __ask_solr_for_handles_of_files_with_same_drsid_and_version(self, drs_id, version_number):
        part_of_dataset_id = self.__make_half_dataset_id_from_drsid_and_versnum(drs_id, version_number)
        LOGGER.debug('Asking solr for all handles or files with the "half dataset_id" "%s".', part_of_dataset_id)
        query = self.__make_query_for_handles_of_files_with_same_drsid_and_version(part_of_dataset_id)
        LOGGER.debug('Query: %s', query)
        response_json = self.__solr_interactor.send_query(query) # can raise SolrError or SolrSwitchedOff, but can't be None
        return response_json

    def __make_half_dataset_id_from_drsid_and_versnum(self, drs_id, version_number):
        # Dataset id up to (and including) the '|' separator; the data node
        # is left open so it can be matched with a trailing wildcard.
        part_of_dataset_id = drs_id + '.v' + str(version_number) + '|'
        return part_of_dataset_id

    def __make_query_for_handles_of_files_with_same_drsid_and_version(self, part_of_dataset_id):
        # Wildcard query: matches the given dataset version on any data node.
        query_dict = self.__solr_interactor.make_solr_base_query()
        query_dict['type'] = 'File'
        query_dict['facets'] = 'tracking_id'
        query_dict['query'] = 'dataset_id:'+part_of_dataset_id+'*'
        return query_dict
|
IS-ENES-Data/esgf-pid
|
esgfpid/utils/timeutils.py
|
get_now_utc
|
python
|
def get_now_utc():
    '''
    Return the current time as a timezone-aware datetime in UTC.

    Note: this returns a datetime object, not an ISO string. See
    get_now_utc_as_formatted_string() for the ISO-formatted variant.

    :return: A datetime.datetime object with a UTC tzinfo attached.
    '''
    # Helper class for UTC time, needed because this code also runs on
    # Python versions without datetime.timezone.utc (added in 3.2).
    # Source: http://stackoverflow.com/questions/2331592/datetime-datetime-utcnow-why-no-tzinfo
    ZERO = datetime.timedelta(0)
    class UTC(datetime.tzinfo):
        """UTC"""
        def utcoffset(self, dt):
            return ZERO
        def tzname(self, dt):
            return "UTC"
        def dst(self, dt):
            return ZERO
    #now = datetime.datetime.now(timezone.utc) # Python 3.2
    now = datetime.datetime.now(UTC())
    return now
|
Return the current time as a timezone-aware datetime in UTC.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/utils/timeutils.py#L10-L27
| null |
import datetime
def get_now_utc_as_formatted_string():
    '''Return the current UTC time as an ISO-8601 string,
    e.g. "2015-12-21T10:31:37.524825+00:00".'''
    return get_now_utc().isoformat()
|
IS-ENES-Data/esgf-pid
|
esgfpid/solr/tasks/all_versions_of_dataset.py
|
FindVersionsOfSameDataset.retrieve_dataset_handles_or_version_numbers_of_all_versions
|
python
|
def retrieve_dataset_handles_or_version_numbers_of_all_versions(self, drs_id, prefix):
    '''
    Ask solr for all versions of the dataset with the given drs_id and
    return their handles (if solr stores them) and/or version numbers.

    :param drs_id: Dataset id whose versions are looked up.
    :param prefix: Handle prefix used to filter the returned handles.
    :return: Dict with keys 'dataset_handles' and 'version_numbers'
        (each may be None if not found). Should never return None.
    :raise: SolrSwitchedOff
    :raise SolrError: If ...
    '''
    self.__reset_error_messages()
    # One solr query; the parse step decides whether handles, version
    # numbers, or both could be extracted from the response (and raises
    # if neither was found).
    response_json = self.__ask_solr_for_handles_or_version_numbers_of_all_versions(drs_id)
    result_dict = self.__parse_result_handles_or_version_numbers_of_all_versions(response_json, prefix)
    return result_dict
|
:return: Dict. Should never return None.
:raise: SolrSwitchedOff
:raise SolrError: If ...
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/solr/tasks/all_versions_of_dataset.py#L19-L28
|
[
"def __reset_error_messages(self):\n self.__error_messages = []\n",
"def __ask_solr_for_handles_or_version_numbers_of_all_versions(self, drs_id):\n LOGGER.debug('Asking solr for dataset handles or version numbers of all dataset versions with the drs_id \"%s\".', drs_id)\n query = self.__make_query_for_handles_or_version_numbers_of_all_versions(drs_id)\n LOGGER.debug('Query: %s', query)\n response_json = self.__solr_interactor.send_query(query) # can raise SolrError or SolrSwitchedOff, but can't be None\n return response_json\n",
"def __parse_result_handles_or_version_numbers_of_all_versions(self, response_json, prefix):\n\n # Prepare result object:\n result_dict = {}\n result_dict['dataset_handles'] = None\n result_dict['version_numbers'] = None\n\n # Get handles, if there is any:\n result_dict['dataset_handles'] = self.__get_handles_if_any(response_json, prefix)\n\n # Otherwise, get version numbers, if there is any:\n result_dict['version_numbers'] = self.__get_version_numbers_if_any(response_json)\n\n if (result_dict['dataset_handles'] is None and\n result_dict['version_numbers'] is None):\n\n msg = 'Found neither version numbers, nor handles. Errors: %s' % '; '.join(self.__error_messages)\n LOGGER.debug(msg)\n raise esgfpid.exceptions.SolrResponseError(msg)\n\n return result_dict\n"
] |
class FindVersionsOfSameDataset(object):
    # Task object that asks solr for all versions of a dataset (same
    # drs_id) and extracts their handles and/or version numbers.

    def __init__(self, solr_interactor):
        # solr_interactor: object that builds base queries and sends them
        # to solr (provides make_solr_base_query() and send_query()).
        self.__solr_interactor = solr_interactor
        self.__error_messages = None

    def __reset_error_messages(self):
        # Start with a fresh error list for every retrieval attempt.
        self.__error_messages = []

    # General methods:

    # Querying solr for handles and/or version numbers of all versions of a dataset (same drs_id):

    def __ask_solr_for_handles_or_version_numbers_of_all_versions(self, drs_id):
        LOGGER.debug('Asking solr for dataset handles or version numbers of all dataset versions with the drs_id "%s".', drs_id)
        query = self.__make_query_for_handles_or_version_numbers_of_all_versions(drs_id)
        LOGGER.debug('Query: %s', query)
        response_json = self.__solr_interactor.send_query(query) # can raise SolrError or SolrSwitchedOff, but can't be None
        return response_json

    def __make_query_for_handles_or_version_numbers_of_all_versions(self, drs_id):
        # Faceting on 'pid,version' yields the handles and version numbers
        # of all dataset versions in one response.
        query_dict = self.__solr_interactor.make_solr_base_query()
        query_dict['type'] = 'Dataset'
        query_dict['facets'] = 'pid,version'
        query_dict['drs_id'] = drs_id
        return query_dict

    def __parse_result_handles_or_version_numbers_of_all_versions(self, response_json, prefix):

        # Prepare result object:
        result_dict = {}
        result_dict['dataset_handles'] = None
        result_dict['version_numbers'] = None

        # Get handles, if there is any:
        result_dict['dataset_handles'] = self.__get_handles_if_any(response_json, prefix)

        # Otherwise, get version numbers, if there is any:
        result_dict['version_numbers'] = self.__get_version_numbers_if_any(response_json)

        # Neither found: raise, carrying the collected error messages.
        if (result_dict['dataset_handles'] is None and
            result_dict['version_numbers'] is None):

            msg = 'Found neither version numbers, nor handles. Errors: %s' % '; '.join(self.__error_messages)
            LOGGER.debug(msg)
            raise esgfpid.exceptions.SolrResponseError(msg)

        return result_dict

    def __get_handles_if_any(self, response_json, prefix):
        # Returns the list of dataset handles, or None (implicitly) if
        # extraction failed or found nothing.
        try:
            dataset_handles = solrutils.extract_dataset_handles_from_response_json(response_json, prefix)
            if len(dataset_handles) > 0:
                LOGGER.debug('Found dataset handles: %s', dataset_handles)
                return dataset_handles
        except esgfpid.exceptions.SolrResponseError as e:
            self.__error_messages.append(e.message)

    def __get_version_numbers_if_any(self, response_json):
        # Returns the list of version numbers, or None (implicitly) if
        # extraction failed or found nothing.
        try:
            version_numbers = solrutils.extract_dataset_version_numbers_from_response_json(response_json)
            if len(version_numbers) > 0:
                LOGGER.debug('Found version numbers, but no handles: %s', version_numbers)
                return version_numbers
        except esgfpid.exceptions.SolrResponseError as e:
            self.__error_messages.append(e.message)
|
IS-ENES-Data/esgf-pid
|
esgfpid/connector.py
|
Connector.create_publication_assistant
|
python
|
def create_publication_assistant(self, **args):
    '''
    Create an assistant for a dataset that allows to make PID
    requests for the dataset and all of its files.
    :param drs_id: Mandatory. The dataset id of the dataset
        to be published.
    :param version_number: Mandatory. The version number of the
        dataset to be published.
    :param is_replica: Mandatory. Flag to indicate whether the
        dataset is a replica.
    .. note:: If the replica flag is set to False, the publication
        may still be considered a replica by the consuming servlet,
        namely if the dataset was already published at a different
        host. For this, please refer to the consumer documentation.
    :raises esgfpid.exceptions.ArgumentError: If a mandatory argument
        is missing, or if no thredds_service_path / data_node was
        given at library initialization.
    :return: A publication assistant which provides all necessary
        methods to publish a dataset and its files.
    '''

    # Check args
    logdebug(LOGGER, 'Creating publication assistant..')
    mandatory_args = ['drs_id', 'version_number', 'is_replica']
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)

    # Check if service path is given
    # (optional at init, but mandatory for publication).
    if self.__thredds_service_path is None:
        msg = 'No thredds_service_path given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # Check if data node is given
    # (optional at init, but mandatory for publication).
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # Check if solr has access:
    if self.__coupler.is_solr_switched_off():
        pass # solr access not mandatory anymore

    # Create publication assistant
    assistant = esgfpid.assistant.publish.DatasetPublicationAssistant(
        drs_id=args['drs_id'],
        version_number=args['version_number'],
        thredds_service_path=self.__thredds_service_path,
        data_node=self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        is_replica=args['is_replica'],
        consumer_solr_url=self.__consumer_solr_url # may be None
    )
    logdebug(LOGGER, 'Creating publication assistant.. done')
    return assistant
|
Create an assistant for a dataset that allows to make PID
requests for the dataset and all of its files.
:param drs_id: Mandatory. The dataset id of the dataset
to be published.
:param version_number: Mandatory. The version number of the
dataset to be published.
:param is_replica: Mandatory. Flag to indicate whether the
dataset is a replica.
.. note:: If the replica flag is set to False, the publication
may still be considered a replica by the consuming servlet,
namely if the dataset was already published at a different
host. For this, please refer to the consumer documentation.
:return: A publication assistant which provides all necessary
methods to publish a dataset and its files.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L287-L341
|
[
"def check_presence_of_mandatory_args(args, mandatory_args):\n missing_args = []\n for name in mandatory_args:\n if name not in args.keys():\n missing_args.append(name)\n if len(missing_args)>0:\n raise esgfpid.exceptions.ArgumentError('Missing mandatory arguments: '+', '.join(missing_args))\n else:\n return True\n",
"def logdebug(logger, msg, *args, **kwargs):\n '''\n Logs messages as DEBUG,\n unless show=True and esgfpid.defaults.LOG_SHOW_TO_INFO=True,\n (then it logs messages as INFO).\n '''\n if esgfpid.defaults.LOG_DEBUG_TO_INFO:\n logger.info('DEBUG %s ' % msg, *args, **kwargs)\n else:\n logger.debug(msg, *args, **kwargs)\n",
"def logwarn(logger, msg, *args, **kwargs):\n logger.warn(msg, *args, **kwargs)\n"
] |
class Connector(object):
'''
This class provides the main functionality for the ESGF PID
module.
Author: Merret Buurman (DKRZ), 2015-2016
'''
def __init__(self, **args):
'''
Create a connector object with the necessary config
to connect to a RabbitMQ messaging server and perform
PID creations/updates.
The arguments have to be passed as named parameters.
Please contact your ESGF index node or CDNOT for this
information.
Some of the arguments are needed for making connections
from this library to RabbitMQ or to solr. Other arguments
are only passed on to the consuming servlet inside the
messages.
:param handle_prefix: Mandatory. The handle prefix (as a
string) for the handles to be created/updates. This
has to match the handle prefix that the message queue
consuming servlet has write access to. In CMIP6, this
is "21.14100".
:param messaging_service_exchange_name: Mandatory. The
name of the messaging exchange that will forward the
messages to a specific queue.
:param messaging_service_credentials: Mandatory. List of
dictionaries with credentials for the RabbitMQ nodes.
Each needs to have the entries: "user", "password", "url".
They may have an integer "priority" too. If two nodes have
the same priority, the library chooses randomly between
them. They also may have a "vhost" (RabbitMQ virtual host),
a "port" and a boolean "ssl_enabled". Please refer to pika's
documentation
(http://pika.readthedocs.io/en/latest/modules/parameters.html).
Dictionaries for 'open nodes' do not need a password
to be provided. Open nodes are only used if no more
other nodes are available. Note: Open nodes are no longer
supported.
:param message_service_synchronous: Optional. Boolean to
define if the connection to RabbitMQ and the message
sending should work in synchronous mode. Defaults to
the value defined in defaults.py.
:param data_node: Mandatory/Optional.
(Mandatory for publications and unpublications,
ignored for any other usage of the library. No default.)
The data node (host name) at which (un)publication takes
place. This will be included in the handle records. Trailing
slashes are removed.
Used during publication and unpublication (modules
assistant.publish and assistant.unpublish):
* Publication: Used to construct the file data URL (together
with thredds service path and file publish path). Sent along
in rabbit message. Used for consistency check, if solr use
is enabled.
* Unpublication: Sent along in rabbit message.
:param thredds_service_path: Mandatory for publications,
ignored for any other usage of the library. No default.
The thredds service path where the files of a publication
reside. Will be combined with files' publish path and data
node to form the files' data access URLs.
:param solr_url: Optional. The URL of the solr to be uses by this
library for the dataset consistency check. No default. If not provided,
the check is not done.
Note: This is currently switched off for performance reasons.
:param solr_https_verify: Optional flag to indicate whether
requests to solr should verify the SSL certificate.
Please see documentation of requests library: http://docs.python-requests.org/en/master/user/advanced/
:param disable_insecure_request_warning: Optional flag (only for
use during testing). If True, warnings are not printed during
insecure SSL requests.
Important: This is not passed through to the solr module, so
that switching off the warnings is not possible. It can only
be passed directly to the solr module during tests.
:param solr_switched_off: Optional flag to tell if the solr module
should be switched off. In that case, no connections to solr
are made.
:param consumer_solr_url: Optional. URL of a solr instance that
is to be used by the consumer (e.g. for finding versions), *not*
by this library.
:param test_publication: Optional flag. If True, the
handles that are created are test handles
that will be overwritten by real publications. Also,
test publications cannot update real handles.
:returns: An instance of the connector, configured for one
data node, and for connection with a specific RabbitMQ node.
'''
LOGGER.debug(40*'-')
LOGGER.debug('Creating PID connector object ..')
self.__check_presence_of_args(args)
self.__check_rabbit_credentials_completeness(args)
self.__define_defaults_for_optional_args(args)
self.__store_some_args(args)
self.__throw_error_if_prefix_not_in_list()
esgfpid.utils.routingkeys.add_prefix_to_routing_keys(self.prefix)
self.__coupler = esgfpid.coupling.Coupler(**args)
loginfo(LOGGER, 'Created PID connector.')
def __check_presence_of_args(self, args):
mandatory_args = [
'messaging_service_credentials',
'messaging_service_exchange_name',
'handle_prefix'
]
optional_args = [
'data_node',
'thredds_service_path',
'test_publication',
'solr_url',
'solr_https_verify',
'disable_insecure_request_warning',
'solr_switched_off',
'consumer_solr_url',
'message_service_synchronous'
]
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
def __define_defaults_for_optional_args(self, args):
if 'data_node' not in args or args['data_node'] is None:
''' May be None, only needed for some operations.
If it is needed, its presence is checked later. '''
args['data_node'] = None
if 'thredds_service_path' not in args or args['thredds_service_path'] is None:
''' May be None, only needed for some operations.
If it is needed, its presence is checked later. '''
args['thredds_service_path'] = None
if 'test_publication' not in args or args['test_publication'] is None:
args['test_publication'] = False
if 'solr_url' not in args or args['solr_url'] is None:
args['solr_url'] = None
args['solr_switched_off'] = True
if 'solr_switched_off' not in args or args['solr_switched_off'] is None:
args['solr_switched_off'] = False
if 'solr_https_verify' not in args or args['solr_https_verify'] is None:
args['solr_https_verify'] = esgfpid.defaults.SOLR_HTTPS_VERIFY_DEFAULT
if 'disable_insecure_request_warning' not in args or args['disable_insecure_request_warning'] is None:
args['disable_insecure_request_warning'] = False
if 'message_service_synchronous' not in args or args['message_service_synchronous'] is None:
args['message_service_synchronous'] = not esgfpid.defaults.RABBIT_IS_ASYNCHRONOUS
if 'consumer_solr_url' not in args or args['consumer_solr_url'] is None:
args['consumer_solr_url'] = None
def __check_rabbit_credentials_completeness(self, args):
for credentials in args['messaging_service_credentials']:
if not isinstance(credentials, dict):
errmsg = 'Credentials for each RabbitMQ node should be a dictionary.'
raise esgfpid.exceptions.ArgumentError(errmsg)
# Mandatory:
self.__check_presence_and_type('url', credentials, basestring)
self.__check_presence_and_type('user', credentials, basestring)
self.__check_presence_and_type('password', credentials, basestring) # If you want open nodes to be enabled again, remove this!
# Optional:
self.__check_and_adapt_type_if_exists('password', credentials, basestring)
self.__check_and_adapt_type_if_exists('vhost', credentials, basestring)
self.__check_and_adapt_type_if_exists('port', credentials, int)
self.__check_and_adapt_type_if_exists('ssl_enabled', credentials, bool)
def __check_presence_and_type(self, attname, credentials, desiredtype):
self.__check_presence(attname, credentials)
self.__check_and_adapt_type_if_exists(attname, credentials, desiredtype)
def __check_presence(self, attname, credentials):
if attname not in credentials:
rabbitname_for_errmsg = '(not specified)'
if 'url' in credentials:
rabbitname_for_errmsg = credentials['url']
errmsg = 'Missing %s for messaging service "%s"!' % (attname, rabbitname_for_errmsg)
raise esgfpid.exceptions.ArgumentError(errmsg)
def __check_and_adapt_type_if_exists(self, attname, credentials, desiredtype):
if attname in credentials:
# Empty string to None:
if credentials[attname] == '':
credentials[attname] = None
# List to object:
if type(credentials[attname]) == type([]) and len(credentials[attname]) == 1:
credentials[attname] = credentials[attname][0]
# Don't check if None:
if credentials[attname] is None:
pass
# Check type:
elif not isinstance(credentials[attname], desiredtype):
# Try conversion:
try:
credentials[attname] = self.__try_conversion(credentials[attname], desiredtype)
except ValueError as e:
errmsg = ('Wrong type of messaging service %s (%s). Expected %s, got %s, conversion failed.' %
(attname, credentials[attname], desiredtype, type(credentials[attname])))
raise esgfpid.exceptions.ArgumentError(errmsg)
def __try_conversion(self, value, desiredtype):
if desiredtype == bool:
if isinstance(value, basestring):
if str.lower(value) == 'true':
return True
elif str.lower(value) == 'false':
return False
raise ValueError()
if desiredtype == basestring:
#return str(value)
raise ValueError('Not transforming booleans')
if desiredtype == int:
return int(value)
else:
return desiredtype(value)
'''
These are not (only) needed during initialisation, but
(also) later on.
'''
def __store_some_args(self, args):
self.prefix = args['handle_prefix']
self.__thredds_service_path = args['thredds_service_path']
self.__data_node = args['data_node'] # may be None, only needed for some assistants.
self.__consumer_solr_url = args['consumer_solr_url'] # may be None
def __throw_error_if_prefix_not_in_list(self):
if self.prefix is None:
raise esgfpid.exceptions.ArgumentError('Missing handle prefix!')
if self.prefix not in esgfpid.defaults.ACCEPTED_PREFIXES:
raise esgfpid.exceptions.ArgumentError('The prefix "%s" is not a valid prefix! Please check your config. Accepted prefixes: %s'
% (self.prefix, ', '.join(esgfpid.defaults.ACCEPTED_PREFIXES)))
'''
Please see documentation of solr module (:func:`~check.check_pid_queue_availability`).
'''
def check_pid_queue_availability(self, **args):
rabbit_checker = esgfpid.check.RabbitChecker(connector = self, prefix = self.prefix, **args)
return rabbit_checker.check_and_inform()
def unpublish_one_version(self, **args):
'''
Sends a PID update request for the unpublication of one version
of a dataset currently published at the given data node.
Either the handle or the pair of drs_id and version_number
have to be provided, otherwise an exception will occur.
The consumer will of course check the PID request message's
timestamp with the timestamp of the last publication, so that
republications in the mean time are not unpublished.
The unpublication of the files is included in this method.
:param handle: Optional. The handle of the dataset
to be unpublished.
:param drs_id: Optional. The dataset id of the dataset
to be unpublished.
:param version_number: Optional. The version number of
the dataset to be unpublished.
:raises: ArgumentError: If not enough arguments are passed
to identify the dataset, or if no data node was specified
during library init.
'''
# Check args
optional_args = ['handle', 'drs_id', 'version_number']
esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for unpublication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Unpublish
assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
drs_id = args['drs_id'],
data_node = self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string()
)
assistant.unpublish_one_dataset_version(
handle = args['handle'],
version_number = args['version_number']
)
def unpublish_all_versions(self, **args):
'''
Sends a PID update request for the unpublication of all versions
of a dataset currently published at the given data node.
If the library has solr access, it will try to find all the
dataset versions and their handles from solr, and send individual
messages for each version. Otherwise, one message is sent, and the
queue consuming servlet has to identify the relevant versions,
also making sure not to unpublish any versions that may have been
republished in the meantime.
:param drs_id: Dataset id of the dataset to be unpublished.
:raises: ArgumentError: If the data node
was not provided at library initialization.
'''
# Check args
mandatory_args = ['drs_id']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for publication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Check if solr has access:
if self.__coupler.is_solr_switched_off():
msg = 'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.'
logdebug(LOGGER, msg)
#raise esgfpid.exceptions.ArgumentError('No solr access. Solr access is needed for publication. Please provide access to a solr index when initializing the library')
# Unpublish
assistant = esgfpid.assistant.unpublish.AssistantAllVersions(
drs_id = args['drs_id'],
data_node = self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string(),
consumer_solr_url = self.__consumer_solr_url # may be None
)
assistant.unpublish_all_dataset_versions()
def add_errata_ids(self, **args):
'''
Add errata ids to a dataset handle record.
To call this method, you do not need to provide the
PID of the dataset. Instead, the PID string is derived
from the dataset id and the version number.
:param errata_ids: Mandatory. A list of errata ids (strings)
to be added to the handle record.
:param drs_id: Mandatory. The dataset id of the dataset
to whose handle record the errata ids are to be
added. (This is needed because the handle is found
by making a hash over dataset id and version number).
:param version_number: Mandatory. The version number of the
dataset to whose handle record the errata ids are to be
added. (This is needed because the handle is found by
making a hash over dataset id and version number).
'''
# Check args:
mandatory_args = ['drs_id', 'version_number', 'errata_ids']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
# Perform metadata update
assistant = esgfpid.assistant.errata.ErrataAssistant(
coupler=self.__coupler,
prefix=self.prefix
)
assistant.add_errata_ids(
drs_id=args['drs_id'],
version_number=args['version_number'],
errata_ids=args['errata_ids']
)
def remove_errata_ids(self, **args):
'''
Remove errata ids from a dataset handle record.
To call this method, you do not need to provide the
PID of the dataset. Instead, the PID string is derived
from the dataset id and the version number.
:param errata_ids: Mandatory. A list of errata ids (strings) to
be removed from the handle record.
:param drs_id: Mandatory. The dataset id of the dataset
from whose handle record the errata ids are to be
removed. (This is needed because the handle is found
by making a hash over dataset id and version number).
:param version_number: Mandatory. The version number of the
dataset from whose handle record the errata ids are to be
removed. (This is needed because the handle is found by
making a hash over dataset id and version number).
'''
# Check args:
mandatory_args = ['drs_id', 'version_number', 'errata_ids']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
# Perform metadata update
assistant = esgfpid.assistant.errata.ErrataAssistant(
coupler=self.__coupler,
prefix=self.prefix
)
assistant.remove_errata_ids(
drs_id=args['drs_id'],
version_number=args['version_number'],
errata_ids=args['errata_ids']
)
def create_data_cart_pid(self, dict_of_drs_ids_and_pids):
'''
Create a handle record for a data cart (a custom set of datasets).
The handle string is made of the prefix passed to tbe library,
and a hash over all the dataset ids in the cart. This way, if exactly
the same set of datasets is passed several times, the same handle
record is created, instead of making a new one.
:param dict_of_drs_ids_and_pids: A dictionary of all dataset ids
and their pid strings. If a dataset has no (known) PID, use
"None".
:return: The handle string for this data cart.
'''
assistant = esgfpid.assistant.datacart.DataCartAssistant(
prefix=self.prefix,
coupler=self.__coupler
)
return assistant.make_data_cart_pid(dict_of_drs_ids_and_pids)
def start_messaging_thread(self):
'''
Start the parallel thread that takes care of the asynchronous
communication with RabbitMQ.
If PID creation/update requests are attempted before
this was called, an exception will be raised.
Preferably call this method as early as possible, so that
the module has some time to build the connection before
the first PID requests are made.
(If PID requests are made before the connection is ready,
they will not be lost, but pile up and sent once the connection
is ready).
.. important:: Please do not forget to finish the thread at the end,
using :meth:`~esgfpid.connector.Connector.finish_messaging_thread`
or :meth:`~esgfpid.connector.Connector.force_finish_messaging_thread`.
'''
self.__coupler.start_rabbit_connection()
def finish_messaging_thread(self):
'''
Finish and join the parallel thread that takes care of
the asynchronous communication with RabbitMQ.
If some messages are still in the stack to be sent,
or if some messages were not confirmed yet, this method
blocks and waits for some time while it iteratively
checks for message confirmation.
Currently, it waits up to 5 seconds: It checks up to
11 times, waiting 0.5 seconds in between - these
values can be configured in the defaults module).
'''
self.__coupler.finish_rabbit_connection()
def force_finish_messaging_thread(self):
'''
Finish and join the parallel thread that takes care of
the asynchronous communication with RabbitMQ.
This method does not wait for any pending messages.
Messages that are not sent yet are lost. Messages that
are not confirmed yet are probably not lost, but their
receival is not guaranteed.
Note:
The rabbit module keeps a copy of all unsent and
unconfirmed messages, so they could be resent in
a later connection. It would also be easy to expose
a method for the library caller to retrieve those
messages, e.g. to write them into some file.
'''
self.__coupler.force_finish_rabbit_connection()
def make_handle_from_drsid_and_versionnumber(self, **args):
'''
Create a handle string for a specific dataset, based
on its dataset id and version number, and the prefix
passed to the library at initializing.
:param drs_id: The dataset id of the dataset.
:param version_number: The version number of the dataset
(as a string or integer, this does not matter)
:return: A handle string (e.g. "hdl:21.14100/abcxyzfoo")
'''
args['prefix'] = self.prefix
return esgfpid.utils.make_handle_from_drsid_and_versionnumber(**args)
|
IS-ENES-Data/esgf-pid
|
esgfpid/connector.py
|
Connector.unpublish_one_version
|
python
|
def unpublish_one_version(self, **args):
'''
Sends a PID update request for the unpublication of one version
of a dataset currently published at the given data node.
Either the handle or the pair of drs_id and version_number
have to be provided, otherwise an exception will occur.
The consumer will of course check the PID request message's
timestamp with the timestamp of the last publication, so that
republications in the mean time are not unpublished.
The unpublication of the files is included in this method.
:param handle: Optional. The handle of the dataset
to be unpublished.
:param drs_id: Optional. The dataset id of the dataset
to be unpublished.
:param version_number: Optional. The version number of
the dataset to be unpublished.
:raises: ArgumentError: If not enough arguments are passed
to identify the dataset, or if no data node was specified
during library init.
'''
# Check args
optional_args = ['handle', 'drs_id', 'version_number']
esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for unpublication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Unpublish
assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
drs_id = args['drs_id'],
data_node = self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string()
)
assistant.unpublish_one_dataset_version(
handle = args['handle'],
version_number = args['version_number']
)
|
Sends a PID update request for the unpublication of one version
of a dataset currently published at the given data node.
Either the handle or the pair of drs_id and version_number
have to be provided, otherwise an exception will occur.
The consumer will of course check the PID request message's
timestamp with the timestamp of the last publication, so that
republications in the mean time are not unpublished.
The unpublication of the files is included in this method.
:param handle: Optional. The handle of the dataset
to be unpublished.
:param drs_id: Optional. The dataset id of the dataset
to be unpublished.
:param version_number: Optional. The version number of
the dataset to be unpublished.
:raises: ArgumentError: If not enough arguments are passed
to identify the dataset, or if no data node was specified
during library init.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L351-L401
|
[
"def add_missing_optional_args_with_value_none(args, optional_args):\n for name in optional_args:\n if not name in args.keys():\n args[name] = None\n return args\n",
"def logwarn(logger, msg, *args, **kwargs):\n logger.warn(msg, *args, **kwargs)\n",
"def get_now_utc_as_formatted_string():\n now = get_now_utc()\n now_string = datetime.datetime.isoformat(now) # 2015-12-21T10:31:37.524825+00:00\n return now_string\n",
"def unpublish_one_dataset_version(self, **args):\n optional_args = ['dataset_handle', 'version_number']\n esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)\n\n handle = args['dataset_handle']\n version_number = args['version_number']\n\n if handle and version_number:\n self.__both_given(handle, version_number)\n loginfo(LOGGER, 'Requesting to unpublish version %s of dataset %s from %s (handle: %s).', version_number, self._drs_id, self._data_node, handle)\n elif handle:\n self.__only_handle_given(handle)\n loginfo(LOGGER, 'Requesting to unpublish a version of dataset %s from %s (handle: %s).', self._drs_id, self._data_node, handle)\n elif version_number:\n self.__only_version_given(version_number)\n loginfo(LOGGER, 'Requesting to unpublish version %s of dataset %s from %s.', version_number, self._drs_id, self._data_node)\n else:\n msg = 'Neither a handle nor a version number were specified for unpublication!'\n raise esgfpid.exceptions.ArgumentError(msg)\n"
] |
class Connector(object):
'''
This class provides the main functionality for the ESGF PID
module.
Author: Merret Buurman (DKRZ), 2015-2016
'''
def __init__(self, **args):
'''
Create a connector object with the necessary config
to connect to a RabbitMQ messaging server and perform
PID creations/updates.
The arguments have to be passed as named parameters.
Please contact your ESGF index node or CDNOT for this
information.
Some of the arguments are needed for making connections
from this library to RabbitMQ or to solr. Other arguments
are only passed on to the consuming servlet inside the
messages.
:param handle_prefix: Mandatory. The handle prefix (as a
string) for the handles to be created/updates. This
has to match the handle prefix that the message queue
consuming servlet has write access to. In CMIP6, this
is "21.14100".
:param messaging_service_exchange_name: Mandatory. The
name of the messaging exchange that will forward the
messages to a specific queue.
:param messaging_service_credentials: Mandatory. List of
dictionaries with credentials for the RabbitMQ nodes.
Each needs to have the entries: "user", "password", "url".
They may have an integer "priority" too. If two nodes have
the same priority, the library chooses randomly between
them. They also may have a "vhost" (RabbitMQ virtual host),
a "port" and a boolean "ssl_enabled". Please refer to pika's
documentation
(http://pika.readthedocs.io/en/latest/modules/parameters.html).
Dictionaries for 'open nodes' do not need a password
to be provided. Open nodes are only used if no more
other nodes are available. Note: Open nodes are no longer
supported.
:param message_service_synchronous: Optional. Boolean to
define if the connection to RabbitMQ and the message
sending should work in synchronous mode. Defaults to
the value defined in defaults.py.
:param data_node: Mandatory/Optional.
(Mandatory for publications and unpublications,
ignored for any other usage of the library. No default.)
The data node (host name) at which (un)publication takes
place. This will be included in the handle records. Trailing
slashes are removed.
Used during publication and unpublication (modules
assistant.publish and assistant.unpublish):
* Publication: Used to construct the file data URL (together
with thredds service path and file publish path). Sent along
in rabbit message. Used for consistency check, if solr use
is enabled.
* Unpublication: Sent along in rabbit message.
:param thredds_service_path: Mandatory for publications,
ignored for any other usage of the library. No default.
The thredds service path where the files of a publication
reside. Will be combined with files' publish path and data
node to form the files' data access URLs.
:param solr_url: Optional. The URL of the solr to be uses by this
library for the dataset consistency check. No default. If not provided,
the check is not done.
Note: This is currently switched off for performance reasons.
:param solr_https_verify: Optional flag to indicate whether
requests to solr should verify the SSL certificate.
Please see documentation of requests library: http://docs.python-requests.org/en/master/user/advanced/
:param disable_insecure_request_warning: Optional flag (only for
use during testing). If True, warnings are not printed during
insecure SSL requests.
Important: This is not passed through to the solr module, so
that switching off the warnings is not possible. It can only
be passed directly to the solr module during tests.
:param solr_switched_off: Optional flag to tell if the solr module
should be switched off. In that case, no connections to solr
are made.
:param consumer_solr_url: Optional. URL of a solr instance that
is to be used by the consumer (e.g. for finding versions), *not*
by this library.
:param test_publication: Optional flag. If True, the
handles that are created are test handles
that will be overwritten by real publications. Also,
test publications cannot update real handles.
:returns: An instance of the connector, configured for one
data node, and for connection with a specific RabbitMQ node.
'''
LOGGER.debug(40*'-')
LOGGER.debug('Creating PID connector object ..')
self.__check_presence_of_args(args)
self.__check_rabbit_credentials_completeness(args)
self.__define_defaults_for_optional_args(args)
self.__store_some_args(args)
self.__throw_error_if_prefix_not_in_list()
esgfpid.utils.routingkeys.add_prefix_to_routing_keys(self.prefix)
self.__coupler = esgfpid.coupling.Coupler(**args)
loginfo(LOGGER, 'Created PID connector.')
def __check_presence_of_args(self, args):
mandatory_args = [
'messaging_service_credentials',
'messaging_service_exchange_name',
'handle_prefix'
]
optional_args = [
'data_node',
'thredds_service_path',
'test_publication',
'solr_url',
'solr_https_verify',
'disable_insecure_request_warning',
'solr_switched_off',
'consumer_solr_url',
'message_service_synchronous'
]
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
def __define_defaults_for_optional_args(self, args):
if 'data_node' not in args or args['data_node'] is None:
''' May be None, only needed for some operations.
If it is needed, its presence is checked later. '''
args['data_node'] = None
if 'thredds_service_path' not in args or args['thredds_service_path'] is None:
''' May be None, only needed for some operations.
If it is needed, its presence is checked later. '''
args['thredds_service_path'] = None
if 'test_publication' not in args or args['test_publication'] is None:
args['test_publication'] = False
if 'solr_url' not in args or args['solr_url'] is None:
args['solr_url'] = None
args['solr_switched_off'] = True
if 'solr_switched_off' not in args or args['solr_switched_off'] is None:
args['solr_switched_off'] = False
if 'solr_https_verify' not in args or args['solr_https_verify'] is None:
args['solr_https_verify'] = esgfpid.defaults.SOLR_HTTPS_VERIFY_DEFAULT
if 'disable_insecure_request_warning' not in args or args['disable_insecure_request_warning'] is None:
args['disable_insecure_request_warning'] = False
if 'message_service_synchronous' not in args or args['message_service_synchronous'] is None:
args['message_service_synchronous'] = not esgfpid.defaults.RABBIT_IS_ASYNCHRONOUS
if 'consumer_solr_url' not in args or args['consumer_solr_url'] is None:
args['consumer_solr_url'] = None
def __check_rabbit_credentials_completeness(self, args):
for credentials in args['messaging_service_credentials']:
if not isinstance(credentials, dict):
errmsg = 'Credentials for each RabbitMQ node should be a dictionary.'
raise esgfpid.exceptions.ArgumentError(errmsg)
# Mandatory:
self.__check_presence_and_type('url', credentials, basestring)
self.__check_presence_and_type('user', credentials, basestring)
self.__check_presence_and_type('password', credentials, basestring) # If you want open nodes to be enabled again, remove this!
# Optional:
self.__check_and_adapt_type_if_exists('password', credentials, basestring)
self.__check_and_adapt_type_if_exists('vhost', credentials, basestring)
self.__check_and_adapt_type_if_exists('port', credentials, int)
self.__check_and_adapt_type_if_exists('ssl_enabled', credentials, bool)
def __check_presence_and_type(self, attname, credentials, desiredtype):
self.__check_presence(attname, credentials)
self.__check_and_adapt_type_if_exists(attname, credentials, desiredtype)
def __check_presence(self, attname, credentials):
if attname not in credentials:
rabbitname_for_errmsg = '(not specified)'
if 'url' in credentials:
rabbitname_for_errmsg = credentials['url']
errmsg = 'Missing %s for messaging service "%s"!' % (attname, rabbitname_for_errmsg)
raise esgfpid.exceptions.ArgumentError(errmsg)
def __check_and_adapt_type_if_exists(self, attname, credentials, desiredtype):
if attname in credentials:
# Empty string to None:
if credentials[attname] == '':
credentials[attname] = None
# List to object:
if type(credentials[attname]) == type([]) and len(credentials[attname]) == 1:
credentials[attname] = credentials[attname][0]
# Don't check if None:
if credentials[attname] is None:
pass
# Check type:
elif not isinstance(credentials[attname], desiredtype):
# Try conversion:
try:
credentials[attname] = self.__try_conversion(credentials[attname], desiredtype)
except ValueError as e:
errmsg = ('Wrong type of messaging service %s (%s). Expected %s, got %s, conversion failed.' %
(attname, credentials[attname], desiredtype, type(credentials[attname])))
raise esgfpid.exceptions.ArgumentError(errmsg)
def __try_conversion(self, value, desiredtype):
if desiredtype == bool:
if isinstance(value, basestring):
if str.lower(value) == 'true':
return True
elif str.lower(value) == 'false':
return False
raise ValueError()
if desiredtype == basestring:
#return str(value)
raise ValueError('Not transforming booleans')
if desiredtype == int:
return int(value)
else:
return desiredtype(value)
'''
These are not (only) needed during initialisation, but
(also) later on.
'''
def __store_some_args(self, args):
self.prefix = args['handle_prefix']
self.__thredds_service_path = args['thredds_service_path']
self.__data_node = args['data_node'] # may be None, only needed for some assistants.
self.__consumer_solr_url = args['consumer_solr_url'] # may be None
def __throw_error_if_prefix_not_in_list(self):
if self.prefix is None:
raise esgfpid.exceptions.ArgumentError('Missing handle prefix!')
if self.prefix not in esgfpid.defaults.ACCEPTED_PREFIXES:
raise esgfpid.exceptions.ArgumentError('The prefix "%s" is not a valid prefix! Please check your config. Accepted prefixes: %s'
% (self.prefix, ', '.join(esgfpid.defaults.ACCEPTED_PREFIXES)))
def create_publication_assistant(self, **args):
'''
Create an assistant for a dataset that allows to make PID
requests for the dataset and all of its files.
:param drs_id: Mandatory. The dataset id of the dataset
to be published.
:param version_number: Mandatory. The version number of the
dataset to be published.
:param is_replica: Mandatory. Flag to indicate whether the
dataset is a replica.
.. note:: If the replica flag is set to False, the publication
may still be considered a replica by the consuming servlet,
namely if the dataset was already published at a different
host. For this, please refer to the consumer documentation.
:return: A publication assistant which provides all necessary
methods to publish a dataset and its files.
'''
# Check args
logdebug(LOGGER, 'Creating publication assistant..')
mandatory_args = ['drs_id', 'version_number', 'is_replica']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
# Check if service path is given
if self.__thredds_service_path is None:
msg = 'No thredds_service_path given (but it is mandatory for publication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for publication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Check if solr has access:
if self.__coupler.is_solr_switched_off():
pass # solr access not mandatory anymore
# Create publication assistant
assistant = esgfpid.assistant.publish.DatasetPublicationAssistant(
drs_id=args['drs_id'],
version_number=args['version_number'],
thredds_service_path=self.__thredds_service_path,
data_node=self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
is_replica=args['is_replica'],
consumer_solr_url=self.__consumer_solr_url # may be None
)
logdebug(LOGGER, 'Creating publication assistant.. done')
return assistant
'''
Please see documentation of solr module (:func:`~check.check_pid_queue_availability`).
'''
def check_pid_queue_availability(self, **args):
rabbit_checker = esgfpid.check.RabbitChecker(connector = self, prefix = self.prefix, **args)
return rabbit_checker.check_and_inform()
def unpublish_all_versions(self, **args):
'''
Sends a PID update request for the unpublication of all versions
of a dataset currently published at the given data node.
If the library has solr access, it will try to find all the
dataset versions and their handles from solr, and send individual
messages for each version. Otherwise, one message is sent, and the
queue consuming servlet has to identify the relevant versions,
also making sure not to unpublish any versions that may have been
republished in the meantime.
:param drs_id: Dataset id of the dataset to be unpublished.
:raises: ArgumentError: If the data node
was not provided at library initialization.
'''
# Check args
mandatory_args = ['drs_id']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for publication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Check if solr has access:
if self.__coupler.is_solr_switched_off():
msg = 'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.'
logdebug(LOGGER, msg)
#raise esgfpid.exceptions.ArgumentError('No solr access. Solr access is needed for publication. Please provide access to a solr index when initializing the library')
# Unpublish
assistant = esgfpid.assistant.unpublish.AssistantAllVersions(
drs_id = args['drs_id'],
data_node = self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string(),
consumer_solr_url = self.__consumer_solr_url # may be None
)
assistant.unpublish_all_dataset_versions()
def add_errata_ids(self, **args):
'''
Add errata ids to a dataset handle record.
To call this method, you do not need to provide the
PID of the dataset. Instead, the PID string is derived
from the dataset id and the version number.
:param errata_ids: Mandatory. A list of errata ids (strings)
to be added to the handle record.
:param drs_id: Mandatory. The dataset id of the dataset
to whose handle record the errata ids are to be
added. (This is needed because the handle is found
by making a hash over dataset id and version number).
:param version_number: Mandatory. The version number of the
dataset to whose handle record the errata ids are to be
added. (This is needed because the handle is found by
making a hash over dataset id and version number).
'''
# Check args:
mandatory_args = ['drs_id', 'version_number', 'errata_ids']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
# Perform metadata update
assistant = esgfpid.assistant.errata.ErrataAssistant(
coupler=self.__coupler,
prefix=self.prefix
)
assistant.add_errata_ids(
drs_id=args['drs_id'],
version_number=args['version_number'],
errata_ids=args['errata_ids']
)
def remove_errata_ids(self, **args):
'''
Remove errata ids from a dataset handle record.
To call this method, you do not need to provide the
PID of the dataset. Instead, the PID string is derived
from the dataset id and the version number.
:param errata_ids: Mandatory. A list of errata ids (strings) to
be removed from the handle record.
:param drs_id: Mandatory. The dataset id of the dataset
from whose handle record the errata ids are to be
removed. (This is needed because the handle is found
by making a hash over dataset id and version number).
:param version_number: Mandatory. The version number of the
dataset from whose handle record the errata ids are to be
removed. (This is needed because the handle is found by
making a hash over dataset id and version number).
'''
# Check args:
mandatory_args = ['drs_id', 'version_number', 'errata_ids']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
# Perform metadata update
assistant = esgfpid.assistant.errata.ErrataAssistant(
coupler=self.__coupler,
prefix=self.prefix
)
assistant.remove_errata_ids(
drs_id=args['drs_id'],
version_number=args['version_number'],
errata_ids=args['errata_ids']
)
def create_data_cart_pid(self, dict_of_drs_ids_and_pids):
'''
Create a handle record for a data cart (a custom set of datasets).
The handle string is made of the prefix passed to tbe library,
and a hash over all the dataset ids in the cart. This way, if exactly
the same set of datasets is passed several times, the same handle
record is created, instead of making a new one.
:param dict_of_drs_ids_and_pids: A dictionary of all dataset ids
and their pid strings. If a dataset has no (known) PID, use
"None".
:return: The handle string for this data cart.
'''
assistant = esgfpid.assistant.datacart.DataCartAssistant(
prefix=self.prefix,
coupler=self.__coupler
)
return assistant.make_data_cart_pid(dict_of_drs_ids_and_pids)
def start_messaging_thread(self):
'''
Start the parallel thread that takes care of the asynchronous
communication with RabbitMQ.
If PID creation/update requests are attempted before
this was called, an exception will be raised.
Preferably call this method as early as possible, so that
the module has some time to build the connection before
the first PID requests are made.
(If PID requests are made before the connection is ready,
they will not be lost, but pile up and sent once the connection
is ready).
.. important:: Please do not forget to finish the thread at the end,
using :meth:`~esgfpid.connector.Connector.finish_messaging_thread`
or :meth:`~esgfpid.connector.Connector.force_finish_messaging_thread`.
'''
self.__coupler.start_rabbit_connection()
def finish_messaging_thread(self):
'''
Finish and join the parallel thread that takes care of
the asynchronous communication with RabbitMQ.
If some messages are still in the stack to be sent,
or if some messages were not confirmed yet, this method
blocks and waits for some time while it iteratively
checks for message confirmation.
Currently, it waits up to 5 seconds: It checks up to
11 times, waiting 0.5 seconds in between - these
values can be configured in the defaults module).
'''
self.__coupler.finish_rabbit_connection()
def force_finish_messaging_thread(self):
'''
Finish and join the parallel thread that takes care of
the asynchronous communication with RabbitMQ.
This method does not wait for any pending messages.
Messages that are not sent yet are lost. Messages that
are not confirmed yet are probably not lost, but their
receival is not guaranteed.
Note:
The rabbit module keeps a copy of all unsent and
unconfirmed messages, so they could be resent in
a later connection. It would also be easy to expose
a method for the library caller to retrieve those
messages, e.g. to write them into some file.
'''
self.__coupler.force_finish_rabbit_connection()
def make_handle_from_drsid_and_versionnumber(self, **args):
'''
Create a handle string for a specific dataset, based
on its dataset id and version number, and the prefix
passed to the library at initializing.
:param drs_id: The dataset id of the dataset.
:param version_number: The version number of the dataset
(as a string or integer, this does not matter)
:return: A handle string (e.g. "hdl:21.14100/abcxyzfoo")
'''
args['prefix'] = self.prefix
return esgfpid.utils.make_handle_from_drsid_and_versionnumber(**args)
|
IS-ENES-Data/esgf-pid
|
esgfpid/connector.py
|
Connector.unpublish_all_versions
|
python
|
def unpublish_all_versions(self, **args):
'''
Sends a PID update request for the unpublication of all versions
of a dataset currently published at the given data node.
If the library has solr access, it will try to find all the
dataset versions and their handles from solr, and send individual
messages for each version. Otherwise, one message is sent, and the
queue consuming servlet has to identify the relevant versions,
also making sure not to unpublish any versions that may have been
republished in the meantime.
:param drs_id: Dataset id of the dataset to be unpublished.
:raises: ArgumentError: If the data node
was not provided at library initialization.
'''
# Check args
mandatory_args = ['drs_id']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for publication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Check if solr has access:
if self.__coupler.is_solr_switched_off():
msg = 'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.'
logdebug(LOGGER, msg)
#raise esgfpid.exceptions.ArgumentError('No solr access. Solr access is needed for publication. Please provide access to a solr index when initializing the library')
# Unpublish
assistant = esgfpid.assistant.unpublish.AssistantAllVersions(
drs_id = args['drs_id'],
data_node = self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string(),
consumer_solr_url = self.__consumer_solr_url # may be None
)
assistant.unpublish_all_dataset_versions()
|
Sends a PID update request for the unpublication of all versions
of a dataset currently published at the given data node.
If the library has solr access, it will try to find all the
dataset versions and their handles from solr, and send individual
messages for each version. Otherwise, one message is sent, and the
queue consuming servlet has to identify the relevant versions,
also making sure not to unpublish any versions that may have been
republished in the meantime.
:param drs_id: Dataset id of the dataset to be unpublished.
:raises: ArgumentError: If the data node
was not provided at library initialization.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L403-L446
|
[
"def check_presence_of_mandatory_args(args, mandatory_args):\n missing_args = []\n for name in mandatory_args:\n if name not in args.keys():\n missing_args.append(name)\n if len(missing_args)>0:\n raise esgfpid.exceptions.ArgumentError('Missing mandatory arguments: '+', '.join(missing_args))\n else:\n return True\n",
"def logdebug(logger, msg, *args, **kwargs):\n '''\n Logs messages as DEBUG,\n unless show=True and esgfpid.defaults.LOG_SHOW_TO_INFO=True,\n (then it logs messages as INFO).\n '''\n if esgfpid.defaults.LOG_DEBUG_TO_INFO:\n logger.info('DEBUG %s ' % msg, *args, **kwargs)\n else:\n logger.debug(msg, *args, **kwargs)\n",
"def logwarn(logger, msg, *args, **kwargs):\n logger.warn(msg, *args, **kwargs)\n",
"def get_now_utc_as_formatted_string():\n now = get_now_utc()\n now_string = datetime.datetime.isoformat(now) # 2015-12-21T10:31:37.524825+00:00\n return now_string\n",
"def unpublish_all_dataset_versions(self):\n\n # If solr is switched off, consumer must find versions:\n if self._coupler.is_solr_switched_off():\n self.__unpublish_allversions_consumer_must_find_versions()\n\n # Get handles or version numbers from solr:\n else:\n all_handles_or_versionnumbers = self.__get_all_handles_or_versionnumbers()\n all_handles = all_handles_or_versionnumbers['dataset_handles']\n all_version_numbers = all_handles_or_versionnumbers['version_numbers']\n\n # If we can have all versions' handles, it's easy.\n if all_handles is not None:\n self.__unpublish_all_dataset_versions_by_handle(all_handles)\n\n # If not, we have the version numbers (and can make the handles from them):\n elif all_version_numbers is not None:\n self.__unpublish_all_dataset_versions_by_version(all_version_numbers)\n\n # If neither, let the consumer find them\n else:\n self.__unpublish_allversions_consumer_must_find_versions()\n loginfo(LOGGER, 'Requesting to unpublish all versions of dataset %s from %s', self._drs_id, self._data_node)\n"
] |
class Connector(object):
'''
This class provides the main functionality for the ESGF PID
module.
Author: Merret Buurman (DKRZ), 2015-2016
'''
def __init__(self, **args):
'''
Create a connector object with the necessary config
to connect to a RabbitMQ messaging server and perform
PID creations/updates.
The arguments have to be passed as named parameters.
Please contact your ESGF index node or CDNOT for this
information.
Some of the arguments are needed for making connections
from this library to RabbitMQ or to solr. Other arguments
are only passed on to the consuming servlet inside the
messages.
:param handle_prefix: Mandatory. The handle prefix (as a
string) for the handles to be created/updates. This
has to match the handle prefix that the message queue
consuming servlet has write access to. In CMIP6, this
is "21.14100".
:param messaging_service_exchange_name: Mandatory. The
name of the messaging exchange that will forward the
messages to a specific queue.
:param messaging_service_credentials: Mandatory. List of
dictionaries with credentials for the RabbitMQ nodes.
Each needs to have the entries: "user", "password", "url".
They may have an integer "priority" too. If two nodes have
the same priority, the library chooses randomly between
them. They also may have a "vhost" (RabbitMQ virtual host),
a "port" and a boolean "ssl_enabled". Please refer to pika's
documentation
(http://pika.readthedocs.io/en/latest/modules/parameters.html).
Dictionaries for 'open nodes' do not need a password
to be provided. Open nodes are only used if no more
other nodes are available. Note: Open nodes are no longer
supported.
:param message_service_synchronous: Optional. Boolean to
define if the connection to RabbitMQ and the message
sending should work in synchronous mode. Defaults to
the value defined in defaults.py.
:param data_node: Mandatory/Optional.
(Mandatory for publications and unpublications,
ignored for any other usage of the library. No default.)
The data node (host name) at which (un)publication takes
place. This will be included in the handle records. Trailing
slashes are removed.
Used during publication and unpublication (modules
assistant.publish and assistant.unpublish):
* Publication: Used to construct the file data URL (together
with thredds service path and file publish path). Sent along
in rabbit message. Used for consistency check, if solr use
is enabled.
* Unpublication: Sent along in rabbit message.
:param thredds_service_path: Mandatory for publications,
ignored for any other usage of the library. No default.
The thredds service path where the files of a publication
reside. Will be combined with files' publish path and data
node to form the files' data access URLs.
:param solr_url: Optional. The URL of the solr to be uses by this
library for the dataset consistency check. No default. If not provided,
the check is not done.
Note: This is currently switched off for performance reasons.
:param solr_https_verify: Optional flag to indicate whether
requests to solr should verify the SSL certificate.
Please see documentation of requests library: http://docs.python-requests.org/en/master/user/advanced/
:param disable_insecure_request_warning: Optional flag (only for
use during testing). If True, warnings are not printed during
insecure SSL requests.
Important: This is not passed through to the solr module, so
that switching off the warnings is not possible. It can only
be passed directly to the solr module during tests.
:param solr_switched_off: Optional flag to tell if the solr module
should be switched off. In that case, no connections to solr
are made.
:param consumer_solr_url: Optional. URL of a solr instance that
is to be used by the consumer (e.g. for finding versions), *not*
by this library.
:param test_publication: Optional flag. If True, the
handles that are created are test handles
that will be overwritten by real publications. Also,
test publications cannot update real handles.
:returns: An instance of the connector, configured for one
data node, and for connection with a specific RabbitMQ node.
'''
LOGGER.debug(40*'-')
LOGGER.debug('Creating PID connector object ..')
self.__check_presence_of_args(args)
self.__check_rabbit_credentials_completeness(args)
self.__define_defaults_for_optional_args(args)
self.__store_some_args(args)
self.__throw_error_if_prefix_not_in_list()
esgfpid.utils.routingkeys.add_prefix_to_routing_keys(self.prefix)
self.__coupler = esgfpid.coupling.Coupler(**args)
loginfo(LOGGER, 'Created PID connector.')
def __check_presence_of_args(self, args):
    '''
    Ensure that all mandatory constructor arguments were passed.

    :param args: Dictionary of keyword arguments passed to __init__.
    :raises esgfpid.exceptions.ArgumentError: If any mandatory
        argument is missing (raised by the utils helper).
    '''
    mandatory_args = [
        'messaging_service_credentials',
        'messaging_service_exchange_name',
        'handle_prefix'
    ]
    # Fix: the original also built an "optional_args" list that was never
    # used. Kept here as a comment for reference only; the optional args
    # are defaulted in __define_defaults_for_optional_args:
    # data_node, thredds_service_path, test_publication, solr_url,
    # solr_https_verify, disable_insecure_request_warning,
    # solr_switched_off, consumer_solr_url, message_service_synchronous.
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
def __define_defaults_for_optional_args(self, args):
    '''
    Fill in default values for all optional constructor arguments.

    Arguments that are absent, or present with value None, are
    overwritten with their defaults. Note that a missing "solr_url"
    also switches the solr module off entirely.

    :param args: Dictionary of keyword arguments passed to __init__
        (modified in place).
    '''
    def _default(name, value):
        # "Absent" and "explicitly None" are treated the same way.
        if args.get(name) is None:
            args[name] = value
    # May be None; only needed for some operations. If needed,
    # presence is checked later.
    _default('data_node', None)
    # May be None; only needed for some operations. If needed,
    # presence is checked later.
    _default('thredds_service_path', None)
    _default('test_publication', False)
    if args.get('solr_url') is None:
        # Without a solr URL, the solr module cannot be used at all.
        args['solr_url'] = None
        args['solr_switched_off'] = True
    _default('solr_switched_off', False)
    _default('solr_https_verify', esgfpid.defaults.SOLR_HTTPS_VERIFY_DEFAULT)
    _default('disable_insecure_request_warning', False)
    _default('message_service_synchronous', not esgfpid.defaults.RABBIT_IS_ASYNCHRONOUS)
    _default('consumer_solr_url', None)
def __check_rabbit_credentials_completeness(self, args):
    '''
    Validate the list of RabbitMQ credential dictionaries.

    Each entry must be a dict containing at least "url", "user" and
    "password" (strings). Optional entries ("vhost", "port",
    "ssl_enabled") are type-checked and normalized in place if present.

    :param args: Dictionary of keyword arguments passed to __init__;
        must contain "messaging_service_credentials".
    :raises esgfpid.exceptions.ArgumentError: If an entry is not a
        dict, a mandatory field is missing, or a field has a wrong,
        unconvertible type.
    '''
    for credentials in args['messaging_service_credentials']:
        if not isinstance(credentials, dict):
            errmsg = 'Credentials for each RabbitMQ node should be a dictionary.'
            raise esgfpid.exceptions.ArgumentError(errmsg)
        # Mandatory:
        self.__check_presence_and_type('url', credentials, basestring)
        self.__check_presence_and_type('user', credentials, basestring)
        self.__check_presence_and_type('password', credentials, basestring) # If you want open nodes to be enabled again, remove this!
        # Optional:
        # NOTE(review): the password check below is redundant with the
        # mandatory check above; it remains from when password-less
        # "open nodes" were still supported.
        self.__check_and_adapt_type_if_exists('password', credentials, basestring)
        self.__check_and_adapt_type_if_exists('vhost', credentials, basestring)
        self.__check_and_adapt_type_if_exists('port', credentials, int)
        self.__check_and_adapt_type_if_exists('ssl_enabled', credentials, bool)
def __check_presence_and_type(self, attname, credentials, desiredtype):
    '''
    Ensure a credentials field exists, then type-check/normalize it.

    :param attname: Name of the field in the credentials dict.
    :param credentials: Credentials dictionary for one RabbitMQ node.
    :param desiredtype: Expected type of the field's value.
    :raises esgfpid.exceptions.ArgumentError: If the field is missing
        or has a wrong, unconvertible type.
    '''
    self.__check_presence(attname, credentials)
    self.__check_and_adapt_type_if_exists(attname, credentials, desiredtype)
def __check_presence(self, attname, credentials):
    '''
    Raise an error if a field is absent from a credentials dict.

    The error message names the RabbitMQ node by its "url" entry,
    if that entry is available.

    :raises esgfpid.exceptions.ArgumentError: If the field is absent.
    '''
    if attname in credentials:
        return
    node_for_errmsg = credentials.get('url', '(not specified)')
    errmsg = 'Missing %s for messaging service "%s"!' % (attname, node_for_errmsg)
    raise esgfpid.exceptions.ArgumentError(errmsg)
def __check_and_adapt_type_if_exists(self, attname, credentials, desiredtype):
    '''
    Normalize and type-check an optional credentials field in place.

    Normalization steps, in order: an empty string becomes None; a
    one-element list is unwrapped to its single element. A value of
    None is then accepted as-is. Otherwise, if the value does not
    already have the desired type, a conversion is attempted.

    :param attname: Name of the field in the credentials dict.
    :param credentials: Credentials dictionary for one RabbitMQ node
        (modified in place).
    :param desiredtype: Expected type of the field's value.
    :raises esgfpid.exceptions.ArgumentError: If the value has a wrong
        type and cannot be converted.
    '''
    if attname in credentials:
        # Empty string to None:
        if credentials[attname] == '':
            credentials[attname] = None
        # List to object:
        if type(credentials[attname]) == type([]) and len(credentials[attname]) == 1:
            credentials[attname] = credentials[attname][0]
        # Don't check if None:
        if credentials[attname] is None:
            pass
        # Check type:
        elif not isinstance(credentials[attname], desiredtype):
            # Try conversion:
            try:
                credentials[attname] = self.__try_conversion(credentials[attname], desiredtype)
            except ValueError as e:
                errmsg = ('Wrong type of messaging service %s (%s). Expected %s, got %s, conversion failed.' %
                    (attname, credentials[attname], desiredtype, type(credentials[attname])))
                raise esgfpid.exceptions.ArgumentError(errmsg)
def __try_conversion(self, value, desiredtype):
    '''
    Try to convert a value to the desired type.

    Booleans are only converted from the case-insensitive strings
    "true" and "false". Conversion *to* a string type is refused, so
    that e.g. booleans are not silently turned into "True"/"False".

    :param value: The value to convert.
    :param desiredtype: The type to convert to.
    :return: The converted value.
    :raises ValueError: If the conversion is not possible.
    '''
    if desiredtype == bool:
        if isinstance(value, basestring):
            # Bugfix: call the instance method instead of the unbound
            # str.lower(value) — the latter raises a TypeError (which
            # the caller does NOT catch) when the value is a Python 2
            # unicode string, e.g. one parsed from a JSON config.
            if value.lower() == 'true':
                return True
            elif value.lower() == 'false':
                return False
        raise ValueError()
    if desiredtype == basestring:
        #return str(value)
        raise ValueError('Not transforming booleans')
    if desiredtype == int:
        return int(value)
    else:
        return desiredtype(value)
'''
The attributes stored below are needed not only during
initialisation, but also later on.
'''
def __store_some_args(self, args):
    '''
    Keep the constructor arguments that are still needed after init.

    :param args: Dictionary of keyword arguments passed to __init__
        (already validated and filled with defaults).
    '''
    # The prefix is public; the other attributes are private helpers
    # used when constructing assistants.
    self.prefix = args['handle_prefix']
    self.__thredds_service_path = args['thredds_service_path']
    self.__data_node = args['data_node'] # may be None, only needed for some assistants.
    self.__consumer_solr_url = args['consumer_solr_url'] # may be None
def __throw_error_if_prefix_not_in_list(self):
    '''
    Validate the configured handle prefix.

    :raises esgfpid.exceptions.ArgumentError: If the prefix is None
        or not among the accepted prefixes.
    '''
    if self.prefix is None:
        raise esgfpid.exceptions.ArgumentError('Missing handle prefix!')
    accepted = esgfpid.defaults.ACCEPTED_PREFIXES
    if self.prefix in accepted:
        return
    msg = ('The prefix "%s" is not a valid prefix! Please check your config. Accepted prefixes: %s'
           % (self.prefix, ', '.join(accepted)))
    raise esgfpid.exceptions.ArgumentError(msg)
def create_publication_assistant(self, **args):
    '''
    Create an assistant for a dataset that allows to make PID
    requests for the dataset and all of its files.

    :param drs_id: Mandatory. The dataset id of the dataset
        to be published.
    :param version_number: Mandatory. The version number of the
        dataset to be published.
    :param is_replica: Mandatory. Flag to indicate whether the
        dataset is a replica.

    .. note:: If the replica flag is set to False, the publication
        may still be considered a replica by the consuming servlet,
        namely if the dataset was already published at a different
        host. For this, please refer to the consumer documentation.

    :raises esgfpid.exceptions.ArgumentError: If a mandatory argument
        is missing, or if "thredds_service_path" or "data_node" were
        not provided at library init.
    :return: A publication assistant which provides all necessary
        methods to publish a dataset and its files.
    '''
    # Check args
    logdebug(LOGGER, 'Creating publication assistant..')
    mandatory_args = ['drs_id', 'version_number', 'is_replica']
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
    # Check if service path is given
    if self.__thredds_service_path is None:
        msg = 'No thredds_service_path given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # Check if data node is given
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # Check if solr has access:
    if self.__coupler.is_solr_switched_off():
        pass # solr access not mandatory anymore
    # Create publication assistant
    assistant = esgfpid.assistant.publish.DatasetPublicationAssistant(
        drs_id=args['drs_id'],
        version_number=args['version_number'],
        thredds_service_path=self.__thredds_service_path,
        data_node=self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        is_replica=args['is_replica'],
        consumer_solr_url=self.__consumer_solr_url # may be None
    )
    logdebug(LOGGER, 'Creating publication assistant.. done')
    return assistant
'''
Please see the documentation of the check module (:func:`~check.check_pid_queue_availability`).
'''
def check_pid_queue_availability(self, **args):
    '''
    Check whether the RabbitMQ messaging service is reachable, and
    inform about the result.

    All keyword arguments are passed on to
    :class:`esgfpid.check.RabbitChecker`.
    '''
    rabbit_checker = esgfpid.check.RabbitChecker(connector = self, prefix = self.prefix, **args)
    return rabbit_checker.check_and_inform()
def unpublish_one_version(self, **args):
    '''
    Sends a PID update request for the unpublication of one version
    of a dataset currently published at the given data node.

    Either the handle or the pair of drs_id and version_number
    have to be provided, otherwise an exception will occur.

    The consumer will of course check the PID request message's
    timestamp with the timestamp of the last publication, so that
    republications in the mean time are not unpublished.

    The unpublication of the files is included in this method.

    :param handle: Optional. The handle of the dataset
        to be unpublished.
    :param drs_id: Optional. The dataset id of the dataset
        to be unpublished.
    :param version_number: Optional. The version number of
        the dataset to be unpublished.
    :raises: ArgumentError: If not enough arguments are passed
        to identify the dataset, or if no data node was specified
        during library init.
    '''
    # Check args
    optional_args = ['handle', 'drs_id', 'version_number']
    esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)
    # Check if data node is given
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for unpublication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # Unpublish
    # NOTE(review): the check that either handle or (drs_id,
    # version_number) are present presumably happens inside the
    # assistant — confirm there.
    assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
        drs_id = args['drs_id'],
        data_node = self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string()
    )
    assistant.unpublish_one_dataset_version(
        handle = args['handle'],
        version_number = args['version_number']
    )
def add_errata_ids(self, **args):
    '''
    Add errata ids to a dataset handle record.

    The dataset's PID does not have to be passed; the handle string
    is derived from the dataset id and the version number (by making
    a hash over both).

    :param errata_ids: Mandatory. A list of errata ids (strings)
        to be added to the handle record.
    :param drs_id: Mandatory. The dataset id of the dataset to whose
        handle record the errata ids are to be added.
    :param version_number: Mandatory. The version number of the
        dataset to whose handle record the errata ids are to be
        added.
    '''
    # Both presence and non-None-ness of all three args are required:
    required = ['drs_id', 'version_number', 'errata_ids']
    esgfpid.utils.check_presence_of_mandatory_args(args, required)
    esgfpid.utils.check_noneness_of_mandatory_args(args, required)
    # Delegate the metadata update to the errata assistant:
    helper = esgfpid.assistant.errata.ErrataAssistant(
        coupler=self.__coupler,
        prefix=self.prefix
    )
    helper.add_errata_ids(
        drs_id=args['drs_id'],
        version_number=args['version_number'],
        errata_ids=args['errata_ids']
    )
def remove_errata_ids(self, **args):
    '''
    Remove errata ids from a dataset handle record.

    The dataset's PID does not have to be passed; the handle string
    is derived from the dataset id and the version number (by making
    a hash over both).

    :param errata_ids: Mandatory. A list of errata ids (strings) to
        be removed from the handle record.
    :param drs_id: Mandatory. The dataset id of the dataset from
        whose handle record the errata ids are to be removed.
    :param version_number: Mandatory. The version number of the
        dataset from whose handle record the errata ids are to be
        removed.
    '''
    # Both presence and non-None-ness of all three args are required:
    required = ['drs_id', 'version_number', 'errata_ids']
    esgfpid.utils.check_presence_of_mandatory_args(args, required)
    esgfpid.utils.check_noneness_of_mandatory_args(args, required)
    # Delegate the metadata update to the errata assistant:
    helper = esgfpid.assistant.errata.ErrataAssistant(
        coupler=self.__coupler,
        prefix=self.prefix
    )
    helper.remove_errata_ids(
        drs_id=args['drs_id'],
        version_number=args['version_number'],
        errata_ids=args['errata_ids']
    )
def create_data_cart_pid(self, dict_of_drs_ids_and_pids):
    '''
    Create (or reuse) a handle record for a data cart, i.e. a custom
    set of datasets.

    The handle string is made of the configured prefix and a hash
    over all dataset ids in the cart, so passing exactly the same
    set of datasets again yields the same handle record instead of
    creating a new one.

    :param dict_of_drs_ids_and_pids: A dictionary mapping each
        dataset id to its PID string, or to None if the PID is
        not known.
    :return: The handle string for this data cart.
    '''
    cart_assistant = esgfpid.assistant.datacart.DataCartAssistant(
        prefix=self.prefix,
        coupler=self.__coupler
    )
    return cart_assistant.make_data_cart_pid(dict_of_drs_ids_and_pids)
def start_messaging_thread(self):
    '''
    Start the parallel thread that takes care of the asynchronous
    communication with RabbitMQ.

    If PID creation/update requests are attempted before
    this was called, an exception will be raised.

    Preferably call this method as early as possible, so that
    the module has some time to build the connection before
    the first PID requests are made.
    (If PID requests are made before the connection is ready,
    they will not be lost, but pile up and sent once the connection
    is ready).

    .. important:: Please do not forget to finish the thread at the end,
        using :meth:`~esgfpid.connector.Connector.finish_messaging_thread`
        or :meth:`~esgfpid.connector.Connector.force_finish_messaging_thread`.
    '''
    # The coupler owns the rabbit connection; this merely delegates.
    self.__coupler.start_rabbit_connection()
def finish_messaging_thread(self):
    '''
    Finish and join the parallel thread that takes care of
    the asynchronous communication with RabbitMQ.

    If some messages are still in the stack to be sent,
    or if some messages were not confirmed yet, this method
    blocks and waits for some time while it iteratively
    checks for message confirmation.

    Currently, it waits up to 5 seconds: It checks up to
    11 times, waiting 0.5 seconds in between - these
    values can be configured in the defaults module).
    '''
    # Graceful shutdown; use force_finish_messaging_thread() to skip
    # the wait for pending/unconfirmed messages.
    self.__coupler.finish_rabbit_connection()
def force_finish_messaging_thread(self):
    '''
    Finish and join the parallel thread that takes care of
    the asynchronous communication with RabbitMQ.

    This method does not wait for any pending messages.
    Messages that are not sent yet are lost. Messages that
    are not confirmed yet are probably not lost, but their
    receival is not guaranteed.

    Note:
    The rabbit module keeps a copy of all unsent and
    unconfirmed messages, so they could be resent in
    a later connection. It would also be easy to expose
    a method for the library caller to retrieve those
    messages, e.g. to write them into some file.
    '''
    # Immediate shutdown; see finish_messaging_thread() for the
    # graceful variant that waits for confirmations.
    self.__coupler.force_finish_rabbit_connection()
def make_handle_from_drsid_and_versionnumber(self, **args):
    '''
    Create a handle string for a specific dataset, based
    on its dataset id and version number, and the prefix
    passed to the library at initializing.

    :param drs_id: The dataset id of the dataset.
    :param version_number: The version number of the dataset
        (as a string or integer, this does not matter)
    :return: A handle string (e.g. "hdl:21.14100/abcxyzfoo")
    '''
    # The library's configured prefix overrides any caller-passed one:
    args['prefix'] = self.prefix
    return esgfpid.utils.make_handle_from_drsid_and_versionnumber(**args)
|
IS-ENES-Data/esgf-pid
|
esgfpid/connector.py
|
Connector.add_errata_ids
|
python
|
def add_errata_ids(self, **args):
'''
Add errata ids to a dataset handle record.
To call this method, you do not need to provide the
PID of the dataset. Instead, the PID string is derived
from the dataset id and the version number.
:param errata_ids: Mandatory. A list of errata ids (strings)
to be added to the handle record.
:param drs_id: Mandatory. The dataset id of the dataset
to whose handle record the errata ids are to be
added. (This is needed because the handle is found
by making a hash over dataset id and version number).
:param version_number: Mandatory. The version number of the
dataset to whose handle record the errata ids are to be
added. (This is needed because the handle is found by
making a hash over dataset id and version number).
'''
# Check args:
mandatory_args = ['drs_id', 'version_number', 'errata_ids']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
# Perform metadata update
assistant = esgfpid.assistant.errata.ErrataAssistant(
coupler=self.__coupler,
prefix=self.prefix
)
assistant.add_errata_ids(
drs_id=args['drs_id'],
version_number=args['version_number'],
errata_ids=args['errata_ids']
)
|
Add errata ids to a dataset handle record.
To call this method, you do not need to provide the
PID of the dataset. Instead, the PID string is derived
from the dataset id and the version number.
:param errata_ids: Mandatory. A list of errata ids (strings)
to be added to the handle record.
:param drs_id: Mandatory. The dataset id of the dataset
to whose handle record the errata ids are to be
added. (This is needed because the handle is found
by making a hash over dataset id and version number).
:param version_number: Mandatory. The version number of the
dataset to whose handle record the errata ids are to be
added. (This is needed because the handle is found by
making a hash over dataset id and version number).
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L448-L484
|
[
"def check_presence_of_mandatory_args(args, mandatory_args):\n missing_args = []\n for name in mandatory_args:\n if name not in args.keys():\n missing_args.append(name)\n if len(missing_args)>0:\n raise esgfpid.exceptions.ArgumentError('Missing mandatory arguments: '+', '.join(missing_args))\n else:\n return True\n",
"def check_noneness_of_mandatory_args(args, mandatory_args):\n empty_args = []\n for name in mandatory_args:\n if args[name] is None:\n empty_args.append(name)\n if len(empty_args)>0:\n raise esgfpid.exceptions.ArgumentError('Problem: These arguments are None: '+', '.join(empty_args))\n else:\n return True\n",
"def add_errata_ids(self, **args):\n logdebug(LOGGER, 'Adding errata ids...')\n mandatory_args = ['drs_id', 'version_number', 'errata_ids']\n esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)\n esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)\n\n dataset_handle = self.__get_dataset_handle(args)\n errata_ids = self.__get_errata_ids_as_list(args)\n message = self.__make_add_message(errata_ids, dataset_handle, args['drs_id'], args['version_number'])\n self.__send_message_to_queue(message)\n\n loginfo(LOGGER, 'Requesting to add errata ids \"%s\" to dataset \"%s\".', ', '.join(errata_ids), dataset_handle)\n logdebug(LOGGER, 'Adding errata ids... done')\n"
] |
class Connector(object):
'''
This class provides the main functionality for the ESGF PID
module.
Author: Merret Buurman (DKRZ), 2015-2016
'''
def __init__(self, **args):
    '''
    Create a connector object with the necessary config
    to connect to a RabbitMQ messaging server and perform
    PID creations/updates.

    The arguments have to be passed as named parameters.
    Please contact your ESGF index node or CDNOT for this
    information.

    Some of the arguments are needed for making connections
    from this library to RabbitMQ or to solr. Other arguments
    are only passed on to the consuming servlet inside the
    messages.

    :param handle_prefix: Mandatory. The handle prefix (as a
        string) for the handles to be created/updates. This
        has to match the handle prefix that the message queue
        consuming servlet has write access to. In CMIP6, this
        is "21.14100".
    :param messaging_service_exchange_name: Mandatory. The
        name of the messaging exchange that will forward the
        messages to a specific queue.
    :param messaging_service_credentials: Mandatory. List of
        dictionaries with credentials for the RabbitMQ nodes.
        Each needs to have the entries: "user", "password", "url".
        They may have an integer "priority" too. If two nodes have
        the same priority, the library chooses randomly between
        them. They also may have a "vhost" (RabbitMQ virtual host),
        a "port" and a boolean "ssl_enabled". Please refer to pika's
        documentation
        (http://pika.readthedocs.io/en/latest/modules/parameters.html).
        Dictionaries for 'open nodes' do not need a password
        to be provided. Open nodes are only used if no more
        other nodes are available. Note: Open nodes are no longer
        supported.
    :param message_service_synchronous: Optional. Boolean to
        define if the connection to RabbitMQ and the message
        sending should work in synchronous mode. Defaults to
        the value defined in defaults.py.
    :param data_node: Mandatory/Optional.
        (Mandatory for publications and unpublications,
        ignored for any other usage of the library. No default.)
        The data node (host name) at which (un)publication takes
        place. This will be included in the handle records. Trailing
        slashes are removed.
        Used during publication and unpublication (modules
        assistant.publish and assistant.unpublish):
        * Publication: Used to construct the file data URL (together
        with thredds service path and file publish path). Sent along
        in rabbit message. Used for consistency check, if solr use
        is enabled.
        * Unpublication: Sent along in rabbit message.
    :param thredds_service_path: Mandatory for publications,
        ignored for any other usage of the library. No default.
        The thredds service path where the files of a publication
        reside. Will be combined with files' publish path and data
        node to form the files' data access URLs.
    :param solr_url: Optional. The URL of the solr to be uses by this
        library for the dataset consistency check. No default. If not provided,
        the check is not done.
        Note: This is currently switched off for performance reasons.
    :param solr_https_verify: Optional flag to indicate whether
        requests to solr should verify the SSL certificate.
        Please see documentation of requests library: http://docs.python-requests.org/en/master/user/advanced/
    :param disable_insecure_request_warning: Optional flag (only for
        use during testing). If True, warnings are not printed during
        insecure SSL requests.
        Important: This is not passed through to the solr module, so
        that switching off the warnings is not possible. It can only
        be passed directly to the solr module during tests.
    :param solr_switched_off: Optional flag to tell if the solr module
        should be switched off. In that case, no connections to solr
        are made.
    :param consumer_solr_url: Optional. URL of a solr instance that
        is to be used by the consumer (e.g. for finding versions), *not*
        by this library.
    :param test_publication: Optional flag. If True, the
        handles that are created are test handles
        that will be overwritten by real publications. Also,
        test publications cannot update real handles.
    :returns: An instance of the connector, configured for one
        data node, and for connection with a specific RabbitMQ node.
    '''
    LOGGER.debug(40*'-')
    LOGGER.debug('Creating PID connector object ..')
    # Validate and normalize all constructor arguments first:
    self.__check_presence_of_args(args)
    self.__check_rabbit_credentials_completeness(args)
    self.__define_defaults_for_optional_args(args)
    self.__store_some_args(args)
    self.__throw_error_if_prefix_not_in_list()
    # Routing keys include the prefix, so this must happen after
    # the prefix was validated and stored:
    esgfpid.utils.routingkeys.add_prefix_to_routing_keys(self.prefix)
    # Central helper object that the assistants use; constructed
    # with the validated/defaulted args:
    self.__coupler = esgfpid.coupling.Coupler(**args)
    loginfo(LOGGER, 'Created PID connector.')
def __check_presence_of_args(self, args):
    '''
    Ensure that all mandatory constructor arguments were passed.

    :param args: Dictionary of keyword arguments passed to __init__.
    :raises esgfpid.exceptions.ArgumentError: If any mandatory
        argument is missing (raised by the utils helper).
    '''
    mandatory_args = [
        'messaging_service_credentials',
        'messaging_service_exchange_name',
        'handle_prefix'
    ]
    # Fix: the original also built an "optional_args" list that was never
    # used. Kept here as a comment for reference only; the optional args
    # are defaulted in __define_defaults_for_optional_args:
    # data_node, thredds_service_path, test_publication, solr_url,
    # solr_https_verify, disable_insecure_request_warning,
    # solr_switched_off, consumer_solr_url, message_service_synchronous.
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
def __define_defaults_for_optional_args(self, args):
    '''
    Fill in default values for all optional constructor arguments.

    Arguments that are absent, or present with value None, are
    overwritten with their defaults. Note that a missing "solr_url"
    also switches the solr module off entirely.

    :param args: Dictionary of keyword arguments passed to __init__
        (modified in place).
    '''
    def _default(name, value):
        # "Absent" and "explicitly None" are treated the same way.
        if args.get(name) is None:
            args[name] = value
    # May be None; only needed for some operations. If needed,
    # presence is checked later.
    _default('data_node', None)
    # May be None; only needed for some operations. If needed,
    # presence is checked later.
    _default('thredds_service_path', None)
    _default('test_publication', False)
    if args.get('solr_url') is None:
        # Without a solr URL, the solr module cannot be used at all.
        args['solr_url'] = None
        args['solr_switched_off'] = True
    _default('solr_switched_off', False)
    _default('solr_https_verify', esgfpid.defaults.SOLR_HTTPS_VERIFY_DEFAULT)
    _default('disable_insecure_request_warning', False)
    _default('message_service_synchronous', not esgfpid.defaults.RABBIT_IS_ASYNCHRONOUS)
    _default('consumer_solr_url', None)
def __check_rabbit_credentials_completeness(self, args):
    '''
    Validate the list of RabbitMQ credential dictionaries.

    Each entry must be a dict containing at least "url", "user" and
    "password" (strings). Optional entries ("vhost", "port",
    "ssl_enabled") are type-checked and normalized in place if present.

    :param args: Dictionary of keyword arguments passed to __init__;
        must contain "messaging_service_credentials".
    :raises esgfpid.exceptions.ArgumentError: If an entry is not a
        dict, a mandatory field is missing, or a field has a wrong,
        unconvertible type.
    '''
    for credentials in args['messaging_service_credentials']:
        if not isinstance(credentials, dict):
            errmsg = 'Credentials for each RabbitMQ node should be a dictionary.'
            raise esgfpid.exceptions.ArgumentError(errmsg)
        # Mandatory:
        self.__check_presence_and_type('url', credentials, basestring)
        self.__check_presence_and_type('user', credentials, basestring)
        self.__check_presence_and_type('password', credentials, basestring) # If you want open nodes to be enabled again, remove this!
        # Optional:
        # NOTE(review): the password check below is redundant with the
        # mandatory check above; it remains from when password-less
        # "open nodes" were still supported.
        self.__check_and_adapt_type_if_exists('password', credentials, basestring)
        self.__check_and_adapt_type_if_exists('vhost', credentials, basestring)
        self.__check_and_adapt_type_if_exists('port', credentials, int)
        self.__check_and_adapt_type_if_exists('ssl_enabled', credentials, bool)
def __check_presence_and_type(self, attname, credentials, desiredtype):
    '''
    Ensure a credentials field exists, then type-check/normalize it.

    :param attname: Name of the field in the credentials dict.
    :param credentials: Credentials dictionary for one RabbitMQ node.
    :param desiredtype: Expected type of the field's value.
    :raises esgfpid.exceptions.ArgumentError: If the field is missing
        or has a wrong, unconvertible type.
    '''
    self.__check_presence(attname, credentials)
    self.__check_and_adapt_type_if_exists(attname, credentials, desiredtype)
def __check_presence(self, attname, credentials):
    '''
    Raise an error if a field is absent from a credentials dict.

    The error message names the RabbitMQ node by its "url" entry,
    if that entry is available.

    :raises esgfpid.exceptions.ArgumentError: If the field is absent.
    '''
    if attname in credentials:
        return
    node_for_errmsg = credentials.get('url', '(not specified)')
    errmsg = 'Missing %s for messaging service "%s"!' % (attname, node_for_errmsg)
    raise esgfpid.exceptions.ArgumentError(errmsg)
def __check_and_adapt_type_if_exists(self, attname, credentials, desiredtype):
    '''
    Normalize and type-check an optional credentials field in place.

    Normalization steps, in order: an empty string becomes None; a
    one-element list is unwrapped to its single element. A value of
    None is then accepted as-is. Otherwise, if the value does not
    already have the desired type, a conversion is attempted.

    :param attname: Name of the field in the credentials dict.
    :param credentials: Credentials dictionary for one RabbitMQ node
        (modified in place).
    :param desiredtype: Expected type of the field's value.
    :raises esgfpid.exceptions.ArgumentError: If the value has a wrong
        type and cannot be converted.
    '''
    if attname in credentials:
        # Empty string to None:
        if credentials[attname] == '':
            credentials[attname] = None
        # List to object:
        if type(credentials[attname]) == type([]) and len(credentials[attname]) == 1:
            credentials[attname] = credentials[attname][0]
        # Don't check if None:
        if credentials[attname] is None:
            pass
        # Check type:
        elif not isinstance(credentials[attname], desiredtype):
            # Try conversion:
            try:
                credentials[attname] = self.__try_conversion(credentials[attname], desiredtype)
            except ValueError as e:
                errmsg = ('Wrong type of messaging service %s (%s). Expected %s, got %s, conversion failed.' %
                    (attname, credentials[attname], desiredtype, type(credentials[attname])))
                raise esgfpid.exceptions.ArgumentError(errmsg)
def __try_conversion(self, value, desiredtype):
    '''
    Try to convert a value to the desired type.

    Booleans are only converted from the case-insensitive strings
    "true" and "false". Conversion *to* a string type is refused, so
    that e.g. booleans are not silently turned into "True"/"False".

    :param value: The value to convert.
    :param desiredtype: The type to convert to.
    :return: The converted value.
    :raises ValueError: If the conversion is not possible.
    '''
    if desiredtype == bool:
        if isinstance(value, basestring):
            # Bugfix: call the instance method instead of the unbound
            # str.lower(value) — the latter raises a TypeError (which
            # the caller does NOT catch) when the value is a Python 2
            # unicode string, e.g. one parsed from a JSON config.
            if value.lower() == 'true':
                return True
            elif value.lower() == 'false':
                return False
        raise ValueError()
    if desiredtype == basestring:
        #return str(value)
        raise ValueError('Not transforming booleans')
    if desiredtype == int:
        return int(value)
    else:
        return desiredtype(value)
'''
The attributes stored below are needed not only during
initialisation, but also later on.
'''
def __store_some_args(self, args):
    '''
    Keep the constructor arguments that are still needed after init.

    :param args: Dictionary of keyword arguments passed to __init__
        (already validated and filled with defaults).
    '''
    # The prefix is public; the other attributes are private helpers
    # used when constructing assistants.
    self.prefix = args['handle_prefix']
    self.__thredds_service_path = args['thredds_service_path']
    self.__data_node = args['data_node'] # may be None, only needed for some assistants.
    self.__consumer_solr_url = args['consumer_solr_url'] # may be None
def __throw_error_if_prefix_not_in_list(self):
    '''
    Validate the configured handle prefix.

    :raises esgfpid.exceptions.ArgumentError: If the prefix is None
        or not among the accepted prefixes.
    '''
    if self.prefix is None:
        raise esgfpid.exceptions.ArgumentError('Missing handle prefix!')
    accepted = esgfpid.defaults.ACCEPTED_PREFIXES
    if self.prefix in accepted:
        return
    msg = ('The prefix "%s" is not a valid prefix! Please check your config. Accepted prefixes: %s'
           % (self.prefix, ', '.join(accepted)))
    raise esgfpid.exceptions.ArgumentError(msg)
def create_publication_assistant(self, **args):
    '''
    Create an assistant for a dataset that allows to make PID
    requests for the dataset and all of its files.

    :param drs_id: Mandatory. The dataset id of the dataset
        to be published.
    :param version_number: Mandatory. The version number of the
        dataset to be published.
    :param is_replica: Mandatory. Flag to indicate whether the
        dataset is a replica.

    .. note:: If the replica flag is set to False, the publication
        may still be considered a replica by the consuming servlet,
        namely if the dataset was already published at a different
        host. For this, please refer to the consumer documentation.

    :raises esgfpid.exceptions.ArgumentError: If a mandatory argument
        is missing, or if "thredds_service_path" or "data_node" were
        not provided at library init.
    :return: A publication assistant which provides all necessary
        methods to publish a dataset and its files.
    '''
    # Check args
    logdebug(LOGGER, 'Creating publication assistant..')
    mandatory_args = ['drs_id', 'version_number', 'is_replica']
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
    # Check if service path is given
    if self.__thredds_service_path is None:
        msg = 'No thredds_service_path given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # Check if data node is given
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # Check if solr has access:
    if self.__coupler.is_solr_switched_off():
        pass # solr access not mandatory anymore
    # Create publication assistant
    assistant = esgfpid.assistant.publish.DatasetPublicationAssistant(
        drs_id=args['drs_id'],
        version_number=args['version_number'],
        thredds_service_path=self.__thredds_service_path,
        data_node=self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        is_replica=args['is_replica'],
        consumer_solr_url=self.__consumer_solr_url # may be None
    )
    logdebug(LOGGER, 'Creating publication assistant.. done')
    return assistant
'''
Please see the documentation of the check module (:func:`~check.check_pid_queue_availability`).
'''
def check_pid_queue_availability(self, **args):
    '''
    Check whether the RabbitMQ messaging service is reachable, and
    inform about the result.

    All keyword arguments are passed on to
    :class:`esgfpid.check.RabbitChecker`.
    '''
    rabbit_checker = esgfpid.check.RabbitChecker(connector = self, prefix = self.prefix, **args)
    return rabbit_checker.check_and_inform()
def unpublish_one_version(self, **args):
    '''
    Sends a PID update request for the unpublication of one version
    of a dataset currently published at the given data node.

    Either the handle or the pair of drs_id and version_number
    have to be provided, otherwise an exception will occur.

    The consumer will of course check the PID request message's
    timestamp with the timestamp of the last publication, so that
    republications in the mean time are not unpublished.

    The unpublication of the files is included in this method.

    :param handle: Optional. The handle of the dataset
        to be unpublished.
    :param drs_id: Optional. The dataset id of the dataset
        to be unpublished.
    :param version_number: Optional. The version number of
        the dataset to be unpublished.
    :raises: ArgumentError: If not enough arguments are passed
        to identify the dataset, or if no data node was specified
        during library init.
    '''
    # Check args
    optional_args = ['handle', 'drs_id', 'version_number']
    esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)
    # Check if data node is given
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for unpublication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # Unpublish
    # NOTE(review): the check that either handle or (drs_id,
    # version_number) are present presumably happens inside the
    # assistant — confirm there.
    assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
        drs_id = args['drs_id'],
        data_node = self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string()
    )
    assistant.unpublish_one_dataset_version(
        handle = args['handle'],
        version_number = args['version_number']
    )
def unpublish_all_versions(self, **args):
'''
Sends a PID update request for the unpublication of all versions
of a dataset currently published at the given data node.
If the library has solr access, it will try to find all the
dataset versions and their handles from solr, and send individual
messages for each version. Otherwise, one message is sent, and the
queue consuming servlet has to identify the relevant versions,
also making sure not to unpublish any versions that may have been
republished in the meantime.
:param drs_id: Dataset id of the dataset to be unpublished.
:raises: ArgumentError: If the data node
was not provided at library initialization.
'''
# Check args
mandatory_args = ['drs_id']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for publication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Check if solr has access:
if self.__coupler.is_solr_switched_off():
msg = 'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.'
logdebug(LOGGER, msg)
#raise esgfpid.exceptions.ArgumentError('No solr access. Solr access is needed for publication. Please provide access to a solr index when initializing the library')
# Unpublish
assistant = esgfpid.assistant.unpublish.AssistantAllVersions(
drs_id = args['drs_id'],
data_node = self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string(),
consumer_solr_url = self.__consumer_solr_url # may be None
)
assistant.unpublish_all_dataset_versions()
def remove_errata_ids(self, **args):
'''
Remove errata ids from a dataset handle record.
To call this method, you do not need to provide the
PID of the dataset. Instead, the PID string is derived
from the dataset id and the version number.
:param errata_ids: Mandatory. A list of errata ids (strings) to
be removed from the handle record.
:param drs_id: Mandatory. The dataset id of the dataset
from whose handle record the errata ids are to be
removed. (This is needed because the handle is found
by making a hash over dataset id and version number).
:param version_number: Mandatory. The version number of the
dataset from whose handle record the errata ids are to be
removed. (This is needed because the handle is found by
making a hash over dataset id and version number).
'''
# Check args:
mandatory_args = ['drs_id', 'version_number', 'errata_ids']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
# Perform metadata update
assistant = esgfpid.assistant.errata.ErrataAssistant(
coupler=self.__coupler,
prefix=self.prefix
)
assistant.remove_errata_ids(
drs_id=args['drs_id'],
version_number=args['version_number'],
errata_ids=args['errata_ids']
)
def create_data_cart_pid(self, dict_of_drs_ids_and_pids):
'''
Create a handle record for a data cart (a custom set of datasets).
The handle string is made of the prefix passed to the library,
and a hash over all the dataset ids in the cart. This way, if exactly
the same set of datasets is passed several times, the same handle
record is created, instead of making a new one.
:param dict_of_drs_ids_and_pids: A dictionary of all dataset ids
and their pid strings. If a dataset has no (known) PID, use
"None".
:return: The handle string for this data cart.
'''
assistant = esgfpid.assistant.datacart.DataCartAssistant(
prefix=self.prefix,
coupler=self.__coupler
)
return assistant.make_data_cart_pid(dict_of_drs_ids_and_pids)
def start_messaging_thread(self):
'''
Start the parallel thread that takes care of the asynchronous
communication with RabbitMQ.
If PID creation/update requests are attempted before
this was called, an exception will be raised.
Preferably call this method as early as possible, so that
the module has some time to build the connection before
the first PID requests are made.
(If PID requests are made before the connection is ready,
they will not be lost, but pile up and sent once the connection
is ready).
.. important:: Please do not forget to finish the thread at the end,
using :meth:`~esgfpid.connector.Connector.finish_messaging_thread`
or :meth:`~esgfpid.connector.Connector.force_finish_messaging_thread`.
'''
self.__coupler.start_rabbit_connection()
def finish_messaging_thread(self):
'''
Finish and join the parallel thread that takes care of
the asynchronous communication with RabbitMQ.
If some messages are still in the stack to be sent,
or if some messages were not confirmed yet, this method
blocks and waits for some time while it iteratively
checks for message confirmation.
Currently, it waits up to 5 seconds: It checks up to
11 times, waiting 0.5 seconds in between - these
values can be configured in the defaults module.
'''
self.__coupler.finish_rabbit_connection()
def force_finish_messaging_thread(self):
'''
Finish and join the parallel thread that takes care of
the asynchronous communication with RabbitMQ.
This method does not wait for any pending messages.
Messages that are not sent yet are lost. Messages that
are not confirmed yet are probably not lost, but their
receival is not guaranteed.
Note:
The rabbit module keeps a copy of all unsent and
unconfirmed messages, so they could be resent in
a later connection. It would also be easy to expose
a method for the library caller to retrieve those
messages, e.g. to write them into some file.
'''
self.__coupler.force_finish_rabbit_connection()
def make_handle_from_drsid_and_versionnumber(self, **args):
'''
Create a handle string for a specific dataset, based
on its dataset id and version number, and the prefix
passed to the library at initializing.
:param drs_id: The dataset id of the dataset.
:param version_number: The version number of the dataset
(as a string or integer, this does not matter)
:return: A handle string (e.g. "hdl:21.14100/abcxyzfoo")
'''
args['prefix'] = self.prefix
return esgfpid.utils.make_handle_from_drsid_and_versionnumber(**args)
|
IS-ENES-Data/esgf-pid
|
esgfpid/connector.py
|
Connector.create_data_cart_pid
|
python
|
def create_data_cart_pid(self, dict_of_drs_ids_and_pids):
'''
Create a handle record for a data cart (a custom set of datasets).
The handle string is made of the prefix passed to the library,
and a hash over all the dataset ids in the cart. This way, if exactly
the same set of datasets is passed several times, the same handle
record is created, instead of making a new one.
:param dict_of_drs_ids_and_pids: A dictionary of all dataset ids
and their pid strings. If a dataset has no (known) PID, use
"None".
:return: The handle string for this data cart.
'''
assistant = esgfpid.assistant.datacart.DataCartAssistant(
prefix=self.prefix,
coupler=self.__coupler
)
return assistant.make_data_cart_pid(dict_of_drs_ids_and_pids)
|
Create a handle record for a data cart (a custom set of datasets).
The handle string is made of the prefix passed to the library,
and a hash over all the dataset ids in the cart. This way, if exactly
the same set of datasets is passed several times, the same handle
record is created, instead of making a new one.
:param dict_of_drs_ids_and_pids: A dictionary of all dataset ids
and their pid strings. If a dataset has no (known) PID, use
"None".
:return: The handle string for this data cart.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L525-L544
|
[
"def make_data_cart_pid(self, dict_of_drs_ids_and_pids):\n logdebug(LOGGER, 'Making a PID for a data cart full of datasets...')\n\n # Check arg\n if not type(dict_of_drs_ids_and_pids) == type(dict()):\n if type(dict_of_drs_ids_and_pids) == type([]):\n raise esgfpid.exceptions.ArgumentError('Please provide a dictionary of dataset ids and handles, not a list')\n else:\n raise esgfpid.exceptions.ArgumentError('Please provide a dictionary of dataset ids and handles')\n\n # Make a pid (hash on the content):\n cart_handle = DataCartAssistant._get_handle_string_for_datacart(\n dict_of_drs_ids_and_pids,\n self.__prefix\n )\n\n # Make and send message\n message = self.__make_message(cart_handle, dict_of_drs_ids_and_pids)\n self.__send_message_to_queue(message)\n\n # Return pid\n logdebug(LOGGER, 'Making a PID for a data cart full of datasets... done.')\n loginfo(LOGGER, 'Requesting to create PID for data cart (%s).', cart_handle)\n return cart_handle\n"
] |
class Connector(object):
'''
This class provides the main functionality for the ESGF PID
module.
Author: Merret Buurman (DKRZ), 2015-2016
'''
def __init__(self, **args):
'''
Create a connector object with the necessary config
to connect to a RabbitMQ messaging server and perform
PID creations/updates.
The arguments have to be passed as named parameters.
Please contact your ESGF index node or CDNOT for this
information.
Some of the arguments are needed for making connections
from this library to RabbitMQ or to solr. Other arguments
are only passed on to the consuming servlet inside the
messages.
:param handle_prefix: Mandatory. The handle prefix (as a
string) for the handles to be created/updated. This
has to match the handle prefix that the message queue
consuming servlet has write access to. In CMIP6, this
is "21.14100".
:param messaging_service_exchange_name: Mandatory. The
name of the messaging exchange that will forward the
messages to a specific queue.
:param messaging_service_credentials: Mandatory. List of
dictionaries with credentials for the RabbitMQ nodes.
Each needs to have the entries: "user", "password", "url".
They may have an integer "priority" too. If two nodes have
the same priority, the library chooses randomly between
them. They also may have a "vhost" (RabbitMQ virtual host),
a "port" and a boolean "ssl_enabled". Please refer to pika's
documentation
(http://pika.readthedocs.io/en/latest/modules/parameters.html).
Dictionaries for 'open nodes' do not need a password
to be provided. Open nodes are only used if no more
other nodes are available. Note: Open nodes are no longer
supported.
:param message_service_synchronous: Optional. Boolean to
define if the connection to RabbitMQ and the message
sending should work in synchronous mode. Defaults to
the value defined in defaults.py.
:param data_node: Mandatory/Optional.
(Mandatory for publications and unpublications,
ignored for any other usage of the library. No default.)
The data node (host name) at which (un)publication takes
place. This will be included in the handle records. Trailing
slashes are removed.
Used during publication and unpublication (modules
assistant.publish and assistant.unpublish):
* Publication: Used to construct the file data URL (together
with thredds service path and file publish path). Sent along
in rabbit message. Used for consistency check, if solr use
is enabled.
* Unpublication: Sent along in rabbit message.
:param thredds_service_path: Mandatory for publications,
ignored for any other usage of the library. No default.
The thredds service path where the files of a publication
reside. Will be combined with files' publish path and data
node to form the files' data access URLs.
:param solr_url: Optional. The URL of the solr to be used by this
library for the dataset consistency check. No default. If not provided,
the check is not done.
Note: This is currently switched off for performance reasons.
:param solr_https_verify: Optional flag to indicate whether
requests to solr should verify the SSL certificate.
Please see documentation of requests library: http://docs.python-requests.org/en/master/user/advanced/
:param disable_insecure_request_warning: Optional flag (only for
use during testing). If True, warnings are not printed during
insecure SSL requests.
Important: This is not passed through to the solr module, so
that switching off the warnings is not possible. It can only
be passed directly to the solr module during tests.
:param solr_switched_off: Optional flag to tell if the solr module
should be switched off. In that case, no connections to solr
are made.
:param consumer_solr_url: Optional. URL of a solr instance that
is to be used by the consumer (e.g. for finding versions), *not*
by this library.
:param test_publication: Optional flag. If True, the
handles that are created are test handles
that will be overwritten by real publications. Also,
test publications cannot update real handles.
:returns: An instance of the connector, configured for one
data node, and for connection with a specific RabbitMQ node.
'''
LOGGER.debug(40*'-')
LOGGER.debug('Creating PID connector object ..')
self.__check_presence_of_args(args)
self.__check_rabbit_credentials_completeness(args)
self.__define_defaults_for_optional_args(args)
self.__store_some_args(args)
self.__throw_error_if_prefix_not_in_list()
esgfpid.utils.routingkeys.add_prefix_to_routing_keys(self.prefix)
self.__coupler = esgfpid.coupling.Coupler(**args)
loginfo(LOGGER, 'Created PID connector.')
def __check_presence_of_args(self, args):
mandatory_args = [
'messaging_service_credentials',
'messaging_service_exchange_name',
'handle_prefix'
]
optional_args = [
'data_node',
'thredds_service_path',
'test_publication',
'solr_url',
'solr_https_verify',
'disable_insecure_request_warning',
'solr_switched_off',
'consumer_solr_url',
'message_service_synchronous'
]
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
def __define_defaults_for_optional_args(self, args):
if 'data_node' not in args or args['data_node'] is None:
''' May be None, only needed for some operations.
If it is needed, its presence is checked later. '''
args['data_node'] = None
if 'thredds_service_path' not in args or args['thredds_service_path'] is None:
''' May be None, only needed for some operations.
If it is needed, its presence is checked later. '''
args['thredds_service_path'] = None
if 'test_publication' not in args or args['test_publication'] is None:
args['test_publication'] = False
if 'solr_url' not in args or args['solr_url'] is None:
args['solr_url'] = None
args['solr_switched_off'] = True
if 'solr_switched_off' not in args or args['solr_switched_off'] is None:
args['solr_switched_off'] = False
if 'solr_https_verify' not in args or args['solr_https_verify'] is None:
args['solr_https_verify'] = esgfpid.defaults.SOLR_HTTPS_VERIFY_DEFAULT
if 'disable_insecure_request_warning' not in args or args['disable_insecure_request_warning'] is None:
args['disable_insecure_request_warning'] = False
if 'message_service_synchronous' not in args or args['message_service_synchronous'] is None:
args['message_service_synchronous'] = not esgfpid.defaults.RABBIT_IS_ASYNCHRONOUS
if 'consumer_solr_url' not in args or args['consumer_solr_url'] is None:
args['consumer_solr_url'] = None
def __check_rabbit_credentials_completeness(self, args):
for credentials in args['messaging_service_credentials']:
if not isinstance(credentials, dict):
errmsg = 'Credentials for each RabbitMQ node should be a dictionary.'
raise esgfpid.exceptions.ArgumentError(errmsg)
# Mandatory:
self.__check_presence_and_type('url', credentials, basestring)
self.__check_presence_and_type('user', credentials, basestring)
self.__check_presence_and_type('password', credentials, basestring) # If you want open nodes to be enabled again, remove this!
# Optional:
self.__check_and_adapt_type_if_exists('password', credentials, basestring)
self.__check_and_adapt_type_if_exists('vhost', credentials, basestring)
self.__check_and_adapt_type_if_exists('port', credentials, int)
self.__check_and_adapt_type_if_exists('ssl_enabled', credentials, bool)
def __check_presence_and_type(self, attname, credentials, desiredtype):
self.__check_presence(attname, credentials)
self.__check_and_adapt_type_if_exists(attname, credentials, desiredtype)
def __check_presence(self, attname, credentials):
if attname not in credentials:
rabbitname_for_errmsg = '(not specified)'
if 'url' in credentials:
rabbitname_for_errmsg = credentials['url']
errmsg = 'Missing %s for messaging service "%s"!' % (attname, rabbitname_for_errmsg)
raise esgfpid.exceptions.ArgumentError(errmsg)
def __check_and_adapt_type_if_exists(self, attname, credentials, desiredtype):
if attname in credentials:
# Empty string to None:
if credentials[attname] == '':
credentials[attname] = None
# List to object:
if type(credentials[attname]) == type([]) and len(credentials[attname]) == 1:
credentials[attname] = credentials[attname][0]
# Don't check if None:
if credentials[attname] is None:
pass
# Check type:
elif not isinstance(credentials[attname], desiredtype):
# Try conversion:
try:
credentials[attname] = self.__try_conversion(credentials[attname], desiredtype)
except ValueError as e:
errmsg = ('Wrong type of messaging service %s (%s). Expected %s, got %s, conversion failed.' %
(attname, credentials[attname], desiredtype, type(credentials[attname])))
raise esgfpid.exceptions.ArgumentError(errmsg)
def __try_conversion(self, value, desiredtype):
if desiredtype == bool:
if isinstance(value, basestring):
if str.lower(value) == 'true':
return True
elif str.lower(value) == 'false':
return False
raise ValueError()
if desiredtype == basestring:
#return str(value)
raise ValueError('Not transforming booleans')
if desiredtype == int:
return int(value)
else:
return desiredtype(value)
'''
These are not (only) needed during initialisation, but
(also) later on.
'''
def __store_some_args(self, args):
self.prefix = args['handle_prefix']
self.__thredds_service_path = args['thredds_service_path']
self.__data_node = args['data_node'] # may be None, only needed for some assistants.
self.__consumer_solr_url = args['consumer_solr_url'] # may be None
def __throw_error_if_prefix_not_in_list(self):
if self.prefix is None:
raise esgfpid.exceptions.ArgumentError('Missing handle prefix!')
if self.prefix not in esgfpid.defaults.ACCEPTED_PREFIXES:
raise esgfpid.exceptions.ArgumentError('The prefix "%s" is not a valid prefix! Please check your config. Accepted prefixes: %s'
% (self.prefix, ', '.join(esgfpid.defaults.ACCEPTED_PREFIXES)))
def create_publication_assistant(self, **args):
'''
Create an assistant for a dataset that allows to make PID
requests for the dataset and all of its files.
:param drs_id: Mandatory. The dataset id of the dataset
to be published.
:param version_number: Mandatory. The version number of the
dataset to be published.
:param is_replica: Mandatory. Flag to indicate whether the
dataset is a replica.
.. note:: If the replica flag is set to False, the publication
may still be considered a replica by the consuming servlet,
namely if the dataset was already published at a different
host. For this, please refer to the consumer documentation.
:return: A publication assistant which provides all necessary
methods to publish a dataset and its files.
'''
# Check args
logdebug(LOGGER, 'Creating publication assistant..')
mandatory_args = ['drs_id', 'version_number', 'is_replica']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
# Check if service path is given
if self.__thredds_service_path is None:
msg = 'No thredds_service_path given (but it is mandatory for publication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for publication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Check if solr has access:
if self.__coupler.is_solr_switched_off():
pass # solr access not mandatory anymore
# Create publication assistant
assistant = esgfpid.assistant.publish.DatasetPublicationAssistant(
drs_id=args['drs_id'],
version_number=args['version_number'],
thredds_service_path=self.__thredds_service_path,
data_node=self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
is_replica=args['is_replica'],
consumer_solr_url=self.__consumer_solr_url # may be None
)
logdebug(LOGGER, 'Creating publication assistant.. done')
return assistant
'''
Please see documentation of solr module (:func:`~check.check_pid_queue_availability`).
'''
def check_pid_queue_availability(self, **args):
rabbit_checker = esgfpid.check.RabbitChecker(connector = self, prefix = self.prefix, **args)
return rabbit_checker.check_and_inform()
def unpublish_one_version(self, **args):
'''
Sends a PID update request for the unpublication of one version
of a dataset currently published at the given data node.
Either the handle or the pair of drs_id and version_number
have to be provided, otherwise an exception will occur.
The consumer will of course check the PID request message's
timestamp with the timestamp of the last publication, so that
republications in the meantime are not unpublished.
The unpublication of the files is included in this method.
:param handle: Optional. The handle of the dataset
to be unpublished.
:param drs_id: Optional. The dataset id of the dataset
to be unpublished.
:param version_number: Optional. The version number of
the dataset to be unpublished.
:raises: ArgumentError: If not enough arguments are passed
to identify the dataset, or if no data node was specified
during library init.
'''
# Check args
optional_args = ['handle', 'drs_id', 'version_number']
esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for unpublication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Unpublish
assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
drs_id = args['drs_id'],
data_node = self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string()
)
assistant.unpublish_one_dataset_version(
handle = args['handle'],
version_number = args['version_number']
)
def unpublish_all_versions(self, **args):
'''
Sends a PID update request for the unpublication of all versions
of a dataset currently published at the given data node.
If the library has solr access, it will try to find all the
dataset versions and their handles from solr, and send individual
messages for each version. Otherwise, one message is sent, and the
queue consuming servlet has to identify the relevant versions,
also making sure not to unpublish any versions that may have been
republished in the meantime.
:param drs_id: Dataset id of the dataset to be unpublished.
:raises: ArgumentError: If the data node
was not provided at library initialization.
'''
# Check args
mandatory_args = ['drs_id']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
# Check if data node is given
if self.__data_node is None:
msg = 'No data_node given (but it is mandatory for publication)'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.ArgumentError(msg)
# Check if solr has access:
if self.__coupler.is_solr_switched_off():
msg = 'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.'
logdebug(LOGGER, msg)
#raise esgfpid.exceptions.ArgumentError('No solr access. Solr access is needed for publication. Please provide access to a solr index when initializing the library')
# Unpublish
assistant = esgfpid.assistant.unpublish.AssistantAllVersions(
drs_id = args['drs_id'],
data_node = self.__data_node,
prefix=self.prefix,
coupler=self.__coupler,
message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string(),
consumer_solr_url = self.__consumer_solr_url # may be None
)
assistant.unpublish_all_dataset_versions()
def add_errata_ids(self, **args):
'''
Add errata ids to a dataset handle record.
To call this method, you do not need to provide the
PID of the dataset. Instead, the PID string is derived
from the dataset id and the version number.
:param errata_ids: Mandatory. A list of errata ids (strings)
to be added to the handle record.
:param drs_id: Mandatory. The dataset id of the dataset
to whose handle record the errata ids are to be
added. (This is needed because the handle is found
by making a hash over dataset id and version number).
:param version_number: Mandatory. The version number of the
dataset to whose handle record the errata ids are to be
added. (This is needed because the handle is found by
making a hash over dataset id and version number).
'''
# Check args:
mandatory_args = ['drs_id', 'version_number', 'errata_ids']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
# Perform metadata update
assistant = esgfpid.assistant.errata.ErrataAssistant(
coupler=self.__coupler,
prefix=self.prefix
)
assistant.add_errata_ids(
drs_id=args['drs_id'],
version_number=args['version_number'],
errata_ids=args['errata_ids']
)
def remove_errata_ids(self, **args):
'''
Remove errata ids from a dataset handle record.
To call this method, you do not need to provide the
PID of the dataset. Instead, the PID string is derived
from the dataset id and the version number.
:param errata_ids: Mandatory. A list of errata ids (strings) to
be removed from the handle record.
:param drs_id: Mandatory. The dataset id of the dataset
from whose handle record the errata ids are to be
removed. (This is needed because the handle is found
by making a hash over dataset id and version number).
:param version_number: Mandatory. The version number of the
dataset from whose handle record the errata ids are to be
removed. (This is needed because the handle is found by
making a hash over dataset id and version number).
'''
# Check args:
mandatory_args = ['drs_id', 'version_number', 'errata_ids']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
# Perform metadata update
assistant = esgfpid.assistant.errata.ErrataAssistant(
coupler=self.__coupler,
prefix=self.prefix
)
assistant.remove_errata_ids(
drs_id=args['drs_id'],
version_number=args['version_number'],
errata_ids=args['errata_ids']
)
def start_messaging_thread(self):
'''
Start the parallel thread that takes care of the asynchronous
communication with RabbitMQ.
If PID creation/update requests are attempted before
this was called, an exception will be raised.
Preferably call this method as early as possible, so that
the module has some time to build the connection before
the first PID requests are made.
(If PID requests are made before the connection is ready,
they will not be lost, but pile up and sent once the connection
is ready).
.. important:: Please do not forget to finish the thread at the end,
using :meth:`~esgfpid.connector.Connector.finish_messaging_thread`
or :meth:`~esgfpid.connector.Connector.force_finish_messaging_thread`.
'''
self.__coupler.start_rabbit_connection()
def finish_messaging_thread(self):
'''
Finish and join the parallel thread that takes care of
the asynchronous communication with RabbitMQ.
If some messages are still in the stack to be sent,
or if some messages were not confirmed yet, this method
blocks and waits for some time while it iteratively
checks for message confirmation.
Currently, it waits up to 5 seconds: It checks up to
11 times, waiting 0.5 seconds in between - these
values can be configured in the defaults module).
'''
self.__coupler.finish_rabbit_connection()
def force_finish_messaging_thread(self):
'''
Finish and join the parallel thread that takes care of
the asynchronous communication with RabbitMQ.
This method does not wait for any pending messages.
Messages that are not sent yet are lost. Messages that
are not confirmed yet are probably not lost, but their
receival is not guaranteed.
Note:
The rabbit module keeps a copy of all unsent and
unconfirmed messages, so they could be resent in
a later connection. It would also be easy to expose
a method for the library caller to retrieve those
messages, e.g. to write them into some file.
'''
self.__coupler.force_finish_rabbit_connection()
def make_handle_from_drsid_and_versionnumber(self, **args):
'''
Create a handle string for a specific dataset, based
on its dataset id and version number, and the prefix
passed to the library at initializing.
:param drs_id: The dataset id of the dataset.
:param version_number: The version number of the dataset
(as a string or integer, this does not matter)
:return: A handle string (e.g. "hdl:21.14100/abcxyzfoo")
'''
args['prefix'] = self.prefix
return esgfpid.utils.make_handle_from_drsid_and_versionnumber(**args)
|
IS-ENES-Data/esgf-pid
|
esgfpid/connector.py
|
Connector.make_handle_from_drsid_and_versionnumber
|
python
|
def make_handle_from_drsid_and_versionnumber(self, **args):
'''
Create a handle string for a specific dataset, based
on its dataset id and version number, and the prefix
passed to the library at initializing.
:param drs_id: The dataset id of the dataset.
:param version_number: The version number of the dataset
(as a string or integer, this does not matter)
:return: A handle string (e.g. "hdl:21.14100/abcxyzfoo")
'''
args['prefix'] = self.prefix
return esgfpid.utils.make_handle_from_drsid_and_versionnumber(**args)
|
Create a handle string for a specific dataset, based
on its dataset id and version number, and the prefix
passed to the library at initializing.
:param drs_id: The dataset id of the dataset.
:param version_number: The version number of the dataset
(as a string or integer, this does not matter)
:return: A handle string (e.g. "hdl:21.14100/abcxyzfoo")
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/connector.py#L605-L617
|
[
"def make_handle_from_drsid_and_versionnumber(**args):\n check_presence_of_mandatory_args(args, ['drs_id','version_number','prefix'])\n suffix = make_suffix_from_drsid_and_versionnumber(drs_id=args['drs_id'], version_number=args['version_number'])\n return _suffix_to_handle(args['prefix'], suffix)\n"
] |
class Connector(object):
'''
This class provides the main functionality for the ESGF PID
module.
Author: Merret Buurman (DKRZ), 2015-2016
'''
def __init__(self, **args):
'''
Create a connector object with the necessary config
to connect to a RabbitMQ messaging server and perform
PID creations/updates.
The arguments have to be passed as named parameters.
Please contact your ESGF index node or CDNOT for this
information.
Some of the arguments are needed for making connections
from this library to RabbitMQ or to solr. Other arguments
are only passed on to the consuming servlet inside the
messages.
:param handle_prefix: Mandatory. The handle prefix (as a
string) for the handles to be created/updated. This
has to match the handle prefix that the message queue
consuming servlet has write access to. In CMIP6, this
is "21.14100".
:param messaging_service_exchange_name: Mandatory. The
name of the messaging exchange that will forward the
messages to a specific queue.
:param messaging_service_credentials: Mandatory. List of
dictionaries with credentials for the RabbitMQ nodes.
Each needs to have the entries: "user", "password", "url".
They may have an integer "priority" too. If two nodes have
the same priority, the library chooses randomly between
them. They also may have a "vhost" (RabbitMQ virtual host),
a "port" and a boolean "ssl_enabled". Please refer to pika's
documentation
(http://pika.readthedocs.io/en/latest/modules/parameters.html).
Dictionaries for 'open nodes' do not need a password
to be provided. Open nodes are only used if no more
other nodes are available. Note: Open nodes are no longer
supported.
:param message_service_synchronous: Optional. Boolean to
define if the connection to RabbitMQ and the message
sending should work in synchronous mode. Defaults to
the value defined in defaults.py.
:param data_node: Mandatory/Optional.
(Mandatory for publications and unpublications,
ignored for any other usage of the library. No default.)
The data node (host name) at which (un)publication takes
place. This will be included in the handle records. Trailing
slashes are removed.
Used during publication and unpublication (modules
assistant.publish and assistant.unpublish):
* Publication: Used to construct the file data URL (together
with thredds service path and file publish path). Sent along
in rabbit message. Used for consistency check, if solr use
is enabled.
* Unpublication: Sent along in rabbit message.
:param thredds_service_path: Mandatory for publications,
ignored for any other usage of the library. No default.
The thredds service path where the files of a publication
reside. Will be combined with files' publish path and data
node to form the files' data access URLs.
:param solr_url: Optional. The URL of the solr to be used by this
library for the dataset consistency check. No default. If not provided,
the check is not done.
Note: This is currently switched off for performance reasons.
:param solr_https_verify: Optional flag to indicate whether
requests to solr should verify the SSL certificate.
Please see documentation of requests library: http://docs.python-requests.org/en/master/user/advanced/
:param disable_insecure_request_warning: Optional flag (only for
use during testing). If True, warnings are not printed during
insecure SSL requests.
Important: This is not passed through to the solr module, so
that switching off the warnings is not possible. It can only
be passed directly to the solr module during tests.
:param solr_switched_off: Optional flag to tell if the solr module
should be switched off. In that case, no connections to solr
are made.
:param consumer_solr_url: Optional. URL of a solr instance that
is to be used by the consumer (e.g. for finding versions), *not*
by this library.
:param test_publication: Optional flag. If True, the
handles that are created are test handles
that will be overwritten by real publications. Also,
test publications cannot update real handles.
:returns: An instance of the connector, configured for one
data node, and for connection with a specific RabbitMQ node.
'''
LOGGER.debug(40*'-')
LOGGER.debug('Creating PID connector object ..')
self.__check_presence_of_args(args)
self.__check_rabbit_credentials_completeness(args)
self.__define_defaults_for_optional_args(args)
self.__store_some_args(args)
self.__throw_error_if_prefix_not_in_list()
esgfpid.utils.routingkeys.add_prefix_to_routing_keys(self.prefix)
self.__coupler = esgfpid.coupling.Coupler(**args)
loginfo(LOGGER, 'Created PID connector.')
def __check_presence_of_args(self, args):
    '''
    Verify that all mandatory constructor arguments were passed.

    :param args: Dictionary of keyword arguments passed to the
        constructor.
    :raises: esgfpid.exceptions.ArgumentError if any mandatory
        argument is missing.
    '''
    mandatory_args = [
        'messaging_service_credentials',
        'messaging_service_exchange_name',
        'handle_prefix'
    ]
    # The optional arguments (data_node, thredds_service_path,
    # test_publication, solr_url, solr_https_verify,
    # disable_insecure_request_warning, solr_switched_off,
    # consumer_solr_url, message_service_synchronous) get their
    # defaults in __define_defaults_for_optional_args, so no unused
    # list of them is kept here.
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
def __define_defaults_for_optional_args(self, args):
    '''
    Fill in default values for the optional constructor arguments.

    An argument counts as "not given" when its key is absent or its
    value is None. If no solr_url is given, solr use is switched off
    entirely (solr_switched_off is forced to True).
    '''
    def not_given(key):
        return (key not in args) or (args[key] is None)

    if not_given('data_node'):
        # May be None; only needed for some operations. If it is
        # needed, its presence is checked later.
        args['data_node'] = None
    if not_given('thredds_service_path'):
        # May be None; only needed for some operations. If it is
        # needed, its presence is checked later.
        args['thredds_service_path'] = None
    if not_given('test_publication'):
        args['test_publication'] = False
    if not_given('solr_url'):
        # No solr URL means there is nothing to query, so solr use
        # is disabled altogether.
        args['solr_url'] = None
        args['solr_switched_off'] = True
    if not_given('solr_switched_off'):
        args['solr_switched_off'] = False
    if not_given('solr_https_verify'):
        args['solr_https_verify'] = esgfpid.defaults.SOLR_HTTPS_VERIFY_DEFAULT
    if not_given('disable_insecure_request_warning'):
        args['disable_insecure_request_warning'] = False
    if not_given('message_service_synchronous'):
        args['message_service_synchronous'] = not esgfpid.defaults.RABBIT_IS_ASYNCHRONOUS
    if not_given('consumer_solr_url'):
        args['consumer_solr_url'] = None
def __check_rabbit_credentials_completeness(self, args):
    '''
    Validate every RabbitMQ credentials entry.

    Each entry must be a dictionary providing at least "url", "user"
    and "password" (strings). "vhost" (string), "port" (int) and
    "ssl_enabled" (bool) are optional and are type-checked (and
    converted, if possible) when present.

    :raises: esgfpid.exceptions.ArgumentError on any violation.
    '''
    for cred in args['messaging_service_credentials']:
        if not isinstance(cred, dict):
            raise esgfpid.exceptions.ArgumentError(
                'Credentials for each RabbitMQ node should be a dictionary.')
        # Mandatory entries (presence + type). Requiring a password
        # effectively disables "open nodes" — remove the password
        # check here if open nodes should be enabled again.
        for required in ('url', 'user', 'password'):
            self.__check_presence_and_type(required, cred, basestring)
        # Optional entries (type only, if present):
        self.__check_and_adapt_type_if_exists('password', cred, basestring)
        self.__check_and_adapt_type_if_exists('vhost', cred, basestring)
        self.__check_and_adapt_type_if_exists('port', cred, int)
        self.__check_and_adapt_type_if_exists('ssl_enabled', cred, bool)
def __check_presence_and_type(self, attname, credentials, desiredtype):
    # Convenience wrapper: first make sure the attribute exists at
    # all, then make sure it has (or is converted to) the right type.
    self.__check_presence(attname, credentials)
    self.__check_and_adapt_type_if_exists(attname, credentials, desiredtype)
def __check_presence(self, attname, credentials):
    '''
    Raise an ArgumentError if "attname" is missing from one RabbitMQ
    credentials dictionary. The node's URL (if present) is included
    in the message so the faulty entry can be identified.
    '''
    if attname in credentials:
        return
    node_name = credentials['url'] if 'url' in credentials else '(not specified)'
    errmsg = 'Missing %s for messaging service "%s"!' % (attname, node_name)
    raise esgfpid.exceptions.ArgumentError(errmsg)
def __check_and_adapt_type_if_exists(self, attname, credentials, desiredtype):
    '''
    If "attname" is present in the credentials dict, normalize and
    type-check its value in place:

    * An empty string is treated as "not given" and replaced by None.
    * A one-element list is unwrapped to its single element.
    * None is accepted as-is (no type check).
    * Any other value must be of "desiredtype"; otherwise a
      conversion is attempted (see __try_conversion).

    :raises: esgfpid.exceptions.ArgumentError if the value has the
        wrong type and cannot be converted.
    '''
    if attname in credentials:
        # Empty string to None:
        if credentials[attname] == '':
            credentials[attname] = None
        # One-element list to its single object:
        # (isinstance instead of the unidiomatic type(...) == type([]))
        if isinstance(credentials[attname], list) and len(credentials[attname]) == 1:
            credentials[attname] = credentials[attname][0]
        # Don't check if None:
        if credentials[attname] is None:
            pass
        # Check type:
        elif not isinstance(credentials[attname], desiredtype):
            # Try conversion:
            try:
                credentials[attname] = self.__try_conversion(credentials[attname], desiredtype)
            except ValueError:
                # credentials[attname] still holds the original,
                # unconverted value here.
                errmsg = ('Wrong type of messaging service %s (%s). Expected %s, got %s, conversion failed.' %
                          (attname, credentials[attname], desiredtype, type(credentials[attname])))
                raise esgfpid.exceptions.ArgumentError(errmsg)
def __try_conversion(self, value, desiredtype):
    '''
    Try to convert "value" to "desiredtype".

    * bool: only the strings "true"/"false" (case-insensitive) are
      accepted; anything else raises ValueError.
    * basestring: never converted — booleans etc. must not be
      silently stringified; always raises ValueError.
    * int: converted via int().
    * any other type: converted by calling the type on the value.

    :raises: ValueError if the conversion is impossible/not allowed.
    '''
    if desiredtype == bool:
        if isinstance(value, basestring):
            # Bug fix: str.lower(value) raised TypeError (which the
            # caller does not catch) for unicode input; the bound
            # method works for both str and unicode.
            if value.lower() == 'true':
                return True
            elif value.lower() == 'false':
                return False
        raise ValueError()
    if desiredtype == basestring:
        #return str(value)
        raise ValueError('Not transforming booleans')
    if desiredtype == int:
        return int(value)
    else:
        return desiredtype(value)
'''
These are not (only) needed during initialisation, but
(also) later on.
'''
def __store_some_args(self, args):
    # Keep the constructor arguments that are needed beyond
    # initialisation as instance attributes.
    self.prefix = args['handle_prefix']
    self.__thredds_service_path = args['thredds_service_path']
    self.__data_node = args['data_node'] # may be None, only needed for some assistants.
    self.__consumer_solr_url = args['consumer_solr_url'] # may be None
def __throw_error_if_prefix_not_in_list(self):
    '''
    Ensure the configured handle prefix is one of the accepted
    prefixes (esgfpid.defaults.ACCEPTED_PREFIXES).

    :raises: esgfpid.exceptions.ArgumentError if the prefix is None
        or not in the accepted list.
    '''
    if self.prefix is None:
        raise esgfpid.exceptions.ArgumentError('Missing handle prefix!')
    if self.prefix not in esgfpid.defaults.ACCEPTED_PREFIXES:
        accepted = ', '.join(esgfpid.defaults.ACCEPTED_PREFIXES)
        msg = ('The prefix "%s" is not a valid prefix! Please check your config. Accepted prefixes: %s'
               % (self.prefix, accepted))
        raise esgfpid.exceptions.ArgumentError(msg)
def create_publication_assistant(self, **args):
    '''
    Create an assistant for a dataset that allows to make PID
    requests for the dataset and all of its files.

    :param drs_id: Mandatory. The dataset id of the dataset
        to be published.
    :param version_number: Mandatory. The version number of the
        dataset to be published.
    :param is_replica: Mandatory. Flag to indicate whether the
        dataset is a replica.

    .. note:: If the replica flag is set to False, the publication
        may still be considered a replica by the consuming servlet,
        namely if the dataset was already published at a different
        host. For this, please refer to the consumer documentation.

    :raises: esgfpid.exceptions.ArgumentError if a mandatory argument
        is missing, or if thredds_service_path/data_node were not
        given at library init.
    :return: A publication assistant which provides all necessary
        methods to publish a dataset and its files.
    '''
    logdebug(LOGGER, 'Creating publication assistant..')
    mandatory_args = ['drs_id', 'version_number', 'is_replica']
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)

    # thredds_service_path and data_node are optional at library
    # init, but they are needed to build the files' data URLs, so
    # they become mandatory for publication:
    if self.__thredds_service_path is None:
        msg = 'No thredds_service_path given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)

    # Solr access is not mandatory for publication anymore, so the
    # former dead "is_solr_switched_off" check was removed.

    assistant = esgfpid.assistant.publish.DatasetPublicationAssistant(
        drs_id=args['drs_id'],
        version_number=args['version_number'],
        thredds_service_path=self.__thredds_service_path,
        data_node=self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        is_replica=args['is_replica'],
        consumer_solr_url=self.__consumer_solr_url # may be None
    )
    logdebug(LOGGER, 'Creating publication assistant.. done')
    return assistant
'''
Please see documentation of solr module (:func:`~check.check_pid_queue_availability`).
'''
def check_pid_queue_availability(self, **args):
    # Check whether the RabbitMQ messaging queue is reachable.
    # Please see documentation of the check module
    # (:func:`~check.check_pid_queue_availability`) for accepted args.
    rabbit_checker = esgfpid.check.RabbitChecker(connector = self, prefix = self.prefix, **args)
    return rabbit_checker.check_and_inform()
def unpublish_one_version(self, **args):
    '''
    Sends a PID update request for the unpublication of one version
    of a dataset currently published at the given data node.

    Either the handle or the pair of drs_id and version_number
    have to be provided, otherwise an exception will occur.
    The consumer will of course check the PID request message's
    timestamp with the timestamp of the last publication, so that
    republications in the mean time are not unpublished.
    The unpublication of the files is included in this method.

    :param handle: Optional. The handle of the dataset
        to be unpublished.
    :param drs_id: Optional. The dataset id of the dataset
        to be unpublished.
    :param version_number: Optional. The version number of
        the dataset to be unpublished.
    :raises: ArgumentError: If not enough arguments are passed
        to identify the dataset, or if no data node was specified
        during library init.
    '''
    # Check args: all three are optional here; missing ones are
    # filled in as None, and the assistant decides whether the
    # combination is sufficient.
    optional_args = ['handle', 'drs_id', 'version_number']
    esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)
    # Check if data node is given (optional at init, mandatory here):
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for unpublication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # Unpublish (drs_id may be None if a handle was given):
    assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
        drs_id = args['drs_id'],
        data_node = self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string()
    )
    assistant.unpublish_one_dataset_version(
        handle = args['handle'],
        version_number = args['version_number']
    )
def unpublish_all_versions(self, **args):
    '''
    Sends a PID update request for the unpublication of all versions
    of a dataset currently published at the given data node.

    If the library has solr access, it will try to find all the
    dataset versions and their handles from solr, and send individual
    messages for each version. Otherwise, one message is sent, and the
    queue consuming servlet has to identify the relevant versions,
    also making sure not to unpublish any versions that may have been
    republished in the meantime.

    :param drs_id: Dataset id of the dataset to be unpublished.
    :raises: ArgumentError: If the data node
        was not provided at library initialization.
    '''
    # Check args
    mandatory_args = ['drs_id']
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)

    # Check if data node is given (optional at init, mandatory here).
    # Fix: the message previously said "publication" although this is
    # the unpublication code path (cf. unpublish_one_version).
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for unpublication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)

    # Without solr access we cannot enumerate the versions ourselves;
    # the consumer has to identify them. This is not an error.
    if self.__coupler.is_solr_switched_off():
        msg = 'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.'
        logdebug(LOGGER, msg)

    # Unpublish
    assistant = esgfpid.assistant.unpublish.AssistantAllVersions(
        drs_id = args['drs_id'],
        data_node = self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string(),
        consumer_solr_url = self.__consumer_solr_url # may be None
    )
    assistant.unpublish_all_dataset_versions()
def add_errata_ids(self, **args):
    '''
    Add errata ids to a dataset handle record.

    To call this method, you do not need to provide the PID of the
    dataset. Instead, the PID string is derived from the dataset id
    and the version number.

    :param errata_ids: Mandatory. A list of errata ids (strings)
        to be added to the handle record.
    :param drs_id: Mandatory. The dataset id of the dataset to whose
        handle record the errata ids are to be added. (Needed because
        the handle is found by making a hash over dataset id and
        version number.)
    :param version_number: Mandatory. The version number of the
        dataset to whose handle record the errata ids are to be
        added. (Needed for the same reason.)
    '''
    # All three arguments must be present and not None:
    required = ['drs_id', 'version_number', 'errata_ids']
    esgfpid.utils.check_presence_of_mandatory_args(args, required)
    esgfpid.utils.check_noneness_of_mandatory_args(args, required)
    # Delegate the metadata update to the errata assistant:
    errata_assistant = esgfpid.assistant.errata.ErrataAssistant(
        coupler=self.__coupler,
        prefix=self.prefix
    )
    errata_assistant.add_errata_ids(
        drs_id=args['drs_id'],
        version_number=args['version_number'],
        errata_ids=args['errata_ids']
    )
def remove_errata_ids(self, **args):
    '''
    Remove errata ids from a dataset handle record.

    To call this method, you do not need to provide the PID of the
    dataset. Instead, the PID string is derived from the dataset id
    and the version number.

    :param errata_ids: Mandatory. A list of errata ids (strings)
        to be removed from the handle record.
    :param drs_id: Mandatory. The dataset id of the dataset from
        whose handle record the errata ids are to be removed. (Needed
        because the handle is found by making a hash over dataset id
        and version number.)
    :param version_number: Mandatory. The version number of the
        dataset from whose handle record the errata ids are to be
        removed. (Needed for the same reason.)
    '''
    # All three arguments must be present and not None:
    required = ['drs_id', 'version_number', 'errata_ids']
    esgfpid.utils.check_presence_of_mandatory_args(args, required)
    esgfpid.utils.check_noneness_of_mandatory_args(args, required)
    # Delegate the metadata update to the errata assistant:
    errata_assistant = esgfpid.assistant.errata.ErrataAssistant(
        coupler=self.__coupler,
        prefix=self.prefix
    )
    errata_assistant.remove_errata_ids(
        drs_id=args['drs_id'],
        version_number=args['version_number'],
        errata_ids=args['errata_ids']
    )
def create_data_cart_pid(self, dict_of_drs_ids_and_pids):
    '''
    Create a handle record for a data cart (a custom set of datasets).

    The handle string is made of the prefix passed to the library and
    a hash over all the dataset ids in the cart. Passing exactly the
    same set of datasets again therefore yields the same handle
    record instead of creating a new one.

    :param dict_of_drs_ids_and_pids: A dictionary of all dataset ids
        and their pid strings. If a dataset has no (known) PID, use
        None.
    :return: The handle string for this data cart.
    '''
    cart_assistant = esgfpid.assistant.datacart.DataCartAssistant(
        coupler=self.__coupler,
        prefix=self.prefix
    )
    return cart_assistant.make_data_cart_pid(dict_of_drs_ids_and_pids)
def start_messaging_thread(self):
    '''
    Start the parallel thread that takes care of the asynchronous
    communication with RabbitMQ.

    If PID creation/update requests are attempted before
    this was called, an exception will be raised.

    Preferably call this method as early as possible, so that
    the module has some time to build the connection before
    the first PID requests are made.
    (If PID requests are made before the connection is ready,
    they will not be lost, but pile up and are sent once the
    connection is ready).

    .. important:: Please do not forget to finish the thread at the end,
        using :meth:`~esgfpid.connector.Connector.finish_messaging_thread`
        or :meth:`~esgfpid.connector.Connector.force_finish_messaging_thread`.
    '''
    # All thread/connection handling is delegated to the coupler.
    self.__coupler.start_rabbit_connection()
def finish_messaging_thread(self):
    '''
    Finish and join the parallel thread that takes care of
    the asynchronous communication with RabbitMQ.

    If some messages are still in the stack to be sent,
    or if some messages were not confirmed yet, this method
    blocks and waits for some time while it iteratively
    checks for message confirmation.
    Currently, it waits up to 5 seconds: It checks up to
    11 times, waiting 0.5 seconds in between - these
    values can be configured in the defaults module.
    '''
    # Graceful shutdown: pending messages get a chance to be sent.
    self.__coupler.finish_rabbit_connection()
def force_finish_messaging_thread(self):
    '''
    Finish and join the parallel thread that takes care of
    the asynchronous communication with RabbitMQ.

    This method does not wait for any pending messages.
    Messages that are not sent yet are lost. Messages that
    are not confirmed yet are probably not lost, but their
    receival is not guaranteed.

    Note:
    The rabbit module keeps a copy of all unsent and
    unconfirmed messages, so they could be resent in
    a later connection. It would also be easy to expose
    a method for the library caller to retrieve those
    messages, e.g. to write them into some file.
    '''
    # Hard shutdown: does NOT wait for unsent/unconfirmed messages.
    self.__coupler.force_finish_rabbit_connection()
|
IS-ENES-Data/esgf-pid
|
esgfpid/assistant/publish.py
|
DatasetPublicationAssistant.add_file
|
python
|
def add_file(self, **args):
'''
Adds a file's information to the set of files to be
published in this dataset.
:param file_name: Mandatory. The file name (string).
This information will simply be included in the
PID record, but not used for anything.
:param file_handle: Mandatory. The handle (PID) of
this file (string). It is included in the file's netcdf
header. It must bear the prefix that this library
(or rather, the consuming servlet that will consume
this library's requests), has write access to.
:param file_size: Mandatory. The file size (as string or
integer. Will be transformed to integer). This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum: Mandatory. The file's checksum. This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum_type: Mandatory. The checksum type/method
(string), e.g. "MD5" or "SHA256". This information will
be included in the handle record and used for consistency
checks during republications of files with the same handle.
:param publish_path: Mandatory. The THREDDS publish path as
a string. This is part of the URL for accessing the file,
which will be part of the handle record. It will not be
accessed, neither by the library nor by the consumer.
The URL consists of the dataset's "data_node", the dataset's
"thredds_service_path", and this "publish_path". Redundant
slashes are removed. If the URL does not start with "http",
"http://" is added.
:param file_version: Mandatory. Any string. File versions
are not managed in the PID. This information will simply be
included in the PID record, but not used for any reasoning.
'''
# Check if allowed:
self.__check_if_adding_files_allowed_right_now()
# Check if args ok:
mandatory_args = ['file_name', 'file_handle', 'file_size',
'checksum', 'publish_path', 'checksum_type',
'file_version']
utils.check_presence_of_mandatory_args(args, mandatory_args)
self.__enforce_integer_file_size(args)
self.__enforce_string_file_version(args)
# Add file:
self.__check_and_correct_handle_syntax(args)
self.__add_file(**args)
|
Adds a file's information to the set of files to be
published in this dataset.
:param file_name: Mandatory. The file name (string).
This information will simply be included in the
PID record, but not used for anything.
:param file_handle: Mandatory. The handle (PID) of
this file (string). It is included in the file's netcdf
header. It must bear the prefix that this library
(or rather, the consuming servlet that will consume
this library's requests), has write access to.
:param file_size: Mandatory. The file size (as string or
integer. Will be transformed to integer). This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum: Mandatory. The file's checksum. This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum_type: Mandatory. The checksum type/method
(string), e.g. "MD5" or "SHA256". This information will
be included in the handle record and used for consistency
checks during republications of files with the same handle.
:param publish_path: Mandatory. The THREDDS publish path as
a string. This is part of the URL for accessing the file,
which will be part of the handle record. It will not be
accessed, neither by the library nor by the consumer.
The URL consists of the dataset's "data_node", the dataset's
"thredds_service_path", and this "publish_path". Redundant
slashes are removed. If the URL does not start with "http",
"http://" is added.
:param file_version: Mandatory. Any string. File versions
are not managed in the PID. This information will simply be
included in the PID record, but not used for any reasoning.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/assistant/publish.py#L119-L177
|
[
"def check_presence_of_mandatory_args(args, mandatory_args):\n missing_args = []\n for name in mandatory_args:\n if name not in args.keys():\n missing_args.append(name)\n if len(missing_args)>0:\n raise esgfpid.exceptions.ArgumentError('Missing mandatory arguments: '+', '.join(missing_args))\n else:\n return True\n",
"def __enforce_integer_file_size(self, args):\n try:\n args['file_size'] = int(args['file_size'])\n except ValueError:\n raise esgfpid.exceptions.ArgumentError('File size is not an integer')\n",
"def __enforce_string_file_version(self, args):\n args['file_version'] = str(args['file_version'])\n",
"def __check_and_correct_handle_syntax(self, args):\n self.__make_sure_hdl_is_added(args)\n self.__check_if_prefix_is_there(args['file_handle'])\n",
"def __add_file(self, **args):\n logdebug(LOGGER, 'Adding file \"%s\" with handle \"%s\".', args['file_name'], args['file_handle'])\n self.__add_file_to_datasets_children(args['file_handle'])\n self.__adapt_file_args(args)\n self.__create_and_store_file_publication_message(args) \n self.__set_machine_state_to_files_added()\n logtrace(LOGGER, 'Adding file done.')\n",
"def __check_if_adding_files_allowed_right_now(self):\n dataset_added = self.__machine_state == self.__machine_states['dataset_added']\n files_added = self.__machine_state == self.__machine_states['files_added']\n if dataset_added or files_added:\n pass\n else:\n msg = 'Too late to add files!'\n logwarn(LOGGER, msg)\n raise esgfpid.exceptions.OperationUnsupportedException(msg)\n"
] |
class DatasetPublicationAssistant(object):
    '''
    Assistant for publishing one version of a dataset and its files.

    The assistant is a small state machine:
    (1) "dataset_added" right after construction,
    (2) "files_added" after at least one file was added,
    (3) "publication_finished" after the final commit.
    The commit (dataset_publication_finished) sends one publication
    message for the dataset and one per added file to RabbitMQ via
    the coupler.
    '''

    def __init__(self, **args):
        '''
        :param drs_id: Mandatory. Dataset id of the dataset to publish.
        :param version_number: Mandatory. Version number (coerced to int).
        :param data_node: Mandatory. Data node host; trailing slashes removed.
        :param prefix: Mandatory. Handle prefix used for all created handles.
        :param thredds_service_path: Mandatory. THREDDS path; surrounding
            slashes stripped; combined with data_node and publish paths
            to build file data URLs.
        :param is_replica: Mandatory. Replica flag (coerced to bool).
        :param coupler: Mandatory. The coupler used for messaging/solr.
        :param consumer_solr_url: Mandatory key, value may be None.
        :raises: esgfpid.exceptions.ArgumentError on missing args or
            failed coercions.
        '''
        logdebug(LOGGER, 'Constructor for Publication assistant for dataset "%s", version "%s" at host "%s".',
            args['drs_id'],
            args['version_number'],
            args['data_node']
        )
        # Check args
        mandatory_args = ['drs_id', 'version_number', 'data_node', 'prefix',
                          'thredds_service_path', 'is_replica', 'coupler',
                          'consumer_solr_url']
        optional_args = []
        utils.check_presence_of_mandatory_args(args, mandatory_args)
        utils.add_missing_optional_args_with_value_none(args, optional_args)
        self.__enforce_integer_version_number(args)
        self.__enforce_boolean_replica_flag(args)
        # Init methods...
        self.__store_args_in_attributes(args)
        self.__define_other_attributes()
        self.__create_and_store_dataset_handle()
        self.__init_state_machine()
        logdebug(LOGGER, 'Done: Constructor for Publication assistant for dataset "%s", version "%i" at host "%s".',
            args['drs_id'],
            args['version_number'],
            args['data_node']
        )

    def __enforce_integer_version_number(self, args):
        # Coerce the version number to int; reject non-numeric input.
        try:
            args['version_number'] = int(args['version_number'])
        except ValueError:
            raise esgfpid.exceptions.ArgumentError('Dataset version number is not an integer')

    def __enforce_boolean_replica_flag(self, args):
        # Coerce the replica flag to bool (accepts booleans and the
        # strings "True"/"true"/"False"/"false" via utils.get_boolean).
        try:
            args['is_replica'] = utils.get_boolean(args['is_replica'])
        except ValueError:
            msg = ('Replica flag "%s" could not be parsed to boolean. '
                   'Please pass a boolean or "True" or "true" or "False" or "false"'
                   % args['is_replica'])
            raise esgfpid.exceptions.ArgumentError(msg)

    def __enforce_integer_file_size(self, args):
        # Coerce the file size to int; reject non-numeric input.
        try:
            args['file_size'] = int(args['file_size'])
        except ValueError:
            raise esgfpid.exceptions.ArgumentError('File size is not an integer')

    def __enforce_string_file_version(self, args):
        # File versions are opaque strings in the PID record.
        args['file_version'] = str(args['file_version'])

    def __define_other_attributes(self):
        # Internal state, filled as files are added. All file messages
        # share one timestamp taken at construction time.
        self.__dataset_handle = None
        self.__list_of_file_handles = []
        self.__list_of_file_messages = []
        self.__message_timestamp = utils.get_now_utc_as_formatted_string()

    def __store_args_in_attributes(self, args):
        # Normalize slashes so URL assembly in __create_file_url can
        # join parts with single '/' separators.
        self.__drs_id = args['drs_id']
        self.__version_number = args['version_number']
        self.__data_node = args['data_node'].rstrip('/')
        self.__prefix = args['prefix']
        self.__thredds_service_path = args['thredds_service_path'].strip('/')
        self.__is_replica = args['is_replica']
        self.__coupler = args['coupler']
        self.__consumer_solr_url = args['consumer_solr_url']

    def __init_state_machine(self):
        # See class docstring for the three states and their order.
        self.__machine_states = {'dataset_added':0, 'files_added':1, 'publication_finished':2}
        self.__machine_state = self.__machine_states['dataset_added']

    def __create_and_store_dataset_handle(self):
        # The dataset handle is deterministic: prefix + hash over
        # (drs_id, version_number).
        self.__dataset_handle = create_dataset_handle(
            drs_id = self.__drs_id,
            version_number = self.__version_number,
            prefix = self.__prefix
        )

    def get_dataset_handle(self):
        '''
        This returns the handle string of the dataset to be
        published, so that the publisher can use it for its
        own purposes, e.g. publishing it on a website.

        The handle string consists of the prefix specified
        at library init, and a suffix. The suffix is created
        by making a hash over dataset id and version number.

        :return: The handle string of this dataset,
            e.g.: "hdl:21.14100/foobar".
        '''
        return self.__dataset_handle

    # work horses:

    def __check_and_correct_handle_syntax(self, args):
        # Normalize a file handle in place ("hdl:" added if missing)
        # and verify it carries the configured prefix.
        self.__make_sure_hdl_is_added(args)
        self.__check_if_prefix_is_there(args['file_handle'])

    def __add_file(self, **args):
        # Core of add_file: register the handle as a child of the
        # dataset, build the data URL, create+store the file message,
        # and advance the state machine to "files_added".
        logdebug(LOGGER, 'Adding file "%s" with handle "%s".', args['file_name'], args['file_handle'])
        self.__add_file_to_datasets_children(args['file_handle'])
        self.__adapt_file_args(args)
        self.__create_and_store_file_publication_message(args)
        self.__set_machine_state_to_files_added()
        logtrace(LOGGER, 'Adding file done.')

    def __add_file_to_datasets_children(self, file_handle):
        # Duplicates are tolerated here; they are removed just before
        # the dataset message is built.
        self.__list_of_file_handles.append(file_handle)

    def __make_sure_hdl_is_added(self, args):
        # Handles are stored with the "hdl:" scheme prefix.
        if not args['file_handle'].startswith('hdl:'):
            args['file_handle'] = 'hdl:'+args['file_handle']

    def __check_if_prefix_is_there(self, file_handle):
        # Reject file handles whose prefix differs from the one this
        # library was configured with.
        # NOTE(review): lstrip('hdl:') strips a *character set*
        # {h,d,l,:}, not the literal prefix "hdl:", so it could
        # over-strip handles whose suffix starts with those
        # characters; this only affects the error message text.
        if not file_handle.startswith('hdl:'+self.__prefix+'/'):
            expected = self.__prefix + '/'+ file_handle.lstrip('hdl:')
            msg = ('\nThis file\'s tracking_id "%s" does not have the expected handle prefix "%s".'
                   '\nExpected "%s" or "%s"' % (file_handle, self.__prefix, expected, 'hdl:'+expected))
            if '/' in file_handle:
                maybe_prefix = file_handle.split('/')[0].lstrip('hdl:')
                msg += ('.\nIf "%s" was meant to be the prefix, it does not correspond to '
                        'the prefix specified when initializing this library' % maybe_prefix)
            raise esgfpid.exceptions.ESGFException(msg)

    def __adapt_file_args(self, args):
        # Replace the relative publish_path by the full data URL.
        url = self.__create_file_url(args['publish_path'])
        args['data_url'] = url
        del args['publish_path']

    def __create_file_url(self, publish_path):
        # data_node + thredds_service_path + publish_path, with
        # "http://" prepended if no scheme is present.
        url = self.__data_node +'/'+ self.__thredds_service_path +'/'+ publish_path.strip('/')
        if not url.startswith('http'):
            url = 'http://'+url
        return url

    def __create_and_store_file_publication_message(self, args):
        # Messages are only collected here; they are sent in
        # dataset_publication_finished.
        message = self.__create_file_publication_message(args)
        self.__list_of_file_messages.append(message)

    def __set_machine_state_to_files_added(self):
        self.__machine_state = self.__machine_states['files_added']

    def __check_if_adding_files_allowed_right_now(self):
        # Files may be added before the commit, but not after.
        dataset_added = self.__machine_state == self.__machine_states['dataset_added']
        files_added = self.__machine_state == self.__machine_states['files_added']
        if dataset_added or files_added:
            pass
        else:
            msg = 'Too late to add files!'
            logwarn(LOGGER, msg)
            raise esgfpid.exceptions.OperationUnsupportedException(msg)

    def dataset_publication_finished(self, ignore_exception=False):
        '''
        This is the "commit". It triggers the creation/update of handles.

        * Check if the set of files corresponds to the previously published set (if applicable, and if solr url given, and if solr replied)
        * The dataset publication message is created and sent to the queue.
        * All file publication messages are sent to the queue.

        :param ignore_exception: Optional. If True, a failed dataset
            consistency check is logged but does not raise.
        :raises: esgfpid.exceptions.OperationUnsupportedException if no
            file was added yet or the publication was already committed;
            esgfpid.exceptions.InconsistentFilesetException if the
            consistency check fails and ignore_exception is False.
        '''
        self.__check_if_dataset_publication_allowed_right_now()
        self.__check_data_consistency(ignore_exception)
        self.__coupler.start_rabbit_business() # Synchronous: Opens connection. Asynchronous: Ignored.
        self.__create_and_send_dataset_publication_message_to_queue()
        self.__send_existing_file_messages_to_queue()
        self.__coupler.done_with_rabbit_business() # Synchronous: Closes connection. Asynchronous: Ignored.
        self.__set_machine_state_to_finished()
        loginfo(LOGGER, 'Requesting to publish PID for dataset "%s" (version %s) and its files at "%s" (handle %s).', self.__drs_id, self.__version_number, self.__data_node, self.__dataset_handle)

    def __check_if_dataset_publication_allowed_right_now(self):
        # Commit is only valid in state "files_added".
        if not self.__machine_state == self.__machine_states['files_added']:
            msg = None
            if self.__machine_state == self.__machine_states['dataset_added']:
                msg = 'No file added yet.'
            else:
                msg = 'Publication was already done.'
            logwarn(LOGGER, msg)
            raise esgfpid.exceptions.OperationUnsupportedException(msg)

    def __check_data_consistency(self, ignore_exception):
        # Compare the added file handles with a previously published
        # set (only possible if the checker has solr access).
        checker = esgfpid.assistant.consistency.Checker(
            coupler=self.__coupler,
            drs_id=self.__drs_id,
            version_number=self.__version_number,
            data_node=self.__data_node
        )
        check_possible = checker.can_run_check()
        if check_possible:
            check_passed = checker.data_consistency_check(self.__list_of_file_handles)
            if check_passed:
                loginfo(LOGGER, 'Data consistency check passed for dataset %s.', self.__dataset_handle)
            else:
                msg = 'Dataset consistency check failed'
                logwarn(LOGGER, msg)
                if not ignore_exception:
                    raise esgfpid.exceptions.InconsistentFilesetException(msg)
        else:
            logdebug(LOGGER, 'No consistency check was carried out.')

    def __create_and_send_dataset_publication_message_to_queue(self):
        # The dataset message carries the (deduplicated) list of its
        # file handles.
        self.__remove_duplicates_from_list_of_file_handles()
        message = self.__create_dataset_publication_message()
        self.__send_message_to_queue(message)
        logdebug(LOGGER, 'Dataset publication message handed to rabbit thread.')
        logtrace(LOGGER, 'Dataset publication message: %s (%s, version %s).', self.__dataset_handle, self.__drs_id, self.__version_number)

    def __remove_duplicates_from_list_of_file_handles(self):
        # NOTE: set() loses the original insertion order.
        self.__list_of_file_handles = list(set(self.__list_of_file_handles))

    def __send_existing_file_messages_to_queue(self):
        # One message per added file, in the order they were added.
        for i in xrange(0, len(self.__list_of_file_messages)):
            self.__try_to_send_one_file_message(i)
        msg = 'All file publication jobs handed to rabbit thread.'
        logdebug(LOGGER, msg)

    def __try_to_send_one_file_message(self, list_index):
        msg = self.__list_of_file_messages[list_index]
        success = self.__send_message_to_queue(msg)
        logdebug(LOGGER, 'File publication message handed to rabbit thread: %s (%s)', msg['handle'], msg['file_name'])
        return success

    def __set_machine_state_to_finished(self):
        self.__machine_state = self.__machine_states['publication_finished']

    def __create_file_publication_message(self, args):
        # Assemble the rabbit message for one file; replica flag and
        # timestamp are shared with the dataset message.
        message = esgfpid.assistant.messages.publish_file(
            file_handle=args['file_handle'],
            file_size=args['file_size'],
            file_name=args['file_name'],
            checksum=args['checksum'],
            data_url=args['data_url'],
            data_node=self.__data_node,
            parent_dataset=self.__dataset_handle,
            checksum_type=args['checksum_type'],
            file_version=args['file_version'],
            is_replica=self.__is_replica,
            timestamp=self.__message_timestamp,
        )
        return message

    def __create_dataset_publication_message(self):
        # Assemble the rabbit message for the dataset itself.
        message = esgfpid.assistant.messages.publish_dataset(
            dataset_handle=self.__dataset_handle,
            is_replica=self.__is_replica,
            drs_id=self.__drs_id,
            version_number=self.__version_number,
            list_of_files=self.__list_of_file_handles,
            data_node=self.__data_node,
            timestamp=self.__message_timestamp,
            consumer_solr_url=self.__consumer_solr_url
        )
        return message

    def __send_message_to_queue(self, message):
        # Thin wrapper over the coupler; returns its success value.
        success = self.__coupler.send_message_to_queue(message)
        return success
|
IS-ENES-Data/esgf-pid
|
esgfpid/assistant/publish.py
|
DatasetPublicationAssistant.dataset_publication_finished
|
python
|
def dataset_publication_finished(self, ignore_exception=False):
'''
This is the "commit". It triggers the creation/update of handles.
* Check if the set of files corresponds to the previously published set (if applicable, and if solr url given, and if solr replied)
* The dataset publication message is created and sent to the queue.
* All file publication messages are sent to the queue.
'''
self.__check_if_dataset_publication_allowed_right_now()
self.__check_data_consistency(ignore_exception)
self.__coupler.start_rabbit_business() # Synchronous: Opens connection. Asynchronous: Ignored.
self.__create_and_send_dataset_publication_message_to_queue()
self.__send_existing_file_messages_to_queue()
self.__coupler.done_with_rabbit_business() # Synchronous: Closes connection. Asynchronous: Ignored.
self.__set_machine_state_to_finished()
loginfo(LOGGER, 'Requesting to publish PID for dataset "%s" (version %s) and its files at "%s" (handle %s).', self.__drs_id, self.__version_number, self.__data_node, self.__dataset_handle)
|
This is the "commit". It triggers the creation/update of handles.
* Check if the set of files corresponds to the previously published set (if applicable, and if solr url given, and if solr replied)
* The dataset publication message is created and sent to the queue.
* All file publication messages are sent to the queue.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/assistant/publish.py#L241-L257
|
[
"def loginfo(logger, msg, *args, **kwargs):\n '''\n Logs messages as INFO,\n unless esgfpid.defaults.LOG_INFO_TO_DEBUG,\n (then it logs messages as DEBUG).\n '''\n if esgfpid.defaults.LOG_INFO_TO_DEBUG:\n logger.debug(msg, *args, **kwargs)\n else:\n logger.info(msg, *args, **kwargs)\n",
"def __check_if_dataset_publication_allowed_right_now(self):\n if not self.__machine_state == self.__machine_states['files_added']:\n msg = None\n\n if self.__machine_state == self.__machine_states['dataset_added']:\n msg = 'No file added yet.'\n else:\n msg = 'Publication was already done.'\n\n logwarn(LOGGER, msg)\n raise esgfpid.exceptions.OperationUnsupportedException(msg)\n",
"def __check_data_consistency(self, ignore_exception):\n checker = esgfpid.assistant.consistency.Checker(\n coupler=self.__coupler,\n drs_id=self.__drs_id,\n version_number=self.__version_number,\n data_node=self.__data_node\n )\n check_possible = checker.can_run_check()\n if check_possible:\n check_passed = checker.data_consistency_check(self.__list_of_file_handles)\n if check_passed:\n loginfo(LOGGER, 'Data consistency check passed for dataset %s.', self.__dataset_handle)\n else:\n msg = 'Dataset consistency check failed'\n logwarn(LOGGER, msg)\n if not ignore_exception:\n raise esgfpid.exceptions.InconsistentFilesetException(msg)\n else:\n logdebug(LOGGER, 'No consistency check was carried out.')\n",
"def __create_and_send_dataset_publication_message_to_queue(self):\n self.__remove_duplicates_from_list_of_file_handles()\n message = self.__create_dataset_publication_message()\n self.__send_message_to_queue(message)\n logdebug(LOGGER, 'Dataset publication message handed to rabbit thread.')\n logtrace(LOGGER, 'Dataset publication message: %s (%s, version %s).', self.__dataset_handle, self.__drs_id, self.__version_number)\n",
"def __send_existing_file_messages_to_queue(self):\n for i in xrange(0, len(self.__list_of_file_messages)):\n self.__try_to_send_one_file_message(i)\n msg = 'All file publication jobs handed to rabbit thread.'\n logdebug(LOGGER, msg)\n",
"def __set_machine_state_to_finished(self):\n self.__machine_state = self.__machine_states['publication_finished']\n"
] |
class DatasetPublicationAssistant(object):
def __init__(self, **args):
logdebug(LOGGER, 'Constructor for Publication assistant for dataset "%s", version "%s" at host "%s".',
args['drs_id'],
args['version_number'],
args['data_node']
)
# Check args
mandatory_args = ['drs_id', 'version_number', 'data_node', 'prefix',
'thredds_service_path', 'is_replica', 'coupler',
'consumer_solr_url']
optional_args = []
utils.check_presence_of_mandatory_args(args, mandatory_args)
utils.add_missing_optional_args_with_value_none(args, optional_args)
self.__enforce_integer_version_number(args)
self.__enforce_boolean_replica_flag(args)
# Init methods...
self.__store_args_in_attributes(args)
self.__define_other_attributes()
self.__create_and_store_dataset_handle()
self.__init_state_machine()
logdebug(LOGGER, 'Done: Constructor for Publication assistant for dataset "%s", version "%i" at host "%s".',
args['drs_id'],
args['version_number'],
args['data_node']
)
def __enforce_integer_version_number(self, args):
try:
args['version_number'] = int(args['version_number'])
except ValueError:
raise esgfpid.exceptions.ArgumentError('Dataset version number is not an integer')
def __enforce_boolean_replica_flag(self, args):
try:
args['is_replica'] = utils.get_boolean(args['is_replica'])
except ValueError:
msg = ('Replica flag "%s" could not be parsed to boolean. '
'Please pass a boolean or "True" or "true" or "False" or "false"'
% args['is_replica'])
raise esgfpid.exceptions.ArgumentError(msg)
def __enforce_integer_file_size(self, args):
try:
args['file_size'] = int(args['file_size'])
except ValueError:
raise esgfpid.exceptions.ArgumentError('File size is not an integer')
def __enforce_string_file_version(self, args):
args['file_version'] = str(args['file_version'])
def __define_other_attributes(self):
self.__dataset_handle = None
self.__list_of_file_handles = []
self.__list_of_file_messages = []
self.__message_timestamp = utils.get_now_utc_as_formatted_string()
def __store_args_in_attributes(self, args):
self.__drs_id = args['drs_id']
self.__version_number = args['version_number']
self.__data_node = args['data_node'].rstrip('/')
self.__prefix = args['prefix']
self.__thredds_service_path = args['thredds_service_path'].strip('/')
self.__is_replica = args['is_replica']
self.__coupler = args['coupler']
self.__consumer_solr_url = args['consumer_solr_url']
def __init_state_machine(self):
self.__machine_states = {'dataset_added':0, 'files_added':1, 'publication_finished':2}
self.__machine_state = self.__machine_states['dataset_added']
def __create_and_store_dataset_handle(self):
self.__dataset_handle = create_dataset_handle(
drs_id = self.__drs_id,
version_number = self.__version_number,
prefix = self.__prefix
)
def get_dataset_handle(self):
'''
This returns the handle string of the dataset to be
published, so that the publisher can use it for its
own purposes, e.g. publishing it on a website.
The handle string consists of the prefix specified
at library init, and a suffix. The suffix is created
by making a hash over dataset id and version number.
:return: The handle string of this dataset,
e.g.: "hdl:21.14100/foobar".
'''
return self.__dataset_handle
# work horses:
def add_file(self, **args):
'''
Adds a file's information to the set of files to be
published in this dataset.
:param file_name: Mandatory. The file name (string).
This information will simply be included in the
PID record, but not used for anything.
:param file_handle: Mandatory. The handle (PID) of
this file (string). It is included in the file's netcdf
header. It must bear the prefix that this library
(or rather, the consuming servlet that will consume
this library's requests), has write access to.
:param file_size: Mandatory. The file size (as string or
integer. Will be transformed to integer). This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum: Mandatory. The file's checksum. This
information will be included in the handle record
and used for consistency checks during republications
of files with the same handle.
:param checksum_type: Mandatory. The checksum type/method
(string), e.g. "MD5" or "SHA256". This information will
be included in the handle record and used for consistency
checks during republications of files with the same handle.
:param publish_path: Mandatory. The THREDDS publish path as
a string. This is part of the URL for accessing the file,
which will be part of the handle record. It will not be
accessed, neither by the library nor by the consumer.
The URL consists of the dataset's "data_node", the dataset's
"thredds_service_path", and this "publish_path". Redundant
slashes are removed. If the URL does not start with "http",
"http://" is added.
:param file_version: Mandatory. Any string. File versions
are not managed in the PID. This information will simply be
included in the PID record, but not used for any reasoning.
'''
# Check if allowed:
self.__check_if_adding_files_allowed_right_now()
# Check if args ok:
mandatory_args = ['file_name', 'file_handle', 'file_size',
'checksum', 'publish_path', 'checksum_type',
'file_version']
utils.check_presence_of_mandatory_args(args, mandatory_args)
self.__enforce_integer_file_size(args)
self.__enforce_string_file_version(args)
# Add file:
self.__check_and_correct_handle_syntax(args)
self.__add_file(**args)
def __check_and_correct_handle_syntax(self, args):
self.__make_sure_hdl_is_added(args)
self.__check_if_prefix_is_there(args['file_handle'])
def __add_file(self, **args):
logdebug(LOGGER, 'Adding file "%s" with handle "%s".', args['file_name'], args['file_handle'])
self.__add_file_to_datasets_children(args['file_handle'])
self.__adapt_file_args(args)
self.__create_and_store_file_publication_message(args)
self.__set_machine_state_to_files_added()
logtrace(LOGGER, 'Adding file done.')
def __add_file_to_datasets_children(self, file_handle):
self.__list_of_file_handles.append(file_handle)
def __make_sure_hdl_is_added(self, args):
if not args['file_handle'].startswith('hdl:'):
args['file_handle'] = 'hdl:'+args['file_handle']
def __check_if_prefix_is_there(self, file_handle):
if not file_handle.startswith('hdl:'+self.__prefix+'/'):
expected = self.__prefix + '/'+ file_handle.lstrip('hdl:')
msg = ('\nThis file\'s tracking_id "%s" does not have the expected handle prefix "%s".'
'\nExpected "%s" or "%s"' % (file_handle, self.__prefix, expected, 'hdl:'+expected))
if '/' in file_handle:
maybe_prefix = file_handle.split('/')[0].lstrip('hdl:')
msg += ('.\nIf "%s" was meant to be the prefix, it does not correspond to '
'the prefix specified when initializing this library' % maybe_prefix)
raise esgfpid.exceptions.ESGFException(msg)
def __adapt_file_args(self, args):
url = self.__create_file_url(args['publish_path'])
args['data_url'] = url
del args['publish_path']
def __create_file_url(self, publish_path):
url = self.__data_node +'/'+ self.__thredds_service_path +'/'+ publish_path.strip('/')
if not url.startswith('http'):
url = 'http://'+url
return url
def __create_and_store_file_publication_message(self, args):
message = self.__create_file_publication_message(args)
self.__list_of_file_messages.append(message)
def __set_machine_state_to_files_added(self):
self.__machine_state = self.__machine_states['files_added']
def __check_if_adding_files_allowed_right_now(self):
dataset_added = self.__machine_state == self.__machine_states['dataset_added']
files_added = self.__machine_state == self.__machine_states['files_added']
if dataset_added or files_added:
pass
else:
msg = 'Too late to add files!'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.OperationUnsupportedException(msg)
def dataset_publication_finished(self, ignore_exception=False):
'''
This is the "commit". It triggers the creation/update of handles.
* Check if the set of files corresponds to the previously published set (if applicable, and if solr url given, and if solr replied)
* The dataset publication message is created and sent to the queue.
* All file publication messages are sent to the queue.
'''
self.__check_if_dataset_publication_allowed_right_now()
self.__check_data_consistency(ignore_exception)
self.__coupler.start_rabbit_business() # Synchronous: Opens connection. Asynchronous: Ignored.
self.__create_and_send_dataset_publication_message_to_queue()
self.__send_existing_file_messages_to_queue()
self.__coupler.done_with_rabbit_business() # Synchronous: Closes connection. Asynchronous: Ignored.
self.__set_machine_state_to_finished()
loginfo(LOGGER, 'Requesting to publish PID for dataset "%s" (version %s) and its files at "%s" (handle %s).', self.__drs_id, self.__version_number, self.__data_node, self.__dataset_handle)
def __check_if_dataset_publication_allowed_right_now(self):
if not self.__machine_state == self.__machine_states['files_added']:
msg = None
if self.__machine_state == self.__machine_states['dataset_added']:
msg = 'No file added yet.'
else:
msg = 'Publication was already done.'
logwarn(LOGGER, msg)
raise esgfpid.exceptions.OperationUnsupportedException(msg)
def __check_data_consistency(self, ignore_exception):
checker = esgfpid.assistant.consistency.Checker(
coupler=self.__coupler,
drs_id=self.__drs_id,
version_number=self.__version_number,
data_node=self.__data_node
)
check_possible = checker.can_run_check()
if check_possible:
check_passed = checker.data_consistency_check(self.__list_of_file_handles)
if check_passed:
loginfo(LOGGER, 'Data consistency check passed for dataset %s.', self.__dataset_handle)
else:
msg = 'Dataset consistency check failed'
logwarn(LOGGER, msg)
if not ignore_exception:
raise esgfpid.exceptions.InconsistentFilesetException(msg)
else:
logdebug(LOGGER, 'No consistency check was carried out.')
def __create_and_send_dataset_publication_message_to_queue(self):
self.__remove_duplicates_from_list_of_file_handles()
message = self.__create_dataset_publication_message()
self.__send_message_to_queue(message)
logdebug(LOGGER, 'Dataset publication message handed to rabbit thread.')
logtrace(LOGGER, 'Dataset publication message: %s (%s, version %s).', self.__dataset_handle, self.__drs_id, self.__version_number)
def __remove_duplicates_from_list_of_file_handles(self):
self.__list_of_file_handles = list(set(self.__list_of_file_handles))
def __send_existing_file_messages_to_queue(self):
for i in xrange(0, len(self.__list_of_file_messages)):
self.__try_to_send_one_file_message(i)
msg = 'All file publication jobs handed to rabbit thread.'
logdebug(LOGGER, msg)
def __try_to_send_one_file_message(self, list_index):
msg = self.__list_of_file_messages[list_index]
success = self.__send_message_to_queue(msg)
logdebug(LOGGER, 'File publication message handed to rabbit thread: %s (%s)', msg['handle'], msg['file_name'])
return success
def __set_machine_state_to_finished(self):
self.__machine_state = self.__machine_states['publication_finished']
def __create_file_publication_message(self, args):
message = esgfpid.assistant.messages.publish_file(
file_handle=args['file_handle'],
file_size=args['file_size'],
file_name=args['file_name'],
checksum=args['checksum'],
data_url=args['data_url'],
data_node=self.__data_node,
parent_dataset=self.__dataset_handle,
checksum_type=args['checksum_type'],
file_version=args['file_version'],
is_replica=self.__is_replica,
timestamp=self.__message_timestamp,
)
return message
def __create_dataset_publication_message(self):
message = esgfpid.assistant.messages.publish_dataset(
dataset_handle=self.__dataset_handle,
is_replica=self.__is_replica,
drs_id=self.__drs_id,
version_number=self.__version_number,
list_of_files=self.__list_of_file_handles,
data_node=self.__data_node,
timestamp=self.__message_timestamp,
consumer_solr_url=self.__consumer_solr_url
)
return message
def __send_message_to_queue(self, message):
success = self.__coupler.send_message_to_queue(message)
return success
|
IS-ENES-Data/esgf-pid
|
esgfpid/solr/solr.py
|
SolrInteractor.send_query
|
python
|
def send_query(self, query):
''' This method is called by the tasks. It is redirected to the submodule.'''
if self.__switched_on:
return self.__solr_server_connector.send_query(query)
else:
msg = 'Not sending query'
LOGGER.debug(msg)
raise esgfpid.exceptions.SolrSwitchedOff(msg)
|
This method is called by the tasks. It is redirected to the submodule.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/solr/solr.py#L104-L111
| null |
class SolrInteractor(object):
# Constructor:
'''
:param switched_off: Mandatory. Boolean.
:param prefix: Mandatory if not switched off.
:param solr_url: Mandatory if not switched off.
:param https_verify: Mandatory if not switched off.
:param disable_insecure_request_warning: Mandatory if not switched off.
'''
def __init__(self, **args):
mandatory_args = [
'switched_off',
'prefix',
'solr_url',
'https_verify',
'disable_insecure_request_warning'
]
esgfpid.utils.check_presence_of_mandatory_args(args, ['switched_off'])
if args['switched_off'] == True:
logdebug(LOGGER, 'Initializing solr module without access..')
self.__init_without_access()
logdebug(LOGGER, 'Initializing solr module without access.. done')
else:
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
logdebug(LOGGER, 'Initializing solr module..')
self.__init_with_access(args)
logdebug(LOGGER, 'Initializing solr module.. done')
def __init_without_access(self):
self.__switched_on = False
self.__prefix = None
self.__solr_server_connector = None
def __init_with_access(self, args):
self.__switched_on = True
self.__check_presence_of_args(args)
self.__prefix = args['prefix']
self.__make_server_connector(args)
def __check_presence_of_args(self, args):
mandatory_args = ['solr_url', 'prefix', 'https_verify',
'disable_insecure_request_warning', 'switched_off']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
def __make_server_connector(self, args):
self.__solr_server_connector = esgfpid.solr.serverconnector.SolrServerConnector(
solr_url = args['solr_url'],
https_verify = args ['https_verify'],
disable_insecure_request_warning = args['disable_insecure_request_warning']
)
# Getter
'''
State getter.
:returns: True if the solr module is switched off, i.e.
it either received a switch-off flag from the library
or had no solr URL passed. False if not switched off.
'''
def is_switched_off(self):
return not self.__switched_on
# Methods called by tasks:
def make_solr_base_query(self):
query_dict = {}
query_dict['distrib'] = esgfpid.defaults.SOLR_QUERY_DISTRIB
query_dict['format'] = 'application/solr+json'
query_dict['limit'] = 0 # As we don't want all the details of the found files/datasets!
return query_dict
#####################
### Various tasks ###
#####################
# Task 1
def retrieve_file_handles_of_same_dataset(self, **args):
'''
:return: List of handles, or empty list. Should never return None.
:raise: SolrSwitchedOff
:raise SolrError: If both strategies to find file handles failed.
'''
mandatory_args = ['drs_id', 'version_number', 'data_node']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
LOGGER.debug('Looking for files of dataset "%s", version "%s".',
args['drs_id'], str(args['version_number']))
if self.__switched_on:
return self.__retrieve_file_handles_of_same_dataset(**args)
else:
msg = 'Cannot retrieve handles of files of the same dataset.'
raise esgfpid.exceptions.SolrSwitchedOff(msg)
def __retrieve_file_handles_of_same_dataset(self, **args):
finder = esgfpid.solr.tasks.filehandles_same_dataset.FindFilesOfSameDatasetVersion(self)
args['prefix'] = self.__prefix
file_handles = finder.retrieve_file_handles_of_same_dataset(**args)
return file_handles
# Task 2
def retrieve_datasethandles_or_versionnumbers_of_allversions(self, drs_id):
LOGGER.debug('Looking for dataset handles or version numbers of '+
'dataset "%s".', drs_id)
if self.__switched_on:
return self.__retrieve_datasethandles_or_versionnumbers_of_allversions(drs_id)
else:
msg = 'Cannot retrieve handles or version numbers of all versions of the dataset.'
raise esgfpid.exceptions.SolrSwitchedOff(msg)
def __retrieve_datasethandles_or_versionnumbers_of_allversions(self, drs_id):
finder = esgfpid.solr.tasks.all_versions_of_dataset.FindVersionsOfSameDataset(self)
result_dict = finder.retrieve_dataset_handles_or_version_numbers_of_all_versions(drs_id, self.__prefix)
return result_dict
|
IS-ENES-Data/esgf-pid
|
esgfpid/solr/solr.py
|
SolrInteractor.retrieve_file_handles_of_same_dataset
|
python
|
def retrieve_file_handles_of_same_dataset(self, **args):
'''
:return: List of handles, or empty list. Should never return None.
:raise: SolrSwitchedOff
:raise SolrError: If both strategies to find file handles failed.
'''
mandatory_args = ['drs_id', 'version_number', 'data_node']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
LOGGER.debug('Looking for files of dataset "%s", version "%s".',
args['drs_id'], str(args['version_number']))
if self.__switched_on:
return self.__retrieve_file_handles_of_same_dataset(**args)
else:
msg = 'Cannot retrieve handles of files of the same dataset.'
raise esgfpid.exceptions.SolrSwitchedOff(msg)
|
:return: List of handles, or empty list. Should never return None.
:raise: SolrSwitchedOff
:raise SolrError: If both strategies to find file handles failed.
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/solr/solr.py#L126-L142
|
[
"def check_presence_of_mandatory_args(args, mandatory_args):\n missing_args = []\n for name in mandatory_args:\n if name not in args.keys():\n missing_args.append(name)\n if len(missing_args)>0:\n raise esgfpid.exceptions.ArgumentError('Missing mandatory arguments: '+', '.join(missing_args))\n else:\n return True\n",
"def __retrieve_file_handles_of_same_dataset(self, **args):\n finder = esgfpid.solr.tasks.filehandles_same_dataset.FindFilesOfSameDatasetVersion(self)\n args['prefix'] = self.__prefix\n file_handles = finder.retrieve_file_handles_of_same_dataset(**args)\n return file_handles\n"
] |
class SolrInteractor(object):
# Constructor:
'''
:param switched_off: Mandatory. Boolean.
:param prefix: Mandatory if not switched off.
:param solr_url: Mandatory if not switched off.
:param https_verify: Mandatory if not switched off.
:param disable_insecure_request_warning: Mandatory if not switched off.
'''
def __init__(self, **args):
mandatory_args = [
'switched_off',
'prefix',
'solr_url',
'https_verify',
'disable_insecure_request_warning'
]
esgfpid.utils.check_presence_of_mandatory_args(args, ['switched_off'])
if args['switched_off'] == True:
logdebug(LOGGER, 'Initializing solr module without access..')
self.__init_without_access()
logdebug(LOGGER, 'Initializing solr module without access.. done')
else:
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
logdebug(LOGGER, 'Initializing solr module..')
self.__init_with_access(args)
logdebug(LOGGER, 'Initializing solr module.. done')
def __init_without_access(self):
self.__switched_on = False
self.__prefix = None
self.__solr_server_connector = None
def __init_with_access(self, args):
self.__switched_on = True
self.__check_presence_of_args(args)
self.__prefix = args['prefix']
self.__make_server_connector(args)
def __check_presence_of_args(self, args):
mandatory_args = ['solr_url', 'prefix', 'https_verify',
'disable_insecure_request_warning', 'switched_off']
esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
def __make_server_connector(self, args):
self.__solr_server_connector = esgfpid.solr.serverconnector.SolrServerConnector(
solr_url = args['solr_url'],
https_verify = args ['https_verify'],
disable_insecure_request_warning = args['disable_insecure_request_warning']
)
# Getter
'''
State getter.
:returns: True if the solr module is switched off, i.e.
it either received a switch-off flag from the library
or had no solr URL passed. False if not switched off.
'''
def is_switched_off(self):
return not self.__switched_on
# Methods called by tasks:
def send_query(self, query):
''' This method is called by the tasks. It is redirected to the submodule.'''
if self.__switched_on:
return self.__solr_server_connector.send_query(query)
else:
msg = 'Not sending query'
LOGGER.debug(msg)
raise esgfpid.exceptions.SolrSwitchedOff(msg)
def make_solr_base_query(self):
query_dict = {}
query_dict['distrib'] = esgfpid.defaults.SOLR_QUERY_DISTRIB
query_dict['format'] = 'application/solr+json'
query_dict['limit'] = 0 # As we don't want all the details of the found files/datasets!
return query_dict
#####################
### Various tasks ###
#####################
# Task 1
def __retrieve_file_handles_of_same_dataset(self, **args):
finder = esgfpid.solr.tasks.filehandles_same_dataset.FindFilesOfSameDatasetVersion(self)
args['prefix'] = self.__prefix
file_handles = finder.retrieve_file_handles_of_same_dataset(**args)
return file_handles
# Task 2
def retrieve_datasethandles_or_versionnumbers_of_allversions(self, drs_id):
LOGGER.debug('Looking for dataset handles or version numbers of '+
'dataset "%s".', drs_id)
if self.__switched_on:
return self.__retrieve_datasethandles_or_versionnumbers_of_allversions(drs_id)
else:
msg = 'Cannot retrieve handles or version numbers of all versions of the dataset.'
raise esgfpid.exceptions.SolrSwitchedOff(msg)
def __retrieve_datasethandles_or_versionnumbers_of_allversions(self, drs_id):
finder = esgfpid.solr.tasks.all_versions_of_dataset.FindVersionsOfSameDataset(self)
result_dict = finder.retrieve_dataset_handles_or_version_numbers_of_all_versions(drs_id, self.__prefix)
return result_dict
|
IS-ENES-Data/esgf-pid
|
esgfpid/rabbit/nodemanager.py
|
NodeManager.__complete_info_dict
|
python
|
def __complete_info_dict(self, node_info_dict, is_open):
# Make pika credentials
creds = pika.PlainCredentials(
node_info_dict['username'],
node_info_dict['password']
)
node_info_dict['credentials'] = creds
if 'priority' in node_info_dict and node_info_dict['priority'] is not None:
node_info_dict['priority'] = str(node_info_dict['priority'])
else:
node_info_dict['priority'] = DEFAULT_PRIO
# Mandatories:
host = node_info_dict['host']
credentials = node_info_dict['credentials']
# Optional ones
# If not specified, fill in defaults.
vhost = ""
if 'vhost' in node_info_dict and node_info_dict['vhost'] is not None:
vhost = node_info_dict['vhost']
port = 15672
if 'port' in node_info_dict and node_info_dict['port'] is not None:
port = node_info_dict['port']
ssl_enabled = False
if 'ssl_enabled' in node_info_dict and node_info_dict['ssl_enabled'] is not None:
ssl_enabled = node_info_dict['ssl_enabled']
# Get some defaults:
socket_timeout = esgfpid.defaults.RABBIT_PIKA_SOCKET_TIMEOUT
connection_attempts = esgfpid.defaults.RABBIT_PIKA_CONNECTION_ATTEMPTS
retry_delay = esgfpid.defaults.RABBIT_PIKA_CONNECTION_RETRY_DELAY_SECONDS
# Make pika connection params
# https://pika.readthedocs.org/en/0.9.6/connecting.html
params = pika.ConnectionParameters(
host=host,
ssl=ssl_enabled,
port=port,
virtual_host=vhost,
credentials=credentials,
socket_timeout=socket_timeout,
connection_attempts=connection_attempts,
retry_delay=retry_delay
)
node_info_dict['params'] = params
# Add some stuff
node_info_dict['is_open'] = is_open
'''
https://pika.readthedocs.org/en/0.9.6/connecting.html
class pika.connection.ConnectionParameters(
host=None, port=None, virtual_host=None, credentials=None, channel_max=None,
frame_max=None, heartbeat_interval=None, ssl=None, ssl_options=None,
connection_attempts=None, retry_delay=None, socket_timeout=None, locale=None,
backpressure_detection=None)
'''
return node_info_dict
|
https://pika.readthedocs.org/en/0.9.6/connecting.html
class pika.connection.ConnectionParameters(
host=None, port=None, virtual_host=None, credentials=None, channel_max=None,
frame_max=None, heartbeat_interval=None, ssl=None, ssl_options=None,
connection_attempts=None, retry_delay=None, socket_timeout=None, locale=None,
backpressure_detection=None)
|
train
|
https://github.com/IS-ENES-Data/esgf-pid/blob/2f4909bb3ff79c0b6ed2932e0dd8b3bb6aec5e41/esgfpid/rabbit/nodemanager.py#L256-L316
| null |
class NodeManager(object):
'''
Constructor that takes no params. It creates an empty
container for RabbitMQ node information. The node
information then has to be added using "add_trusted_node()"
and "add_open_node()".
'''
def __init__(self):
# Props for basic_publish (needed by thread_feeder)
self.__properties = pika.BasicProperties(
delivery_mode=esgfpid.defaults.RABBIT_DELIVERY_MODE,
content_type='application/json',
)
# Nodes
self.__trusted_nodes = {}
self.__open_nodes = {}
self.__trusted_nodes_archive = {}
self.__open_nodes_archive = {}
# Each of these dictionaries has the priorities as keys (integers
# stored as strings, or 'zzz_default', if no prio was given, or
# 'zzzz_last' if the host had failed in the pre-flight check).
# For each priority, there is a list of node-info-dictionaries:
# self.__trusted_nodes = {
# "1": [node_info1, node_info2],
# "2": [node_info3],
# "zzz_default": [node_info4]
# }
# Current node
self.__current_node = None
self.__exchange_name = None
# Important info
self.__has_trusted = False
'''
Add information about a trusted RabbitMQ node to
the container, for later use.
:param username: The username to connect to RabbitMQ.
:param password: The password to connect to RabbitMQ.
:param host: The host name of the RabbitMQ instance.
:param exchange_name: The exchange to which to send the
messages.
:param priority: Optional. Integer priority for the use
of this instance.
'''
def add_trusted_node(self, **kwargs):
kwargs['is_open'] = False
node_info = self.__add_node(self.__trusted_nodes, self.__trusted_nodes_archive, **kwargs)
self.__has_trusted = True
logdebug(LOGGER, 'Trusted rabbit: %s', self.__get_node_log_string(node_info))
'''
Add information about an open RabbitMQ node to
the container, for later use.
The parameters that are needed are the same as
for trusted nodes.
Note that a password is needed!
:param username: The username to connect to RabbitMQ.
:param password: The password to connect to RabbitMQ.
:param host: The host name of the RabbitMQ instance.
:param exchange_name: The exchange to which to send the
messages.
:param priority: Optional. Integer priority for the use
of this instance.
'''
def add_open_node(self, **kwargs):
raise esgfpid.exceptions.ArgumentError('Open nodes no longer supported! (Messaging service "'+kwargs['host']+'")')
#kwargs['is_open'] = True
#added = node_info = self.__add_node(self.__open_nodes, self.__open_nodes_archive, **kwargs)
#logdebug(LOGGER, 'Open rabbit: %s', self.__get_node_log_string(node_info))
def __add_node(self, store_where, store_archive, **kwargs):
if self.__has_necessary_info(kwargs):
node_info = copy.deepcopy(kwargs)
self.__complete_info_dict(node_info, kwargs['is_open'])
self.__store_node_info_by_priority(node_info, store_where)
self.__store_node_info_by_priority(copy.deepcopy(node_info), store_archive)
#store_where[node_info['priority']].append(node_info)
#store_archive[node_info['priority']].append(copy.deepcopy(node_info))
return node_info
else:
raise esgfpid.exceptions.ArgumentError('Cannot add this RabbitMQ node. Missing info. Required: username, password, host and exchange_name. Provided: '+str(kwargs))
def __compare_nodes(self, cand1, cand2):
copy1 = copy.deepcopy(cand1)
copy2 = copy.deepcopy(cand2)
# These cannot be compared by "==".
# They are created from the other info, so neglecting
# them in this comparison is ok!
copy1['credentials'] = None
copy2['credentials'] = None
copy1['params'] = None
copy2['params'] = None
return copy1 == copy2
def __is_this_node_in_last_prio_already(self, where_to_look):
    """Return True if the current node is already filed under LAST_PRIO.

    :param where_to_look: dict mapping priority strings to lists of node dicts.
    :return: True if the current node is found in the LAST_PRIO bucket.
    """
    try:
        list_candidates = where_to_look[LAST_PRIO]
    except KeyError:
        # No bucket for the lowest priority exists at all.
        errmsg = 'No node of last prio (%s) exists.' % LAST_PRIO
        logwarn(LOGGER, errmsg)
        return False
    # Fix: iterate the list directly instead of Python-2-only
    # xrange(len(...)) indexing; also drops the unused exception binding.
    for candidate in list_candidates:
        if self.__compare_nodes(candidate, self.__current_node):
            logtrace(LOGGER, 'Found current node in archive (in list of last-prio nodes).')
            return True
    return False
def __move_to_last_prio(self, current_prio, all_nodes):
    """Move the current node from the current_prio bucket to LAST_PRIO.

    :param current_prio: priority key to search in.
    :param all_nodes: dict mapping priority strings to lists of node dicts.
    :return: True if the node was found and moved, False otherwise.
    :raises KeyError: if no bucket exists for current_prio.
    """
    list_candidates = all_nodes[current_prio]
    loginfo(LOGGER, 'Nodes of prio "%s": %s', current_prio, list_candidates)
    # Fix: enumerate() instead of Python-2-only xrange(len(...)).
    # Popping during iteration is safe here because we return immediately.
    for i, candidate in enumerate(list_candidates):
        if self.__compare_nodes(candidate, self.__current_node):
            logtrace(LOGGER, 'Found current node in archive.')
            # Add to lowest prio (create the bucket if needed):
            try:
                all_nodes[LAST_PRIO].append(candidate)
                logdebug(LOGGER, 'Added this host to list of lowest prio hosts...')
            except KeyError:
                all_nodes[LAST_PRIO] = [candidate]
                logdebug(LOGGER, 'Added this host to (newly-created) list of lowest prio hosts...')
            # Remove from current prio; drop the bucket if now empty:
            list_candidates.pop(i)
            loginfo(LOGGER, 'Removed this host from list of hosts with prio %s!', current_prio)
            if len(list_candidates) == 0:
                all_nodes.pop(current_prio)
                loginfo(LOGGER, 'Removed the current priority %s!', current_prio)
            return True
    return False
def set_priority_low_for_current(self):
    """Demote the current node to the LAST_PRIO bucket in its archive.

    Only the archive bucket the node is filed under changes; the
    'priority' value stored inside the node dict itself is untouched.

    :raises ValueError: if the node cannot be found in any bucket.
    """
    # Deal with open or trusted node:
    if self.__current_node['is_open']:
        where_to_look = self.__open_nodes_archive
    else:
        where_to_look = self.__trusted_nodes_archive
    # Bug fix: bind current_prio BEFORE the try block. Previously, if the
    # lookup itself raised KeyError, the except handler referenced an
    # unbound name and crashed with a NameError.
    current_prio = self.__current_node['priority']
    # Go over all nodes of the current prio to find the current one,
    # then move it to the last prio:
    moved = False
    try:
        moved = self.__move_to_last_prio(current_prio, where_to_look)
    except KeyError:
        errmsg = 'No node of prio %s found. Nodes: %s.' % (current_prio, where_to_look)
        logwarn(LOGGER, errmsg)
    if moved:
        return  # changed successfully!
    # The node had already been added to the last-prio nodes ?!
    if self.__is_this_node_in_last_prio_already(where_to_look):
        logdebug(LOGGER, 'Node already had lowest priority.')
        return  # nothing to change!
    # This is extremely unlikely - in fact I don't see how it could occur:
    logwarn(LOGGER, 'Could not find this node\'s priority (%s), nor the last-priority (%s). Somehow this node\'s priority was changed weirdly.' % (current_prio, LAST_PRIO))
    logwarn(LOGGER, 'All nodes: %s' % where_to_look)
    # No matter where the node is stored, move it to "last" prio.
    # Bug fix: iterate a snapshot of the keys — __move_to_last_prio may
    # delete buckets, and mutating a dict while iterating it is broken
    # (the original used iteritems(), which is also Python-2-only).
    for prio in list(where_to_look.keys()):
        logtrace(LOGGER, 'Looking in prio "%s"...' % prio)
        if self.__move_to_last_prio(prio, where_to_look):
            return  # changed successfully!
    errmsg = 'Node definitely not found, cannot change prio.'
    logwarn(LOGGER, errmsg)
    raise ValueError(errmsg)
def __store_node_info_by_priority(self, node_info, store_where):
    """Append node_info to the bucket for its priority, creating it if needed.

    :param node_info: completed node dict (must carry a 'priority' key).
    :param store_where: dict mapping priority strings to lists of node dicts.
    """
    # setdefault replaces the original try/except KeyError dance.
    store_where.setdefault(node_info['priority'], []).append(node_info)
def __get_node_log_string(self, node_info):
    # Human-readable one-liner describing a node, for log messages.
    # NOTE(review): this includes the plain-text password in log output —
    # confirm this is acceptable before enabling in production logging.
    parts = (node_info['host'], node_info['username'], node_info['password'], node_info['exchange_name'])
    return '%s, %s, %s (exchange "%s")' % parts
def __has_necessary_info(self, node_info_dict):
    """Return True if username, password, host and exchange_name are all
    present in the dict and not None.

    :param node_info_dict: raw node description dict.
    :return: bool.
    """
    # dict.get() returns None for missing keys, so one check covers both
    # "key absent" and "key present but None" — same semantics as the
    # original eight-clause condition, as an all() one-liner.
    required = ('username', 'password', 'host', 'exchange_name')
    return all(node_info_dict.get(key) is not None for key in required)
def __complete_info_dict(self, node_info_dict, is_open):
    # Enrich a raw node dict in place with everything needed to open a
    # pika connection: credentials object, normalized priority string,
    # and the pre-built pika.ConnectionParameters. Returns the same dict.
    # Make pika credentials
    creds = pika.PlainCredentials(
        node_info_dict['username'],
        node_info_dict['password']
    )
    node_info_dict['credentials'] = creds
    # Priority is normalized to a string (buckets are compared with
    # natural sort elsewhere); missing/None falls back to DEFAULT_PRIO.
    if 'priority' in node_info_dict and node_info_dict['priority'] is not None:
        node_info_dict['priority'] = str(node_info_dict['priority'])
    else:
        node_info_dict['priority'] = DEFAULT_PRIO
    # Mandatories:
    host = node_info_dict['host']
    credentials = node_info_dict['credentials']
    # Optional ones
    # If not specified, fill in defaults.
    vhost = ""
    if 'vhost' in node_info_dict and node_info_dict['vhost'] is not None:
        vhost = node_info_dict['vhost']
    # NOTE(review): 15672 is the RabbitMQ *management UI* port; the AMQP
    # default is 5672 — confirm this default is intentional.
    port = 15672
    if 'port' in node_info_dict and node_info_dict['port'] is not None:
        port = node_info_dict['port']
    ssl_enabled = False
    if 'ssl_enabled' in node_info_dict and node_info_dict['ssl_enabled'] is not None:
        ssl_enabled = node_info_dict['ssl_enabled']
    # Get some defaults:
    socket_timeout = esgfpid.defaults.RABBIT_PIKA_SOCKET_TIMEOUT
    connection_attempts = esgfpid.defaults.RABBIT_PIKA_CONNECTION_ATTEMPTS
    retry_delay = esgfpid.defaults.RABBIT_PIKA_CONNECTION_RETRY_DELAY_SECONDS
    # Make pika connection params
    # https://pika.readthedocs.org/en/0.9.6/connecting.html
    params = pika.ConnectionParameters(
        host=host,
        ssl=ssl_enabled,
        port=port,
        virtual_host=vhost,
        credentials=credentials,
        socket_timeout=socket_timeout,
        connection_attempts=connection_attempts,
        retry_delay=retry_delay
    )
    node_info_dict['params'] = params
    # Add some stuff
    node_info_dict['is_open'] = is_open
    '''
    https://pika.readthedocs.org/en/0.9.6/connecting.html
    class pika.connection.ConnectionParameters(
        host=None, port=None, virtual_host=None, credentials=None, channel_max=None,
        frame_max=None, heartbeat_interval=None, ssl=None, ssl_options=None,
        connection_attempts=None, retry_delay=None, socket_timeout=None, locale=None,
        backpressure_detection=None)
    '''
    return node_info_dict
'''
Return the connection parameters for the current
RabbitMQ host.
:return: Connection parameters (of type pika.ConnectionParameters)
'''
def get_connection_parameters(self):
    """Return the connection parameters for the current RabbitMQ host.

    Selects a host first if none is selected yet.

    :return: pika.ConnectionParameters for the current node.
    :raises esgfpid.exceptions.ArgumentError: if the current node is open
        (open nodes are no longer supported).
    """
    if self.__current_node is None:
        self.set_next_host()
    if self.__current_node['is_open']:
        # Bug fix: the original referenced an undefined name
        # ("credentials['url']") here, which raised NameError instead of
        # the intended error; also qualify ArgumentError like the rest
        # of the file does.
        raise esgfpid.exceptions.ArgumentError('Open nodes no longer supported! (Messaging service "'+self.__current_node['host']+'")')
    return self.__current_node['params']
'''
Simple getter to find out if any URLs are
left.
TODO: Needed for what, as we start over
once all have been used? There is no end!
:return: Boolean.
'''
def has_more_urls(self):
    """Return True if at least one RabbitMQ node is left to try."""
    return self.get_num_left_urls() > 0
'''
Compute and return the number of trusted
RabbitMQ instances.
:return: Number of trusted instances (integer).
'''
def get_num_left_trusted(self):
    """Return the number of trusted RabbitMQ nodes still left to try.

    :return: total count across all priority buckets (int).
    """
    # sum() over a generator replaces the manual accumulation loop.
    return sum(len(nodes) for nodes in self.__trusted_nodes.values())
'''
Compute and return the number of open
RabbitMQ instances.
:return: Number of open instances (integer).
'''
def get_num_left_open(self):
    """Return the number of open RabbitMQ nodes still left to try.

    :return: total count across all priority buckets (int).
    """
    # sum() over a generator replaces the manual accumulation loop.
    return sum(len(nodes) for nodes in self.__open_nodes.values())
'''
Compute and return the total number of RabbitMQ
instances.
:return: Number of trusted instances (integer).
'''
def get_num_left_urls(self):
    """Return the total number of RabbitMQ nodes left (open + trusted)."""
    n_open = self.get_num_left_open()
    n_trusted = self.get_num_left_trusted()
    return n_open + n_trusted
'''
Select the next RabbitMQ to be used, using the
predefined priorities. It is not returned.
'''
def set_next_host(self):
    # Select the next RabbitMQ node to use, preferring trusted nodes over
    # open ones. If neither pool has entries left, keep the current node
    # and only warn — unless there never was a current node, in which
    # case no nodes were ever configured and we raise.
    if len(self.__trusted_nodes) > 0:
        self.__current_node = self.__get_highest_priority_node(self.__trusted_nodes)
        logdebug(LOGGER, 'Selected a trusted node: %s', self.__current_node['host'])
    elif len(self.__open_nodes) > 0:
        self.__current_node = self.__get_highest_priority_node(self.__open_nodes)
        logdebug(LOGGER, 'Selected an open node: %s', self.__current_node['host'])
    else:
        if self.__current_node is None:
            logwarn(LOGGER, 'Unexpected: No RabbitMQ node left to try, and there is no current one.')
            raise esgfpid.exceptions.ArgumentError('No RabbitMQ nodes were passed at all.')
        logwarn(LOGGER, 'No RabbitMQ node left to try! Leaving the last one: %s', self.__current_node['host'])
    # Keep the exchange name in sync with the selected node.
    self.__exchange_name = self.__current_node['exchange_name']
def __get_highest_priority_node(self, dict_of_nodes):
    """Pop and return one node from the highest-priority bucket.

    The bucket is chosen by natural sort of the priority keys (smallest
    first). With several nodes in the bucket, one is picked at random.
    The store is modified: the returned node is removed, and an emptied
    bucket is deleted.

    :param dict_of_nodes: dict mapping priority strings to node lists.
    :return: one node dict.
    :raises IndexError: if dict_of_nodes is empty
        (TODO: decide explicit handling for "none left").
    """
    # Bug fix: sorted() copies the keys, so this also works on Python 3,
    # where dict.keys() returns a view without a .sort() method.
    available_priorities = sorted(dict_of_nodes.keys(), key=natural_keys)
    current_priority = available_priorities[0]
    list_of_priority_nodes = dict_of_nodes[current_priority]
    # Select one of them:
    if len(list_of_priority_nodes) == 1:
        nexthost = list_of_priority_nodes.pop()
        # Drop the now-empty bucket so it is not reconsidered.
        if len(list_of_priority_nodes) == 0:
            dict_of_nodes.pop(current_priority)
        return nexthost
    return self.__select_and_remove_random_url_from_list(list_of_priority_nodes)
'''
Return a pika.BasicProperties object needed for
connecting to RabbitMQ.
This returns always the same. Does not depend on node.
:return: A properties object (pika.BasicProperties).'''
def get_properties_for_message_publications(self):
    """Return the pika.BasicProperties used for publishing.

    Always the same object; it does not depend on the selected node.
    """
    props = self.__properties
    return props
'''
Select and return a random URL from a list.
This modifies the list and returns the URL!
:param list_urls: The list of URLs to randomly select from.
:return: Randomly selected URL.
'''
def __select_and_remove_random_url_from_list(self, list_urls):
    """Remove and return one random element of list_urls.

    The list is modified in place.

    :param list_urls: non-empty list to draw from.
    :return: the randomly selected (and removed) element.
    """
    # random.randrange is the idiomatic way to draw an index, and
    # pop(index) removes exactly the selected position — the original's
    # list.remove() deleted the first *equal* element, an O(n) scan that
    # only coincides with the chosen index when there are no duplicates.
    index = random.randrange(len(list_urls))
    return list_urls.pop(index)
'''
Return the current exchange name as string.
:return: The current exchange name.
'''
def get_exchange_name(self):
    """Return the exchange name of the currently selected node."""
    exchange = self.__exchange_name
    return exchange
'''
Adapt the routing key (if necessary) to indicate
that a message comes from an untrusted node.
The middle part (the RabbitMQ instruction part)
of the key is adapted so it is visible that the
node is an untrusted one.
Note: The binding has to be done in the RabbitMQ exit
node (by the consumer).
:return: The adapted routing key, in case the node
is untrusted.
'''
def adapt_routing_key_for_untrusted(self, routing_key):
    # Rewrite the routing key so consumers can tell the message was
    # published via an untrusted (open) node; trusted nodes pass the key
    # through unchanged. NOTE: the explicit "== True" / "== False" / else
    # three-way check is deliberate — a non-boolean 'is_open' value falls
    # into the error branch and gets the fallback treatment.
    # Message is published via an open node:
    if self.__current_node['is_open'] == True:
        if self.__has_trusted:
            # Trusted nodes exist, so this open node is only a fallback.
            return esgfpid.utils.adapt_routing_key_for_untrusted_fallback(routing_key)
        else:
            return esgfpid.utils.adapt_routing_key_for_untrusted(routing_key)
    # Message is published via a trusted node:
    elif self.__current_node['is_open'] == False:
        return routing_key
    else:
        logerror(LOGGER, 'Problem: Unsure whether the current node is open or not!')
        return esgfpid.utils.adapt_routing_key_for_untrusted_fallback(routing_key)
'''
Reset the list of available RabbitMQ instances to
how it was before trying any.
Once all RabbitMQ instances have been tried,
the list is reset, so we can start over trying
to connect.
TODO: Where is this called?
'''
def reset_nodes(self):
    """Restore the open/trusted node pools from their archives and
    select a host again, so connection attempts can start over."""
    logdebug(LOGGER, 'Resetting hosts...')
    self.__open_nodes = copy.deepcopy(self.__open_nodes_archive)
    self.__trusted_nodes = copy.deepcopy(self.__trusted_nodes_archive)
    self.set_next_host()
def _get_prio_stored_for_current(self):
    # Test helper: the priority recorded *inside* the current node dict.
    # This may differ from the archive bucket the node is filed under
    # after set_priority_low_for_current() demoted it.
    # Currently only used in unit test
    return self.__current_node['priority']
def _get_prio_where_current_is_stored(self):
    """Test helper: return the archive priority bucket under which the
    current node is filed (which may differ from its internal priority).

    :return: the priority key (string).
    :raises ValueError: if the archive is not a dict or the node is not found.
    """
    if self.__current_node['is_open']:
        where_to_look = self.__open_nodes_archive
    else:
        where_to_look = self.__trusted_nodes_archive
    # isinstance() instead of type() comparison.
    if not isinstance(where_to_look, dict):
        # Bug fix: the original message contained a bare '%s' placeholder
        # that was never interpolated.
        raise ValueError('%s is not a dict!' % (where_to_look,))
    # .items() instead of Python-2-only iteritems().
    for prio, nodes in where_to_look.items():
        for candidate in nodes:
            if self.__compare_nodes(self.__current_node, candidate):
                return prio
    raise ValueError('Node not found, so could not know currently active prio!')
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales_w_lines.py
|
Customer.buy_product
|
python
|
def buy_product(self, product_pk):
    """Return True if this customer has bought the given product, i.e.
    the product appears on any of their invoices or tickets."""
    # Check invoices first and short-circuit, so the ticket query is only
    # executed when no invoice line matched.
    on_invoice = self.invoice_sales.filter(line_invoice_sales__line_order__product__pk=product_pk).exists()
    if on_invoice:
        return True
    return self.ticket_sales.filter(line_ticket_sales__line_order__product__pk=product_pk).exists()
|
determina si el customer ha comprado un producto
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales_w_lines.py#L296-L304
| null |
class Customer(GenRole, CodenerixModel):
    """A billing customer: role permissions, billing configuration
    (currency, billing series, taxes) and the "default customer" flag,
    of which exactly one instance is kept True (see save())."""

    class CodenerixMeta:
        abstract = ABSTRACT_GenCustomer
        rol_groups = {
            'Customer': CDNX_INVOICING_PERMISSIONS['customer'],
        }
        rol_permissions = [
            'add_city',
            'add_citygeonameen',
            'add_citygeonamees',
            'add_continent',
            'add_continentgeonameen',
            'add_continentgeonamees',
            'add_corporateimage',
            'add_country',
            'add_countrygeonameen',
            'add_countrygeonamees',
            'add_customer',
            'add_customerdocument',
            'add_person',
            'add_personaddress',
            'add_province',
            'add_provincegeonameen',
            'add_provincegeonamees',
            'add_region',
            'add_regiongeonameen',
            'add_regiongeonamees',
            'add_salesbasket',
            'add_saleslinebasket',
            'add_timezone',
            'change_city',
            'change_citygeonameen',
            'change_citygeonamees',
            'change_continent',
            'change_continentgeonameen',
            'change_continentgeonamees',
            'change_corporateimage',
            'change_country',
            'change_countrygeonameen',
            'change_countrygeonamees',
            'change_customer',
            'change_customerdocument',
            'change_person',
            'change_personaddress',
            'change_province',
            'change_provincegeonameen',
            'change_provincegeonamees',
            'change_region',
            'change_regiongeonameen',
            'change_regiongeonamees',
            'change_salesbasket',
            'change_saleslinebasket',
            'change_timezone',
            'change_user',
            'delete_city',
            'delete_citygeonameen',
            'delete_citygeonamees',
            'delete_continent',
            'delete_continentgeonameen',
            'delete_continentgeonamees',
            'delete_corporateimage',
            'delete_country',
            'delete_countrygeonameen',
            'delete_countrygeonamees',
            'delete_customer',
            'delete_customerdocument',
            'delete_person',
            'delete_personaddress',
            'delete_province',
            'delete_provincegeonameen',
            'delete_provincegeonamees',
            'delete_region',
            'delete_regiongeonameen',
            'delete_regiongeonamees',
            'delete_salesbasket',
            'delete_saleslinebasket',
            # Bug fix: this permission string had a trailing space
            # ('delete_timezone '), which can never match the real
            # Django permission codename.
            'delete_timezone',
            'list_billingseries',
            'list_city',
            'list_continent',
            'list_corporateimage',
            'list_country',
            'list_customer',
            'list_customerdocument',
            'list_legalnote',
            'list_personaddress',
            'list_productdocument',
            'list_province',
            'list_purchaseslineinvoice',
            'list_region',
            'list_salesalbaran',
            'list_salesbasket',
            'list_salesinvoice',
            'list_salesinvoicerectification',
            'list_saleslinealbaran',
            'list_saleslinebasket',
            'list_saleslineinvoice',
            'list_saleslineinvoicerectification',
            'list_saleslineorder',
            'list_saleslineticket',
            'list_saleslineticketrectification',
            'list_salesorder',
            'list_salesreservedproduct',
            'list_salesticket',
            'list_salesticketrectification',
            'list_timezone',
            'list_typedocument',
            'list_typedocumenttexten',
            'list_typedocumenttextes',
            'view_billingseries',
            'view_city',
            'view_continent',
            'view_corporateimage',
            'view_country',
            'view_customer',
            'view_customerdocument',
            'view_legalnote',
            'view_personaddress',
            'view_province',
            'view_region',
            'view_salesbasket',
            'view_saleslinebasket',
            'view_timezone',
            'view_typedocument',
            'view_typedocumenttexten',
            'view_typedocumenttextes',
        ]
        force_methods = {
            'foreignkey_customer': ('CDNX_get_fk_info_customer', _('---')),
            'get_email': ('CDNX_get_email', ),
            'info_customer_details': ('CDNX_get_details_info_customer', ),
        }

    currency = models.ForeignKey(Currency, related_name='customers', verbose_name='Currency', on_delete=models.CASCADE)
    # Billing series
    billing_series = models.ForeignKey(BillingSeries, related_name='customers', verbose_name='Billing series', on_delete=models.CASCADE)
    # Billing data
    # Final balance
    final_balance = models.CharField(_("Balance"), max_length=250, blank=True, null=True)
    # Maximum authorized credit/risk
    credit = models.CharField(_("Credit"), max_length=250, blank=True, null=True)
    # Whether to apply the (Spanish VAT) equivalence surcharge
    apply_equivalence_surcharge = models.BooleanField(_("Apply equivalence surcharge"), blank=False, default=False)
    # VAT type
    type_tax = models.ForeignKey(TypeTax, related_name='customers', verbose_name=_("Type tax"), null=True, on_delete=models.CASCADE)
    default_customer = models.BooleanField(_("Default customer"), blank=False, default=False)

    @staticmethod
    def foreignkey_external():
        # Resolve the externally-provided foreign-key info method.
        return get_external_method(Customer, Customer.CodenerixMeta.force_methods['foreignkey_customer'][0])

    def __str__(self):
        # Prefer the related external object's representation if present.
        if hasattr(self, 'external'):
            return u"{}".format(smart_text(self.external))
        else:
            return "{}".format(self.pk)

    def __unicode__(self):
        return self.__str__()

    def __fields__(self, info):
        # Columns shown in codenerix list views; external apps may extend.
        fields = []
        fields.append(('final_balance', _("Balance")))
        fields.append(('credit', _("Credit")))
        fields.append(('currency', _("Currency")))
        fields.append(('billing_series', _("Billing series")))
        fields.append(('apply_equivalence_surcharge', _("Equivalence Surcharge")))
        fields.append(('type_tax', _("Type tax")))
        fields.append(('default_customer', _("Default customer")))
        fields = get_external_method(Customer, '__fields_customer__', info, fields)
        return fields

    def save(self, *args, **kwargs):
        """Save, keeping the invariant that exactly one Customer has
        default_customer=True: setting it clears all others; clearing it
        is reverted if no other default exists."""
        with transaction.atomic():
            if self.default_customer:
                Customer.objects.exclude(pk=self.pk).update(default_customer=False)
            else:
                if not Customer.objects.exclude(pk=self.pk).filter(default_customer=True).exists():
                    self.default_customer = True
            return super(Customer, self).save(*args, **kwargs)
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_cash.py
|
CashDiary.find
|
python
|
def find(pos, user):
    '''
    Get a valid CashDiary for today from the given POS, it will return:
    - None: if no CashDiary is available today and older one was already closed
    - New CashDiary: if no CashDiary is available today but there is an older one which it was opened
    - Existing CashDiary: if a CashDiary is available today (open or close)
    '''
    # Get checkpoint: the configured daily closing time, today.
    ck = dateparse.parse_time(getattr(settings, "CASHDIARY_CLOSES_AT", '03:00'))
    # Bug fix: take one snapshot of "now". The original called
    # timezone.now() once per date component, so a call straddling
    # midnight could build an inconsistent checkpoint date.
    now = timezone.now()
    checkpoint = timezone.datetime(now.year, now.month, now.day, ck.hour, ck.minute, ck.second)
    # Get the newest diary opened at/after the checkpoint, if any.
    cashdiary = CashDiary.objects.filter(pos=pos, opened_date__gte=checkpoint).order_by("-opened_date").first()
    if not cashdiary:
        # No cashdiary found for today, check older one
        oldercashdiary = CashDiary.objects.filter(pos=pos, opened_date__lt=checkpoint).order_by("-opened_date").first()
        if oldercashdiary:
            if oldercashdiary.closed_user:
                # Older diary already closed: nothing valid for today.
                cashdiary = None
            else:
                # Older cashdiary is still open: close it with its current
                # totals and open a new one carrying those totals over.
                amount_cash = oldercashdiary.amount_cash()
                amount_cards = oldercashdiary.amount_cards()
                oldercashdiary.closed_cash = amount_cash
                oldercashdiary.closed_cards = amount_cards
                oldercashdiary.closed_user = user
                oldercashdiary.closed_date = timezone.now()
                oldercashdiary.save()
                # Open new cashdiary
                cashdiary = CashDiary()
                cashdiary.pos = pos
                cashdiary.opened_cash = amount_cash
                cashdiary.opened_cards = amount_cards
                cashdiary.opened_user = user
                cashdiary.opened_date = timezone.now()
                cashdiary.save()
        else:
            # First diary ever for this POS: start at zero.
            cashdiary = CashDiary()
            cashdiary.pos = pos
            cashdiary.opened_cash = Decimal('0')
            cashdiary.opened_cards = Decimal('0')
            cashdiary.opened_user = user
            cashdiary.opened_date = timezone.now()
            cashdiary.save()
    # Return the found CashDiary
    return cashdiary
|
Get a valid CashDiary for today from the given POS, it will return:
- None: if no CashDiary is available today and older one was already closed
- New CashDiary: if no CashDiary is available today but there is an older one which it was opened
- Existing CashDiary: if a CashDiary is available today (open or close)
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_cash.py#L73-L128
| null |
class CashDiary(CodenerixModel):
    """Daily cash/cards register for a POS: opening and closing balances,
    who opened/closed it and when, and deviation amounts with their
    explanatory notes. Only one diary per POS may be open at a time
    (enforced in save())."""

    pos = models.ForeignKey(POS, related_name='cash_movements', verbose_name=_("Point of Sales"), null=True, on_delete=models.CASCADE)
    opened_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='opened_cash_diarys', verbose_name=_("User"), on_delete=models.CASCADE)
    opened_date = models.DateTimeField(_("Opened Date"), blank=False, null=False)
    opened_cash = models.DecimalField(_("Opened Cash"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
    opened_cash_extra = models.DecimalField(_("Opened Cash Deviation"), blank=True, null=True, default=None, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
    opened_cash_notes = models.TextField(_("Opened Cash Notes"), blank=True, null=False, default="")
    opened_cards = models.DecimalField(_("Opened Cards"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
    opened_cards_extra = models.DecimalField(_("Opened Cards Deviation"), blank=True, null=True, default=None, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
    opened_cards_notes = models.TextField(_("Opened Cards Notes"), blank=True, null=False, default="")
    closed_user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='closed_cash_diarys', verbose_name=_("User"), blank=True, null=True, on_delete=models.CASCADE)
    closed_date = models.DateTimeField(_("Closed Date"), blank=True, null=True)
    closed_cash = models.DecimalField(_("Closed Cash"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
    closed_cash_extra = models.DecimalField(_("Closed Cash Deviation"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=None)
    closed_cash_notes = models.TextField(_("Closed Cash Notes"), blank=True, null=False, default="")
    closed_cards = models.DecimalField(_("Closed Cards"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
    closed_cards_extra = models.DecimalField(_("Closed Cards Deviation"), blank=True, null=True, default=None, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
    closed_cards_notes = models.TextField(_("Closed Cards Notes"), blank=True, null=False, default="")

    def amount_cash(self):
        # Sum of all cash movements linked to this diary; Decimal('0') if none.
        total = self.cash_movements.filter(kind=PAYMENT_DETAILS_CASH).aggregate(total=Sum('amount')).get('total', Decimal('0'))
        if total:
            return total
        else:
            return Decimal('0')

    def amount_cards(self):
        # Sum of all card movements linked to this diary; Decimal('0') if none.
        total = self.cash_movements.filter(kind=PAYMENT_DETAILS_CARD).aggregate(total=Sum('amount')).get('total', Decimal('0'))
        if total:
            return total
        else:
            return Decimal('0')

    # Bug fix: the original stacked @staticmethod on top of @property,
    # which turns the attribute into a descriptor object instead of a
    # boolean — so `self.is_opened` (and thus is_closed) was broken.
    @property
    def is_opened(self):
        # A diary is open until somebody closes it.
        return self.closed_user is None

    @property
    def is_closed(self):
        return not self.is_opened

    def __str__(self):
        return u"({}) {}: {}".format(smart_text(self.pos), smart_text(self.opened_user), smart_text(self.opened_date))

    def __unicode__(self):
        return self.__str__()

    def __fields__(self, info):
        # Columns shown in codenerix list views (None label = hidden).
        fields = []
        fields.append(('pos', _('Point of Sales')))
        fields.append(('opened_user', _('Opened user')))
        fields.append(('opened_date', _('Opened date')))
        fields.append(('opened_cash', _('Opened cash')))
        fields.append(('opened_cards', _('Opened cards')))
        fields.append(('closed_user', _('Closed user')))
        fields.append(('closed_date', _('Closed date')))
        fields.append(('closed_cash', _('Closed cash')))
        fields.append(('closed_cards', _('Closed cards')))
        fields.append(('opened_cash_extra', None))
        fields.append(('opened_cash_notes', None))
        fields.append(('opened_cards_extra', None))
        fields.append(('opened_cards_notes', None))
        fields.append(('closed_cash_extra', None))
        fields.append(('closed_cash_notes', None))
        fields.append(('closed_cards_extra', None))
        fields.append(('closed_cards_notes', None))
        return fields

    def __searchF__(self, info):
        # "Pending validation" filter: any deviation beyond the allowed
        # error margin that has no explanatory note yet.
        error_margin = getattr(settings, "CASHDIARY_ERROR_MARGIN", 0.5)
        bigQ = Q(
            Q(
                Q(opened_cash_extra__isnull=False),
                Q(opened_cash_notes=""),
                Q(opened_cash_extra__gte=error_margin) | Q(opened_cash_extra__lte=-error_margin)
            ) | Q(
                Q(opened_cards_extra__isnull=False),
                Q(opened_cards_notes=""),
                Q(opened_cards_extra__gte=error_margin) | Q(opened_cards_extra__lte=-error_margin)
            ) | Q(
                Q(closed_cash_extra__isnull=False),
                Q(closed_cash_notes=""),
                Q(closed_cash_extra__gte=error_margin) | Q(closed_cash_extra__lte=-error_margin)
            ) | Q(
                Q(closed_cards_extra__isnull=False),
                Q(closed_cards_notes=""),
                Q(closed_cards_extra__gte=error_margin) | Q(closed_cards_extra__lte=-error_margin)
            )
        )
        tf = {}
        tf['pending'] = (_('Status'), lambda x: bigQ, [(False, _("Pending validation"))])
        return tf

    def save(self, *args, **kwargs):
        """Save, refusing to open a second diary for a POS that already
        has an open (not yet closed) one."""
        if self.closed_user is None and CashDiary.objects.filter(pos=self.pos, closed_date__isnull=True).exclude(pk=self.pk).exists():
            raise IntegrityError(_('Can not open a CashDiary in this POS, another CashDiary is already open'))
        else:
            return super(CashDiary, self).save(*args, **kwargs)
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales_original.py
|
GenLineProduct.save
|
python
|
def save(self, *args, **kwargs):
    # On first save, derive description and recommended price from either
    # the linked product or the linked order line, snapshot tax values,
    # recompute totals, then delegate to the model save.
    # NOTE(review): the external docs say that saving a line of a locked
    # (lock==True) document should duplicate the document into a new
    # version; that behaviour is not visible here — confirm where it lives.
    if self.pk is None:
        if hasattr(self, 'product'):
            if not self.description:
                self.description = self.product
            self.price_recommended = self.product.price_base
        elif hasattr(self, 'line_order'):
            if not self.description:
                self.description = self.line_order.product
            self.price_recommended = self.line_order.price_base
    if hasattr(self, 'tax') and hasattr(self, 'type_tax'):
        # Snapshot the tax percentage from the tax type at save time.
        self.tax = self.type_tax.tax
    if hasattr(self, 'product'):
        self.tax_label = self.product.product.tax.name
        # Prefer the line's own code; fall back to the product's code.
        if self.product.code:
            self.code = self.product.code
        else:
            self.code = self.product.product.code
    self.update_total(force_save=False)
    # 'force_save' is consumed here; the parent save() does not accept it.
    if 'force_save' in kwargs:
        kwargs.pop('force_save')
    return super(GenLineProduct, self).save(*args, **kwargs)
|
si al guardar una linea asociada a un documento bloqueado (lock==True), duplicar el documento en una nueva versiΓ³n
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales_original.py#L816-L843
| null |
class GenLineProduct(GenLineProductBasic): # META: Abstract class
class Meta(GenLineProductBasic.Meta):
abstract = True
price_recommended = models.DecimalField(_("Recomended price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
# valores aplicados
"""
desde el formulario se podrΓ‘ modificar el precio y la descripcion del producto
se guarda el tax usado y la relacion para poder hacer un seguimiento
"""
code = models.CharField(_("Code"), max_length=250, blank=True, null=True, default=None)
description = models.CharField(_("Description"), max_length=256, blank=True, null=True)
discount = models.DecimalField(_("Discount (%)"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
price_base = models.DecimalField(_("Price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
tax = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
def __str__(self):
description = ''
if hasattr(self, 'description'):
description = self.description
elif hasattr(self, 'line_invoice'):
description = self.line_invoice.description
elif hasattr(self, 'line_ticket'):
description = self.line_ticket.description
return u"{} - {}".format(smart_text(description), smart_text(self.quantity))
def __unicode__(self):
return self.__str__()
def __fields__(self, info):
fields = []
fields.append(('code', _("Code")))
fields.append(('description', _("Description")))
fields.append(('quantity', _("Quantity")))
fields.append(('price_base', _("Price base")))
fields.append(('discount', _("Discount (%)")))
fields.append(('discounts', _("Total Discount")))
fields.append(('tax', _("Tax (%)")))
fields.append(('equivalence_surcharge', _("Equivalence surcharge (%)")))
fields.append(('taxes', _("Total Tax")))
fields.append(('total', _("Total")))
return fields
def calculate_total(self):
# compatibility with old version
return self.total
def update_total(self, force_save=True):
# calculate totals
self.gen_update_total(self, force_save)
def __save__(self, args, kwargs, **conditional):
if hasattr(self, 'product'):
conditional["product"] = self.product
if hasattr(self, 'line_order'):
conditional["line_order"] = self.line_order
if hasattr(self, 'basket'):
conditional["basket"] = self.basket
return super(GenLineProduct, self).__save__(args, kwargs, **conditional)
@staticmethod
def create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, unique):
"""
pk: pk del documento origen
list_lines: listado de pk de lineas de origen
MODEL_SOURCE: modelo del documento origen
MODEL_FINAL: model del documento final
MODEL_LINE_SOURCE: modelo de la linea origen
MODEL_LINE_FINAL: modelo de la linea final
url_reverse: url del destino
related_line: campo del modelo linea final en el que irΓ‘ asignada la linea origen
related_object: campo del modelo linea final en el que irΓ‘ asignado el objeto final
msg_error_relation: Mensaje de error indicando que las lineas ya estΓ‘n relacionadas
msg_error_not_found: Mensaje de error indicando que no se encuentra el objeto origen
unique: (True/False) Indica si puede haber mΓ‘s de una linea asociada a otras lineas
"""
context = {}
obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
if list_lines and obj_src:
# parse to int
list_lines = [int(x) for x in list_lines]
# list of lines objects
if unique:
create = not MODEL_LINE_FINAL.objects.filter(**{"{}__pk__in".format(related_line): list_lines}).exists()
else:
create = True
"""
si debiendo ser filas unicas no las encuentra en el modelo final, se crea el nuevo documento
"""
if create:
with transaction.atomic():
obj_final = MODEL_FINAL()
obj_final.customer = obj_src.customer
obj_final.date = datetime.datetime.now()
obj_final.billing_series = obj_src.billing_series
if isinstance(obj_final, SalesOrder):
obj_final.budget = obj_src
obj_final.save()
for lb_pk in list_lines:
line_src = MODEL_LINE_SOURCE.objects.filter(pk=lb_pk).first()
if line_src:
line_final = MODEL_LINE_FINAL(**{"{}_id".format(related_object): obj_final.pk, related_line: line_src})
# line_final.order = obj_final
# line_final.line_budget = line_src
src_list_fields = [f.name for f in line_src._meta.get_fields()]
dst_list_fields = [f.name for f in line_final._meta.get_fields()]
if 'product' in src_list_fields and 'product' in dst_list_fields:
line_final.product = line_src.product
if 'description' in src_list_fields and 'description' in dst_list_fields:
line_final.description = line_src.description
if 'code' in src_list_fields and 'code' in dst_list_fields:
line_final.code = line_src.code
# if hasattr(line_src, 'line_order') and hasattr(line_final, 'line_order'):
if 'line_order' in src_list_fields and 'line_order' in dst_list_fields:
line_final.line_order = line_src.line_order
line_final.quantity = line_src.quantity
line_final.price_base = line_src.price_base
# if hasattr(line_src, 'price_recommended') and hasattr(line_final, 'price_recommended'):
if 'price_recommended' in src_list_fields and 'price_recommended' in dst_list_fields:
line_final.price_recommended = line_src.price_recommended
line_final.tax = line_src.tax
# line_final.type_tax = line_src.type_tax
line_final.discount = line_src.discount
if 'removed' in src_list_fields and 'removed' in dst_list_fields:
line_final.removed = line_src.removed
line_final.save()
if hasattr(line_src, 'line_basket_option_sales') and line_src.line_basket_option_sales.exists():
for opt_src in line_src.line_basket_option_sales.all():
opt_dst = SalesLineOrderOption()
opt_dst.line_order = line_final
opt_dst.product_option = opt_src.product_option
opt_dst.product_final = opt_src.product_final
opt_dst.quantity = opt_src.quantity
opt_dst.save()
# bloqueamos el documento origen
obj_src.lock = True
obj_src.save()
# context['url'] = reverse('ordersaless_details', kwargs={'pk': order.pk})
context['url'] = "{}#/{}".format(reverse(url_reverse), obj_final.pk)
context['obj_final'] = obj_final
else:
# _("Hay lineas asignadas a pedidos")
context['error'] = msg_error_relation
else:
# _('Budget not found')
context['error'] = msg_error_not_found
return context
@staticmethod
def create_order_from_budget_all(order):
lines_budget = order.budget.line_basket_sales.all()
for lb in lines_budget:
lo = SalesLineOrder()
lo.order = order
lo.line_budget = lb
lo.product = lb.product
lo.quantity = lb.quantity
lo.notes = lb.notes
lo.price_recommended = lb.price_recommended
lo.description = lb.description
lo.discount = lb.discount
lo.price_base = lb.price_base
lo.tax = lb.tax
lo.save()
order.budget.role = ROLE_BASKET_BUDGET
order.budget.save()
return lines_budget.count() == order.line_order_sales.all().count()
@staticmethod
def create_order_from_budget(pk, list_lines):
MODEL_SOURCE = SalesBasket
MODEL_FINAL = SalesOrder
MODEL_LINE_SOURCE = SalesLineBasket
MODEL_LINE_FINAL = SalesLineOrder
url_reverse = 'CDNX_invoicing_ordersaless_list'
# type_doc
related_line = 'line_budget'
related_object = 'order'
msg_error_relation = _("Hay lineas asignadas a pedidos")
msg_error_not_found = _('Budget not found')
# duplicamos el presupuesto si el numero de lineas es diferente
# relacionando el pedido a este nuevo presupuesto
if list_lines and len(list_lines) != MODEL_LINE_SOURCE.objects.filter(basket=pk).count():
budget = MODEL_SOURCE.objects.get(pk=pk)
# el presupuesto tiene que estar firmado para poder generar el pedido
if not budget.signed:
context = {}
context['error'] = _("Unsigned budget")
return context
else:
new_budget = budget.duplicate(MODEL_LINE_SOURCE, list_lines)
pk = new_budget.pk
list_lines = [x[0] for x in MODEL_LINE_SOURCE.objects.filter(basket=pk).values_list('pk')]
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, True)
@staticmethod
def create_albaran_automatic(pk, list_lines):
"""
creamos de forma automatica el albaran
"""
line_bd = SalesLineAlbaran.objects.filter(line_order__pk__in=list_lines).values_list('line_order__pk')
if line_bd.count() == 0 or len(list_lines) != len(line_bd[0]):
# solo aquellas lineas de pedidos que no estan ya albarandas
if line_bd.count() != 0:
for x in line_bd[0]:
list_lines.pop(list_lines.index(x))
GenLineProduct.create_albaran_from_order(pk, list_lines)
@staticmethod
def create_albaran_from_order(pk, list_lines):
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesAlbaran
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineAlbaran
url_reverse = 'CDNX_invoicing_albaransaless_list'
# type_doc
related_line = 'line_order'
related_object = 'albaran'
msg_error_relation = _("Hay lineas asignadas a albaranes")
msg_error_not_found = _('Sales order not found')
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_order(pk, list_lines):
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesTicket
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineTicket
url_reverse = 'CDNX_invoicing_ticketsaless_list'
# type_doc
related_line = 'line_order'
related_object = 'ticket'
msg_error_relation = _("Hay lineas asignadas a ticket")
msg_error_not_found = _('Sales order not found')
with transaction.atomic():
GenLineProduct.create_albaran_automatic(pk, list_lines)
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_slot(slot_pk):
context = {
"error": None,
"obj_final": None,
}
# order line not paid
line_orders = SalesLineOrder.objects.filter(
order__budget__pos_slot__pk=slot_pk,
order__payment__isnull=True,
order__cash_movements__isnull=True,
order__budget__removed=False,
order__removed=False,
removed=False
)
if line_orders:
# create o update ticket
tickets = []
for line in line_orders:
if line.line_ticket_sales.filter(removed=False, ticket__removed=False).count() != 0:
tickets += list(line.line_ticket_sales.filter(removed=False, ticket__removed=False).values_list('ticket')[0])
if len(set(tickets)) != len(tickets) and len(set(tickets)) > 1:
context['error'] = _(u'There are orders that are in several different tickets')
else:
if tickets:
with transaction.atomic():
ticket = SalesTicket.objects.get(pk=tickets[0], removed=False)
# There are already orders associated with a ticket
for line in line_orders:
if line.line_ticket_sales.count() == 0:
# new line
lt = SalesLineTicket()
lt.ticket = ticket
lt.line_order = line
lt.tax = line.tax
lt.discount = line.discount
lt.description = line.description
lt.notes = line.notes
lt.quantity = line.quantity
lt.price_recommended = line.price_recommended
lt.price_base = line.price_base
lt.save()
else:
# update line
lt = line.line_ticket_sales.first()
if lt.quantity != line.quantity:
lt.quantity = line.quantity
lt.save()
else:
# new ticket
with transaction.atomic():
ticket = SalesTicket()
ticket.billing_series = BillingSeries.objects.filter(default=True).first()
ticket.customer = line_orders[0].order.customer
ticket.save()
for line in line_orders:
lt = SalesLineTicket()
lt.ticket = ticket
lt.line_order = line
lt.tax = line.tax
lt.discount = line.discount
lt.description = line.description
lt.notes = line.notes
lt.quantity = line.quantity
lt.price_recommended = line.price_recommended
lt.price_base = line.price_base
lt.save()
context['obj_final'] = ticket
else:
# get ticket
line_order = SalesLineOrder.objects.filter(
order__budget__pos_slot__pk=slot_pk,
order__budget__removed=False,
order__removed=False,
removed=False,
).last()
ticket = SalesTicket.objects.filter(
customer=line_order.order.customer,
line_ticket_sales__line_order=line_order,
line_ticket_sales__line_order__removed=False,
line_ticket_sales__removed=False,
removed=False
).first()
if ticket:
context['obj_final'] = ticket
else:
context['error'] = _("Ticket don't found")
return context
@staticmethod
def create_invoice_from_order(pk, list_lines):
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesInvoice
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineInvoice
url_reverse = 'CDNX_invoicing_invoicesaless_list'
# type_doc
related_line = 'line_order'
related_object = 'invoice'
msg_error_relation = _("Hay lineas asignadas a facturas")
msg_error_not_found = _('Sales order not found')
with transaction.atomic():
GenLineProduct.create_albaran_automatic(pk, list_lines)
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_albaran(pk, list_lines):
"""
la pk y list_lines son de albaranes, necesitamos la info de las lineas de pedidos
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True)]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
context = GenLineProduct.create_ticket_from_order(new_pk, new_list_lines)
if 'error' not in context or not context['error']:
SalesLineAlbaran.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True).update(invoiced=True)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
@staticmethod
def create_invoice_from_albaran(pk, list_lines):
"""
la pk y list_lines son de albaranes, necesitamos la info de las lineas de pedidos
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True)]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
context = GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
if 'error' not in context or not context['error']:
SalesLineAlbaran.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True).update(invoiced=True)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
@staticmethod
def create_invoice_from_ticket(pk, list_lines):
"""
la pk y list_lines son de ticket, necesitamos la info de las lineas de pedidos
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineTicket.objects.values_list('line_order__pk').filter(pk__in=[int(x) for x in list_lines])]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
return GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales_original.py
|
GenLineProduct.create_document_from_another
|
python
|
def create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, unique):
context = {}
obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
if list_lines and obj_src:
# parse to int
list_lines = [int(x) for x in list_lines]
# list of lines objects
if unique:
create = not MODEL_LINE_FINAL.objects.filter(**{"{}__pk__in".format(related_line): list_lines}).exists()
else:
create = True
"""
si debiendo ser filas unicas no las encuentra en el modelo final, se crea el nuevo documento
"""
if create:
with transaction.atomic():
obj_final = MODEL_FINAL()
obj_final.customer = obj_src.customer
obj_final.date = datetime.datetime.now()
obj_final.billing_series = obj_src.billing_series
if isinstance(obj_final, SalesOrder):
obj_final.budget = obj_src
obj_final.save()
for lb_pk in list_lines:
line_src = MODEL_LINE_SOURCE.objects.filter(pk=lb_pk).first()
if line_src:
line_final = MODEL_LINE_FINAL(**{"{}_id".format(related_object): obj_final.pk, related_line: line_src})
# line_final.order = obj_final
# line_final.line_budget = line_src
src_list_fields = [f.name for f in line_src._meta.get_fields()]
dst_list_fields = [f.name for f in line_final._meta.get_fields()]
if 'product' in src_list_fields and 'product' in dst_list_fields:
line_final.product = line_src.product
if 'description' in src_list_fields and 'description' in dst_list_fields:
line_final.description = line_src.description
if 'code' in src_list_fields and 'code' in dst_list_fields:
line_final.code = line_src.code
# if hasattr(line_src, 'line_order') and hasattr(line_final, 'line_order'):
if 'line_order' in src_list_fields and 'line_order' in dst_list_fields:
line_final.line_order = line_src.line_order
line_final.quantity = line_src.quantity
line_final.price_base = line_src.price_base
# if hasattr(line_src, 'price_recommended') and hasattr(line_final, 'price_recommended'):
if 'price_recommended' in src_list_fields and 'price_recommended' in dst_list_fields:
line_final.price_recommended = line_src.price_recommended
line_final.tax = line_src.tax
# line_final.type_tax = line_src.type_tax
line_final.discount = line_src.discount
if 'removed' in src_list_fields and 'removed' in dst_list_fields:
line_final.removed = line_src.removed
line_final.save()
if hasattr(line_src, 'line_basket_option_sales') and line_src.line_basket_option_sales.exists():
for opt_src in line_src.line_basket_option_sales.all():
opt_dst = SalesLineOrderOption()
opt_dst.line_order = line_final
opt_dst.product_option = opt_src.product_option
opt_dst.product_final = opt_src.product_final
opt_dst.quantity = opt_src.quantity
opt_dst.save()
# bloqueamos el documento origen
obj_src.lock = True
obj_src.save()
# context['url'] = reverse('ordersaless_details', kwargs={'pk': order.pk})
context['url'] = "{}#/{}".format(reverse(url_reverse), obj_final.pk)
context['obj_final'] = obj_final
else:
# _("Hay lineas asignadas a pedidos")
context['error'] = msg_error_relation
else:
# _('Budget not found')
context['error'] = msg_error_not_found
return context
|
pk: pk del documento origen
list_lines: listado de pk de lineas de origen
MODEL_SOURCE: modelo del documento origen
MODEL_FINAL: model del documento final
MODEL_LINE_SOURCE: modelo de la linea origen
MODEL_LINE_FINAL: modelo de la linea final
url_reverse: url del destino
related_line: campo del modelo linea final en el que irΓ‘ asignada la linea origen
related_object: campo del modelo linea final en el que irΓ‘ asignado el objeto final
msg_error_relation: Mensaje de error indicando que las lineas ya estΓ‘n relacionadas
msg_error_not_found: Mensaje de error indicando que no se encuentra el objeto origen
unique: (True/False) Indica si puede haber mΓ‘s de una linea asociada a otras lineas
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales_original.py#L856-L952
| null |
class GenLineProduct(GenLineProductBasic): # META: Abstract class
class Meta(GenLineProductBasic.Meta):
abstract = True
price_recommended = models.DecimalField(_("Recomended price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
# valores aplicados
"""
desde el formulario se podrΓ‘ modificar el precio y la descripcion del producto
se guarda el tax usado y la relacion para poder hacer un seguimiento
"""
code = models.CharField(_("Code"), max_length=250, blank=True, null=True, default=None)
description = models.CharField(_("Description"), max_length=256, blank=True, null=True)
discount = models.DecimalField(_("Discount (%)"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
price_base = models.DecimalField(_("Price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
tax = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
def __str__(self):
description = ''
if hasattr(self, 'description'):
description = self.description
elif hasattr(self, 'line_invoice'):
description = self.line_invoice.description
elif hasattr(self, 'line_ticket'):
description = self.line_ticket.description
return u"{} - {}".format(smart_text(description), smart_text(self.quantity))
def __unicode__(self):
return self.__str__()
def __fields__(self, info):
fields = []
fields.append(('code', _("Code")))
fields.append(('description', _("Description")))
fields.append(('quantity', _("Quantity")))
fields.append(('price_base', _("Price base")))
fields.append(('discount', _("Discount (%)")))
fields.append(('discounts', _("Total Discount")))
fields.append(('tax', _("Tax (%)")))
fields.append(('equivalence_surcharge', _("Equivalence surcharge (%)")))
fields.append(('taxes', _("Total Tax")))
fields.append(('total', _("Total")))
return fields
def calculate_total(self):
# compatibility with old version
return self.total
def update_total(self, force_save=True):
# calculate totals
self.gen_update_total(self, force_save)
def save(self, *args, **kwargs):
if self.pk is None:
if hasattr(self, 'product'):
if not self.description:
self.description = self.product
self.price_recommended = self.product.price_base
elif hasattr(self, 'line_order'):
if not self.description:
self.description = self.line_order.product
self.price_recommended = self.line_order.price_base
if hasattr(self, 'tax') and hasattr(self, 'type_tax'):
self.tax = self.type_tax.tax
if hasattr(self, 'product'):
self.tax_label = self.product.product.tax.name
if self.product.code:
self.code = self.product.code
else:
self.code = self.product.product.code
"""
si al guardar una linea asociada a un documento bloqueado (lock==True), duplicar el documento en una nueva versiΓ³n
"""
self.update_total(force_save=False)
if 'force_save' in kwargs:
kwargs.pop('force_save')
return super(GenLineProduct, self).save(*args, **kwargs)
def __save__(self, args, kwargs, **conditional):
if hasattr(self, 'product'):
conditional["product"] = self.product
if hasattr(self, 'line_order'):
conditional["line_order"] = self.line_order
if hasattr(self, 'basket'):
conditional["basket"] = self.basket
return super(GenLineProduct, self).__save__(args, kwargs, **conditional)
@staticmethod
@staticmethod
def create_order_from_budget_all(order):
lines_budget = order.budget.line_basket_sales.all()
for lb in lines_budget:
lo = SalesLineOrder()
lo.order = order
lo.line_budget = lb
lo.product = lb.product
lo.quantity = lb.quantity
lo.notes = lb.notes
lo.price_recommended = lb.price_recommended
lo.description = lb.description
lo.discount = lb.discount
lo.price_base = lb.price_base
lo.tax = lb.tax
lo.save()
order.budget.role = ROLE_BASKET_BUDGET
order.budget.save()
return lines_budget.count() == order.line_order_sales.all().count()
@staticmethod
def create_order_from_budget(pk, list_lines):
MODEL_SOURCE = SalesBasket
MODEL_FINAL = SalesOrder
MODEL_LINE_SOURCE = SalesLineBasket
MODEL_LINE_FINAL = SalesLineOrder
url_reverse = 'CDNX_invoicing_ordersaless_list'
# type_doc
related_line = 'line_budget'
related_object = 'order'
msg_error_relation = _("Hay lineas asignadas a pedidos")
msg_error_not_found = _('Budget not found')
# duplicamos el presupuesto si el numero de lineas es diferente
# relacionando el pedido a este nuevo presupuesto
if list_lines and len(list_lines) != MODEL_LINE_SOURCE.objects.filter(basket=pk).count():
budget = MODEL_SOURCE.objects.get(pk=pk)
# el presupuesto tiene que estar firmado para poder generar el pedido
if not budget.signed:
context = {}
context['error'] = _("Unsigned budget")
return context
else:
new_budget = budget.duplicate(MODEL_LINE_SOURCE, list_lines)
pk = new_budget.pk
list_lines = [x[0] for x in MODEL_LINE_SOURCE.objects.filter(basket=pk).values_list('pk')]
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, True)
@staticmethod
def create_albaran_automatic(pk, list_lines):
"""
creamos de forma automatica el albaran
"""
line_bd = SalesLineAlbaran.objects.filter(line_order__pk__in=list_lines).values_list('line_order__pk')
if line_bd.count() == 0 or len(list_lines) != len(line_bd[0]):
# solo aquellas lineas de pedidos que no estan ya albarandas
if line_bd.count() != 0:
for x in line_bd[0]:
list_lines.pop(list_lines.index(x))
GenLineProduct.create_albaran_from_order(pk, list_lines)
@staticmethod
def create_albaran_from_order(pk, list_lines):
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesAlbaran
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineAlbaran
url_reverse = 'CDNX_invoicing_albaransaless_list'
# type_doc
related_line = 'line_order'
related_object = 'albaran'
msg_error_relation = _("Hay lineas asignadas a albaranes")
msg_error_not_found = _('Sales order not found')
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_order(pk, list_lines):
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesTicket
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineTicket
url_reverse = 'CDNX_invoicing_ticketsaless_list'
# type_doc
related_line = 'line_order'
related_object = 'ticket'
msg_error_relation = _("Hay lineas asignadas a ticket")
msg_error_not_found = _('Sales order not found')
with transaction.atomic():
GenLineProduct.create_albaran_automatic(pk, list_lines)
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_slot(slot_pk):
context = {
"error": None,
"obj_final": None,
}
# order line not paid
line_orders = SalesLineOrder.objects.filter(
order__budget__pos_slot__pk=slot_pk,
order__payment__isnull=True,
order__cash_movements__isnull=True,
order__budget__removed=False,
order__removed=False,
removed=False
)
if line_orders:
# create o update ticket
tickets = []
for line in line_orders:
if line.line_ticket_sales.filter(removed=False, ticket__removed=False).count() != 0:
tickets += list(line.line_ticket_sales.filter(removed=False, ticket__removed=False).values_list('ticket')[0])
if len(set(tickets)) != len(tickets) and len(set(tickets)) > 1:
context['error'] = _(u'There are orders that are in several different tickets')
else:
if tickets:
with transaction.atomic():
ticket = SalesTicket.objects.get(pk=tickets[0], removed=False)
# There are already orders associated with a ticket
for line in line_orders:
if line.line_ticket_sales.count() == 0:
# new line
lt = SalesLineTicket()
lt.ticket = ticket
lt.line_order = line
lt.tax = line.tax
lt.discount = line.discount
lt.description = line.description
lt.notes = line.notes
lt.quantity = line.quantity
lt.price_recommended = line.price_recommended
lt.price_base = line.price_base
lt.save()
else:
# update line
lt = line.line_ticket_sales.first()
if lt.quantity != line.quantity:
lt.quantity = line.quantity
lt.save()
else:
# new ticket
with transaction.atomic():
ticket = SalesTicket()
ticket.billing_series = BillingSeries.objects.filter(default=True).first()
ticket.customer = line_orders[0].order.customer
ticket.save()
for line in line_orders:
lt = SalesLineTicket()
lt.ticket = ticket
lt.line_order = line
lt.tax = line.tax
lt.discount = line.discount
lt.description = line.description
lt.notes = line.notes
lt.quantity = line.quantity
lt.price_recommended = line.price_recommended
lt.price_base = line.price_base
lt.save()
context['obj_final'] = ticket
else:
# get ticket
line_order = SalesLineOrder.objects.filter(
order__budget__pos_slot__pk=slot_pk,
order__budget__removed=False,
order__removed=False,
removed=False,
).last()
ticket = SalesTicket.objects.filter(
customer=line_order.order.customer,
line_ticket_sales__line_order=line_order,
line_ticket_sales__line_order__removed=False,
line_ticket_sales__removed=False,
removed=False
).first()
if ticket:
context['obj_final'] = ticket
else:
context['error'] = _("Ticket don't found")
return context
@staticmethod
def create_invoice_from_order(pk, list_lines):
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesInvoice
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineInvoice
url_reverse = 'CDNX_invoicing_invoicesaless_list'
# type_doc
related_line = 'line_order'
related_object = 'invoice'
msg_error_relation = _("Hay lineas asignadas a facturas")
msg_error_not_found = _('Sales order not found')
with transaction.atomic():
GenLineProduct.create_albaran_automatic(pk, list_lines)
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_albaran(pk, list_lines):
"""
la pk y list_lines son de albaranes, necesitamos la info de las lineas de pedidos
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True)]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
context = GenLineProduct.create_ticket_from_order(new_pk, new_list_lines)
if 'error' not in context or not context['error']:
SalesLineAlbaran.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True).update(invoiced=True)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
@staticmethod
def create_invoice_from_albaran(pk, list_lines):
"""
la pk y list_lines son de albaranes, necesitamos la info de las lineas de pedidos
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True)]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
context = GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
if 'error' not in context or not context['error']:
SalesLineAlbaran.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True).update(invoiced=True)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
@staticmethod
def create_invoice_from_ticket(pk, list_lines):
"""
la pk y list_lines son de ticket, necesitamos la info de las lineas de pedidos
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineTicket.objects.values_list('line_order__pk').filter(pk__in=[int(x) for x in list_lines])]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
return GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales_original.py
|
GenLineProduct.create_albaran_automatic
|
python
|
def create_albaran_automatic(pk, list_lines):
line_bd = SalesLineAlbaran.objects.filter(line_order__pk__in=list_lines).values_list('line_order__pk')
if line_bd.count() == 0 or len(list_lines) != len(line_bd[0]):
# solo aquellas lineas de pedidos que no estan ya albarandas
if line_bd.count() != 0:
for x in line_bd[0]:
list_lines.pop(list_lines.index(x))
GenLineProduct.create_albaran_from_order(pk, list_lines)
|
creamos de forma automatica el albaran
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales_original.py#L1009-L1020
| null |
class GenLineProduct(GenLineProductBasic): # META: Abstract class
class Meta(GenLineProductBasic.Meta):
abstract = True
price_recommended = models.DecimalField(_("Recomended price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
# valores aplicados
"""
desde el formulario se podrΓ‘ modificar el precio y la descripcion del producto
se guarda el tax usado y la relacion para poder hacer un seguimiento
"""
code = models.CharField(_("Code"), max_length=250, blank=True, null=True, default=None)
description = models.CharField(_("Description"), max_length=256, blank=True, null=True)
discount = models.DecimalField(_("Discount (%)"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
price_base = models.DecimalField(_("Price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
tax = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
def __str__(self):
description = ''
if hasattr(self, 'description'):
description = self.description
elif hasattr(self, 'line_invoice'):
description = self.line_invoice.description
elif hasattr(self, 'line_ticket'):
description = self.line_ticket.description
return u"{} - {}".format(smart_text(description), smart_text(self.quantity))
def __unicode__(self):
return self.__str__()
def __fields__(self, info):
fields = []
fields.append(('code', _("Code")))
fields.append(('description', _("Description")))
fields.append(('quantity', _("Quantity")))
fields.append(('price_base', _("Price base")))
fields.append(('discount', _("Discount (%)")))
fields.append(('discounts', _("Total Discount")))
fields.append(('tax', _("Tax (%)")))
fields.append(('equivalence_surcharge', _("Equivalence surcharge (%)")))
fields.append(('taxes', _("Total Tax")))
fields.append(('total', _("Total")))
return fields
def calculate_total(self):
# compatibility with old version
return self.total
def update_total(self, force_save=True):
# calculate totals
self.gen_update_total(self, force_save)
def save(self, *args, **kwargs):
if self.pk is None:
if hasattr(self, 'product'):
if not self.description:
self.description = self.product
self.price_recommended = self.product.price_base
elif hasattr(self, 'line_order'):
if not self.description:
self.description = self.line_order.product
self.price_recommended = self.line_order.price_base
if hasattr(self, 'tax') and hasattr(self, 'type_tax'):
self.tax = self.type_tax.tax
if hasattr(self, 'product'):
self.tax_label = self.product.product.tax.name
if self.product.code:
self.code = self.product.code
else:
self.code = self.product.product.code
"""
si al guardar una linea asociada a un documento bloqueado (lock==True), duplicar el documento en una nueva versiΓ³n
"""
self.update_total(force_save=False)
if 'force_save' in kwargs:
kwargs.pop('force_save')
return super(GenLineProduct, self).save(*args, **kwargs)
def __save__(self, args, kwargs, **conditional):
if hasattr(self, 'product'):
conditional["product"] = self.product
if hasattr(self, 'line_order'):
conditional["line_order"] = self.line_order
if hasattr(self, 'basket'):
conditional["basket"] = self.basket
return super(GenLineProduct, self).__save__(args, kwargs, **conditional)
@staticmethod
def create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, unique):
"""
Generic helper: build a destination document (MODEL_FINAL) from selected
lines of a source document (MODEL_SOURCE), copying shared line fields.

pk: pk of the source document
list_lines: list of pks of the source lines to copy
MODEL_SOURCE: source document model
MODEL_FINAL: destination document model
MODEL_LINE_SOURCE: source line model
MODEL_LINE_FINAL: destination line model
url_reverse: url name of the destination list view
related_line: field of the destination line that will hold the source line
related_object: field of the destination line that will hold the destination document
msg_error_relation: error message when the lines are already related
msg_error_not_found: error message when the source object is not found
unique: (True/False) whether a source line may feed at most one destination line
"""
context = {}
obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
if list_lines and obj_src:
# parse to int
list_lines = [int(x) for x in list_lines]
# list of lines objects
if unique:
create = not MODEL_LINE_FINAL.objects.filter(**{"{}__pk__in".format(related_line): list_lines}).exists()
else:
create = True
# (translation of the note below) if rows must be unique and none are found
# in the destination model yet, the new document is created.
"""
si debiendo ser filas unicas no las encuentra en el modelo final, se crea el nuevo documento
"""
if create:
with transaction.atomic():
obj_final = MODEL_FINAL()
obj_final.customer = obj_src.customer
obj_final.date = datetime.datetime.now()
obj_final.billing_series = obj_src.billing_series
if isinstance(obj_final, SalesOrder):
obj_final.budget = obj_src
obj_final.save()
for lb_pk in list_lines:
line_src = MODEL_LINE_SOURCE.objects.filter(pk=lb_pk).first()
if line_src:
line_final = MODEL_LINE_FINAL(**{"{}_id".format(related_object): obj_final.pk, related_line: line_src})
# line_final.order = obj_final
# line_final.line_budget = line_src
# Copy only the fields both line models actually declare.
src_list_fields = [f.name for f in line_src._meta.get_fields()]
dst_list_fields = [f.name for f in line_final._meta.get_fields()]
if 'product' in src_list_fields and 'product' in dst_list_fields:
line_final.product = line_src.product
if 'description' in src_list_fields and 'description' in dst_list_fields:
line_final.description = line_src.description
if 'code' in src_list_fields and 'code' in dst_list_fields:
line_final.code = line_src.code
# if hasattr(line_src, 'line_order') and hasattr(line_final, 'line_order'):
if 'line_order' in src_list_fields and 'line_order' in dst_list_fields:
line_final.line_order = line_src.line_order
line_final.quantity = line_src.quantity
line_final.price_base = line_src.price_base
# if hasattr(line_src, 'price_recommended') and hasattr(line_final, 'price_recommended'):
if 'price_recommended' in src_list_fields and 'price_recommended' in dst_list_fields:
line_final.price_recommended = line_src.price_recommended
line_final.tax = line_src.tax
# line_final.type_tax = line_src.type_tax
line_final.discount = line_src.discount
if 'removed' in src_list_fields and 'removed' in dst_list_fields:
line_final.removed = line_src.removed
line_final.save()
# Carry basket options over to order options when present.
if hasattr(line_src, 'line_basket_option_sales') and line_src.line_basket_option_sales.exists():
for opt_src in line_src.line_basket_option_sales.all():
opt_dst = SalesLineOrderOption()
opt_dst.line_order = line_final
opt_dst.product_option = opt_src.product_option
opt_dst.product_final = opt_src.product_final
opt_dst.quantity = opt_src.quantity
opt_dst.save()
# lock the source document
obj_src.lock = True
obj_src.save()
# context['url'] = reverse('ordersaless_details', kwargs={'pk': order.pk})
context['url'] = "{}#/{}".format(reverse(url_reverse), obj_final.pk)
context['obj_final'] = obj_final
else:
# _("Hay lineas asignadas a pedidos")
context['error'] = msg_error_relation
else:
# _('Budget not found')
context['error'] = msg_error_not_found
return context
@staticmethod
def create_order_from_budget_all(order):
"""Copy every budget line of order.budget into SalesLineOrder rows, flag the budget as a basket-budget, and return True when all lines were copied."""
lines_budget = order.budget.line_basket_sales.all()
for lb in lines_budget:
lo = SalesLineOrder()
lo.order = order
lo.line_budget = lb
lo.product = lb.product
lo.quantity = lb.quantity
lo.notes = lb.notes
lo.price_recommended = lb.price_recommended
lo.description = lb.description
lo.discount = lb.discount
lo.price_base = lb.price_base
lo.tax = lb.tax
lo.save()
order.budget.role = ROLE_BASKET_BUDGET
order.budget.save()
# Success only if the order now holds exactly as many lines as the budget.
return lines_budget.count() == order.line_order_sales.all().count()
@staticmethod
def create_order_from_budget(pk, list_lines):
"""Create a sales order from the selected budget lines; returns the context dict from create_document_from_another (or an 'error')."""
MODEL_SOURCE = SalesBasket
MODEL_FINAL = SalesOrder
MODEL_LINE_SOURCE = SalesLineBasket
MODEL_LINE_FINAL = SalesLineOrder
url_reverse = 'CDNX_invoicing_ordersaless_list'
# type_doc
related_line = 'line_budget'
related_object = 'order'
msg_error_relation = _("Hay lineas asignadas a pedidos")
msg_error_not_found = _('Budget not found')
# duplicate the budget when only a subset of its lines was selected,
# linking the order to this new budget instead
if list_lines and len(list_lines) != MODEL_LINE_SOURCE.objects.filter(basket=pk).count():
budget = MODEL_SOURCE.objects.get(pk=pk)
# the budget must be signed before an order can be generated
if not budget.signed:
context = {}
context['error'] = _("Unsigned budget")
return context
else:
new_budget = budget.duplicate(MODEL_LINE_SOURCE, list_lines)
pk = new_budget.pk
list_lines = [x[0] for x in MODEL_LINE_SOURCE.objects.filter(basket=pk).values_list('pk')]
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, True)
@staticmethod
def create_albaran_from_order(pk, list_lines):
    """
    Create a delivery note (albaran) from the selected sales-order lines.

    pk: pk of the source SalesOrder
    list_lines: pks of the SalesLineOrder rows to carry over
    Returns the context dict from create_document_from_another
    (either 'url'/'obj_final' on success or 'error').
    """
    # BUGFIX: the decorator was applied twice (@staticmethod @staticmethod),
    # which yields a non-callable attribute on Python < 3.10; a single
    # application is the correct form.
    MODEL_SOURCE = SalesOrder
    MODEL_FINAL = SalesAlbaran
    MODEL_LINE_SOURCE = SalesLineOrder
    MODEL_LINE_FINAL = SalesLineAlbaran
    url_reverse = 'CDNX_invoicing_albaransaless_list'
    # type_doc
    related_line = 'line_order'
    related_object = 'albaran'
    msg_error_relation = _("Hay lineas asignadas a albaranes")
    msg_error_not_found = _('Sales order not found')
    # unique=False: an order line may feed several albaran lines.
    return GenLineProduct.create_document_from_another(
        pk, list_lines,
        MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
        url_reverse, related_line, related_object,
        msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_order(pk, list_lines):
"""Create a sales ticket from the selected order lines, auto-creating the delivery note first inside one transaction."""
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesTicket
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineTicket
url_reverse = 'CDNX_invoicing_ticketsaless_list'
# type_doc
related_line = 'line_order'
related_object = 'ticket'
msg_error_relation = _("Hay lineas asignadas a ticket")
msg_error_not_found = _('Sales order not found')
with transaction.atomic():
GenLineProduct.create_albaran_automatic(pk, list_lines)
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_slot(slot_pk):
"""Create or update the sales ticket covering every unpaid order line of a POS slot; returns {'error': ..., 'obj_final': ...}."""
context = {
"error": None,
"obj_final": None,
}
# order line not paid
line_orders = SalesLineOrder.objects.filter(
order__budget__pos_slot__pk=slot_pk,
order__payment__isnull=True,
order__cash_movements__isnull=True,
order__budget__removed=False,
order__removed=False,
removed=False
)
if line_orders:
# create or update ticket
# Collect the tickets already holding any of these lines.
tickets = []
for line in line_orders:
if line.line_ticket_sales.filter(removed=False, ticket__removed=False).count() != 0:
tickets += list(line.line_ticket_sales.filter(removed=False, ticket__removed=False).values_list('ticket')[0])
# Refuse to continue when the unpaid lines are spread over several tickets.
if len(set(tickets)) != len(tickets) and len(set(tickets)) > 1:
context['error'] = _(u'There are orders that are in several different tickets')
else:
if tickets:
with transaction.atomic():
ticket = SalesTicket.objects.get(pk=tickets[0], removed=False)
# There are already orders associated with a ticket
for line in line_orders:
if line.line_ticket_sales.count() == 0:
# new line
lt = SalesLineTicket()
lt.ticket = ticket
lt.line_order = line
lt.tax = line.tax
lt.discount = line.discount
lt.description = line.description
lt.notes = line.notes
lt.quantity = line.quantity
lt.price_recommended = line.price_recommended
lt.price_base = line.price_base
lt.save()
else:
# update line
lt = line.line_ticket_sales.first()
if lt.quantity != line.quantity:
lt.quantity = line.quantity
lt.save()
else:
# new ticket
with transaction.atomic():
ticket = SalesTicket()
ticket.billing_series = BillingSeries.objects.filter(default=True).first()
ticket.customer = line_orders[0].order.customer
ticket.save()
for line in line_orders:
lt = SalesLineTicket()
lt.ticket = ticket
lt.line_order = line
lt.tax = line.tax
lt.discount = line.discount
lt.description = line.description
lt.notes = line.notes
lt.quantity = line.quantity
lt.price_recommended = line.price_recommended
lt.price_base = line.price_base
lt.save()
context['obj_final'] = ticket
else:
# get ticket
# No unpaid lines: look up the ticket of the slot's most recent order line.
line_order = SalesLineOrder.objects.filter(
order__budget__pos_slot__pk=slot_pk,
order__budget__removed=False,
order__removed=False,
removed=False,
).last()
ticket = SalesTicket.objects.filter(
customer=line_order.order.customer,
line_ticket_sales__line_order=line_order,
line_ticket_sales__line_order__removed=False,
line_ticket_sales__removed=False,
removed=False
).first()
if ticket:
context['obj_final'] = ticket
else:
# NOTE(review): message text "Ticket don't found" is ungrammatical;
# candidate fix "Ticket not found" (translation keys permitting).
context['error'] = _("Ticket don't found")
return context
@staticmethod
def create_invoice_from_order(pk, list_lines):
"""Create a sales invoice from the selected order lines, auto-creating the delivery note first inside one transaction."""
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesInvoice
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineInvoice
url_reverse = 'CDNX_invoicing_invoicesaless_list'
# type_doc
related_line = 'line_order'
related_object = 'invoice'
msg_error_relation = _("Hay lineas asignadas a facturas")
msg_error_not_found = _('Sales order not found')
with transaction.atomic():
GenLineProduct.create_albaran_automatic(pk, list_lines)
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_albaran(pk, list_lines):
"""
pk and list_lines refer to albaran lines; the ticket is generated from the
related order lines, which are resolved first. Marks the processed albaran
lines as invoiced on success. Returns a context dict with either the
delegated result or an 'error' message.
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True)]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
context = GenLineProduct.create_ticket_from_order(new_pk, new_list_lines)
if 'error' not in context or not context['error']:
SalesLineAlbaran.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True).update(invoiced=True)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
@staticmethod
def create_invoice_from_albaran(pk, list_lines):
"""
pk and list_lines refer to albaran lines; the invoice is generated from the
related order lines, which are resolved first. Marks the processed albaran
lines as invoiced on success. Returns a context dict with either the
delegated result or an 'error' message.
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True)]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
context = GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
if 'error' not in context or not context['error']:
SalesLineAlbaran.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True).update(invoiced=True)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
@staticmethod
def create_invoice_from_ticket(pk, list_lines):
"""
pk and list_lines refer to ticket lines; the invoice is generated from the
related order lines, which are resolved first. Returns a context dict with
either the delegated result of create_invoice_from_order or an 'error'.
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineTicket.objects.values_list('line_order__pk').filter(pk__in=[int(x) for x in list_lines])]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
return GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales_original.py
|
GenLineProduct.create_invoice_from_albaran
|
python
|
def create_invoice_from_albaran(pk, list_lines):
    """
    Build a sales invoice out of the albaran lines selected by *list_lines*.

    pk and list_lines refer to albaran lines; the invoice is actually
    generated from the related order lines, which are looked up first.
    Returns a context dict holding either the result of
    create_invoice_from_order or an 'error' message.
    """
    if not list_lines:
        return {'error': _('Lineas no seleccionadas')}
    # Order-line pks behind the chosen albaran lines, skipping already-invoiced ones.
    order_line_pks = [
        row[0]
        for row in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
            pk__in=[int(x) for x in list_lines]
        ).exclude(invoiced=True)
    ]
    if not order_line_pks:
        return {'error': _('Lineas no relacionadas con pedido')}
    first = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=order_line_pks)[:1]
    if not (first and first[0] and first[0][0]):
        return {'error': _('Pedido no encontrado')}
    context = GenLineProduct.create_invoice_from_order(first[0][0], order_line_pks)
    if not context.get('error'):
        # Flag the processed albaran lines as invoiced.
        SalesLineAlbaran.objects.filter(
            pk__in=[int(x) for x in list_lines]
        ).exclude(invoiced=True).update(invoiced=True)
    return context
|
la pk y list_lines son de albaranes, necesitamos la info de las lineas de pedidos
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales_original.py#L1201-L1227
| null |
class GenLineProduct(GenLineProductBasic): # META: Abstract class
class Meta(GenLineProductBasic.Meta):
abstract = True
price_recommended = models.DecimalField(_("Recomended price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
# valores aplicados
"""
desde el formulario se podrΓ‘ modificar el precio y la descripcion del producto
se guarda el tax usado y la relacion para poder hacer un seguimiento
"""
code = models.CharField(_("Code"), max_length=250, blank=True, null=True, default=None)
description = models.CharField(_("Description"), max_length=256, blank=True, null=True)
discount = models.DecimalField(_("Discount (%)"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
price_base = models.DecimalField(_("Price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
tax = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
def __str__(self):
description = ''
if hasattr(self, 'description'):
description = self.description
elif hasattr(self, 'line_invoice'):
description = self.line_invoice.description
elif hasattr(self, 'line_ticket'):
description = self.line_ticket.description
return u"{} - {}".format(smart_text(description), smart_text(self.quantity))
def __unicode__(self):
return self.__str__()
def __fields__(self, info):
fields = []
fields.append(('code', _("Code")))
fields.append(('description', _("Description")))
fields.append(('quantity', _("Quantity")))
fields.append(('price_base', _("Price base")))
fields.append(('discount', _("Discount (%)")))
fields.append(('discounts', _("Total Discount")))
fields.append(('tax', _("Tax (%)")))
fields.append(('equivalence_surcharge', _("Equivalence surcharge (%)")))
fields.append(('taxes', _("Total Tax")))
fields.append(('total', _("Total")))
return fields
def calculate_total(self):
# compatibility with old version
return self.total
def update_total(self, force_save=True):
# calculate totals
self.gen_update_total(self, force_save)
def save(self, *args, **kwargs):
if self.pk is None:
if hasattr(self, 'product'):
if not self.description:
self.description = self.product
self.price_recommended = self.product.price_base
elif hasattr(self, 'line_order'):
if not self.description:
self.description = self.line_order.product
self.price_recommended = self.line_order.price_base
if hasattr(self, 'tax') and hasattr(self, 'type_tax'):
self.tax = self.type_tax.tax
if hasattr(self, 'product'):
self.tax_label = self.product.product.tax.name
if self.product.code:
self.code = self.product.code
else:
self.code = self.product.product.code
"""
si al guardar una linea asociada a un documento bloqueado (lock==True), duplicar el documento en una nueva versiΓ³n
"""
self.update_total(force_save=False)
if 'force_save' in kwargs:
kwargs.pop('force_save')
return super(GenLineProduct, self).save(*args, **kwargs)
def __save__(self, args, kwargs, **conditional):
if hasattr(self, 'product'):
conditional["product"] = self.product
if hasattr(self, 'line_order'):
conditional["line_order"] = self.line_order
if hasattr(self, 'basket'):
conditional["basket"] = self.basket
return super(GenLineProduct, self).__save__(args, kwargs, **conditional)
@staticmethod
def create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, unique):
"""
pk: pk del documento origen
list_lines: listado de pk de lineas de origen
MODEL_SOURCE: modelo del documento origen
MODEL_FINAL: model del documento final
MODEL_LINE_SOURCE: modelo de la linea origen
MODEL_LINE_FINAL: modelo de la linea final
url_reverse: url del destino
related_line: campo del modelo linea final en el que irΓ‘ asignada la linea origen
related_object: campo del modelo linea final en el que irΓ‘ asignado el objeto final
msg_error_relation: Mensaje de error indicando que las lineas ya estΓ‘n relacionadas
msg_error_not_found: Mensaje de error indicando que no se encuentra el objeto origen
unique: (True/False) Indica si puede haber mΓ‘s de una linea asociada a otras lineas
"""
context = {}
obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
if list_lines and obj_src:
# parse to int
list_lines = [int(x) for x in list_lines]
# list of lines objects
if unique:
create = not MODEL_LINE_FINAL.objects.filter(**{"{}__pk__in".format(related_line): list_lines}).exists()
else:
create = True
"""
si debiendo ser filas unicas no las encuentra en el modelo final, se crea el nuevo documento
"""
if create:
with transaction.atomic():
obj_final = MODEL_FINAL()
obj_final.customer = obj_src.customer
obj_final.date = datetime.datetime.now()
obj_final.billing_series = obj_src.billing_series
if isinstance(obj_final, SalesOrder):
obj_final.budget = obj_src
obj_final.save()
for lb_pk in list_lines:
line_src = MODEL_LINE_SOURCE.objects.filter(pk=lb_pk).first()
if line_src:
line_final = MODEL_LINE_FINAL(**{"{}_id".format(related_object): obj_final.pk, related_line: line_src})
# line_final.order = obj_final
# line_final.line_budget = line_src
src_list_fields = [f.name for f in line_src._meta.get_fields()]
dst_list_fields = [f.name for f in line_final._meta.get_fields()]
if 'product' in src_list_fields and 'product' in dst_list_fields:
line_final.product = line_src.product
if 'description' in src_list_fields and 'description' in dst_list_fields:
line_final.description = line_src.description
if 'code' in src_list_fields and 'code' in dst_list_fields:
line_final.code = line_src.code
# if hasattr(line_src, 'line_order') and hasattr(line_final, 'line_order'):
if 'line_order' in src_list_fields and 'line_order' in dst_list_fields:
line_final.line_order = line_src.line_order
line_final.quantity = line_src.quantity
line_final.price_base = line_src.price_base
# if hasattr(line_src, 'price_recommended') and hasattr(line_final, 'price_recommended'):
if 'price_recommended' in src_list_fields and 'price_recommended' in dst_list_fields:
line_final.price_recommended = line_src.price_recommended
line_final.tax = line_src.tax
# line_final.type_tax = line_src.type_tax
line_final.discount = line_src.discount
if 'removed' in src_list_fields and 'removed' in dst_list_fields:
line_final.removed = line_src.removed
line_final.save()
if hasattr(line_src, 'line_basket_option_sales') and line_src.line_basket_option_sales.exists():
for opt_src in line_src.line_basket_option_sales.all():
opt_dst = SalesLineOrderOption()
opt_dst.line_order = line_final
opt_dst.product_option = opt_src.product_option
opt_dst.product_final = opt_src.product_final
opt_dst.quantity = opt_src.quantity
opt_dst.save()
# bloqueamos el documento origen
obj_src.lock = True
obj_src.save()
# context['url'] = reverse('ordersaless_details', kwargs={'pk': order.pk})
context['url'] = "{}#/{}".format(reverse(url_reverse), obj_final.pk)
context['obj_final'] = obj_final
else:
# _("Hay lineas asignadas a pedidos")
context['error'] = msg_error_relation
else:
# _('Budget not found')
context['error'] = msg_error_not_found
return context
@staticmethod
def create_order_from_budget_all(order):
lines_budget = order.budget.line_basket_sales.all()
for lb in lines_budget:
lo = SalesLineOrder()
lo.order = order
lo.line_budget = lb
lo.product = lb.product
lo.quantity = lb.quantity
lo.notes = lb.notes
lo.price_recommended = lb.price_recommended
lo.description = lb.description
lo.discount = lb.discount
lo.price_base = lb.price_base
lo.tax = lb.tax
lo.save()
order.budget.role = ROLE_BASKET_BUDGET
order.budget.save()
return lines_budget.count() == order.line_order_sales.all().count()
@staticmethod
def create_order_from_budget(pk, list_lines):
MODEL_SOURCE = SalesBasket
MODEL_FINAL = SalesOrder
MODEL_LINE_SOURCE = SalesLineBasket
MODEL_LINE_FINAL = SalesLineOrder
url_reverse = 'CDNX_invoicing_ordersaless_list'
# type_doc
related_line = 'line_budget'
related_object = 'order'
msg_error_relation = _("Hay lineas asignadas a pedidos")
msg_error_not_found = _('Budget not found')
# duplicamos el presupuesto si el numero de lineas es diferente
# relacionando el pedido a este nuevo presupuesto
if list_lines and len(list_lines) != MODEL_LINE_SOURCE.objects.filter(basket=pk).count():
budget = MODEL_SOURCE.objects.get(pk=pk)
# el presupuesto tiene que estar firmado para poder generar el pedido
if not budget.signed:
context = {}
context['error'] = _("Unsigned budget")
return context
else:
new_budget = budget.duplicate(MODEL_LINE_SOURCE, list_lines)
pk = new_budget.pk
list_lines = [x[0] for x in MODEL_LINE_SOURCE.objects.filter(basket=pk).values_list('pk')]
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, True)
@staticmethod
def create_albaran_automatic(pk, list_lines):
"""
Automatically create the delivery note (albaran) for the given order lines,
skipping lines that already belong to a delivery note.
"""
line_bd = SalesLineAlbaran.objects.filter(line_order__pk__in=list_lines).values_list('line_order__pk')
# NOTE(review): line_bd[0] is a single 1-tuple from values_list, so
# len(line_bd[0]) is always 1 and the loop below removes at most one
# already-delivered line — confirm whether values_list(..., flat=True)
# over all rows was intended.
if line_bd.count() == 0 or len(list_lines) != len(line_bd[0]):
# only those order lines that are not already on a delivery note
if line_bd.count() != 0:
# NOTE(review): list_lines is mutated in place here; callers keep using it afterwards.
for x in line_bd[0]:
list_lines.pop(list_lines.index(x))
GenLineProduct.create_albaran_from_order(pk, list_lines)
@staticmethod
def create_albaran_from_order(pk, list_lines):
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesAlbaran
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineAlbaran
url_reverse = 'CDNX_invoicing_albaransaless_list'
# type_doc
related_line = 'line_order'
related_object = 'albaran'
msg_error_relation = _("Hay lineas asignadas a albaranes")
msg_error_not_found = _('Sales order not found')
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_order(pk, list_lines):
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesTicket
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineTicket
url_reverse = 'CDNX_invoicing_ticketsaless_list'
# type_doc
related_line = 'line_order'
related_object = 'ticket'
msg_error_relation = _("Hay lineas asignadas a ticket")
msg_error_not_found = _('Sales order not found')
with transaction.atomic():
GenLineProduct.create_albaran_automatic(pk, list_lines)
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_slot(slot_pk):
context = {
"error": None,
"obj_final": None,
}
# order line not paid
line_orders = SalesLineOrder.objects.filter(
order__budget__pos_slot__pk=slot_pk,
order__payment__isnull=True,
order__cash_movements__isnull=True,
order__budget__removed=False,
order__removed=False,
removed=False
)
if line_orders:
# create o update ticket
tickets = []
for line in line_orders:
if line.line_ticket_sales.filter(removed=False, ticket__removed=False).count() != 0:
tickets += list(line.line_ticket_sales.filter(removed=False, ticket__removed=False).values_list('ticket')[0])
if len(set(tickets)) != len(tickets) and len(set(tickets)) > 1:
context['error'] = _(u'There are orders that are in several different tickets')
else:
if tickets:
with transaction.atomic():
ticket = SalesTicket.objects.get(pk=tickets[0], removed=False)
# There are already orders associated with a ticket
for line in line_orders:
if line.line_ticket_sales.count() == 0:
# new line
lt = SalesLineTicket()
lt.ticket = ticket
lt.line_order = line
lt.tax = line.tax
lt.discount = line.discount
lt.description = line.description
lt.notes = line.notes
lt.quantity = line.quantity
lt.price_recommended = line.price_recommended
lt.price_base = line.price_base
lt.save()
else:
# update line
lt = line.line_ticket_sales.first()
if lt.quantity != line.quantity:
lt.quantity = line.quantity
lt.save()
else:
# new ticket
with transaction.atomic():
ticket = SalesTicket()
ticket.billing_series = BillingSeries.objects.filter(default=True).first()
ticket.customer = line_orders[0].order.customer
ticket.save()
for line in line_orders:
lt = SalesLineTicket()
lt.ticket = ticket
lt.line_order = line
lt.tax = line.tax
lt.discount = line.discount
lt.description = line.description
lt.notes = line.notes
lt.quantity = line.quantity
lt.price_recommended = line.price_recommended
lt.price_base = line.price_base
lt.save()
context['obj_final'] = ticket
else:
# get ticket
line_order = SalesLineOrder.objects.filter(
order__budget__pos_slot__pk=slot_pk,
order__budget__removed=False,
order__removed=False,
removed=False,
).last()
ticket = SalesTicket.objects.filter(
customer=line_order.order.customer,
line_ticket_sales__line_order=line_order,
line_ticket_sales__line_order__removed=False,
line_ticket_sales__removed=False,
removed=False
).first()
if ticket:
context['obj_final'] = ticket
else:
context['error'] = _("Ticket don't found")
return context
@staticmethod
def create_invoice_from_order(pk, list_lines):
MODEL_SOURCE = SalesOrder
MODEL_FINAL = SalesInvoice
MODEL_LINE_SOURCE = SalesLineOrder
MODEL_LINE_FINAL = SalesLineInvoice
url_reverse = 'CDNX_invoicing_invoicesaless_list'
# type_doc
related_line = 'line_order'
related_object = 'invoice'
msg_error_relation = _("Hay lineas asignadas a facturas")
msg_error_not_found = _('Sales order not found')
with transaction.atomic():
GenLineProduct.create_albaran_automatic(pk, list_lines)
return GenLineProduct.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
url_reverse, related_line, related_object,
msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_albaran(pk, list_lines):
"""
la pk y list_lines son de albaranes, necesitamos la info de las lineas de pedidos
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True)]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
context = GenLineProduct.create_ticket_from_order(new_pk, new_list_lines)
if 'error' not in context or not context['error']:
SalesLineAlbaran.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True).update(invoiced=True)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
@staticmethod
def create_invoice_from_ticket(pk, list_lines):
    """
    Create a sales invoice from the selected ticket lines.

    pk and list_lines refer to ticket lines; the invoice is generated from
    the related order lines, which are resolved first. Returns a context
    dict with either the delegated result of create_invoice_from_order or
    an 'error' message.
    """
    # BUGFIX: the decorator was applied twice (@staticmethod @staticmethod),
    # which yields a non-callable attribute on Python < 3.10; a single
    # application is the correct form.
    context = {}
    if list_lines:
        new_list_lines = [x[0] for x in SalesLineTicket.objects.values_list('line_order__pk').filter(pk__in=[int(x) for x in list_lines])]
        if new_list_lines:
            lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
            if lo and lo[0] and lo[0][0]:
                new_pk = lo[0][0]
                return GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
            else:
                error = _('Pedido no encontrado')
        else:
            error = _('Lineas no relacionadas con pedido')
    else:
        error = _('Lineas no seleccionadas')
    context['error'] = error
    return context
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales_original.py
|
GenLineProduct.create_invoice_from_ticket
|
python
|
def create_invoice_from_ticket(pk, list_lines):
    """
    Build a sales invoice out of the ticket lines selected by *list_lines*.

    pk and list_lines refer to ticket lines; the invoice is generated from
    the related order lines. Returns a context dict with either the
    delegated result or an 'error' message.
    """
    if not list_lines:
        return {'error': _('Lineas no seleccionadas')}
    # Order-line pks behind the chosen ticket lines.
    order_line_pks = [
        row[0]
        for row in SalesLineTicket.objects.values_list('line_order__pk').filter(
            pk__in=[int(x) for x in list_lines]
        )
    ]
    if not order_line_pks:
        return {'error': _('Lineas no relacionadas con pedido')}
    first = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=order_line_pks)[:1]
    if first and first[0] and first[0][0]:
        return GenLineProduct.create_invoice_from_order(first[0][0], order_line_pks)
    return {'error': _('Pedido no encontrado')}
|
la pk y list_lines son de ticket, necesitamos la info de las lineas de pedidos
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales_original.py#L1230-L1249
| null |
class GenLineProduct(GenLineProductBasic): # META: Abstract class
class Meta(GenLineProductBasic.Meta):
abstract = True
price_recommended = models.DecimalField(_("Recomended price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
# valores aplicados
"""
desde el formulario se podrΓ‘ modificar el precio y la descripcion del producto
se guarda el tax usado y la relacion para poder hacer un seguimiento
"""
code = models.CharField(_("Code"), max_length=250, blank=True, null=True, default=None)
description = models.CharField(_("Description"), max_length=256, blank=True, null=True)
discount = models.DecimalField(_("Discount (%)"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
price_base = models.DecimalField(_("Price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
tax = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
def __str__(self):
description = ''
if hasattr(self, 'description'):
description = self.description
elif hasattr(self, 'line_invoice'):
description = self.line_invoice.description
elif hasattr(self, 'line_ticket'):
description = self.line_ticket.description
return u"{} - {}".format(smart_text(description), smart_text(self.quantity))
def __unicode__(self):
    # Python 2 compatibility alias: delegate to __str__.
    return self.__str__()
def __fields__(self, info):
    """Column spec for CODENERIX list views: ordered (field, label) pairs."""
    columns = (
        ('code', _("Code")),
        ('description', _("Description")),
        ('quantity', _("Quantity")),
        ('price_base', _("Price base")),
        ('discount', _("Discount (%)")),
        ('discounts', _("Total Discount")),
        ('tax', _("Tax (%)")),
        ('equivalence_surcharge', _("Equivalence surcharge (%)")),
        ('taxes', _("Total Tax")),
        ('total', _("Total")),
    )
    return list(columns)
def calculate_total(self):
    """Legacy alias kept for API compatibility; the total is precomputed in ``total``."""
    # compatibility with old version
    return self.total
def update_total(self, force_save=True):
    """Recompute this line's totals by delegating to ``gen_update_total``."""
    # calculate totals
    self.gen_update_total(self, force_save)
def save(self, *args, **kwargs):
    """Snapshot description/price/tax data from the related product or order
    line, refresh totals, then persist.

    ``force_save`` is consumed here (popped from kwargs) before calling the
    parent ``save``.
    """
    # Defaults are only seeded on creation; later edits keep user-entered values.
    if self.pk is None:
        if hasattr(self, 'product'):
            if not self.description:
                self.description = self.product
            self.price_recommended = self.product.price_base
        elif hasattr(self, 'line_order'):
            if not self.description:
                self.description = self.line_order.product
            self.price_recommended = self.line_order.price_base
    if hasattr(self, 'tax') and hasattr(self, 'type_tax'):
        # Snapshot the tax rate from the related tax type.
        self.tax = self.type_tax.tax
    if hasattr(self, 'product'):
        # Denormalise the tax label and product code onto the line.
        self.tax_label = self.product.product.tax.name
        if self.product.code:
            self.code = self.product.code
        else:
            self.code = self.product.product.code
    # Original note: when saving a line attached to a locked document
    # (lock==True), the document should be duplicated into a new version.
    """
    si al guardar una linea asociada a un documento bloqueado (lock==True), duplicar el documento en una nueva versión
    """
    self.update_total(force_save=False)
    if 'force_save' in kwargs:
        kwargs.pop('force_save')
    return super(GenLineProduct, self).save(*args, **kwargs)
def __save__(self, args, kwargs, **conditional):
    """Forward whichever optional relations exist on this line
    (product / line_order / basket) to the base ``__save__``."""
    for relation in ('product', 'line_order', 'basket'):
        if hasattr(self, relation):
            conditional[relation] = getattr(self, relation)
    return super(GenLineProduct, self).__save__(args, kwargs, **conditional)
@staticmethod
def create_document_from_another(pk, list_lines,
                                 MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
                                 url_reverse, related_line, related_object,
                                 msg_error_relation, msg_error_not_found, unique):
    """
    Generic engine that promotes one sales document into the next stage
    (budget -> order, order -> albaran/ticket/invoice).

    pk: pk of the source document
    list_lines: list of pks of the source lines to copy
    MODEL_SOURCE: model of the source document
    MODEL_FINAL: model of the final document
    MODEL_LINE_SOURCE: model of the source line
    MODEL_LINE_FINAL: model of the final line
    url_reverse: url name of the destination list view
    related_line: field on the final line model that will hold the source line
    related_object: field on the final line model that will hold the final document
    msg_error_relation: error message used when the lines are already related
    msg_error_not_found: error message used when the source object is not found
    unique: (True/False) when True, refuse to create the document if any
            selected source line is already linked to a final line
    """
    context = {}
    obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
    if list_lines and obj_src:
        # parse to int
        list_lines = [int(x) for x in list_lines]
        # list of lines objects
        if unique:
            create = not MODEL_LINE_FINAL.objects.filter(**{"{}__pk__in".format(related_line): list_lines}).exists()
        else:
            create = True
        # Original note: if rows must be unique and none are found in the final
        # model, the new document is created.
        """
        si debiendo ser filas unicas no las encuentra en el modelo final, se crea el nuevo documento
        """
        if create:
            with transaction.atomic():
                # Head of the new document copies customer/series from the source.
                obj_final = MODEL_FINAL()
                obj_final.customer = obj_src.customer
                obj_final.date = datetime.datetime.now()
                obj_final.billing_series = obj_src.billing_series
                if isinstance(obj_final, SalesOrder):
                    obj_final.budget = obj_src
                obj_final.save()
                for lb_pk in list_lines:
                    line_src = MODEL_LINE_SOURCE.objects.filter(pk=lb_pk).first()
                    if line_src:
                        line_final = MODEL_LINE_FINAL(**{"{}_id".format(related_object): obj_final.pk, related_line: line_src})
                        # line_final.order = obj_final
                        # line_final.line_budget = line_src
                        # Copy only the fields the two line models share.
                        src_list_fields = [f.name for f in line_src._meta.get_fields()]
                        dst_list_fields = [f.name for f in line_final._meta.get_fields()]
                        if 'product' in src_list_fields and 'product' in dst_list_fields:
                            line_final.product = line_src.product
                        if 'description' in src_list_fields and 'description' in dst_list_fields:
                            line_final.description = line_src.description
                        if 'code' in src_list_fields and 'code' in dst_list_fields:
                            line_final.code = line_src.code
                        # if hasattr(line_src, 'line_order') and hasattr(line_final, 'line_order'):
                        if 'line_order' in src_list_fields and 'line_order' in dst_list_fields:
                            line_final.line_order = line_src.line_order
                        line_final.quantity = line_src.quantity
                        line_final.price_base = line_src.price_base
                        # if hasattr(line_src, 'price_recommended') and hasattr(line_final, 'price_recommended'):
                        if 'price_recommended' in src_list_fields and 'price_recommended' in dst_list_fields:
                            line_final.price_recommended = line_src.price_recommended
                        line_final.tax = line_src.tax
                        # line_final.type_tax = line_src.type_tax
                        line_final.discount = line_src.discount
                        if 'removed' in src_list_fields and 'removed' in dst_list_fields:
                            line_final.removed = line_src.removed
                        line_final.save()
                        # Replicate pack options attached to a basket line.
                        if hasattr(line_src, 'line_basket_option_sales') and line_src.line_basket_option_sales.exists():
                            for opt_src in line_src.line_basket_option_sales.all():
                                opt_dst = SalesLineOrderOption()
                                opt_dst.line_order = line_final
                                opt_dst.product_option = opt_src.product_option
                                opt_dst.product_final = opt_src.product_final
                                opt_dst.quantity = opt_src.quantity
                                opt_dst.save()
                # Lock the source document so it can no longer be edited.
                # (original: bloqueamos el documento origen)
                obj_src.lock = True
                obj_src.save()
            # context['url'] = reverse('ordersaless_details', kwargs={'pk': order.pk})
            context['url'] = "{}#/{}".format(reverse(url_reverse), obj_final.pk)
            context['obj_final'] = obj_final
        else:
            # _("Hay lineas asignadas a pedidos")
            context['error'] = msg_error_relation
    else:
        # _('Budget not found')
        context['error'] = msg_error_not_found
    return context
@staticmethod
def create_order_from_budget_all(order):
    """Copy every line of ``order.budget`` onto *order* and mark the budget
    with ROLE_BASKET_BUDGET.

    Returns True when the order ends up with as many lines as the budget had.
    """
    lines_budget = order.budget.line_basket_sales.all()
    for lb in lines_budget:
        lo = SalesLineOrder()
        lo.order = order
        lo.line_budget = lb
        lo.product = lb.product
        lo.quantity = lb.quantity
        lo.notes = lb.notes
        lo.price_recommended = lb.price_recommended
        lo.description = lb.description
        lo.discount = lb.discount
        lo.price_base = lb.price_base
        lo.tax = lb.tax
        lo.save()
    order.budget.role = ROLE_BASKET_BUDGET
    order.budget.save()
    return lines_budget.count() == order.line_order_sales.all().count()
@staticmethod
def create_order_from_budget(pk, list_lines):
    """Create a sales order from (a subset of) a budget's lines.

    Returns the context dict produced by ``create_document_from_another``
    (or ``{'error': ...}`` when the budget is unsigned).
    """
    MODEL_SOURCE = SalesBasket
    MODEL_FINAL = SalesOrder
    MODEL_LINE_SOURCE = SalesLineBasket
    MODEL_LINE_FINAL = SalesLineOrder
    url_reverse = 'CDNX_invoicing_ordersaless_list'
    # type_doc
    related_line = 'line_budget'
    related_object = 'order'
    msg_error_relation = _("Hay lineas asignadas a pedidos")
    msg_error_not_found = _('Budget not found')
    # When only part of the budget is selected, duplicate the budget and link
    # the order to the duplicate instead.
    if list_lines and len(list_lines) != MODEL_LINE_SOURCE.objects.filter(basket=pk).count():
        budget = MODEL_SOURCE.objects.get(pk=pk)
        # The budget must be signed before an order can be generated.
        if not budget.signed:
            context = {}
            context['error'] = _("Unsigned budget")
            return context
        else:
            new_budget = budget.duplicate(MODEL_LINE_SOURCE, list_lines)
            pk = new_budget.pk
            list_lines = [x[0] for x in MODEL_LINE_SOURCE.objects.filter(basket=pk).values_list('pk')]
    return GenLineProduct.create_document_from_another(pk, list_lines,
                                                       MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
                                                       url_reverse, related_line, related_object,
                                                       msg_error_relation, msg_error_not_found, True)
@staticmethod
def create_albaran_automatic(pk, list_lines):
    """
    Automatically create the albaran (delivery note) for the order lines in
    *list_lines* that are not yet covered by one.
    (original: creamos de forma automatica el albaran)
    """
    line_bd = SalesLineAlbaran.objects.filter(line_order__pk__in=list_lines).values_list('line_order__pk')
    # NOTE(review): line_bd[0] is the FIRST row tuple (always length 1), so
    # len(line_bd[0]) is always 1 and the loop below visits a single pk.
    # This looks like it was meant to use the flattened pk list
    # (values_list(..., flat=True)) — TODO confirm against upstream.
    if line_bd.count() == 0 or len(list_lines) != len(line_bd[0]):
        # Keep only the order lines that are not already on an albaran.
        # (original: solo aquellas lineas de pedidos que no estan ya albarandas)
        if line_bd.count() != 0:
            for x in line_bd[0]:
                list_lines.pop(list_lines.index(x))
        GenLineProduct.create_albaran_from_order(pk, list_lines)
@staticmethod
def create_albaran_from_order(pk, list_lines):
    """Create a delivery note (albaran) from the selected sales-order lines.

    Thin wrapper around ``create_document_from_another`` with the
    order->albaran models and messages filled in (non-unique: an order line
    may feed several albaran lines).
    """
    return GenLineProduct.create_document_from_another(
        pk, list_lines,
        SalesOrder, SalesAlbaran, SalesLineOrder, SalesLineAlbaran,
        'CDNX_invoicing_albaransaless_list',
        'line_order', 'albaran',
        _("Hay lineas asignadas a albaranes"),
        _('Sales order not found'),
        False)
@staticmethod
def create_ticket_from_order(pk, list_lines):
    """Create a ticket from the selected order lines.

    Any line without a delivery note gets one first via
    ``create_albaran_automatic``; both steps share one transaction.
    """
    MODEL_SOURCE = SalesOrder
    MODEL_FINAL = SalesTicket
    MODEL_LINE_SOURCE = SalesLineOrder
    MODEL_LINE_FINAL = SalesLineTicket
    url_reverse = 'CDNX_invoicing_ticketsaless_list'
    # type_doc
    related_line = 'line_order'
    related_object = 'ticket'
    msg_error_relation = _("Hay lineas asignadas a ticket")
    msg_error_not_found = _('Sales order not found')
    with transaction.atomic():
        GenLineProduct.create_albaran_automatic(pk, list_lines)
        return GenLineProduct.create_document_from_another(pk, list_lines,
                                                           MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
                                                           url_reverse, related_line, related_object,
                                                           msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_slot(slot_pk):
    """Create or update the ticket for a POS slot's unpaid order lines.

    Returns ``{'error': ..., 'obj_final': ...}``: ``obj_final`` is the ticket
    on success, ``error`` a message otherwise.
    """
    context = {
        "error": None,
        "obj_final": None,
    }
    # order line not paid
    line_orders = SalesLineOrder.objects.filter(
        order__budget__pos_slot__pk=slot_pk,
        order__payment__isnull=True,
        order__cash_movements__isnull=True,
        order__budget__removed=False,
        order__removed=False,
        removed=False
    )
    if line_orders:
        # create o update ticket
        # Collect the tickets the unpaid lines already belong to.
        tickets = []
        for line in line_orders:
            if line.line_ticket_sales.filter(removed=False, ticket__removed=False).count() != 0:
                tickets += list(line.line_ticket_sales.filter(removed=False, ticket__removed=False).values_list('ticket')[0])
        if len(set(tickets)) != len(tickets) and len(set(tickets)) > 1:
            # The unpaid lines span several tickets: ambiguous, refuse.
            context['error'] = _(u'There are orders that are in several different tickets')
        else:
            if tickets:
                with transaction.atomic():
                    ticket = SalesTicket.objects.get(pk=tickets[0], removed=False)
                    # There are already orders associated with a ticket
                    for line in line_orders:
                        if line.line_ticket_sales.count() == 0:
                            # new line
                            lt = SalesLineTicket()
                            lt.ticket = ticket
                            lt.line_order = line
                            lt.tax = line.tax
                            lt.discount = line.discount
                            lt.description = line.description
                            lt.notes = line.notes
                            lt.quantity = line.quantity
                            lt.price_recommended = line.price_recommended
                            lt.price_base = line.price_base
                            lt.save()
                        else:
                            # update line
                            lt = line.line_ticket_sales.first()
                            if lt.quantity != line.quantity:
                                lt.quantity = line.quantity
                                lt.save()
            else:
                # new ticket
                with transaction.atomic():
                    ticket = SalesTicket()
                    ticket.billing_series = BillingSeries.objects.filter(default=True).first()
                    ticket.customer = line_orders[0].order.customer
                    ticket.save()
                    for line in line_orders:
                        lt = SalesLineTicket()
                        lt.ticket = ticket
                        lt.line_order = line
                        lt.tax = line.tax
                        lt.discount = line.discount
                        lt.description = line.description
                        lt.notes = line.notes
                        lt.quantity = line.quantity
                        lt.price_recommended = line.price_recommended
                        lt.price_base = line.price_base
                        lt.save()
            context['obj_final'] = ticket
    else:
        # No unpaid lines: look up the ticket of the slot's most recent line.
        # get ticket
        line_order = SalesLineOrder.objects.filter(
            order__budget__pos_slot__pk=slot_pk,
            order__budget__removed=False,
            order__removed=False,
            removed=False,
        ).last()
        ticket = SalesTicket.objects.filter(
            customer=line_order.order.customer,
            line_ticket_sales__line_order=line_order,
            line_ticket_sales__line_order__removed=False,
            line_ticket_sales__removed=False,
            removed=False
        ).first()
        if ticket:
            context['obj_final'] = ticket
        else:
            context['error'] = _("Ticket don't found")
    return context
@staticmethod
def create_invoice_from_order(pk, list_lines):
    """Create an invoice from the selected order lines.

    Any line without a delivery note gets one first via
    ``create_albaran_automatic``; both steps share one transaction.
    """
    MODEL_SOURCE = SalesOrder
    MODEL_FINAL = SalesInvoice
    MODEL_LINE_SOURCE = SalesLineOrder
    MODEL_LINE_FINAL = SalesLineInvoice
    url_reverse = 'CDNX_invoicing_invoicesaless_list'
    # type_doc
    related_line = 'line_order'
    related_object = 'invoice'
    msg_error_relation = _("Hay lineas asignadas a facturas")
    msg_error_not_found = _('Sales order not found')
    with transaction.atomic():
        GenLineProduct.create_albaran_automatic(pk, list_lines)
        return GenLineProduct.create_document_from_another(pk, list_lines,
                                                           MODEL_SOURCE, MODEL_FINAL, MODEL_LINE_SOURCE, MODEL_LINE_FINAL,
                                                           url_reverse, related_line, related_object,
                                                           msg_error_relation, msg_error_not_found, False)
@staticmethod
def create_ticket_from_albaran(pk, list_lines):
    """
    Create a ticket from albaran lines.

    *pk* and *list_lines* identify albaran lines; the related ORDER lines are
    resolved first because tickets are generated from orders.
    (original: la pk y list_lines son de albaranes, necesitamos la info de las
    lineas de pedidos)
    """
    context = {}
    if list_lines:
        # Keep only albaran lines not invoiced yet, mapped to their order-line pks.
        new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
            pk__in=[int(x) for x in list_lines]
        ).exclude(invoiced=True)]
        if new_list_lines:
            lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
            if lo and lo[0] and lo[0][0]:
                new_pk = lo[0][0]
                context = GenLineProduct.create_ticket_from_order(new_pk, new_list_lines)
                if 'error' not in context or not context['error']:
                    # Only flag the albaran lines as invoiced on success.
                    SalesLineAlbaran.objects.filter(
                        pk__in=[int(x) for x in list_lines]
                    ).exclude(invoiced=True).update(invoiced=True)
                return context
            else:
                error = _('Pedido no encontrado')
        else:
            error = _('Lineas no relacionadas con pedido')
    else:
        error = _('Lineas no seleccionadas')
    context['error'] = error
    return context
@staticmethod
def create_invoice_from_albaran(pk, list_lines):
    """
    Create an invoice from albaran lines.

    Near-duplicate of ``create_ticket_from_albaran`` that delegates to
    ``create_invoice_from_order`` instead: *pk* and *list_lines* identify
    albaran lines; the related ORDER lines are resolved first.
    (original: la pk y list_lines son de albaranes, necesitamos la info de las
    lineas de pedidos)
    """
    context = {}
    if list_lines:
        # Keep only albaran lines not invoiced yet, mapped to their order-line pks.
        new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
            pk__in=[int(x) for x in list_lines]
        ).exclude(invoiced=True)]
        if new_list_lines:
            lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
            if lo and lo[0] and lo[0][0]:
                new_pk = lo[0][0]
                context = GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
                if 'error' not in context or not context['error']:
                    # Only flag the albaran lines as invoiced on success.
                    SalesLineAlbaran.objects.filter(
                        pk__in=[int(x) for x in list_lines]
                    ).exclude(invoiced=True).update(invoiced=True)
                return context
            else:
                error = _('Pedido no encontrado')
        else:
            error = _('Lineas no relacionadas con pedido')
    else:
        error = _('Lineas no seleccionadas')
    context['error'] = error
    return context
@staticmethod
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales_original.py
|
SalesLineBasket.set_options
|
python
|
def set_options(self, options):
    """
    Synchronise this line's pack options with *options*, atomically.

    options = [{
        'product_option': instance of ProductFinalOption,
        'product_final': instance of ProductFinal,
        'quantity': Float
    }, ]

    Existing options are updated in place (saved only when something
    changed); missing ones are created.
    """
    with transaction.atomic():
        for option in options:
            opt = self.line_basket_option_sales.filter(
                product_option=option['product_option']
            ).first()
            if opt:  # edit
                change = False
                if opt.quantity != option['quantity']:
                    opt.quantity = option['quantity']
                    change = True
                if opt.product_final != option['product_final']:
                    opt.product_final = option['product_final']
                    change = True
                if change:
                    opt.save()
            else:  # new
                opt = SalesLineBasketOption()
                # raise Exception(self.pk, self.__dict__, self)
                # raise Exception(self.pk)
                opt.line_budget = SalesLineBasket.objects.get(pk=self.pk)
                opt.product_option = option['product_option']
                opt.product_final = option['product_final']
                opt.quantity = option['quantity']
                opt.save()
|
options = [{
'product_option': instance of ProductFinalOption,
'product_final': instance of ProductFinal,
'quantity': Float
}, ]
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales_original.py#L1627-L1658
| null |
class SalesLineBasket(GenLineProduct):
    """Line of a sales basket/budget, linking the basket to a final product."""
    basket = models.ForeignKey(SalesBasket, related_name='line_basket_sales', verbose_name=_("Basket"), on_delete=models.CASCADE)
    product = models.ForeignKey(ProductFinal, related_name='line_basket_sales', verbose_name=_("Product"), on_delete=models.CASCADE)

    def __fields__(self, info):
        """Extend the generic line columns with the pack-options column."""
        fields = super(SalesLineBasket, self).__fields__(info)
        fields.append(('line_basket_option_sales', _('Options')))
        return fields

    def lock_delete(self, request=None):
        """Veto deletion when the line already fed downstream documents.

        Returns an error message (truthy) when an albaran/ticket/invoice line
        derives from this one, otherwise delegates to the parent.
        """
        # Solo se puede eliminar si no se ha generado un albaran, ticket o factura apartir de ella
        if hasattr(self.basket, 'order_sales') and hasattr(self, 'line_order_sales'):
            if self.line_order_sales.line_albaran_sales.count() != 0:
                return _("Cannot delete line, it is related to albaran")
            elif self.line_order_sales.line_ticket_sales.count() != 0:
                return _("Cannot delete line, it is related to tickets")
            elif self.line_order_sales.line_invoice_sales.count() != 0:
                return _("Cannot delete line, it is related to invoices")
        return super(SalesLineBasket, self).lock_delete(request)

    def save(self, *args, **kwargs):
        """Refuse edits on a locked basket; refresh basket totals after saving."""
        force = kwargs.get('force_save', False)
        if self.basket.lock and force is False:
            raise IntegrityError(_('You can not modify, locked document'))
        else:
            if kwargs.get('standard_save', False):
                kwargs.pop('standard_save')
                result = super(self._meta.model, self).save(*args, **kwargs)
                self.basket.update_totales()
                return result
            else:
                return self.__save__(args, kwargs)

    def remove_options(self):
        """Drop every pack option attached to this line."""
        self.line_basket_option_sales.all().delete()

    def get_customer(self):
        """Customer of the owning basket."""
        return self.basket.customer

    def get_product(self):
        """The final product this line sells."""
        return self.product

    @staticmethod
    def delete_line(line_pk):
        """Delete one basket line; on a lock conflict the transaction rolls back.

        Returns an empty dict on success (or when the line is already gone),
        raises IntegrityError with the lock message otherwise.
        """
        context = {}
        with transaction.atomic():
            line = SalesLineBasket.objects.filter(pk=line_pk, removed=False).first()
            if line:
                can_delete = line.lock_delete()
                if can_delete is None:
                    basket = line.basket
                    line.delete()
                    # Re-check the remaining lines; stop at the first locked one.
                    for lb in basket.line_basket_sales.all():
                        can_delete = lb.lock_delete()
                        if can_delete is not None:
                            context['error'] = can_delete
                            break
                    # BUGFIX: 'error' only exists when the loop above found a
                    # locked line; context['error'] raised KeyError on the
                    # success path. Use .get() instead.
                    if context.get('error') is not None:
                        basket.delete()
                        raise IntegrityError(context['error'])
                else:
                    context['error'] = can_delete
                    raise IntegrityError(can_delete)
        return context

    def update_line(self, quantity, reason):
        """Change the line quantity, adjust real stock and log the reason.

        Returns {'error': None, 'quantity': ...} on success or
        {'error': <message>} on failure (error is always stringified).
        """
        context = {'error': None}
        product_unique = ProductUnique.objects.filter(product_final=self.product).first()
        if product_unique:
            # Give back (or take) the difference between old and new quantity.
            product_unique.stock_real += self.quantity - quantity
            if product_unique.stock_real < 0:
                context['error'] = _('Insufficient stock')
            if context['error'] is None:
                self.quantity = quantity
                try:
                    with transaction.atomic():
                        product_unique.save()
                        self.save()
                        context['quantity'] = self.quantity
                        # Audit trail of the modification.
                        mod = ReasonModificationLineBasket()
                        mod.user = get_current_user()
                        mod.date = datetime.datetime.now()
                        mod.line = self
                        mod.quantity = quantity
                        mod.reason = reason
                        mod.save()
                except IntegrityError as e:
                    context['error'] = e
        else:
            context['error'] = _('Stock invalid')
        if context['error']:
            context['error'] = str(context['error'])
        return context
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/views_sales.py
|
ShoppingCartManagement.get
|
python
|
def get(self, request, *args, **kwargs):
    """List all products in the shopping cart."""
    onlypublic = request.GET.get('onlypublic', True)
    cart = ShoppingCartProxy(request)
    return JsonResponse(cart.get_products(onlypublic=onlypublic))
|
List all products in the shopping cart
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/views_sales.py#L1842-L1847
| null |
class ShoppingCartManagement(View):
    """JSON endpoint for the session shopping cart (add / edit / remove)."""

    http_method_names = ['get', 'post', 'put', 'delete']

    def post(self, request, *args, **kwargs):
        """Adds new product to the current shopping cart."""
        payload = json.loads(request.body.decode('utf-8'))
        if 'product_pk' not in payload or 'quantity' not in payload:
            return HttpResponseBadRequest()
        cart = ShoppingCartProxy(request)
        cart.add(
            product_pk=int(payload['product_pk']),
            quantity=int(payload['quantity'])
        )
        return JsonResponse(cart.products)

    def put(self, request, *args, **kwargs):
        """Change the quantity of a product already in the cart."""
        payload = json.loads(request.body.decode('utf-8'))
        if 'product_pk' not in payload or 'quantity' not in payload:
            return HttpResponseBadRequest()
        cart = ShoppingCartProxy(request)
        pk = int(payload['product_pk'])
        cart.edit(
            product_pk=pk,
            quantity=int(payload['quantity'])
        )
        return JsonResponse(cart.product(pk))

    def delete(self, request, *args, **kwargs):
        """Remove a product from the cart and return the new totals."""
        payload = json.loads(request.body.decode('utf-8'))
        if 'product_pk' not in payload:
            return HttpResponseBadRequest()
        cart = ShoppingCartProxy(request)
        cart.remove(product_pk=int(payload['product_pk']))
        return JsonResponse(cart.totals)
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/views_sales.py
|
ShoppingCartManagement.post
|
python
|
def post(self, request, *args, **kwargs):
    """Adds new product to the current shopping cart."""
    payload = json.loads(request.body.decode('utf-8'))
    if 'product_pk' not in payload or 'quantity' not in payload:
        return HttpResponseBadRequest()
    cart = ShoppingCartProxy(request)
    cart.add(
        product_pk=int(payload['product_pk']),
        quantity=int(payload['quantity'])
    )
    return JsonResponse(cart.products)
|
Adds new product to the current shopping cart
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/views_sales.py#L1849-L1863
| null |
class ShoppingCartManagement(View):
    """JSON endpoint for the session shopping cart (list / edit / remove)."""

    http_method_names = ['get', 'post', 'put', 'delete']

    def get(self, request, *args, **kwargs):
        """List all products in the shopping cart."""
        cart = ShoppingCartProxy(request)
        onlypublic = request.GET.get('onlypublic', True)
        return JsonResponse(cart.get_products(onlypublic=onlypublic))

    def put(self, request, *args, **kwargs):
        """Change the quantity of a product already in the cart."""
        payload = json.loads(request.body.decode('utf-8'))
        if 'product_pk' not in payload or 'quantity' not in payload:
            return HttpResponseBadRequest()
        cart = ShoppingCartProxy(request)
        pk = int(payload['product_pk'])
        cart.edit(
            product_pk=pk,
            quantity=int(payload['quantity'])
        )
        return JsonResponse(cart.product(pk))

    def delete(self, request, *args, **kwargs):
        """Remove a product from the cart and return the new totals."""
        payload = json.loads(request.body.decode('utf-8'))
        if 'product_pk' not in payload:
            return HttpResponseBadRequest()
        cart = ShoppingCartProxy(request)
        cart.remove(product_pk=int(payload['product_pk']))
        return JsonResponse(cart.totals)
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/views_sales.py
|
LinesUpdateModalBasket.dispatch
|
python
|
def dispatch(self, *args, **kwargs):
    # Capture the pk of the line being edited before normal dispatch.
    self.__line_pk = kwargs.get('pk', None)
    return super(LinesUpdateModalBasket, self).dispatch(*args, **kwargs)
|
if SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk).exists():
self.form_class = LineBasketFormPack
self.__is_pack = True
else:
self.__is_pack = False
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/views_sales.py#L2190-L2199
| null |
class LinesUpdateModalBasket(GenUpdateModal, LinesUpdateBasket):
    """Modal edit view for a basket line; logs every change with a reason."""
    # form_class = LineBasketForm

    @method_decorator(login_required)
    def get_form(self, form_class=None):
        """Pre-populate tax type, tax rate and VAT-inclusive price from the line."""
        # form_kwargs = super(LineBasketUpdateModal, self).get_form_kwargs(*args, **kwargs)
        form = super(LinesUpdateModalBasket, self).get_form(form_class)
        initial = form.initial
        initial['type_tax'] = self.object.product_final.product.tax.pk
        initial['tax'] = self.object.tax_basket
        # The price shown to the user includes tax.
        initial['price'] = float(self.object.price_base_basket) * (1 + (self.object.tax_basket / 100))
        """
        if self.__is_pack:
            options = []
            lang = get_language_database()
            for option in SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk):
                initial['packs[{}]'.format(option.product_option.pk)] = option.product_final.pk
                a = {
                    'id': option.product_option.pk,
                    'label': getattr(option.product_option, lang).name,
                    'products': list(option.product_option.products_pack.all().values('pk').annotate(name=F('{}__name'.format(lang)))),
                    'selected': option.product_final.pk,
                }
                options.append(a)
            # compatibility with GenForeignKey
            initial['packs'] = json.dumps({'__JSON_DATA__': options})
        """
        return form

    def form_valid(self, form):
        """Validate product/quantity/reason, save the line and log the reason."""
        # lb = SalesLines.objects.filter(pk=self.__line_pk).first()
        # product_old = lb.product_final
        product_pk = self.request.POST.get("product_final", None)
        quantity = self.request.POST.get("quantity", None)
        product_final = ProductFinal.objects.filter(pk=product_pk).first()
        """
        if product:
            is_pack = product.is_pack()
        else:
            is_pack = False
        """
        if product_final and quantity:
            reason = form.data['reason']
            if reason:
                reason_obj = ReasonModification.objects.filter(pk=reason).first()
                if reason_obj:
                    try:
                        with transaction.atomic():
                            result = super(LinesUpdateModalBasket, self).form_valid(form)
                            # Audit trail of the modification.
                            reason_basket = ReasonModificationLineBasket()
                            reason_basket.basket = self.object.basket
                            reason_basket.reason = reason_obj
                            reason_basket.line = self.object
                            reason_basket.user = get_current_user()
                            reason_basket.quantity = self.object.quantity
                            reason_basket.save()
                            return result
                    except ValidationError as e:
                        errors = form._errors.setdefault("product_final", ErrorList())
                        errors.append(e)
                        return super(LinesUpdateModalBasket, self).form_invalid(form)
                else:
                    errors = form._errors.setdefault("reason", ErrorList())
                    errors.append(_("Reason of modification invalid"))
                    # BUGFIX: was super(LinesUpdatelOrder, self) — an unrelated
                    # class outside this view's MRO, which made super() raise
                    # TypeError instead of rendering the invalid form.
                    return super(LinesUpdateModalBasket, self).form_invalid(form)
            else:
                errors = form._errors.setdefault("reason", ErrorList())
                errors.append(_("Reason of modification invalid"))
                # BUGFIX: was super(LinesUpdatelOrder, self) — see note above.
                return super(LinesUpdateModalBasket, self).form_invalid(form)
            """
            if is_pack:
                options = product.productfinals_option.filter(active=True)
                options_pack = []
                for option in options:
                    field = 'packs[{}]'.format(option.pk)
                    opt = self.request.POST.get(field, None)
                    if opt:
                        opt_product = ProductFinal.objects.filter(pk=opt).first()
                        if opt_product:
                            options_pack.append({
                                'product_option': option,
                                'product_final': opt_product,
                                'quantity': quantity
                            })
                        else:
                            errors = form._errors.setdefault(field, ErrorList())
                            errors.append(_("Product Option invalid"))
                            return super(LinesUpdateModalBasket, self).form_invalid(form)
                    else:
                        errors = form._errors.setdefault(field, ErrorList())
                        errors.append(_("Option invalid"))
                        return super(LinesUpdateModalBasket, self).form_invalid(form)
            """
        else:
            errors = form._errors.setdefault("product_final", ErrorList())
            errors.append((_("Product invalid"), quantity, product_final))
            return super(LinesUpdateModalBasket, self).form_invalid(form)
        """
        ret = super(LinesUpdateModalBasket, self).form_valid(form)
        if product_old != self.object.product:
            self.object.remove_options()
        if is_pack:
            self.object.set_options(options_pack)
        return ret
        """
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/views_sales.py
|
LinesUpdateModalBasket.get_form
|
python
|
def get_form(self, form_class=None):
    """Pre-populate tax type, tax rate and VAT-inclusive price from the edited line."""
    # form_kwargs = super(LineBasketUpdateModal, self).get_form_kwargs(*args, **kwargs)
    form = super(LinesUpdateModalBasket, self).get_form(form_class)
    initial = form.initial
    initial['type_tax'] = self.object.product_final.product.tax.pk
    initial['tax'] = self.object.tax_basket
    # The price shown to the user includes tax.
    initial['price'] = float(self.object.price_base_basket) * (1 + (self.object.tax_basket / 100))
    return form
|
if self.__is_pack:
options = []
lang = get_language_database()
for option in SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk):
initial['packs[{}]'.format(option.product_option.pk)] = option.product_final.pk
a = {
'id': option.product_option.pk,
'label': getattr(option.product_option, lang).name,
'products': list(option.product_option.products_pack.all().values('pk').annotate(name=F('{}__name'.format(lang)))),
'selected': option.product_final.pk,
}
options.append(a)
# compatibility with GenForeignKey
initial['packs'] = json.dumps({'__JSON_DATA__': options})
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/views_sales.py#L2201-L2225
| null |
class LinesUpdateModalBasket(GenUpdateModal, LinesUpdateBasket):
    """Modal edit view for a basket line; logs every change with a reason."""
    # form_class = LineBasketForm

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Capture the edited line's pk before normal dispatch."""
        self.__line_pk = kwargs.get('pk', None)
        """
        if SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk).exists():
            self.form_class = LineBasketFormPack
            self.__is_pack = True
        else:
            self.__is_pack = False
        """
        return super(LinesUpdateModalBasket, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        """Validate product/quantity/reason, save the line and log the reason."""
        # lb = SalesLines.objects.filter(pk=self.__line_pk).first()
        # product_old = lb.product_final
        product_pk = self.request.POST.get("product_final", None)
        quantity = self.request.POST.get("quantity", None)
        product_final = ProductFinal.objects.filter(pk=product_pk).first()
        """
        if product:
            is_pack = product.is_pack()
        else:
            is_pack = False
        """
        if product_final and quantity:
            reason = form.data['reason']
            if reason:
                reason_obj = ReasonModification.objects.filter(pk=reason).first()
                if reason_obj:
                    try:
                        with transaction.atomic():
                            result = super(LinesUpdateModalBasket, self).form_valid(form)
                            # Audit trail of the modification.
                            reason_basket = ReasonModificationLineBasket()
                            reason_basket.basket = self.object.basket
                            reason_basket.reason = reason_obj
                            reason_basket.line = self.object
                            reason_basket.user = get_current_user()
                            reason_basket.quantity = self.object.quantity
                            reason_basket.save()
                            return result
                    except ValidationError as e:
                        errors = form._errors.setdefault("product_final", ErrorList())
                        errors.append(e)
                        return super(LinesUpdateModalBasket, self).form_invalid(form)
                else:
                    errors = form._errors.setdefault("reason", ErrorList())
                    errors.append(_("Reason of modification invalid"))
                    # BUGFIX: was super(LinesUpdatelOrder, self) — an unrelated
                    # class outside this view's MRO, which made super() raise
                    # TypeError instead of rendering the invalid form.
                    return super(LinesUpdateModalBasket, self).form_invalid(form)
            else:
                errors = form._errors.setdefault("reason", ErrorList())
                errors.append(_("Reason of modification invalid"))
                # BUGFIX: was super(LinesUpdatelOrder, self) — see note above.
                return super(LinesUpdateModalBasket, self).form_invalid(form)
            """
            if is_pack:
                options = product.productfinals_option.filter(active=True)
                options_pack = []
                for option in options:
                    field = 'packs[{}]'.format(option.pk)
                    opt = self.request.POST.get(field, None)
                    if opt:
                        opt_product = ProductFinal.objects.filter(pk=opt).first()
                        if opt_product:
                            options_pack.append({
                                'product_option': option,
                                'product_final': opt_product,
                                'quantity': quantity
                            })
                        else:
                            errors = form._errors.setdefault(field, ErrorList())
                            errors.append(_("Product Option invalid"))
                            return super(LinesUpdateModalBasket, self).form_invalid(form)
                    else:
                        errors = form._errors.setdefault(field, ErrorList())
                        errors.append(_("Option invalid"))
                        return super(LinesUpdateModalBasket, self).form_invalid(form)
            """
        else:
            errors = form._errors.setdefault("product_final", ErrorList())
            errors.append((_("Product invalid"), quantity, product_final))
            return super(LinesUpdateModalBasket, self).form_invalid(form)
        """
        ret = super(LinesUpdateModalBasket, self).form_valid(form)
        if product_old != self.object.product:
            self.object.remove_options()
        if is_pack:
            self.object.set_options(options_pack)
        return ret
        """
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/views_sales.py
|
LinesUpdateModalBasket.form_valid
|
python
|
def form_valid(self, form):
    """Validate product/quantity/reason, save the basket line and log the reason."""
    # lb = SalesLines.objects.filter(pk=self.__line_pk).first()
    # product_old = lb.product_final
    product_pk = self.request.POST.get("product_final", None)
    quantity = self.request.POST.get("quantity", None)
    product_final = ProductFinal.objects.filter(pk=product_pk).first()
    if product_final and quantity:
        reason = form.data['reason']
        if reason:
            reason_obj = ReasonModification.objects.filter(pk=reason).first()
            if reason_obj:
                try:
                    with transaction.atomic():
                        result = super(LinesUpdateModalBasket, self).form_valid(form)
                        # Audit trail of the modification.
                        reason_basket = ReasonModificationLineBasket()
                        reason_basket.basket = self.object.basket
                        reason_basket.reason = reason_obj
                        reason_basket.line = self.object
                        reason_basket.user = get_current_user()
                        reason_basket.quantity = self.object.quantity
                        reason_basket.save()
                        return result
                except ValidationError as e:
                    errors = form._errors.setdefault("product_final", ErrorList())
                    errors.append(e)
                    return super(LinesUpdateModalBasket, self).form_invalid(form)
            else:
                errors = form._errors.setdefault("reason", ErrorList())
                errors.append(_("Reason of modification invalid"))
                # BUGFIX: was super(LinesUpdatelOrder, self) — an unrelated class
                # outside this view's MRO, which made super() raise TypeError
                # instead of rendering the invalid form.
                return super(LinesUpdateModalBasket, self).form_invalid(form)
        else:
            errors = form._errors.setdefault("reason", ErrorList())
            errors.append(_("Reason of modification invalid"))
            # BUGFIX: was super(LinesUpdatelOrder, self) — see note above.
            return super(LinesUpdateModalBasket, self).form_invalid(form)
        """
        if is_pack:
            options = product.productfinals_option.filter(active=True)
            options_pack = []
            for option in options:
                field = 'packs[{}]'.format(option.pk)
                opt = self.request.POST.get(field, None)
                if opt:
                    opt_product = ProductFinal.objects.filter(pk=opt).first()
                    if opt_product:
                        options_pack.append({
                            'product_option': option,
                            'product_final': opt_product,
                            'quantity': quantity
                        })
                    else:
                        errors = form._errors.setdefault(field, ErrorList())
                        errors.append(_("Product Option invalid"))
                        return super(LinesUpdateModalBasket, self).form_invalid(form)
                else:
                    errors = form._errors.setdefault(field, ErrorList())
                    errors.append(_("Option invalid"))
                    return super(LinesUpdateModalBasket, self).form_invalid(form)
        """
    else:
        errors = form._errors.setdefault("product_final", ErrorList())
        errors.append((_("Product invalid"), quantity, product_final))
        return super(LinesUpdateModalBasket, self).form_invalid(form)
    """
    ret = super(LinesUpdateModalBasket, self).form_valid(form)
    if product_old != self.object.product:
        self.object.remove_options()
    if is_pack:
        self.object.set_options(options_pack)
    return ret
    """
if product:
is_pack = product.is_pack()
else:
is_pack = False
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/views_sales.py#L2227-L2306
| null |
class LinesUpdateModalBasket(GenUpdateModal, LinesUpdateBasket):
    """Modal variant of the basket-line update view.

    Inherits the update behaviour from ``LinesUpdateBasket`` and the modal
    rendering from ``GenUpdateModal``; pre-seeds the form with the tax type,
    the stored basket tax and the tax-inclusive price of the edited line.
    """
    # form_class = LineBasketForm
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Remember the pk of the line being edited; the string block below is
        # disabled code that used to switch to a pack-aware form when the
        # line had pack options attached.
        self.__line_pk = kwargs.get('pk', None)
        """
        if SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk).exists():
            self.form_class = LineBasketFormPack
            self.__is_pack = True
        else:
            self.__is_pack = False
        """
        return super(LinesUpdateModalBasket, self).dispatch(*args, **kwargs)

    def get_form(self, form_class=None):
        # form_kwargs = super(LineBasketUpdateModal, self).get_form_kwargs(*args, **kwargs)
        form = super(LinesUpdateModalBasket, self).get_form(form_class)
        initial = form.initial
        # Seed form fields from the line (self.object): the product's tax
        # type, the tax percentage stored on the basket line, and the unit
        # price recomputed as base price plus that tax percentage.
        initial['type_tax'] = self.object.product_final.product.tax.pk
        initial['tax'] = self.object.tax_basket
        initial['price'] = float(self.object.price_base_basket) * (1 + (self.object.tax_basket / 100))
        """
        if self.__is_pack:
            options = []
            lang = get_language_database()
            for option in SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk):
                initial['packs[{}]'.format(option.product_option.pk)] = option.product_final.pk
                a = {
                    'id': option.product_option.pk,
                    'label': getattr(option.product_option, lang).name,
                    'products': list(option.product_option.products_pack.all().values('pk').annotate(name=F('{}__name'.format(lang)))),
                    'selected': option.product_final.pk,
                }
                options.append(a)
            # compatibility with GenForeignKey
            initial['packs'] = json.dumps({'__JSON_DATA__': options})
        """
        return form
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales.py
|
Customer.buy_product
|
python
|
def buy_product(self, product_pk):
    """Return True when this customer has already bought the product.

    Looks the product up in the customer's sales invoices first and only
    queries the sales tickets when no invoice line matched (same
    short-circuit as the original ``or`` expression).
    """
    bought_on_invoice = self.invoice_sales.filter(
        lines_sales__product_final__pk=product_pk
    ).exists()
    if bought_on_invoice:
        return True
    return self.ticket_sales.filter(
        lines_sales__product_final__pk=product_pk
    ).exists()
|
determina si el customer ha comprado un producto
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales.py#L288-L296
| null |
class Customer(GenRole, CodenerixModel):
    """Sales customer: billing configuration plus codenerix role metadata.

    The CodenerixMeta block declares the role/permission set granted to
    customer users; the model fields hold the billing configuration
    (currency, billing series, balance/credit, tax handling).
    """

    class CodenerixMeta:
        abstract = ABSTRACT_GenCustomer
        # Django groups assigned to users that hold the Customer role.
        rol_groups = {
            'Customer': CDNX_INVOICING_PERMISSIONS['customer'],
        }
        # Flat list of Django permission codenames granted with the role.
        rol_permissions = [
            'add_city',
            'add_citygeonameen',
            'add_citygeonamees',
            'add_continent',
            'add_continentgeonameen',
            'add_continentgeonamees',
            'add_corporateimage',
            'add_country',
            'add_countrygeonameen',
            'add_countrygeonamees',
            'add_customer',
            'add_customerdocument',
            'add_person',
            'add_personaddress',
            'add_province',
            'add_provincegeonameen',
            'add_provincegeonamees',
            'add_region',
            'add_regiongeonameen',
            'add_regiongeonamees',
            'add_salesbasket',
            'add_timezone',
            'change_city',
            'change_citygeonameen',
            'change_citygeonamees',
            'change_continent',
            'change_continentgeonameen',
            'change_continentgeonamees',
            'change_corporateimage',
            'change_country',
            'change_countrygeonameen',
            'change_countrygeonamees',
            'change_customer',
            'change_customerdocument',
            'change_person',
            'change_personaddress',
            'change_province',
            'change_provincegeonameen',
            'change_provincegeonamees',
            'change_region',
            'change_regiongeonameen',
            'change_regiongeonamees',
            'change_salesbasket',
            'change_timezone',
            'change_user',
            'delete_city',
            'delete_citygeonameen',
            'delete_citygeonamees',
            'delete_continent',
            'delete_continentgeonameen',
            'delete_continentgeonamees',
            'delete_corporateimage',
            'delete_country',
            'delete_countrygeonameen',
            'delete_countrygeonamees',
            'delete_customer',
            'delete_customerdocument',
            'delete_person',
            'delete_personaddress',
            'delete_province',
            'delete_provincegeonameen',
            'delete_provincegeonamees',
            'delete_region',
            'delete_regiongeonameen',
            'delete_regiongeonamees',
            'delete_salesbasket',
            'delete_timezone ',  # NOTE(review): trailing space looks like a typo -- confirm before fixing
            'list_billingseries',
            'list_city',
            'list_continent',
            'list_corporateimage',
            'list_country',
            'list_customer',
            'list_customerdocument',
            'list_legalnote',
            'list_personaddress',
            'list_productdocument',
            'list_province',
            'list_purchaseslineinvoice',
            'list_region',
            'list_salesalbaran',
            'list_salesbasket',
            'list_salesinvoice',
            'list_salesinvoicerectification',
            'list_salesorder',
            'list_salesticket',
            'list_salesticketrectification',
            'list_timezone',
            'list_typedocument',
            'list_typedocumenttexten',
            'list_typedocumenttextes',
            'view_billingseries',
            'view_city',
            'view_continent',
            'view_corporateimage',
            'view_country',
            'view_customer',
            'view_customerdocument',
            'view_legalnote',
            'view_personaddress',
            'view_province',
            'view_region',
            'view_salesbasket',
            'view_timezone',
            'view_typedocument',
            'view_typedocumenttexten',
            'view_typedocumenttextes',
        ]

        # Methods that an external "customer" implementation must provide
        # (resolved at runtime through get_external_method).
        force_methods = {
            'foreignkey_customer': ('CDNX_get_fk_info_customer', _('---')),
            'get_email': ('CDNX_get_email', ),
            'info_customer_details': ('CDNX_get_details_info_customer', ),
        }

    currency = models.ForeignKey(Currency, related_name='customers', verbose_name='Currency', on_delete=models.CASCADE)
    # serie de facturacion
    billing_series = models.ForeignKey(BillingSeries, related_name='customers', verbose_name='Billing series', on_delete=models.CASCADE)
    # datos de facturación
    # saldo final
    final_balance = models.CharField(_("Balance"), max_length=250, blank=True, null=True)
    # credito o riesgo maximo autorizado
    credit = models.CharField(_("Credit"), max_length=250, blank=True, null=True)
    # Aplicar recargo de equivalencia
    apply_equivalence_surcharge = models.BooleanField(_("Apply equivalence surcharge"), blank=False, default=False)
    # Tipo de iva
    type_tax = models.ForeignKey(TypeTax, related_name='customers', verbose_name=_("Type tax"), null=True, on_delete=models.CASCADE)
    # Exactly one customer is kept as default (enforced in save()).
    default_customer = models.BooleanField(_("Default customer"), blank=False, default=False)

    @staticmethod
    def foreignkey_external():
        # Delegate foreign-key info to the external customer implementation.
        return get_external_method(Customer, Customer.CodenerixMeta.force_methods['foreignkey_customer'][0])

    def __str__(self):
        # Prefer the linked external object's representation when present.
        if hasattr(self, 'external'):
            return u"{}".format(smart_text(self.external))
        else:
            return "{}".format(self.pk)

    def __unicode__(self):
        return self.__str__()

    def __fields__(self, info):
        # Columns shown in codenerix list views; external implementations may
        # extend the list through the '__fields_customer__' hook.
        fields = []
        fields.append(('final_balance', _("Balance")))
        fields.append(('credit', _("Credit")))
        fields.append(('currency', _("Currency")))
        fields.append(('billing_series', _("Billing series")))
        fields.append(('apply_equivalence_surcharge', _("Equivalence Surcharge")))
        fields.append(('type_tax', _("Type tax")))
        fields.append(('default_customer', _("Default customer")))
        fields = get_external_method(Customer, '__fields_customer__', info, fields)
        return fields

    def save(self, *args, **kwargs):
        # Keep the "default customer" flag unique: setting it here clears it
        # on every other customer; clearing it forces this record back to
        # default when no other default exists.
        with transaction.atomic():
            if self.default_customer:
                Customer.objects.exclude(pk=self.pk).update(default_customer=False)
            else:
                if not Customer.objects.exclude(pk=self.pk).filter(default_customer=True).exists():
                    self.default_customer = True
            return super(Customer, self).save(*args, **kwargs)
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales.py
|
SalesLines.create_ticket_from_albaran
|
python
|
def create_ticket_from_albaran(pk, list_lines):
    # Create a SalesTicket from the given lines of the SalesAlbaran ``pk``.
    # Delegates to SalesLines.create_document_from_another, which copies the
    # selected lines from the source document type into the final one and
    # returns a context dict (redirect url or error message).
    MODEL_SOURCE = SalesAlbaran
    MODEL_FINAL = SalesTicket
    url_reverse = 'CDNX_invoicing_ticketsaless_list'
    # type_doc
    # Error messages (kept verbatim, partly Spanish): lines already ticketed,
    # source albaran missing, and no remaining lines to convert.
    msg_error_relation = _("Hay lineas asignadas a ticket")
    msg_error_not_found = _('Sales albaran not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a ticket')
    return SalesLines.create_document_from_another(pk, list_lines,
                                                   MODEL_SOURCE, MODEL_FINAL, url_reverse,
                                                   msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                                   False)
    # The string block below is unreachable legacy code kept for reference.
    """
    context = {}
    if list_lines:
        new_list_lines = SalesLines.objects.filter(
            pk__in=[int(x) for x in list_lines]
        ).exclude(
            invoice__isnull=True
        ).values_list('pk')
        if new_list_lines:
            new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first()
            if new_pk:
                context = SalesLines.create_ticket_from_order(new_pk, new_list_lines)
                return context
            else:
                error = _('Pedido no encontrado')
        else:
            error = _('Lineas no relacionadas con pedido')
    else:
        error = _('Lineas no seleccionadas')
    context['error'] = error
    return context
    """
|
context = {}
if list_lines:
new_list_lines = SalesLines.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(
invoice__isnull=True
).values_list('pk')
if new_list_lines:
new_pk = SalesLines.objects.values_list('order__pk').filter(pk__in=new_list_lines).first()
if new_pk:
context = SalesLines.create_ticket_from_order(new_pk, new_list_lines)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales.py#L2372-L2407
| null |
class SalesLines(CodenerixModel):
basket = models.ForeignKey(SalesBasket, related_name='lines_sales', verbose_name=_("Basket"), on_delete=models.CASCADE)
tax_basket_fk = models.ForeignKey(TypeTax, related_name='lines_sales_basket', verbose_name=_("Tax Basket"), on_delete=models.CASCADE)
order = models.ForeignKey(SalesOrder, related_name='lines_sales', verbose_name=_("Sales order"), on_delete=models.CASCADE, null=True, blank=True)
tax_order_fk = models.ForeignKey(TypeTax, related_name='lines_sales_order', verbose_name=_("Tax Sales order"), on_delete=models.CASCADE, null=True, blank=True)
albaran = models.ForeignKey(SalesAlbaran, related_name='lines_sales', verbose_name=_("Albaran"), on_delete=models.CASCADE, null=True, blank=True)
ticket = models.ForeignKey(SalesTicket, related_name='lines_sales', verbose_name=_("Ticket"), on_delete=models.CASCADE, null=True, blank=True)
tax_ticket_fk = models.ForeignKey(TypeTax, related_name='lines_sales_ticket', verbose_name=_("Tax Ticket"), on_delete=models.CASCADE, null=True, blank=True)
ticket_rectification = models.ForeignKey(SalesTicketRectification, related_name='lines_sales', verbose_name=_("Ticket rectification"), on_delete=models.CASCADE, null=True, blank=True)
invoice = models.ForeignKey(SalesInvoice, related_name='lines_sales', verbose_name=_("Invoice"), on_delete=models.CASCADE, null=True, blank=True)
tax_invoice_fk = models.ForeignKey(TypeTax, related_name='lines_sales_invoice', verbose_name=_("Tax Invoice"), on_delete=models.CASCADE, null=True, blank=True)
invoice_rectification = models.ForeignKey(SalesInvoiceRectification, related_name='lines_sales', verbose_name=_("Invoice rectification"), on_delete=models.CASCADE, null=True, blank=True)
product_final = models.ForeignKey(ProductFinal, related_name='lines_sales', verbose_name=_("Product"), on_delete=models.CASCADE)
product_unique = models.ForeignKey(ProductUnique, related_name='lines_sales', verbose_name=_("Product Unique"), on_delete=models.CASCADE, null=True, blank=True)
# invoiced is True if 'invoice' is not null
# invoiced = models.BooleanField(_("Invoiced"), blank=False, default=False)
# logical deletion
removed = models.BooleanField(_("Removed"), blank=False, default=False, editable=False)
quantity = models.FloatField(_("Quantity"), blank=False, null=False)
code = models.CharField(_("Code"), max_length=250, blank=True, null=True, default=None)
# ####
# desde el formulario se podrΓ‘ modificar el precio y la descripcion del producto
# se guarda el tax usado y la relacion para poder hacer un seguimiento
# ####
# info basket
price_recommended_basket = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_basket = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_basket = models.DecimalField(_("Price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_basket = models.DecimalField(_("Discount (%)"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_basket = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_basket = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_basket = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_basket = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_basket = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_basket = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_basket = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_basket = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_basket = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_basket = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info order
price_recommended_order = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_order = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_order = models.DecimalField(_("Price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_order = models.DecimalField(_("Discount (%)"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_order = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_order = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_order = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_order = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_order = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_order = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_order = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_order = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_order = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_order = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info albaran - basic
notes_albaran = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info ticket
price_recommended_ticket = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_ticket = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_ticket = models.DecimalField(_("Price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_ticket = models.DecimalField(_("Discount (%)"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_ticket = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_ticket = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_ticket = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_ticket = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_ticket = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_ticket = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_ticket = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_ticket = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_ticket = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_ticket = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info ticket rectification - basic
notes_ticket_rectification = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info invoice
price_recommended_invoice = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_invoice = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_invoice = models.DecimalField(_("Price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_invoice = models.DecimalField(_("Discount (%)"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_invoice = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_invoice = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_invoice = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_invoice = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_invoice = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_invoice = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_invoice = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_invoice = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_invoice = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_invoice = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info invoice rectification - basic
notes_invoice_rectification = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
def __str__(self):
return u"{} - {}".format(self.product_final, self.quantity)
def __unicode__(self):
return self.__str__()
def __fields__(self, info):
fields = []
fields.append(('product_final', _("Product final")))
fields.append(('product_unique', _("Product unique")))
fields.append(('quantity', _("Quantity")))
return fields
def get_product_unique(self, quantity, pos=None):
if self.product_final.sample:
raise SalesLinesProductFinalIsSample(_("This product can not be sold, it is marked as 'sample'"))
else:
products_unique = []
with transaction.atomic():
qs = ProductUnique.objects.filter(
product_final=self.product_final,
stock_real__gt=0,
stock_locked__lt=F('stock_real')
)
if pos:
qs = qs.filter(box__box_structure__zone__storage__in=pos.storage_stock.filter(storage_zones__salable=True))
elif self.basket.pos:
qs = qs.filter(box__box_structure__zone__storage__in=self.basket.pos.storage_stock.filter(storage_zones__salable=True))
if self.product_final.product.force_stock is False:
product_unique = qs.first()
if product_unique:
products_unique = [
{
'quantity': quantity,
'product_unique': product_unique
}
]
else:
raise SalesLinesUniqueProductNotExists(_('Unique product not exists! No stock!'))
else:
stock_available = None
for unique_product in qs:
if quantity <= 0:
break
stock_available = unique_product.stock_real - unique_product.stock_locked
if stock_available > quantity:
stock_available = quantity
unique_product.duplicate(quantity)
unique_product.locked_stock(stock_available)
products_unique.append({
'product_unique': unique_product,
'quantity': stock_available
})
quantity -= stock_available
if quantity > 0:
raise SalesLinesInsufficientStock(_("Insufficient stock. Product: {}".format(self.product_final)))
else:
return products_unique
def save(self, *args, **kwargs):
with transaction.atomic():
if self.pk is None:
line_old = None
if self.product_final.code:
self.code = self.product_final.code
else:
self.code = self.product_final.product.code
if self.product_unique is None:
if getattr(settings, 'CDNX_INVOICING_FORCE_STOCK_IN_BUDGET', True):
products_unique = self.get_product_unique(self.quantity)
first = True
for unique_product in products_unique:
if first:
first = False
self.quantity = unique_product['quantity']
self.product_unique = unique_product['product_unique']
else:
line = copy.copy(self)
line.pk = None
line.quantity = unique_product['quantity']
line.product_unique = unique_product['product_unique']
line.save()
elif self.pk:
line_old = SalesLines.objects.filter(pk=self.pk).first()
if line_old:
product_final_old = line_old.product_final
else:
product_final_old = None
if self.product_final != product_final_old:
if self.order or self.albaran or self.ticket or self.invoice:
raise SalesLinesNotModifiable(_('You can not modify product'))
elif self.description_basket == '{}'.format(product_final_old):
self.description_basket = ''
# solo se puede cambiar el producto si no esta en un pedido, albaran, ticket o factura
self.price_recommended_basket = None
self.tax_label_basket = None
if getattr(settings, 'CDNX_INVOICING_FORCE_STOCK_IN_BUDGET', True):
products_unique = self.get_product_unique(self.quantity)
first = True
for unique_product in products_unique:
if first:
first = False
self.quantity = unique_product['quantity']
self.product_unique = unique_product['product_unique']
else:
line = copy.copy(self)
line.pk = None
line.quantity = unique_product['quantity']
line.product_unique = unique_product['product_unique']
line.save()
else:
self.product_unique = None
elif self.order and line_old.order is None:
# associate line with order
# locked product unique!!
if self.product_final.product.force_stock:
if self.product_unique is None:
products_unique = self.get_product_unique(self.quantity)
first = True
for unique_product in products_unique:
if first:
first = False
self.quantity = unique_product['quantity']
self.product_unique = unique_product['product_unique']
else:
line = copy.copy(self)
line.pk = None
line.quantity = unique_product['quantity']
line.product_unique = unique_product['product_unique']
line.save()
else:
available = self.product_unique.stock_real - self.product_unique.stock_locked
if available < self.quantity:
products_unique = self.get_product_unique(self.quantity)
first = True
for unique_product in products_unique:
if first:
first = False
self.quantity = unique_product['quantity']
self.product_unique = unique_product['product_unique']
else:
line = copy.copy(self)
line.pk = None
line.quantity = unique_product['quantity']
line.product_unique = unique_product['product_unique']
line.save()
# calculate value of equivalence_surcharge
# save tax label
# save price recommended
# save tax foreignkey
if self.basket:
if self.tax_basket_fk is None:
self.tax_basket_fk = self.product_final.product.tax
if not self.tax_label_basket:
self.tax_label_basket = self.product_final.product.tax.name
if not self.tax_basket:
self.tax_basket = self.product_final.product.tax.tax
if self.basket.get_customer().apply_equivalence_surcharge:
self.equivalence_surcharge_basket = self.basket.get_customer().tax.recargo_equivalencia
if self.price_recommended_basket is None:
self.price_recommended_basket = self.product_final.price_base
if not self.description_basket:
self.description_basket = '{}'.format(self.product_final)
update_basket = self.__update_subtotal_basket(line_old)
if self.order:
if self.tax_order_fk is None:
self.tax_order_fk = self.product_final.product.tax
if self.tax_label_order is None:
self.tax_label_order = self.product_final.product.tax.name
if not self.tax_order:
self.tax_order = self.product_final.product.tax.tax
if self.order.get_customer().apply_equivalence_surcharge:
self.equivalence_surcharge_order = self.order.get_customer().tax.recargo_equivalencia
if self.price_recommended_order is None:
self.price_recommended_order = self.product_final.price_base
if not self.description_order:
self.description_order = '{}'.format(self.product_final)
update_order = self.__update_subtotal_order(line_old)
if self.ticket:
if self.tax_ticket_fk is None:
self.tax_ticket_fk = self.product_final.product.tax
if self.tax_label_ticket is None:
self.tax_label_ticket = self.product_final.product.tax.name
if not self.tax_ticket:
self.tax_ticket = self.product_final.product.tax.tax
if self.ticket.get_customer().apply_equivalence_surcharge:
self.equivalence_surcharge_ticket = self.ticket.get_customer().tax.recargo_equivalencia
if self.price_recommended_ticket is None:
self.price_recommended_ticket = self.product_final.price_base
if not self.description_ticket:
self.description_ticket = '{}'.format(self.product_final)
update_ticket = self.__update_subtotal_ticket(line_old)
if self.invoice:
if self.tax_invoice_fk is None:
self.tax_invoice_fk = self.product_final.product.tax
if self.tax_label_invoice is None:
self.tax_label_invoice = self.product_final.product.tax.name
if not self.tax_invoice:
self.tax_invoice = self.product_final.product.tax.tax
if self.invoice.get_customer().apply_equivalence_surcharge:
self.equivalence_surcharge_invoice = self.invoice.get_customer().tax.recargo_equivalencia
if self.price_recommended_invoice is None:
self.price_recommended_invoice = self.product_final.price_base
if not self.description_invoice:
self.description_invoice = '{}'.format(self.product_final)
update_invoice = self.__update_subtotal_invoice(line_old)
result = super(self._meta.model, self).save(*args, **kwargs)
# update totals
if update_basket:
self.basket.update_totales()
if self.order and update_order:
self.order.update_totales()
if self.albaran:
self.albaran.update_totales()
if self.ticket and update_ticket:
self.ticket.update_totales()
if self.ticket_rectification:
self.ticket_rectification.update_totales()
if self.invoice and update_invoice:
self.invoice.update_totales()
if self.invoice_rectification:
self.invoice_rectification.update_totales()
return result
def __update_subtotal_basket(self, line_old):
self.subtotal_basket = Decimal(self.quantity) * self.price_base_basket
self.discounts_basket = round_decimal(self.subtotal_basket * self.discount_basket / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.taxes_basket = round_decimal(self.subtotal_basket * Decimal(self.tax_basket) / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.equivalence_surcharges_basket = round_decimal(self.subtotal_basket * Decimal(self.equivalence_surcharge_basket) / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.total_basket = self.subtotal_basket + self.taxes_basket - self.discounts_basket
if self.quantity:
self.price_unit_basket = self.total_basket / Decimal(self.quantity)
else:
self.price_unit_basket = Decimal('0')
if line_old is None:
update = True
elif self.subtotal_basket != line_old.subtotal_basket or self.discounts_basket != line_old.discounts_basket or self.taxes_basket != line_old.taxes_basket or self.equivalence_surcharges_basket != line_old.equivalence_surcharges_basket or self.total_basket != line_old.total_basket:
update = True
else:
update = False
return update
def __update_subtotal_order(self, line_old):
self.subtotal_order = Decimal(self.quantity) * self.price_base_order
self.discounts_order = round_decimal(self.subtotal_order * self.discount_order / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.taxes_order = round_decimal(self.subtotal_order * Decimal(self.tax_order) / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.equivalence_surcharges_order = round_decimal(self.subtotal_order * Decimal(self.equivalence_surcharge_order) / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.total_order = self.subtotal_order + self.taxes_order - self.discounts_order
if self.quantity:
self.price_unit_order = self.total_order / Decimal(self.quantity)
else:
self.price_unit_order = Decimal('0')
if line_old is None:
update = True
elif self.subtotal_order != line_old.subtotal_order or self.discounts_order != line_old.discounts_order or self.taxes_order != line_old.taxes_order or self.equivalence_surcharges_order != line_old.equivalence_surcharges_order or self.total_order != line_old.total_order:
update = True
else:
update = False
return update
def __update_subtotal_ticket(self, line_old):
self.subtotal_ticket = Decimal(self.quantity) * self.price_base_ticket
self.discounts_ticket = round_decimal(self.subtotal_ticket * self.discount_ticket / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.taxes_ticket = round_decimal(self.subtotal_ticket * Decimal(self.tax_ticket) / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.equivalence_surcharges_ticket = round_decimal(self.subtotal_ticket * Decimal(self.equivalence_surcharge_ticket) / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.total_ticket = self.subtotal_ticket + self.taxes_ticket - self.discounts_ticket
if self.quantity:
self.price_unit_ticket = self.total_ticket / Decimal(self.quantity)
else:
self.price_unit_ticket = Decimal('0')
if line_old is None:
update = True
elif self.subtotal_ticket != line_old.subtotal_ticket or self.discounts_ticket != line_old.discounts_ticket or self.taxes_ticket != line_old.taxes_ticket or self.equivalence_surcharges_ticket != line_old.equivalence_surcharges_ticket or self.total_ticket != line_old.total_ticket:
update = True
else:
update = False
return update
def __update_subtotal_invoice(self, line_old):
self.subtotal_invoice = Decimal(self.quantity) * self.price_base_invoice
self.discounts_invoice = round_decimal(self.subtotal_invoice * self.discount_invoice / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.taxes_invoice = round_decimal(self.subtotal_invoice * Decimal(self.tax_invoice) / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.equivalence_surcharges_invoice = round_decimal(self.subtotal_invoice * Decimal(self.equivalence_surcharge_invoice) / Decimal(100), CURRENCY_DECIMAL_PLACES)
self.total_invoice = self.subtotal_invoice + self.taxes_invoice - self.discounts_invoice
if self.quantity:
self.price_unit_invoice = self.total_invoice / Decimal(self.quantity)
else:
self.price_unit_invoice = Decimal('0')
if line_old is None:
update = True
elif self.subtotal_invoice != line_old.subtotal_invoice or self.discounts_invoice != line_old.discounts_invoice or self.taxes_invoice != line_old.taxes_invoice or self.equivalence_surcharges_invoice != line_old.equivalence_surcharges_invoice or self.total_invoice != line_old.total_invoice:
update = True
else:
update = False
return update
def lock_delete(self, request=None):
    """
    Decide whether this line may be deleted.

    A line is locked as soon as it is referenced by any downstream
    document (invoice, ticket, albaran, order) or when its budget is
    locked; in that case an explanatory message is returned instead of
    delegating to the parent implementation.

    :return: a translated error message when deletion is forbidden,
        otherwise whatever ``super().lock_delete()`` returns.
    """
    blockers = (
        (self.invoice, _('Cannot delete, it is related to invoice')),
        (self.ticket, _('Cannot delete, it is related to ticket')),
        (self.albaran, _('Cannot delete, it is related to albaran')),
        (self.order, _('Cannot delete, it is related to order')),
        (self.basket.lock, _('Cannot delete, the order is lock')),
    )
    for blocked, message in blockers:
        if blocked:
            return message
    return super().lock_delete()
def delete(self):
    """
    Delete the line, honouring the ``CDNX_INVOICING_LOGICAL_DELETION``
    setting: when it is absent or ``False`` the row is physically
    removed; otherwise the line is only flagged as ``removed``.
    """
    with transaction.atomic():
        logical = getattr(settings, 'CDNX_INVOICING_LOGICAL_DELETION', False)
        if logical is False:
            # Hard delete (default behaviour).
            return super(SalesLines, self).delete()
        # Soft delete: keep the row but mark it as logically removed.
        self.removed = True
        self.save()
def __limitQ__(self, info):
    """Restrict every CODENERIX listing to lines not logically removed."""
    limits = {}
    limits['removed'] = Q(removed=False)
    return limits
@staticmethod
def delete_doc(doc):
    """
    Delete every non-removed line of *doc*, respecting the sales flow.

    If any line of *doc* is already referenced by a later document
    (order/albaran/ticket/invoice, or a rectification), the whole
    operation is refused by raising ``SalesLinesNotDelete``.  For
    intermediate documents the lines are not destroyed outright: each
    one is cloned with the corresponding FK cleared, so the surviving
    documents keep their data, and only then is the original deleted.

    :param doc: a SalesBasket / SalesOrder / SalesAlbaran / SalesTicket /
        SalesTicketRectification / SalesInvoice / SalesInvoiceRectification
    :raises SalesLinesNotDelete: when lines are still referenced by a
        later document in the sales flow.
    """
    def _detach_and_delete(field):
        # Clone each live line with the given FK cleared (so sibling
        # documents keep their information), then delete the original.
        with transaction.atomic():
            for line in doc.lines_sales.filter(removed=False):
                nline = copy.copy(line)
                nline.pk = None
                setattr(nline, field, None)
                nline.save()
                line.delete()

    if isinstance(doc, SalesBasket):
        if doc.lines_sales.filter(Q(order__isnull=False) | Q(albaran__isnull=False) | Q(ticket__isnull=False) | Q(invoice__isnull=False)).exists():
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con pedido, albaran, ticket o factura'))
        with transaction.atomic():
            # BUG FIX: 'lines_sales' is a RelatedManager and has no
            # '.objects' attribute -- the original code raised
            # AttributeError here instead of deleting the lines.
            doc.lines_sales.filter(removed=False).delete()
    elif isinstance(doc, SalesOrder):
        if doc.lines_sales.filter(Q(albaran__isnull=False) | Q(ticket__isnull=False) | Q(invoice__isnull=False)).exists():
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con albaran, ticket o factura'))
        _detach_and_delete('order')
    elif isinstance(doc, SalesAlbaran):
        if doc.lines_sales.filter(Q(ticket__isnull=False) | Q(invoice__isnull=False)).exists():
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado ticket o factura'))
        _detach_and_delete('albaran')
    elif isinstance(doc, SalesTicket):
        if doc.lines_sales.filter(Q(ticket_rectification__isnull=False)).exists():
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con ticket rectificativos'))
        _detach_and_delete('ticket')
    elif isinstance(doc, SalesTicketRectification):
        _detach_and_delete('ticket_rectification')
    elif isinstance(doc, SalesInvoice):
        if doc.lines_sales.filter(Q(invoice_rectification__isnull=False)).exists():
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con factura rectificativos'))
        _detach_and_delete('invoice')
    elif isinstance(doc, SalesInvoiceRectification):
        _detach_and_delete('invoice_rectification')
@staticmethod
def create_document_from_another(pk, list_lines,
                                 MODEL_SOURCE, MODEL_FINAL,
                                 url_reverse,
                                 msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                 unique):
    """
    Generic helper that builds a destination sales document from the
    lines of a source document (budget -> order -> albaran/ticket/invoice).

    pk: pk of the source document
    list_lines: list of pks of the source lines to move
    MODEL_SOURCE: model class of the source document
    MODEL_FINAL: model class of the destination document
    url_reverse: URL name of the destination list view
    msg_error_relation: error message used when lines are already related
    msg_error_not_found: error message used when the source is not found
    msg_error_line_not_found: error message for "no line left to move"
        (NOTE(review): currently unused in this body)
    unique: (True/False) whether a line may only belong to ONE
        destination document of this kind

    Returns a context dict with 'url' and 'obj_final' on success, or
    'error' on failure.
    """
    context = {}
    obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
    if list_lines and obj_src:
        # parse to int
        list_lines = [int(x) for x in list_lines]
        obj_final = MODEL_FINAL()
        # 'complete' marks destinations that carry the full set of price
        # fields (description/price/discount/tax/...); albaran and the
        # rectification documents only keep notes.
        complete = True
        field_final_tax = None
        if isinstance(obj_final, SalesOrder):
            obj_final.budget = obj_src
            field_final = 'order'
            field_final_tax = 'tax_order_fk'
        elif isinstance(obj_final, SalesAlbaran):
            field_final = 'albaran'
            field_final_tax = 'tax_albaran_fk'
            complete = False
        elif isinstance(obj_final, SalesTicket):
            field_final = 'ticket'
            field_final_tax = 'tax_ticket_fk'
        elif isinstance(obj_final, SalesTicketRectification):
            field_final = 'ticket_rectification'
            complete = False
        elif isinstance(obj_final, SalesInvoice):
            field_final = 'invoice'
            field_final_tax = 'tax_invoice_fk'
        elif isinstance(obj_final, SalesInvoiceRectification):
            field_final = 'invoice_rectification'
            complete = False
        # list of lines objects
        if unique:
            # Only create the new document when none of the selected
            # lines is already attached to a destination of this kind.
            create = not SalesLines.objects.filter(**{
                "pk__in": list_lines,
                "{}__isnull".format(field_final): False
            }).exists()
        else:
            create = True
        """
        si debiendo ser filas unicas no las encuentra en el modelo final, se crea el nuevo documento
        """
        if create:
            with transaction.atomic():
                # Sources without a direct customer (e.g. an albaran)
                # borrow it from the first live line's order.
                if hasattr(obj_src, 'customer'):
                    customer = obj_src.customer
                else:
                    customer = obj_src.lines_sales.filter(removed=False).first().order.customer
                obj_final.customer = customer
                obj_final.date = datetime.datetime.now()
                obj_final.billing_series = obj_src.billing_series
                field_src_tax = None
                if isinstance(obj_src, SalesBasket):
                    field_src = 'basket'
                    field_src_tax = 'tax_basket_fk'
                elif isinstance(obj_src, SalesOrder) or isinstance(obj_src, SalesAlbaran):
                    field_src = 'order'
                    field_src_tax = 'tax_order_fk'
                elif isinstance(obj_src, SalesTicket) or isinstance(obj_src, SalesTicketRectification):
                    field_src = 'ticket'
                    field_src_tax = 'tax_ticket_fk'
                elif isinstance(obj_src, SalesInvoice) or isinstance(obj_src, SalesInvoiceRectification):
                    field_src = 'invoice'
                    field_src_tax = 'tax_invoice_fk'
                obj_final.save()
                # Only lines not yet attached to a destination of this
                # kind are moved.
                qs = SalesLines.objects.filter(**{'pk__in': list_lines, '{}__isnull'.format(field_final): True})
                if qs:
                    for line in qs:
                        setattr(line, field_final, obj_final)
                        if complete:
                            # Copy the full pricing snapshot from the
                            # source stage to the destination stage.
                            setattr(line, 'description_{}'.format(field_final), getattr(line, 'description_{}'.format(field_src)))
                            setattr(line, 'price_base_{}'.format(field_final), getattr(line, 'price_base_{}'.format(field_src)))
                            setattr(line, 'discount_{}'.format(field_final), getattr(line, 'discount_{}'.format(field_src)))
                            setattr(line, 'tax_{}'.format(field_final), getattr(line, 'tax_{}'.format(field_src)))
                            setattr(line, 'equivalence_surcharge_{}'.format(field_final), getattr(line, 'equivalence_surcharge_{}'.format(field_src)))
                            setattr(line, 'tax_label_{}'.format(field_final), getattr(line, 'tax_label_{}'.format(field_src)))
                        if field_src_tax and field_final_tax:
                            setattr(line, '{}'.format(field_final_tax), getattr(line, '{}'.format(field_src_tax)))
                        setattr(line, 'notes_{}'.format(field_final), getattr(line, 'notes_{}'.format(field_src)))
                        line.save()
                        """
                        FALTA LOS PACKS
                        if hasattr(line_src, 'line_basket_option_sales') and line_src.line_basket_option_sales.exists():
                            for opt_src in line_src.line_basket_option_sales.all():
                                opt_dst = SalesLineOrderOption()
                                opt_dst.line_order = line_final
                                opt_dst.product_option = opt_src.product_option
                                opt_dst.product_final = opt_src.product_final
                                opt_dst.quantity = opt_src.quantity
                                opt_dst.save()
                        """
                    # lock the source document
                    obj_src.lock = True
                    obj_src.save()
                    # context['url'] = reverse('ordersaless_details', kwargs={'pk': order.pk})
                    context['url'] = "{}#/{}".format(reverse(url_reverse), obj_final.pk)
                    context['obj_final'] = obj_final
                else:
                    context['error'] = msg_error_relation
        else:
            # _("Hay lineas asignadas a pedidos")
            context['error'] = msg_error_relation
    else:
        # _('Budget not found')
        context['error'] = msg_error_not_found
    return context
@staticmethod
def create_order_from_budget_all(order, signed_obligatorily=True):
    """
    Move every live line of the order's budget into the order.

    :return: ``True`` when every budget line ended up in the resulting
        order, ``False`` otherwise.
    """
    budget_lines = order.budget.lines_sales.filter(removed=False)
    pks = [row[0] for row in budget_lines.values_list('pk')]
    result = SalesLines.create_order_from_budget(order.pk, pks, signed_obligatorily)
    final_order = result['obj_final']
    return budget_lines.count() == final_order.lines_sales.filter(removed=False).count()
@staticmethod
def create_order_from_budget(pk, list_lines, signed_obligatorily=True):
    """
    Build a SalesOrder from the selected lines of budget *pk*.

    When only a subset of the budget lines is selected, the budget is
    duplicated with exactly those lines and the order is created from
    the copy.  Returns the context dict produced by
    ``create_document_from_another`` (or ``{'error': ...}``).
    """
    budget = SalesBasket.objects.get(pk=pk)
    if signed_obligatorily and not budget.signed:
        # The budget must be signed before an order can be generated.
        return {'error': _("Unsigned budget!")}
    live_count = SalesLines.objects.filter(removed=False, basket=pk).count()
    if list_lines and len(list_lines) != live_count:
        # Partial selection: clone the budget with just those lines and
        # link the new order to the clone instead.
        pk = budget.duplicate(list_lines).pk
        list_lines = [row[0] for row in SalesLines.objects.filter(removed=False, basket=pk).values_list('pk')]
    return SalesLines.create_document_from_another(
        pk, list_lines,
        SalesBasket, SalesOrder,
        'CDNX_invoicing_ordersaless_list',
        _("Hay lineas asignadas a pedidos"),
        _('Budget not found'),
        _('Todas las lineas ya se han pasado a pedido'),
        True)
@staticmethod
def create_albaran_automatic(pk, list_lines):
    """
    Automatically generate the delivery note (albaran) covering those
    of the given order lines that are not yet attached to one.
    """
    pending = SalesLines.objects.filter(
        pk__in=list_lines,
        removed=False,
    ).exclude(albaran__isnull=False).values_list('pk')
    SalesLines.create_albaran_from_order(pk, [row[0] for row in pending])
@staticmethod
def create_albaran_from_order(pk, list_lines):
    """
    Create a SalesAlbaran from the given order lines and reserve stock
    for every line.

    The document itself is built by ``create_document_from_another``;
    afterwards each line locks stock on one or more ProductUnique rows.
    If any line cannot be fully covered, all created lines and the
    albaran are rolled back and the context carries an 'error'.

    :param pk: pk of the source SalesOrder
    :param list_lines: pks of the SalesLines to move
    :return: context dict with 'url'/'obj_final' or 'error'
    """
    MODEL_SOURCE = SalesOrder
    MODEL_FINAL = SalesAlbaran
    url_reverse = 'CDNX_invoicing_albaransaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a albaranes")
    msg_error_not_found = _('Sales order not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a albaran')
    context = SalesLines.create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, url_reverse, msg_error_relation, msg_error_not_found, msg_error_line_not_found, False)
    # If there was not any error
    if 'error' not in context:
        # Get albaran
        albaran = context['obj_final']
        # Reserve stock
        try:
            with transaction.atomic():
                # For each line
                for line in albaran.lines_sales.all():
                    if line.product_unique:
                        # It is a unique product
                        pus = [line.product_unique, ]
                    else:
                        # It is not a unique product, get all of them
                        pus = line.product_final.products_unique.filter(stock_real__gt=F('stock_locked'))
                    # Reserve as many as we can
                    quantity = line.quantity
                    for pu in pus:
                        # Check how many are free and lock as many as we need
                        available = pu.stock_real - pu.stock_locked
                        # Choose how many we are going to lock
                        to_lock = min(available, quantity)
                        # Mark as locked
                        pu.stock_locked += to_lock
                        pu.save()
                        # Count down from quantity
                        quantity -= to_lock
                        # When we are done, break bucle
                        if not quantity:
                            break
                    # If we are not done
                    if quantity:
                        # Fail
                        raise IOError("Not enought products for line '{}'!".format(line))
        except IOError as e:
            # Remove all line's from albaran before failing
            for line in albaran.lines_sales.all():
                line.delete()
            # Remove albaran before failing
            albaran.delete()
            # Set error: NOTE the exception object itself is stored,
            # not its message string.
            context = {}
            context['error'] = e
    # Return result
    return context
@staticmethod
def create_ticket_from_order(pk, list_lines):
    """
    Create a SalesTicket from the given order lines.

    The corresponding albaran is generated automatically first, inside
    the same transaction, so stock gets reserved before the ticket is
    produced.  Returns the ``create_document_from_another`` context.
    """
    with transaction.atomic():
        SalesLines.create_albaran_automatic(pk, list_lines)
        return SalesLines.create_document_from_another(
            pk, list_lines,
            SalesOrder, SalesTicket,
            'CDNX_invoicing_ticketsaless_list',
            _("Hay lineas asignadas a ticket"),
            _('Sales order not found'),
            _('Todas las lineas ya se han pasado a ticket'),
            False)
@staticmethod
def create_ticket_from_slot(slot_pk):
    """
    Create or update the ticket that groups every unpaid order line of
    a POS slot (e.g. a restaurant table).

    - Unpaid lines spread over several tickets -> 'error'.
    - Unpaid lines with one existing ticket -> missing lines are
      attached to it and quantities refreshed.
    - Unpaid lines with no ticket -> a new ticket is created.
    - No unpaid lines -> the last matching ticket is looked up.

    :param slot_pk: pk of the POS slot
    :return: dict with 'obj_final' (the ticket) or 'error'
    """
    context = {
        "error": None,
        "obj_final": None,
    }
    # Order lines of this slot that are not paid yet.
    line_orders = SalesLines.objects.filter(
        order__budget__pos_slot__pk=slot_pk,
        order__payment__isnull=True,
        order__cash_movements__isnull=True,
        order__budget__removed=False,
        order__removed=False,
        removed=False
    )
    if line_orders:
        # Create or update the ticket.
        tickets = []
        for line in line_orders:
            if line.ticket:
                tickets.append(line.ticket)
        if len(set(tickets)) > 1:
            context['error'] = _(u'There are orders that are in several different tickets')
        else:
            if tickets:
                # Update existing ticket.
                with transaction.atomic():
                    ticket = SalesTicket.objects.get(pk=tickets[0], removed=False)
                    # There are already orders associated with a ticket
                    for line in line_orders:
                        if line.ticket is None:
                            # Attach the line, copying the order-stage
                            # snapshot into the ticket-stage fields.
                            line.ticket = ticket
                            line.tax_ticket = line.tax_order
                            line.discount_ticket = line.discount_order
                            line.description_ticket = line.description_order
                            line.notes_ticket = line.notes_order
                            # NOTE(review): quantity_ticket/quantity_order are
                            # not declared as model fields in the visible class
                            # body -- confirm they exist on the model.
                            line.quantity_ticket = line.quantity_order
                            line.price_recommended_ticket = line.price_recommended_order
                            line.price_base_ticket = line.price_base_order
                            line.save()
                        else:
                            # Refresh quantity only when it changed.
                            if line.quantity_ticket != line.quantity_order:
                                line.quantity_ticket = line.quantity_order
                                line.save()
            else:
                # New ticket.
                with transaction.atomic():
                    ticket = SalesTicket()
                    ticket.billing_series = BillingSeries.objects.filter(default=True).first()
                    ticket.customer = line_orders[0].order.customer
                    ticket.save()
                    for line in line_orders:
                        line.ticket = ticket
                        line.tax_ticket = line.tax_order
                        line.discount_ticket = line.discount_order
                        line.description_ticket = line.description_order
                        line.notes_ticket = line.notes_order
                        line.quantity_ticket = line.quantity_order
                        line.price_recommended_ticket = line.price_recommended_order
                        line.price_base_ticket = line.price_base_order
                        line.save()
            context['obj_final'] = ticket
    else:
        # No unpaid lines: look the ticket up from the slot's last line.
        line_order = SalesLines.objects.filter(
            order__budget__pos_slot__pk=slot_pk,
            order__budget__removed=False,
            order__removed=False,
            removed=False,
        ).last()
        ticket = SalesTicket.objects.filter(
            customer=line_order.order.customer,
            lines_sales=line_order,
            lines_sales__removed=False,
            removed=False
        ).first()
        if ticket:
            context['obj_final'] = ticket
        else:
            context['error'] = _("Ticket don't found")
    return context
@staticmethod
def create_invoice_from_order(pk, list_lines):
    """
    Create a SalesInvoice from the given order lines.

    The corresponding albaran is generated automatically first, inside
    the same transaction, so stock gets reserved before invoicing.
    Returns the ``create_document_from_another`` context.
    """
    with transaction.atomic():
        SalesLines.create_albaran_automatic(pk, list_lines)
        return SalesLines.create_document_from_another(
            pk, list_lines,
            SalesOrder, SalesInvoice,
            'CDNX_invoicing_invoicesaless_list',
            _("Hay lineas asignadas a facturas"),
            _('Sales order not found'),
            _('Todas las lineas ya se han pasado a facturas'),
            False)
@staticmethod
def create_invoice_from_albaran(pk, list_lines):
    """
    Create a SalesInvoice from the given albaran (delivery note) lines.

    :param pk: pk of the source SalesAlbaran
    :param list_lines: pks of the SalesLines to move into the invoice
    :return: context dict with 'url'/'obj_final' or 'error'
    """
    # FIX: the '@staticmethod' decorator was duplicated, which wraps the
    # staticmethod object itself and makes the attribute non-callable on
    # Python < 3.10.  The unreachable commented-out code that followed
    # the return statement has also been removed.
    MODEL_SOURCE = SalesAlbaran
    MODEL_FINAL = SalesInvoice
    url_reverse = 'CDNX_invoicing_invoicesaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a facturas")
    msg_error_not_found = _('Sales albaran not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas')
    return SalesLines.create_document_from_another(pk, list_lines,
                                                   MODEL_SOURCE, MODEL_FINAL, url_reverse,
                                                   msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                                   False)
@staticmethod
def create_invoice_from_ticket(pk, list_lines):
    """
    Create a SalesInvoice from the given ticket lines.

    :param pk: pk of the source SalesTicket
    :param list_lines: pks of the SalesLines to move into the invoice
    :return: context dict with 'url'/'obj_final' or 'error'
    """
    # FIX: removed the unreachable string literal (dead commented-out
    # code) that followed the return statement in the original body.
    MODEL_SOURCE = SalesTicket
    MODEL_FINAL = SalesInvoice
    url_reverse = 'CDNX_invoicing_invoicesaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a facturas")
    msg_error_not_found = _('Sales ticket not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas')
    return SalesLines.create_document_from_another(pk, list_lines,
                                                   MODEL_SOURCE, MODEL_FINAL, url_reverse,
                                                   msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                                   False)
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales.py
|
SalesLines.create_invoice_from_albaran
|
python
|
def create_invoice_from_albaran(pk, list_lines):
MODEL_SOURCE = SalesAlbaran
MODEL_FINAL = SalesInvoice
url_reverse = 'CDNX_invoicing_invoicesaless_list'
# type_doc
msg_error_relation = _("Hay lineas asignadas a facturas")
msg_error_not_found = _('Sales albaran not found')
msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas')
return SalesLines.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, url_reverse,
msg_error_relation, msg_error_not_found, msg_error_line_not_found,
False)
"""
context = {}
if list_lines:
new_list_lines = SalesLines.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(
invoice__isnull=False
)
if new_list_lines:
new_pk = new_list_lines.first()
if new_pk:
context = SalesLines.create_invoice_from_order(
new_pk.order.pk,
[x['pk'] for x in new_list_lines.values('pk')])
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
"""
|
context = {}
if list_lines:
new_list_lines = SalesLines.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(
invoice__isnull=False
)
if new_list_lines:
new_pk = new_list_lines.first()
if new_pk:
context = SalesLines.create_invoice_from_order(
new_pk.order.pk,
[x['pk'] for x in new_list_lines.values('pk')])
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales.py#L2410-L2447
| null |
class SalesLines(CodenerixModel):
basket = models.ForeignKey(SalesBasket, related_name='lines_sales', verbose_name=_("Basket"), on_delete=models.CASCADE)
tax_basket_fk = models.ForeignKey(TypeTax, related_name='lines_sales_basket', verbose_name=_("Tax Basket"), on_delete=models.CASCADE)
order = models.ForeignKey(SalesOrder, related_name='lines_sales', verbose_name=_("Sales order"), on_delete=models.CASCADE, null=True, blank=True)
tax_order_fk = models.ForeignKey(TypeTax, related_name='lines_sales_order', verbose_name=_("Tax Sales order"), on_delete=models.CASCADE, null=True, blank=True)
albaran = models.ForeignKey(SalesAlbaran, related_name='lines_sales', verbose_name=_("Albaran"), on_delete=models.CASCADE, null=True, blank=True)
ticket = models.ForeignKey(SalesTicket, related_name='lines_sales', verbose_name=_("Ticket"), on_delete=models.CASCADE, null=True, blank=True)
tax_ticket_fk = models.ForeignKey(TypeTax, related_name='lines_sales_ticket', verbose_name=_("Tax Ticket"), on_delete=models.CASCADE, null=True, blank=True)
ticket_rectification = models.ForeignKey(SalesTicketRectification, related_name='lines_sales', verbose_name=_("Ticket rectification"), on_delete=models.CASCADE, null=True, blank=True)
invoice = models.ForeignKey(SalesInvoice, related_name='lines_sales', verbose_name=_("Invoice"), on_delete=models.CASCADE, null=True, blank=True)
tax_invoice_fk = models.ForeignKey(TypeTax, related_name='lines_sales_invoice', verbose_name=_("Tax Invoice"), on_delete=models.CASCADE, null=True, blank=True)
invoice_rectification = models.ForeignKey(SalesInvoiceRectification, related_name='lines_sales', verbose_name=_("Invoice rectification"), on_delete=models.CASCADE, null=True, blank=True)
product_final = models.ForeignKey(ProductFinal, related_name='lines_sales', verbose_name=_("Product"), on_delete=models.CASCADE)
product_unique = models.ForeignKey(ProductUnique, related_name='lines_sales', verbose_name=_("Product Unique"), on_delete=models.CASCADE, null=True, blank=True)
# invoiced is True if 'invoice' is not null
# invoiced = models.BooleanField(_("Invoiced"), blank=False, default=False)
# logical deletion
removed = models.BooleanField(_("Removed"), blank=False, default=False, editable=False)
quantity = models.FloatField(_("Quantity"), blank=False, null=False)
code = models.CharField(_("Code"), max_length=250, blank=True, null=True, default=None)
# ####
# desde el formulario se podrΓ‘ modificar el precio y la descripcion del producto
# se guarda el tax usado y la relacion para poder hacer un seguimiento
# ####
# info basket
price_recommended_basket = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_basket = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_basket = models.DecimalField(_("Price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_basket = models.DecimalField(_("Discount (%)"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_basket = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_basket = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_basket = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_basket = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_basket = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_basket = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_basket = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_basket = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_basket = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_basket = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info order
price_recommended_order = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_order = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_order = models.DecimalField(_("Price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_order = models.DecimalField(_("Discount (%)"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_order = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_order = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_order = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_order = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_order = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_order = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_order = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_order = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_order = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_order = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info albaran - basic
notes_albaran = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info ticket
price_recommended_ticket = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_ticket = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_ticket = models.DecimalField(_("Price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_ticket = models.DecimalField(_("Discount (%)"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_ticket = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_ticket = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_ticket = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_ticket = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_ticket = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_ticket = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_ticket = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_ticket = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_ticket = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_ticket = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info ticket rectification - basic
notes_ticket_rectification = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info invoice
price_recommended_invoice = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_invoice = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_invoice = models.DecimalField(_("Price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_invoice = models.DecimalField(_("Discount (%)"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_invoice = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_invoice = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_invoice = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_invoice = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_invoice = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_invoice = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_invoice = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_invoice = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_invoice = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_invoice = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info invoice rectification - basic
notes_invoice_rectification = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
def __str__(self):
    """Human readable representation: '<product> - <quantity>'."""
    product, amount = self.product_final, self.quantity
    return u"{} - {}".format(product, amount)
def __unicode__(self):
    # Python 2 compatibility shim: delegate to __str__.
    return self.__str__()
def __fields__(self, info):
    """Columns shown by CODENERIX list views for sales lines."""
    return [
        ('product_final', _("Product final")),
        ('product_unique', _("Product unique")),
        ('quantity', _("Quantity")),
    ]
def get_product_unique(self, quantity, pos=None):
    """
    Reserve *quantity* units of this line's product from the available
    ProductUnique stock.

    :param quantity: number of units to reserve
    :param pos: optional point of sale whose salable storages restrict
        the candidate stock; otherwise the basket's POS is used if set
    :return: list of dicts ``{'product_unique': ..., 'quantity': ...}``
    :raises SalesLinesProductFinalIsSample: product is a sample
    :raises SalesLinesUniqueProductNotExists: no stock (force_stock off)
    :raises SalesLinesInsufficientStock: not enough stock to cover
        *quantity* (force_stock on)
    """
    if self.product_final.sample:
        raise SalesLinesProductFinalIsSample(_("This product can not be sold, it is marked as 'sample'"))
    else:
        products_unique = []
        with transaction.atomic():
            # Candidate stock: rows with real stock not fully locked.
            qs = ProductUnique.objects.filter(
                product_final=self.product_final,
                stock_real__gt=0,
                stock_locked__lt=F('stock_real')
            )
            # Restrict to salable storages of the relevant POS, if any.
            if pos:
                qs = qs.filter(box__box_structure__zone__storage__in=pos.storage_stock.filter(storage_zones__salable=True))
            elif self.basket.pos:
                qs = qs.filter(box__box_structure__zone__storage__in=self.basket.pos.storage_stock.filter(storage_zones__salable=True))
            if self.product_final.product.force_stock is False:
                # Stock is not enforced: any single row is enough.
                # NOTE(review): this branch builds 'products_unique' but
                # never returns it -- the method appears to fall through
                # and return None here; confirm against callers.
                product_unique = qs.first()
                if product_unique:
                    products_unique = [
                        {
                            'quantity': quantity,
                            'product_unique': product_unique
                        }
                    ]
                else:
                    raise SalesLinesUniqueProductNotExists(_('Unique product not exists! No stock!'))
            else:
                # Stock is enforced: walk the candidates locking stock
                # until the requested quantity is fully covered.
                stock_available = None
                for unique_product in qs:
                    if quantity <= 0:
                        break
                    stock_available = unique_product.stock_real - unique_product.stock_locked
                    if stock_available > quantity:
                        # More free stock than needed: split the row so
                        # only the needed amount gets locked.
                        stock_available = quantity
                        unique_product.duplicate(quantity)
                    unique_product.locked_stock(stock_available)
                    products_unique.append({
                        'product_unique': unique_product,
                        'quantity': stock_available
                    })
                    quantity -= stock_available
                if quantity > 0:
                    raise SalesLinesInsufficientStock(_("Insufficient stock. Product: {}".format(self.product_final)))
                else:
                    return products_unique
def save(self, *args, **kwargs):
    """Persist the line, filling per-document defaults and totals.

    On creation the line snapshots the product code and, when stock
    enforcement is enabled, locks unique-product stock — a single line
    may be split into several sibling rows when the quantity spans
    several unique products.  For every document the line belongs to
    (basket/order/ticket/invoice), missing tax data, recommended price
    and description are defaulted from the product, subtotals are
    recomputed, and the owning documents' totals are refreshed last.

    Raises SalesLinesNotModifiable when the product is changed while the
    line is already referenced by an order/albaran/ticket/invoice.
    """
    with transaction.atomic():
        if self.pk is None:
            # New line: no previous state to diff against.
            line_old = None
            # Product-final code wins over the generic product code.
            if self.product_final.code:
                self.code = self.product_final.code
            else:
                self.code = self.product_final.product.code
            if self.product_unique is None:
                if getattr(settings, 'CDNX_INVOICING_FORCE_STOCK_IN_BUDGET', True):
                    # Reserve stock now: the first unique product stays on
                    # this row; the rest become cloned sibling rows.
                    products_unique = self.get_product_unique(self.quantity)
                    first = True
                    for unique_product in products_unique:
                        if first:
                            first = False
                            self.quantity = unique_product['quantity']
                            self.product_unique = unique_product['product_unique']
                        else:
                            line = copy.copy(self)
                            line.pk = None
                            line.quantity = unique_product['quantity']
                            line.product_unique = unique_product['product_unique']
                            line.save()
        elif self.pk:
            line_old = SalesLines.objects.filter(pk=self.pk).first()
            if line_old:
                product_final_old = line_old.product_final
            else:
                product_final_old = None
            if self.product_final != product_final_old:
                if self.order or self.albaran or self.ticket or self.invoice:
                    raise SalesLinesNotModifiable(_('You can not modify product'))
                elif self.description_basket == '{}'.format(product_final_old):
                    # Auto-generated description: clear it so it is rebuilt
                    # from the new product below.
                    self.description_basket = ''
                # the product can only be changed while the line is not in an
                # order, albaran, ticket or invoice
                self.price_recommended_basket = None
                self.tax_label_basket = None
                if getattr(settings, 'CDNX_INVOICING_FORCE_STOCK_IN_BUDGET', True):
                    # Re-reserve stock for the new product (same split logic
                    # as on creation).
                    products_unique = self.get_product_unique(self.quantity)
                    first = True
                    for unique_product in products_unique:
                        if first:
                            first = False
                            self.quantity = unique_product['quantity']
                            self.product_unique = unique_product['product_unique']
                        else:
                            line = copy.copy(self)
                            line.pk = None
                            line.quantity = unique_product['quantity']
                            line.product_unique = unique_product['product_unique']
                            line.save()
                else:
                    self.product_unique = None
            elif self.order and line_old.order is None:
                # associate line with order
                # locked product unique!!
                if self.product_final.product.force_stock:
                    if self.product_unique is None:
                        products_unique = self.get_product_unique(self.quantity)
                        first = True
                        for unique_product in products_unique:
                            if first:
                                first = False
                                self.quantity = unique_product['quantity']
                                self.product_unique = unique_product['product_unique']
                            else:
                                line = copy.copy(self)
                                line.pk = None
                                line.quantity = unique_product['quantity']
                                line.product_unique = unique_product['product_unique']
                                line.save()
                    else:
                        available = self.product_unique.stock_real - self.product_unique.stock_locked
                        if available < self.quantity:
                            # The current unique product cannot cover the
                            # whole quantity: re-reserve across several.
                            products_unique = self.get_product_unique(self.quantity)
                            first = True
                            for unique_product in products_unique:
                                if first:
                                    first = False
                                    self.quantity = unique_product['quantity']
                                    self.product_unique = unique_product['product_unique']
                                else:
                                    line = copy.copy(self)
                                    line.pk = None
                                    line.quantity = unique_product['quantity']
                                    line.product_unique = unique_product['product_unique']
                                    line.save()
        # calculate value of equivalence_surcharge
        # save tax label
        # save price recommended
        # save tax foreignkey
        if self.basket:
            if self.tax_basket_fk is None:
                self.tax_basket_fk = self.product_final.product.tax
            if not self.tax_label_basket:
                self.tax_label_basket = self.product_final.product.tax.name
            if not self.tax_basket:
                self.tax_basket = self.product_final.product.tax.tax
            if self.basket.get_customer().apply_equivalence_surcharge:
                self.equivalence_surcharge_basket = self.basket.get_customer().tax.recargo_equivalencia
            if self.price_recommended_basket is None:
                self.price_recommended_basket = self.product_final.price_base
            if not self.description_basket:
                self.description_basket = '{}'.format(self.product_final)
            update_basket = self.__update_subtotal_basket(line_old)
        if self.order:
            if self.tax_order_fk is None:
                self.tax_order_fk = self.product_final.product.tax
            if self.tax_label_order is None:
                self.tax_label_order = self.product_final.product.tax.name
            if not self.tax_order:
                self.tax_order = self.product_final.product.tax.tax
            if self.order.get_customer().apply_equivalence_surcharge:
                self.equivalence_surcharge_order = self.order.get_customer().tax.recargo_equivalencia
            if self.price_recommended_order is None:
                self.price_recommended_order = self.product_final.price_base
            if not self.description_order:
                self.description_order = '{}'.format(self.product_final)
            update_order = self.__update_subtotal_order(line_old)
        if self.ticket:
            if self.tax_ticket_fk is None:
                self.tax_ticket_fk = self.product_final.product.tax
            if self.tax_label_ticket is None:
                self.tax_label_ticket = self.product_final.product.tax.name
            if not self.tax_ticket:
                self.tax_ticket = self.product_final.product.tax.tax
            if self.ticket.get_customer().apply_equivalence_surcharge:
                self.equivalence_surcharge_ticket = self.ticket.get_customer().tax.recargo_equivalencia
            if self.price_recommended_ticket is None:
                self.price_recommended_ticket = self.product_final.price_base
            if not self.description_ticket:
                self.description_ticket = '{}'.format(self.product_final)
            update_ticket = self.__update_subtotal_ticket(line_old)
        if self.invoice:
            if self.tax_invoice_fk is None:
                self.tax_invoice_fk = self.product_final.product.tax
            if self.tax_label_invoice is None:
                self.tax_label_invoice = self.product_final.product.tax.name
            if not self.tax_invoice:
                self.tax_invoice = self.product_final.product.tax.tax
            if self.invoice.get_customer().apply_equivalence_surcharge:
                self.equivalence_surcharge_invoice = self.invoice.get_customer().tax.recargo_equivalencia
            if self.price_recommended_invoice is None:
                self.price_recommended_invoice = self.product_final.price_base
            if not self.description_invoice:
                self.description_invoice = '{}'.format(self.product_final)
            update_invoice = self.__update_subtotal_invoice(line_old)
        result = super(self._meta.model, self).save(*args, **kwargs)
        # update totals
        # NOTE(review): update_basket is read unconditionally — safe only
        # because `basket` is a non-null FK and the branch above always runs;
        # confirm the FK stays required.
        if update_basket:
            self.basket.update_totales()
        if self.order and update_order:
            self.order.update_totales()
        if self.albaran:
            self.albaran.update_totales()
        if self.ticket and update_ticket:
            self.ticket.update_totales()
        if self.ticket_rectification:
            self.ticket_rectification.update_totales()
        if self.invoice and update_invoice:
            self.invoice.update_totales()
        if self.invoice_rectification:
            self.invoice_rectification.update_totales()
        return result
def __update_subtotal(self, suffix, line_old):
    """Recompute the quantity-derived amounts for one document kind.

    *suffix* is 'basket', 'order', 'ticket' or 'invoice'; the method
    reads/writes the correspondingly suffixed fields.  Returns True when
    any persisted amount differs from *line_old* (or when *line_old* is
    None, i.e. the line is new) so callers know whether the owning
    document's totals must be refreshed.
    """
    def get(prefix):
        return getattr(self, '{}_{}'.format(prefix, suffix))

    def put(prefix, value):
        setattr(self, '{}_{}'.format(prefix, suffix), value)

    subtotal = Decimal(self.quantity) * get('price_base')
    put('subtotal', subtotal)
    # Discount, tax and equivalence surcharge are percentages over the
    # (pre-discount) subtotal, rounded to the currency precision.
    put('discounts', round_decimal(subtotal * get('discount') / Decimal(100), CURRENCY_DECIMAL_PLACES))
    put('taxes', round_decimal(subtotal * Decimal(get('tax')) / Decimal(100), CURRENCY_DECIMAL_PLACES))
    put('equivalence_surcharges', round_decimal(subtotal * Decimal(get('equivalence_surcharge')) / Decimal(100), CURRENCY_DECIMAL_PLACES))
    # NOTE(review): the equivalence surcharge is computed but not added to
    # the total — this mirrors the original behaviour in all four document
    # kinds; confirm it is intended.
    put('total', subtotal + get('taxes') - get('discounts'))
    if self.quantity:
        put('price_unit', get('total') / Decimal(self.quantity))
    else:
        put('price_unit', Decimal('0'))
    if line_old is None:
        return True
    watched = ('subtotal', 'discounts', 'taxes', 'equivalence_surcharges', 'total')
    return any(get(p) != getattr(line_old, '{}_{}'.format(p, suffix)) for p in watched)

def __update_subtotal_basket(self, line_old):
    """Refresh basket amounts; True when totals must be propagated."""
    return self.__update_subtotal('basket', line_old)

def __update_subtotal_order(self, line_old):
    """Refresh order amounts; True when totals must be propagated."""
    return self.__update_subtotal('order', line_old)

def __update_subtotal_ticket(self, line_old):
    """Refresh ticket amounts; True when totals must be propagated."""
    return self.__update_subtotal('ticket', line_old)

def __update_subtotal_invoice(self, line_old):
    """Refresh invoice amounts; True when totals must be propagated."""
    return self.__update_subtotal('invoice', line_old)
def lock_delete(self, request=None):
    """Return a message when this line must not be deleted, else defer
    to the parent implementation.

    A line is locked while it belongs to an invoice, ticket, albaran or
    order, or while its basket is locked.
    """
    guards = (
        (lambda: self.invoice, _('Cannot delete, it is related to invoice')),
        (lambda: self.ticket, _('Cannot delete, it is related to ticket')),
        (lambda: self.albaran, _('Cannot delete, it is related to albaran')),
        (lambda: self.order, _('Cannot delete, it is related to order')),
        (lambda: self.basket.lock, _('Cannot delete, the order is lock')),
    )
    for blocked, message in guards:
        if blocked():
            return message
    return super().lock_delete()
def delete(self):
    """Delete this line, honouring the logical-deletion switch.

    When settings.CDNX_INVOICING_LOGICAL_DELETION is anything other than
    the literal False (missing counts as False), the row is only flagged
    `removed=True`; otherwise it is physically deleted.
    """
    with transaction.atomic():
        # Idiomatic equivalent of the old `hasattr(...) or ... is False`
        # pair: a missing setting defaults to False (hard delete).
        if getattr(settings, 'CDNX_INVOICING_LOGICAL_DELETION', False) is False:
            return super(SalesLines, self).delete()
        else:
            self.removed = True
            self.save()
def __limitQ__(self, info):
    """Default list filter: hide logically-deleted lines."""
    limits = {}
    limits['removed'] = Q(removed=False)
    return limits
@staticmethod
def delete_doc(doc):
    """Delete every live sales line attached to *doc*.

    For documents other than a basket, each line is first re-created
    without its reference to *doc* (so downstream documents keep a
    detached copy) and the original line is then deleted.

    Raises SalesLinesNotDelete when any line is still referenced by a
    later document in the chain.
    """
    def detach_and_delete(field):
        # Clone each live line with *field* cleared, then delete the
        # original inside one transaction.
        with transaction.atomic():
            for line in doc.lines_sales.filter(removed=False):
                nline = copy.copy(line)
                nline.pk = None
                setattr(nline, field, None)
                nline.save()
                line.delete()

    if isinstance(doc, SalesBasket):
        qs = doc.lines_sales.filter(Q(order__isnull=False) | Q(albaran__isnull=False) | Q(ticket__isnull=False) | Q(invoice__isnull=False)).exists()
        if qs:
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con pedido, albaran, ticket o factura'))
        else:
            with transaction.atomic():
                # BUG FIX: related managers expose filter() directly — the
                # original called `.objects` on the manager (AttributeError).
                doc.lines_sales.filter(removed=False).delete()
    elif isinstance(doc, SalesOrder):
        qs = doc.lines_sales.filter(Q(albaran__isnull=False) | Q(ticket__isnull=False) | Q(invoice__isnull=False)).exists()
        if qs:
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con albaran, ticket o factura'))
        else:
            detach_and_delete('order')
    elif isinstance(doc, SalesAlbaran):
        qs = doc.lines_sales.filter(Q(ticket__isnull=False) | Q(invoice__isnull=False)).exists()
        if qs:
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado ticket o factura'))
        else:
            detach_and_delete('albaran')
    elif isinstance(doc, SalesTicket):
        qs = doc.lines_sales.filter(Q(ticket_rectification__isnull=False)).exists()
        if qs:
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con ticket rectificativos'))
        else:
            detach_and_delete('ticket')
    elif isinstance(doc, SalesTicketRectification):
        detach_and_delete('ticket_rectification')
    elif isinstance(doc, SalesInvoice):
        qs = doc.lines_sales.filter(Q(invoice_rectification__isnull=False)).exists()
        if qs:
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con factura rectificativos'))
        else:
            detach_and_delete('invoice')
    elif isinstance(doc, SalesInvoiceRectification):
        detach_and_delete('invoice_rectification')
@staticmethod
def create_document_from_another(pk, list_lines,
                                 MODEL_SOURCE, MODEL_FINAL,
                                 url_reverse,
                                 msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                 unique):
    """
    Generic "promote lines into a new document" engine.

    pk: primary key of the source document
    list_lines: list of source line pks to move
    MODEL_SOURCE: model class of the source document
    MODEL_FINAL: model class of the destination document
    url_reverse: URL name of the destination list view
    msg_error_relation: error message when lines are already related
    msg_error_not_found: error message when the source object is missing
    msg_error_line_not_found: error message when no line is left to move
        (NOTE(review): currently unused in the body — confirm upstream)
    unique: (True/False) whether a line may belong to at most one
        destination document of this kind

    Returns a context dict with either 'url' + 'obj_final' on success or
    'error' on failure; on success the source document is locked.
    """
    context = {}
    obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
    if list_lines and obj_src:
        # parse to int
        list_lines = [int(x) for x in list_lines]
        obj_final = MODEL_FINAL()
        complete = True
        field_final_tax = None
        # Map the destination model to the SalesLines FK / tax-FK fields;
        # `complete` says whether amounts must be copied from the source.
        if isinstance(obj_final, SalesOrder):
            obj_final.budget = obj_src
            field_final = 'order'
            field_final_tax = 'tax_order_fk'
        elif isinstance(obj_final, SalesAlbaran):
            field_final = 'albaran'
            field_final_tax = 'tax_albaran_fk'
            complete = False
        elif isinstance(obj_final, SalesTicket):
            field_final = 'ticket'
            field_final_tax = 'tax_ticket_fk'
        elif isinstance(obj_final, SalesTicketRectification):
            field_final = 'ticket_rectification'
            complete = False
        elif isinstance(obj_final, SalesInvoice):
            field_final = 'invoice'
            field_final_tax = 'tax_invoice_fk'
        elif isinstance(obj_final, SalesInvoiceRectification):
            field_final = 'invoice_rectification'
            complete = False
        # list of lines objects
        if unique:
            # Refuse when any selected line is already attached.
            create = not SalesLines.objects.filter(**{
                "pk__in": list_lines,
                "{}__isnull".format(field_final): False
            }).exists()
        else:
            create = True
        """
        si debiendo ser filas unicas no las encuentra en el modelo final, se crea el nuevo documento
        """
        if create:
            with transaction.atomic():
                # Customer comes from the source doc, or from the order of
                # its first live line (albaran has no customer field).
                if hasattr(obj_src, 'customer'):
                    customer = obj_src.customer
                else:
                    customer = obj_src.lines_sales.filter(removed=False).first().order.customer
                obj_final.customer = customer
                obj_final.date = datetime.datetime.now()
                obj_final.billing_series = obj_src.billing_series
                # Map the source model to the suffix of the SalesLines
                # columns we copy from.
                field_src_tax = None
                if isinstance(obj_src, SalesBasket):
                    field_src = 'basket'
                    field_src_tax = 'tax_basket_fk'
                elif isinstance(obj_src, SalesOrder) or isinstance(obj_src, SalesAlbaran):
                    field_src = 'order'
                    field_src_tax = 'tax_order_fk'
                elif isinstance(obj_src, SalesTicket) or isinstance(obj_src, SalesTicketRectification):
                    field_src = 'ticket'
                    field_src_tax = 'tax_ticket_fk'
                elif isinstance(obj_src, SalesInvoice) or isinstance(obj_src, SalesInvoiceRectification):
                    field_src = 'invoice'
                    field_src_tax = 'tax_invoice_fk'
                obj_final.save()
                # Only lines not already attached to a destination of this
                # kind are moved.
                qs = SalesLines.objects.filter(**{'pk__in': list_lines, '{}__isnull'.format(field_final): True})
                if qs:
                    for line in qs:
                        setattr(line, field_final, obj_final)
                        if complete:
                            # Copy amounts/labels from the source document
                            # columns into the destination columns.
                            setattr(line, 'description_{}'.format(field_final), getattr(line, 'description_{}'.format(field_src)))
                            setattr(line, 'price_base_{}'.format(field_final), getattr(line, 'price_base_{}'.format(field_src)))
                            setattr(line, 'discount_{}'.format(field_final), getattr(line, 'discount_{}'.format(field_src)))
                            setattr(line, 'tax_{}'.format(field_final), getattr(line, 'tax_{}'.format(field_src)))
                            setattr(line, 'equivalence_surcharge_{}'.format(field_final), getattr(line, 'equivalence_surcharge_{}'.format(field_src)))
                            setattr(line, 'tax_label_{}'.format(field_final), getattr(line, 'tax_label_{}'.format(field_src)))
                            if field_src_tax and field_final_tax:
                                setattr(line, '{}'.format(field_final_tax), getattr(line, '{}'.format(field_src_tax)))
                            setattr(line, 'notes_{}'.format(field_final), getattr(line, 'notes_{}'.format(field_src)))
                        line.save()
                        """
                        FALTA LOS PACKS
                        if hasattr(line_src, 'line_basket_option_sales') and line_src.line_basket_option_sales.exists():
                            for opt_src in line_src.line_basket_option_sales.all():
                                opt_dst = SalesLineOrderOption()
                                opt_dst.line_order = line_final
                                opt_dst.product_option = opt_src.product_option
                                opt_dst.product_final = opt_src.product_final
                                opt_dst.quantity = opt_src.quantity
                                opt_dst.save()
                        """
                    # lock the source document
                    obj_src.lock = True
                    obj_src.save()
                    # context['url'] = reverse('ordersaless_details', kwargs={'pk': order.pk})
                    context['url'] = "{}#/{}".format(reverse(url_reverse), obj_final.pk)
                    context['obj_final'] = obj_final
                else:
                    context['error'] = msg_error_relation
        else:
            # _("Hay lineas asignadas a pedidos")
            context['error'] = msg_error_relation
    else:
        # _('Budget not found')
        context['error'] = msg_error_not_found
    return context
@staticmethod
def create_order_from_budget_all(order, signed_obligatorily=True):
    """Move every live line of *order*'s budget into the order.

    Returns True when the resulting order carries as many live lines as
    the budget did.
    """
    budget_lines = order.budget.lines_sales.filter(removed=False)
    pks = list(budget_lines.values_list('pk', flat=True))
    outcome = SalesLines.create_order_from_budget(order.pk, pks, signed_obligatorily)
    new_order = outcome['obj_final']
    return budget_lines.count() == new_order.lines_sales.filter(removed=False).count()
@staticmethod
def create_order_from_budget(pk, list_lines, signed_obligatorily=True):
    """Create a SalesOrder from budget *pk* using the lines in *list_lines*.

    When only a subset of the budget's lines is selected, the budget is
    duplicated with that subset first and the order is built from the
    copy.  Returns the context dict produced by
    create_document_from_another, or {'error': ...} for an unsigned
    budget.
    """
    budget = SalesBasket.objects.get(pk=pk)
    # A budget normally has to be signed before it can become an order.
    if signed_obligatorily and not budget.signed:
        return {'error': _("Unsigned budget!")}
    if list_lines and len(list_lines) != SalesLines.objects.filter(removed=False, basket=pk).count():
        # Partial selection: work on a duplicated budget instead.
        partial_budget = budget.duplicate(list_lines)
        pk = partial_budget.pk
        list_lines = list(SalesLines.objects.filter(removed=False, basket=pk).values_list('pk', flat=True))
    return SalesLines.create_document_from_another(
        pk, list_lines,
        SalesBasket, SalesOrder,
        'CDNX_invoicing_ordersaless_list',
        _("Hay lineas asignadas a pedidos"),
        _('Budget not found'),
        _('Todas las lineas ya se han pasado a pedido'),
        True)
@staticmethod
def create_albaran_automatic(pk, list_lines):
    """Silently generate the delivery note (albaran) for the lines of
    order *pk* that are not already attached to one.
    """
    pending = SalesLines.objects.filter(pk__in=list_lines, removed=False).exclude(albaran__isnull=False)
    pending_pks = list(pending.values_list('pk', flat=True))
    SalesLines.create_albaran_from_order(pk, pending_pks)
@staticmethod
def create_albaran_from_order(pk, list_lines):
    """Create a SalesAlbaran from order *pk* with *list_lines*, then
    reserve (lock) the stock it ships.

    On stock shortage the freshly created albaran and its lines are
    removed and the error is returned in the context dict.
    """
    MODEL_SOURCE = SalesOrder
    MODEL_FINAL = SalesAlbaran
    url_reverse = 'CDNX_invoicing_albaransaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a albaranes")
    msg_error_not_found = _('Sales order not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a albaran')
    context = SalesLines.create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, url_reverse, msg_error_relation, msg_error_not_found, msg_error_line_not_found, False)
    # If there was not any error
    if 'error' not in context:
        # Get albaran
        albaran = context['obj_final']
        # Reserve stock
        try:
            with transaction.atomic():
                # For each line
                for line in albaran.lines_sales.all():
                    if line.product_unique:
                        # It is a unique product
                        pus = [line.product_unique, ]
                    else:
                        # It is not a unique product, get all of them
                        pus = line.product_final.products_unique.filter(stock_real__gt=F('stock_locked'))
                    # Reserve as many as we can
                    quantity = line.quantity
                    for pu in pus:
                        # Check how many are free and lock as many as we need
                        available = pu.stock_real - pu.stock_locked
                        # Choose how many we are going to lock
                        to_lock = min(available, quantity)
                        # Mark as locked
                        pu.stock_locked += to_lock
                        pu.save()
                        # Count down from quantity
                        quantity -= to_lock
                        # When we are done, break the loop
                        if not quantity:
                            break
                    # If we are not done
                    if quantity:
                        # Fail (IOError is used here purely as a local
                        # control-flow signal for the rollback below)
                        raise IOError("Not enought products for line '{}'!".format(line))
        except IOError as e:
            # Remove all line's from albaran before failing
            for line in albaran.lines_sales.all():
                line.delete()
            # Remove albaran before failing
            albaran.delete()
            # Set error
            context = {}
            # NOTE(review): the exception object itself is stored (not
            # str(e)) — confirm callers render it correctly.
            context['error'] = e
    # Return result
    return context
@staticmethod
def create_ticket_from_order(pk, list_lines):
    """Turn the selected lines of order *pk* into a SalesTicket,
    creating the albaran for any un-shipped lines first; both steps run
    in one transaction.
    """
    with transaction.atomic():
        SalesLines.create_albaran_automatic(pk, list_lines)
        return SalesLines.create_document_from_another(
            pk, list_lines,
            SalesOrder, SalesTicket,
            'CDNX_invoicing_ticketsaless_list',
            _("Hay lineas asignadas a ticket"),
            _('Sales order not found'),
            _('Todas las lineas ya se han pasado a ticket'),
            False)
@staticmethod
def create_ticket_from_slot(slot_pk):
    """Create or update the SalesTicket for a POS slot.

    Unpaid order lines of the slot are attached to an existing ticket
    (if they already share exactly one) or to a brand-new one; when
    every line is already paid, the slot's last ticket is looked up
    instead.  Returns {'error': ..., 'obj_final': ...}.
    """
    context = {
        "error": None,
        "obj_final": None,
    }
    # order line not paid
    line_orders = SalesLines.objects.filter(
        order__budget__pos_slot__pk=slot_pk,
        order__payment__isnull=True,
        order__cash_movements__isnull=True,
        order__budget__removed=False,
        order__removed=False,
        removed=False
    )
    if line_orders:
        # create o update ticket
        tickets = []
        for line in line_orders:
            if line.ticket:
                tickets.append(line.ticket)
        if len(set(tickets)) > 1:
            # Lines spread over several tickets: ambiguous, refuse.
            context['error'] = _(u'There are orders that are in several different tickets')
        else:
            if tickets:
                # update line
                with transaction.atomic():
                    # NOTE(review): tickets[0] is a SalesTicket instance
                    # passed as pk — confirm Django resolves it as intended.
                    ticket = SalesTicket.objects.get(pk=tickets[0], removed=False)
                    # There are already orders associated with a ticket
                    for line in line_orders:
                        if line.ticket is None:
                            # Attach the line, mirroring the order amounts
                            # onto the ticket columns.
                            line.ticket = ticket
                            line.tax_ticket = line.tax_order
                            line.discount_ticket = line.discount_order
                            line.description_ticket = line.description_order
                            line.notes_ticket = line.notes_order
                            # NOTE(review): quantity_ticket/quantity_order
                            # are not among the fields visible in this file
                            # chunk — confirm they exist on the model.
                            line.quantity_ticket = line.quantity_order
                            line.price_recommended_ticket = line.price_recommended_order
                            line.price_base_ticket = line.price_base_order
                            line.save()
                        else:
                            # update line
                            if line.quantity_ticket != line.quantity_order:
                                line.quantity_ticket = line.quantity_order
                                line.save()
            else:
                # new ticket
                with transaction.atomic():
                    ticket = SalesTicket()
                    ticket.billing_series = BillingSeries.objects.filter(default=True).first()
                    ticket.customer = line_orders[0].order.customer
                    ticket.save()
                    for line in line_orders:
                        line.ticket = ticket
                        line.tax_ticket = line.tax_order
                        line.discount_ticket = line.discount_order
                        line.description_ticket = line.description_order
                        line.notes_ticket = line.notes_order
                        line.quantity_ticket = line.quantity_order
                        line.price_recommended_ticket = line.price_recommended_order
                        line.price_base_ticket = line.price_base_order
                        line.save()
            context['obj_final'] = ticket
    else:
        # get ticket
        # No unpaid lines: find the last slot line and its ticket.
        line_order = SalesLines.objects.filter(
            order__budget__pos_slot__pk=slot_pk,
            order__budget__removed=False,
            order__removed=False,
            removed=False,
        ).last()
        ticket = SalesTicket.objects.filter(
            customer=line_order.order.customer,
            lines_sales=line_order,
            lines_sales__removed=False,
            removed=False
        ).first()
        if ticket:
            context['obj_final'] = ticket
        else:
            context['error'] = _("Ticket don't found")
    return context
@staticmethod
def create_invoice_from_order(pk, list_lines):
    """Turn the selected lines of order *pk* into a SalesInvoice,
    creating the albaran for any un-shipped lines first; both steps run
    in one transaction.
    """
    with transaction.atomic():
        SalesLines.create_albaran_automatic(pk, list_lines)
        return SalesLines.create_document_from_another(
            pk, list_lines,
            SalesOrder, SalesInvoice,
            'CDNX_invoicing_invoicesaless_list',
            _("Hay lineas asignadas a facturas"),
            _('Sales order not found'),
            _('Todas las lineas ya se han pasado a facturas'),
            False)
@staticmethod
def create_ticket_from_albaran(pk, list_lines):
    """Create a SalesTicket from albaran *pk* with the lines in
    *list_lines*; delegates to create_document_from_another.

    (The large unreachable triple-quoted block that followed the return
    statement — leftover commented-out code — has been removed.)
    """
    MODEL_SOURCE = SalesAlbaran
    MODEL_FINAL = SalesTicket
    url_reverse = 'CDNX_invoicing_ticketsaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a ticket")
    msg_error_not_found = _('Sales albaran not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a ticket')
    return SalesLines.create_document_from_another(pk, list_lines,
                                                   MODEL_SOURCE, MODEL_FINAL, url_reverse,
                                                   msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                                   False)
@staticmethod
def create_invoice_from_ticket(pk, list_lines):
    """Create a SalesInvoice from ticket *pk* with the lines in
    *list_lines*; delegates to create_document_from_another.

    Fixes: the decorator was applied twice (stacked @staticmethod, which
    wraps a staticmethod in a staticmethod), and the unreachable
    triple-quoted block after the return statement has been removed.
    """
    MODEL_SOURCE = SalesTicket
    MODEL_FINAL = SalesInvoice
    url_reverse = 'CDNX_invoicing_invoicesaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a facturas")
    msg_error_not_found = _('Sales ticket not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas')
    return SalesLines.create_document_from_another(pk, list_lines,
                                                   MODEL_SOURCE, MODEL_FINAL, url_reverse,
                                                   msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                                   False)
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/models_sales.py
|
SalesLines.create_invoice_from_ticket
|
python
|
def create_invoice_from_ticket(pk, list_lines):
MODEL_SOURCE = SalesTicket
MODEL_FINAL = SalesInvoice
url_reverse = 'CDNX_invoicing_invoicesaless_list'
# type_doc
msg_error_relation = _("Hay lineas asignadas a facturas")
msg_error_not_found = _('Sales ticket not found')
msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas')
return SalesLines.create_document_from_another(pk, list_lines,
MODEL_SOURCE, MODEL_FINAL, url_reverse,
msg_error_relation, msg_error_not_found, msg_error_line_not_found,
False)
"""
context = {}
if list_lines:
new_list_lines = SalesLines.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(
invoice__isnull=True
)
if new_list_lines:
new_pk = new_list_lines.first()
if new_pk:
context = SalesLines.create_invoice_from_order(
new_pk.order.pk,
[x['pk'] for x in new_list_lines.values('pk')])
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
"""
|
context = {}
if list_lines:
new_list_lines = SalesLines.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(
invoice__isnull=True
)
if new_list_lines:
new_pk = new_list_lines.first()
if new_pk:
context = SalesLines.create_invoice_from_order(
new_pk.order.pk,
[x['pk'] for x in new_list_lines.values('pk')])
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/models_sales.py#L2450-L2486
| null |
class SalesLines(CodenerixModel):
basket = models.ForeignKey(SalesBasket, related_name='lines_sales', verbose_name=_("Basket"), on_delete=models.CASCADE)
tax_basket_fk = models.ForeignKey(TypeTax, related_name='lines_sales_basket', verbose_name=_("Tax Basket"), on_delete=models.CASCADE)
order = models.ForeignKey(SalesOrder, related_name='lines_sales', verbose_name=_("Sales order"), on_delete=models.CASCADE, null=True, blank=True)
tax_order_fk = models.ForeignKey(TypeTax, related_name='lines_sales_order', verbose_name=_("Tax Sales order"), on_delete=models.CASCADE, null=True, blank=True)
albaran = models.ForeignKey(SalesAlbaran, related_name='lines_sales', verbose_name=_("Albaran"), on_delete=models.CASCADE, null=True, blank=True)
ticket = models.ForeignKey(SalesTicket, related_name='lines_sales', verbose_name=_("Ticket"), on_delete=models.CASCADE, null=True, blank=True)
tax_ticket_fk = models.ForeignKey(TypeTax, related_name='lines_sales_ticket', verbose_name=_("Tax Ticket"), on_delete=models.CASCADE, null=True, blank=True)
ticket_rectification = models.ForeignKey(SalesTicketRectification, related_name='lines_sales', verbose_name=_("Ticket rectification"), on_delete=models.CASCADE, null=True, blank=True)
invoice = models.ForeignKey(SalesInvoice, related_name='lines_sales', verbose_name=_("Invoice"), on_delete=models.CASCADE, null=True, blank=True)
tax_invoice_fk = models.ForeignKey(TypeTax, related_name='lines_sales_invoice', verbose_name=_("Tax Invoice"), on_delete=models.CASCADE, null=True, blank=True)
invoice_rectification = models.ForeignKey(SalesInvoiceRectification, related_name='lines_sales', verbose_name=_("Invoice rectification"), on_delete=models.CASCADE, null=True, blank=True)
product_final = models.ForeignKey(ProductFinal, related_name='lines_sales', verbose_name=_("Product"), on_delete=models.CASCADE)
product_unique = models.ForeignKey(ProductUnique, related_name='lines_sales', verbose_name=_("Product Unique"), on_delete=models.CASCADE, null=True, blank=True)
# invoiced is True if 'invoice' is not null
# invoiced = models.BooleanField(_("Invoiced"), blank=False, default=False)
# logical deletion
removed = models.BooleanField(_("Removed"), blank=False, default=False, editable=False)
quantity = models.FloatField(_("Quantity"), blank=False, null=False)
code = models.CharField(_("Code"), max_length=250, blank=True, null=True, default=None)
# ####
# desde el formulario se podrΓ‘ modificar el precio y la descripcion del producto
# se guarda el tax usado y la relacion para poder hacer un seguimiento
# ####
# info basket
price_recommended_basket = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_basket = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_basket = models.DecimalField(_("Price base"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_basket = models.DecimalField(_("Discount (%)"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_basket = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_basket = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_basket = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_basket = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_basket = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_basket = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_basket = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_basket = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_basket = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_basket = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info order
price_recommended_order = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_order = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_order = models.DecimalField(_("Price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_order = models.DecimalField(_("Discount (%)"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_order = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_order = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_order = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_order = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_order = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_order = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_order = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_order = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_order = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_order = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info albaran - basic
notes_albaran = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info ticket
price_recommended_ticket = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_ticket = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_ticket = models.DecimalField(_("Price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_ticket = models.DecimalField(_("Discount (%)"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_ticket = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_ticket = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_ticket = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_ticket = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_ticket = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_ticket = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_ticket = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_ticket = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_ticket = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_ticket = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info ticket rectification - basic
notes_ticket_rectification = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info invoice
price_recommended_invoice = models.DecimalField(_("Recomended price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
description_invoice = models.CharField(_("Description"), max_length=256, blank=True, null=True)
price_base_invoice = models.DecimalField(_("Price base"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES)
discount_invoice = models.DecimalField(_("Discount (%)"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
tax_invoice = models.FloatField(_("Tax (%)"), blank=True, null=True, default=0)
equivalence_surcharge_invoice = models.FloatField(_("Equivalence surcharge (%)"), blank=True, null=True, default=0)
tax_label_invoice = models.CharField(_("Tax Name"), max_length=250, blank=True, null=True)
notes_invoice = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
# info basket total
price_unit_invoice = models.DecimalField(_("unit_price"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
subtotal_invoice = models.DecimalField(_("Subtotal"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
discounts_invoice = models.DecimalField(_("Discounts"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
taxes_invoice = models.DecimalField(_("Taxes"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
equivalence_surcharges_invoice = models.DecimalField(_("Equivalence surcharge"), blank=True, null=True, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0)
total_invoice = models.DecimalField(_("Total"), blank=False, null=False, max_digits=CURRENCY_MAX_DIGITS, decimal_places=CURRENCY_DECIMAL_PLACES, default=0, editable=False)
# info invoice rectification - basic
notes_invoice_rectification = models.CharField(_("Notes"), max_length=256, blank=True, null=True)
def __str__(self):
    """Human-readable label: '<final product> - <quantity>'."""
    return f"{self.product_final} - {self.quantity}"
def __unicode__(self):
    """Python 2 compatibility shim: delegate to the str representation."""
    return str(self)
def __fields__(self, info):
    """Columns shown for this model in codenerix list views."""
    return [
        ('product_final', _("Product final")),
        ('product_unique', _("Product unique")),
        ('quantity', _("Quantity")),
    ]
def get_product_unique(self, quantity, pos=None):
    """Reserve stock of this line's product and return the allocations.

    Returns a list of dicts ``{'product_unique': <ProductUnique>,
    'quantity': <float>}`` covering *quantity* units.  When *pos* (a point
    of sale) is given -- or the basket has one -- candidates are restricted
    to salable storage zones of that POS.

    Raises:
        SalesLinesProductFinalIsSample: the product is a sample, not sellable.
        SalesLinesUniqueProductNotExists: no unique product with stock
            (only when stock is not enforced for the product).
        SalesLinesInsufficientStock: stock is enforced and the available
            unique products cannot cover *quantity*.

    BUG FIX: previously the ``force_stock is False`` branch built the
    allocation list but never returned it (the lone ``return`` sat in the
    other branch), so callers received ``None`` and crashed when iterating.
    A single return at the end now covers both branches.
    """
    if self.product_final.sample:
        raise SalesLinesProductFinalIsSample(_("This product can not be sold, it is marked as 'sample'"))
    products_unique = []
    with transaction.atomic():
        # Unique products of this final product with unlocked stock.
        qs = ProductUnique.objects.filter(
            product_final=self.product_final,
            stock_real__gt=0,
            stock_locked__lt=F('stock_real')
        )
        # Restrict to the salable zones of the relevant point of sale.
        if pos:
            qs = qs.filter(box__box_structure__zone__storage__in=pos.storage_stock.filter(storage_zones__salable=True))
        elif self.basket.pos:
            qs = qs.filter(box__box_structure__zone__storage__in=self.basket.pos.storage_stock.filter(storage_zones__salable=True))
        if self.product_final.product.force_stock is False:
            # Stock not enforced: any single unique product carries the
            # whole quantity, no locking needed.
            product_unique = qs.first()
            if product_unique is None:
                raise SalesLinesUniqueProductNotExists(_('Unique product not exists! No stock!'))
            products_unique = [
                {
                    'quantity': quantity,
                    'product_unique': product_unique
                }
            ]
        else:
            # Stock enforced: consume unique products until the requested
            # quantity is covered, locking what we take from each.
            for unique_product in qs:
                if quantity <= 0:
                    break
                stock_available = unique_product.stock_real - unique_product.stock_locked
                if stock_available > quantity:
                    # More stock than needed: split the unique product so we
                    # only lock the required amount.
                    stock_available = quantity
                    unique_product.duplicate(quantity)
                unique_product.locked_stock(stock_available)
                products_unique.append({
                    'product_unique': unique_product,
                    'quantity': stock_available
                })
                quantity -= stock_available
            if quantity > 0:
                raise SalesLinesInsufficientStock(_("Insufficient stock. Product: {}".format(self.product_final)))
    return products_unique
def save(self, *args, **kwargs):
    """Persist the line and keep every linked document consistent.

    All inside one transaction:
    * on creation: snapshot the product code and, when
      CDNX_INVOICING_FORCE_STOCK_IN_BUDGET is active (default True),
      reserve unique products -- splitting this line into cloned sibling
      lines when the quantity spans several unique products;
    * on update: forbid changing the product once the line is attached to
      an order/albaran/ticket/invoice, and (re)reserve stock when the
      product changed or the line just got attached to an order;
    * fill in missing tax / recommended price / description defaults per
      linked document level (basket, order, ticket, invoice) and recompute
      that level's subtotal columns;
    * after the row is saved, refresh the totals of every parent document
      whose amounts actually changed.
    """
    with transaction.atomic():
        if self.pk is None:
            # Brand new line: no previous state to diff against.
            line_old = None
            # Snapshot the product code (final-product code wins).
            if self.product_final.code:
                self.code = self.product_final.code
            else:
                self.code = self.product_final.product.code
            if self.product_unique is None:
                if getattr(settings, 'CDNX_INVOICING_FORCE_STOCK_IN_BUDGET', True):
                    # Reserve stock now: the first allocation stays on this
                    # instance, the rest become cloned sibling lines.
                    products_unique = self.get_product_unique(self.quantity)
                    first = True
                    for unique_product in products_unique:
                        if first:
                            first = False
                            self.quantity = unique_product['quantity']
                            self.product_unique = unique_product['product_unique']
                        else:
                            line = copy.copy(self)
                            line.pk = None
                            line.quantity = unique_product['quantity']
                            line.product_unique = unique_product['product_unique']
                            line.save()
        elif self.pk:
            # Existing line: load the stored row to detect what changed.
            line_old = SalesLines.objects.filter(pk=self.pk).first()
            if line_old:
                product_final_old = line_old.product_final
            else:
                product_final_old = None
            if self.product_final != product_final_old:
                if self.order or self.albaran or self.ticket or self.invoice:
                    raise SalesLinesNotModifiable(_('You can not modify product'))
                elif self.description_basket == '{}'.format(product_final_old):
                    # Description was the auto-generated one: let it be
                    # regenerated for the new product below.
                    self.description_basket = ''
                # The product may only change while the line is not yet in an
                # order, albaran, ticket or invoice; reset cached pricing so
                # it is refilled from the new product.
                self.price_recommended_basket = None
                self.tax_label_basket = None
                if getattr(settings, 'CDNX_INVOICING_FORCE_STOCK_IN_BUDGET', True):
                    # Re-reserve stock for the new product (may split lines).
                    products_unique = self.get_product_unique(self.quantity)
                    first = True
                    for unique_product in products_unique:
                        if first:
                            first = False
                            self.quantity = unique_product['quantity']
                            self.product_unique = unique_product['product_unique']
                        else:
                            line = copy.copy(self)
                            line.pk = None
                            line.quantity = unique_product['quantity']
                            line.product_unique = unique_product['product_unique']
                            line.save()
                else:
                    self.product_unique = None
            elif self.order and line_old.order is None:
                # The line is being attached to an order for the first time:
                # make sure a unique product with enough stock is locked.
                if self.product_final.product.force_stock:
                    if self.product_unique is None:
                        products_unique = self.get_product_unique(self.quantity)
                        first = True
                        for unique_product in products_unique:
                            if first:
                                first = False
                                self.quantity = unique_product['quantity']
                                self.product_unique = unique_product['product_unique']
                            else:
                                line = copy.copy(self)
                                line.pk = None
                                line.quantity = unique_product['quantity']
                                line.product_unique = unique_product['product_unique']
                                line.save()
                    else:
                        # A unique product is set but may no longer cover the
                        # quantity: re-allocate when short.
                        available = self.product_unique.stock_real - self.product_unique.stock_locked
                        if available < self.quantity:
                            products_unique = self.get_product_unique(self.quantity)
                            first = True
                            for unique_product in products_unique:
                                if first:
                                    first = False
                                    self.quantity = unique_product['quantity']
                                    self.product_unique = unique_product['product_unique']
                                else:
                                    line = copy.copy(self)
                                    line.pk = None
                                    line.quantity = unique_product['quantity']
                                    line.product_unique = unique_product['product_unique']
                                    line.save()
        # Per document level: fill missing tax FK/label/rate, equivalence
        # surcharge, recommended price and description from the product,
        # then recompute the money columns.  NOTE: 'basket' is a non-null
        # FK, so update_basket is always bound before its use below.
        if self.basket:
            if self.tax_basket_fk is None:
                self.tax_basket_fk = self.product_final.product.tax
            if not self.tax_label_basket:
                self.tax_label_basket = self.product_final.product.tax.name
            if not self.tax_basket:
                self.tax_basket = self.product_final.product.tax.tax
            if self.basket.get_customer().apply_equivalence_surcharge:
                self.equivalence_surcharge_basket = self.basket.get_customer().tax.recargo_equivalencia
            if self.price_recommended_basket is None:
                self.price_recommended_basket = self.product_final.price_base
            if not self.description_basket:
                self.description_basket = '{}'.format(self.product_final)
            update_basket = self.__update_subtotal_basket(line_old)
        if self.order:
            if self.tax_order_fk is None:
                self.tax_order_fk = self.product_final.product.tax
            if self.tax_label_order is None:
                self.tax_label_order = self.product_final.product.tax.name
            if not self.tax_order:
                self.tax_order = self.product_final.product.tax.tax
            if self.order.get_customer().apply_equivalence_surcharge:
                self.equivalence_surcharge_order = self.order.get_customer().tax.recargo_equivalencia
            if self.price_recommended_order is None:
                self.price_recommended_order = self.product_final.price_base
            if not self.description_order:
                self.description_order = '{}'.format(self.product_final)
            update_order = self.__update_subtotal_order(line_old)
        if self.ticket:
            if self.tax_ticket_fk is None:
                self.tax_ticket_fk = self.product_final.product.tax
            if self.tax_label_ticket is None:
                self.tax_label_ticket = self.product_final.product.tax.name
            if not self.tax_ticket:
                self.tax_ticket = self.product_final.product.tax.tax
            if self.ticket.get_customer().apply_equivalence_surcharge:
                self.equivalence_surcharge_ticket = self.ticket.get_customer().tax.recargo_equivalencia
            if self.price_recommended_ticket is None:
                self.price_recommended_ticket = self.product_final.price_base
            if not self.description_ticket:
                self.description_ticket = '{}'.format(self.product_final)
            update_ticket = self.__update_subtotal_ticket(line_old)
        if self.invoice:
            if self.tax_invoice_fk is None:
                self.tax_invoice_fk = self.product_final.product.tax
            if self.tax_label_invoice is None:
                self.tax_label_invoice = self.product_final.product.tax.name
            if not self.tax_invoice:
                self.tax_invoice = self.product_final.product.tax.tax
            if self.invoice.get_customer().apply_equivalence_surcharge:
                self.equivalence_surcharge_invoice = self.invoice.get_customer().tax.recargo_equivalencia
            if self.price_recommended_invoice is None:
                self.price_recommended_invoice = self.product_final.price_base
            if not self.description_invoice:
                self.description_invoice = '{}'.format(self.product_final)
            update_invoice = self.__update_subtotal_invoice(line_old)
        # NOTE(review): super(self._meta.model, self) assumes no proxy or
        # subclass overrides save -- confirm if the model is ever subclassed.
        result = super(self._meta.model, self).save(*args, **kwargs)
        # Refresh parent document totals only when the line's amounts
        # actually changed at that level.
        if update_basket:
            self.basket.update_totales()
        if self.order and update_order:
            self.order.update_totales()
        if self.albaran:
            self.albaran.update_totales()
        if self.ticket and update_ticket:
            self.ticket.update_totales()
        if self.ticket_rectification:
            self.ticket_rectification.update_totales()
        if self.invoice and update_invoice:
            self.invoice.update_totales()
        if self.invoice_rectification:
            self.invoice_rectification.update_totales()
        return result
def __update_subtotal_basket(self, line_old):
    """Recompute the basket-level money columns of this line.

    Returns True when any recomputed amount differs from *line_old*
    (always True when line_old is None), i.e. the basket totals are stale.
    """
    quantity = Decimal(self.quantity)
    base = quantity * self.price_base_basket
    self.subtotal_basket = base
    self.discounts_basket = round_decimal(base * self.discount_basket / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.taxes_basket = round_decimal(base * Decimal(self.tax_basket) / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.equivalence_surcharges_basket = round_decimal(base * Decimal(self.equivalence_surcharge_basket) / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.total_basket = base + self.taxes_basket - self.discounts_basket
    self.price_unit_basket = self.total_basket / quantity if self.quantity else Decimal('0')
    if line_old is None:
        return True
    watched = ('subtotal_basket', 'discounts_basket', 'taxes_basket',
               'equivalence_surcharges_basket', 'total_basket')
    return any(getattr(self, name) != getattr(line_old, name) for name in watched)
def __update_subtotal_order(self, line_old):
    """Recompute the order-level money columns of this line.

    Returns True when any recomputed amount differs from *line_old*
    (always True when line_old is None), i.e. the order totals are stale.
    """
    qty = Decimal(self.quantity)
    subtotal = qty * self.price_base_order
    self.subtotal_order = subtotal
    self.discounts_order = round_decimal(subtotal * self.discount_order / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.taxes_order = round_decimal(subtotal * Decimal(self.tax_order) / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.equivalence_surcharges_order = round_decimal(subtotal * Decimal(self.equivalence_surcharge_order) / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.total_order = subtotal + self.taxes_order - self.discounts_order
    if self.quantity:
        self.price_unit_order = self.total_order / qty
    else:
        self.price_unit_order = Decimal('0')
    if line_old is None:
        return True
    return any(
        getattr(self, field) != getattr(line_old, field)
        for field in ('subtotal_order', 'discounts_order', 'taxes_order',
                      'equivalence_surcharges_order', 'total_order')
    )
def __update_subtotal_ticket(self, line_old):
    """Recompute the ticket-level money columns of this line.

    Returns True when any recomputed amount differs from *line_old*
    (always True when line_old is None), i.e. the ticket totals are stale.
    """
    units = Decimal(self.quantity)
    gross = units * self.price_base_ticket
    self.subtotal_ticket = gross
    self.discounts_ticket = round_decimal(gross * self.discount_ticket / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.taxes_ticket = round_decimal(gross * Decimal(self.tax_ticket) / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.equivalence_surcharges_ticket = round_decimal(gross * Decimal(self.equivalence_surcharge_ticket) / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.total_ticket = gross + self.taxes_ticket - self.discounts_ticket
    self.price_unit_ticket = self.total_ticket / units if self.quantity else Decimal('0')
    if line_old is None:
        return True
    changed_fields = ('subtotal_ticket', 'discounts_ticket', 'taxes_ticket',
                      'equivalence_surcharges_ticket', 'total_ticket')
    return any(getattr(self, f) != getattr(line_old, f) for f in changed_fields)
def __update_subtotal_invoice(self, line_old):
    """Recompute the invoice-level money columns of this line.

    Returns True when any recomputed amount differs from *line_old*
    (always True when line_old is None), i.e. the invoice totals are stale.
    """
    amount = Decimal(self.quantity)
    net = amount * self.price_base_invoice
    self.subtotal_invoice = net
    self.discounts_invoice = round_decimal(net * self.discount_invoice / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.taxes_invoice = round_decimal(net * Decimal(self.tax_invoice) / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.equivalence_surcharges_invoice = round_decimal(net * Decimal(self.equivalence_surcharge_invoice) / Decimal(100), CURRENCY_DECIMAL_PLACES)
    self.total_invoice = net + self.taxes_invoice - self.discounts_invoice
    if self.quantity:
        self.price_unit_invoice = self.total_invoice / amount
    else:
        self.price_unit_invoice = Decimal('0')
    if line_old is None:
        return True
    return any(
        getattr(self, column) != getattr(line_old, column)
        for column in ('subtotal_invoice', 'discounts_invoice', 'taxes_invoice',
                       'equivalence_surcharges_invoice', 'total_invoice')
    )
def lock_delete(self, request=None):
    """Veto deletion of a line still referenced by downstream documents.

    A line cannot be deleted once it belongs to an invoice, ticket,
    albaran or order, nor while its basket is locked; the returned message
    explains why.  Otherwise defer to the base-class check.
    """
    if self.invoice:
        return _('Cannot delete, it is related to invoice')
    if self.ticket:
        return _('Cannot delete, it is related to ticket')
    if self.albaran:
        return _('Cannot delete, it is related to albaran')
    if self.order:
        return _('Cannot delete, it is related to order')
    if self.basket.lock:
        return _('Cannot delete, the order is lock')
    return super().lock_delete()
def delete(self):
    """Hard-delete the line, or only flag it removed when the
    CDNX_INVOICING_LOGICAL_DELETION setting enables logical deletion."""
    with transaction.atomic():
        logical = getattr(settings, 'CDNX_INVOICING_LOGICAL_DELETION', False)
        if logical is False:
            return super(SalesLines, self).delete()
        self.removed = True
        self.save()
def __limitQ__(self, info):
    """Default list filter: hide logically removed lines."""
    not_removed = Q(removed=False)
    return {'removed': not_removed}
@staticmethod
def delete_doc(doc):
    """Remove *doc*'s sales lines, detaching them from *doc*'s level first.

    For every document type except the basket, each live line is cloned
    without the FK to the document being removed (so history at the other
    levels survives) and the original line is deleted.  For a basket the
    lines are simply deleted.

    Raises:
        SalesLinesNotDelete: lines are still referenced by a downstream
            document (order/albaran/ticket/invoice or a rectification).

    BUG FIX: the basket branch previously called
    ``doc.lines_sales.objects.filter(...)`` -- ``lines_sales`` is already a
    related manager, so the extra ``.objects`` raised AttributeError.
    """
    def _detach_lines(field):
        # Clone every live line without the given FK, then delete the
        # original, all atomically.
        with transaction.atomic():
            for line in doc.lines_sales.filter(removed=False):
                nline = copy.copy(line)
                nline.pk = None
                setattr(nline, field, None)
                nline.save()
                line.delete()

    if isinstance(doc, SalesBasket):
        used = doc.lines_sales.filter(Q(order__isnull=False) | Q(albaran__isnull=False) | Q(ticket__isnull=False) | Q(invoice__isnull=False)).exists()
        if used:
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con pedido, albaran, ticket o factura'))
        with transaction.atomic():
            doc.lines_sales.filter(removed=False).delete()
    elif isinstance(doc, SalesOrder):
        used = doc.lines_sales.filter(Q(albaran__isnull=False) | Q(ticket__isnull=False) | Q(invoice__isnull=False)).exists()
        if used:
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con albaran, ticket o factura'))
        _detach_lines('order')
    elif isinstance(doc, SalesAlbaran):
        used = doc.lines_sales.filter(Q(ticket__isnull=False) | Q(invoice__isnull=False)).exists()
        if used:
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado ticket o factura'))
        _detach_lines('albaran')
    elif isinstance(doc, SalesTicket):
        used = doc.lines_sales.filter(Q(ticket_rectification__isnull=False)).exists()
        if used:
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con ticket rectificativos'))
        _detach_lines('ticket')
    elif isinstance(doc, SalesTicketRectification):
        _detach_lines('ticket_rectification')
    elif isinstance(doc, SalesInvoice):
        used = doc.lines_sales.filter(Q(invoice_rectification__isnull=False)).exists()
        if used:
            raise SalesLinesNotDelete(_('No se puede eliminar el presupuesto al estar relacionado con factura rectificativos'))
        _detach_lines('invoice')
    elif isinstance(doc, SalesInvoiceRectification):
        _detach_lines('invoice_rectification')
@staticmethod
def create_document_from_another(pk, list_lines,
                                 MODEL_SOURCE, MODEL_FINAL,
                                 url_reverse,
                                 msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                 unique):
    """Create a MODEL_FINAL document from lines of a MODEL_SOURCE document.

    Args:
        pk: pk of the source document.
        list_lines: pks of the source SalesLines to carry over.
        MODEL_SOURCE: model class of the source document.
        MODEL_FINAL: model class of the document to create.
        url_reverse: URL name of the destination list view.
        msg_error_relation: error message when lines are already linked
            at the target level.
        msg_error_not_found: error message when the source is missing.
        msg_error_line_not_found: currently unused -- NOTE(review): kept
            only for signature compatibility with callers.
        unique: when True, refuse to create the document if any selected
            line is already linked at the target level.

    Returns a context dict: 'url' and 'obj_final' on success, otherwise
    'error' with one of the messages above.
    """
    context = {}
    obj_src = MODEL_SOURCE.objects.filter(pk=pk).first()
    if list_lines and obj_src:
        # parse to int
        list_lines = [int(x) for x in list_lines]
        obj_final = MODEL_FINAL()
        # 'complete' => pricing/description columns are copied to the new
        # level; rectifications and albaranes only get the FK link.
        complete = True
        field_final_tax = None
        if isinstance(obj_final, SalesOrder):
            obj_final.budget = obj_src
            field_final = 'order'
            field_final_tax = 'tax_order_fk'
        elif isinstance(obj_final, SalesAlbaran):
            field_final = 'albaran'
            # NOTE(review): SalesLines defines no 'tax_albaran_fk' field in
            # the visible model -- confirm this value is ever used.
            field_final_tax = 'tax_albaran_fk'
            complete = False
        elif isinstance(obj_final, SalesTicket):
            field_final = 'ticket'
            field_final_tax = 'tax_ticket_fk'
        elif isinstance(obj_final, SalesTicketRectification):
            field_final = 'ticket_rectification'
            complete = False
        elif isinstance(obj_final, SalesInvoice):
            field_final = 'invoice'
            field_final_tax = 'tax_invoice_fk'
        elif isinstance(obj_final, SalesInvoiceRectification):
            field_final = 'invoice_rectification'
            complete = False
        # list of lines objects
        if unique:
            # Refuse when any selected line already has the target FK set.
            create = not SalesLines.objects.filter(**{
                "pk__in": list_lines,
                "{}__isnull".format(field_final): False
            }).exists()
        else:
            create = True
        """
        si debiendo ser filas unicas no las encuentra en el modelo final, se crea el nuevo documento
        """
        if create:
            with transaction.atomic():
                # Resolve the customer: directly from the source or, for
                # documents without one, via the first live line's order.
                if hasattr(obj_src, 'customer'):
                    customer = obj_src.customer
                else:
                    customer = obj_src.lines_sales.filter(removed=False).first().order.customer
                obj_final.customer = customer
                obj_final.date = datetime.datetime.now()
                obj_final.billing_series = obj_src.billing_series
                # Which level of the source lines feeds the new document.
                field_src_tax = None
                if isinstance(obj_src, SalesBasket):
                    field_src = 'basket'
                    field_src_tax = 'tax_basket_fk'
                elif isinstance(obj_src, SalesOrder) or isinstance(obj_src, SalesAlbaran):
                    field_src = 'order'
                    field_src_tax = 'tax_order_fk'
                elif isinstance(obj_src, SalesTicket) or isinstance(obj_src, SalesTicketRectification):
                    field_src = 'ticket'
                    field_src_tax = 'tax_ticket_fk'
                elif isinstance(obj_src, SalesInvoice) or isinstance(obj_src, SalesInvoiceRectification):
                    field_src = 'invoice'
                    field_src_tax = 'tax_invoice_fk'
                obj_final.save()
                # Only lines not yet linked at the target level are moved.
                qs = SalesLines.objects.filter(**{'pk__in': list_lines, '{}__isnull'.format(field_final): True})
                if qs:
                    for line in qs:
                        setattr(line, field_final, obj_final)
                        if complete:
                            # Copy pricing/description columns from the
                            # source level to the target level.
                            setattr(line, 'description_{}'.format(field_final), getattr(line, 'description_{}'.format(field_src)))
                            setattr(line, 'price_base_{}'.format(field_final), getattr(line, 'price_base_{}'.format(field_src)))
                            setattr(line, 'discount_{}'.format(field_final), getattr(line, 'discount_{}'.format(field_src)))
                            setattr(line, 'tax_{}'.format(field_final), getattr(line, 'tax_{}'.format(field_src)))
                            setattr(line, 'equivalence_surcharge_{}'.format(field_final), getattr(line, 'equivalence_surcharge_{}'.format(field_src)))
                            setattr(line, 'tax_label_{}'.format(field_final), getattr(line, 'tax_label_{}'.format(field_src)))
                            if field_src_tax and field_final_tax:
                                setattr(line, '{}'.format(field_final_tax), getattr(line, '{}'.format(field_src_tax)))
                            setattr(line, 'notes_{}'.format(field_final), getattr(line, 'notes_{}'.format(field_src)))
                        line.save()
                        """
                        FALTA LOS PACKS
                        if hasattr(line_src, 'line_basket_option_sales') and line_src.line_basket_option_sales.exists():
                            for opt_src in line_src.line_basket_option_sales.all():
                                opt_dst = SalesLineOrderOption()
                                opt_dst.line_order = line_final
                                opt_dst.product_option = opt_src.product_option
                                opt_dst.product_final = opt_src.product_final
                                opt_dst.quantity = opt_src.quantity
                                opt_dst.save()
                        """
                    # lock the source document
                    obj_src.lock = True
                    obj_src.save()
                    # context['url'] = reverse('ordersaless_details', kwargs={'pk': order.pk})
                    context['url'] = "{}#/{}".format(reverse(url_reverse), obj_final.pk)
                    context['obj_final'] = obj_final
                else:
                    context['error'] = msg_error_relation
        else:
            # _("Hay lineas asignadas a pedidos")
            context['error'] = msg_error_relation
    else:
        # _('Budget not found')
        context['error'] = msg_error_not_found
    return context
@staticmethod  # ok
def create_order_from_budget_all(order, signed_obligatorily=True):
    """Push every live line of *order*'s budget into the order.

    Returns True when all budget lines ended up on the created order.
    """
    budget_lines = order.budget.lines_sales.filter(removed=False)
    pks = list(budget_lines.values_list('pk', flat=True))
    outcome = SalesLines.create_order_from_budget(order.pk, pks, signed_obligatorily)
    created_order = outcome['obj_final']
    return budget_lines.count() == created_order.lines_sales.filter(removed=False).count()
@staticmethod  # ok
def create_order_from_budget(pk, list_lines, signed_obligatorily=True):
    """Create a SalesOrder from budget *pk* using the selected lines.

    When *signed_obligatorily* is True the budget must be signed first.
    If only a subset of the budget's live lines is selected, the budget is
    duplicated and the order is created from the duplicate so each budget
    maps fully onto one order.  Returns the context dict from
    create_document_from_another ('url'/'obj_final' or 'error').
    """
    MODEL_SOURCE = SalesBasket
    MODEL_FINAL = SalesOrder
    url_reverse = 'CDNX_invoicing_ordersaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a pedidos")
    msg_error_not_found = _('Budget not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a pedido')
    budget = MODEL_SOURCE.objects.get(pk=pk)
    if signed_obligatorily and not budget.signed:
        # The budget must be signed before an order can be generated.
        context = {}
        context['error'] = _("Unsigned budget!")
        return context
    else:
        # If only some lines were selected, duplicate the budget and link
        # the order to the new (partial) budget instead.
        if list_lines and len(list_lines) != SalesLines.objects.filter(removed=False, basket=pk).count():
            new_budget = budget.duplicate(list_lines)
            pk = new_budget.pk
            list_lines = [x[0] for x in SalesLines.objects.filter(removed=False, basket=pk).values_list('pk')]
        return SalesLines.create_document_from_another(pk, list_lines,
                                                       MODEL_SOURCE, MODEL_FINAL, url_reverse,
                                                       msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                                       True)
@staticmethod  # ok
def create_albaran_automatic(pk, list_lines):
    """Automatically create the delivery note (albaran) for an order.

    Only the lines from ``list_lines`` that are still active and not yet
    attached to an albaran are forwarded to
    ``SalesLines.create_albaran_from_order``.

    :param pk: primary key of the source sales order.
    :param list_lines: candidate line pks to move into the albaran.
    """
    # flat=True yields bare pks instead of 1-tuples, so no manual unpacking
    lines_to_albaran = list(
        SalesLines.objects.filter(
            pk__in=list_lines, removed=False
        ).exclude(
            albaran__isnull=False
        ).values_list('pk', flat=True)
    )
    SalesLines.create_albaran_from_order(pk, lines_to_albaran)
@staticmethod
def create_albaran_from_order(pk, list_lines):
    """Create an albaran (delivery note) from order lines and reserve stock.

    After the albaran document is created, each of its lines locks stock on
    the matching ``ProductUnique`` records inside one transaction; if any
    line cannot be fully reserved, the albaran and its lines are deleted
    and ``context['error']`` is set.

    :param pk: primary key of the source ``SalesOrder``.
    :param list_lines: pks of the order lines to move into the albaran.
    :return: dict with ``url``/``obj_final`` on success, or ``error``.
    """
    MODEL_SOURCE = SalesOrder
    MODEL_FINAL = SalesAlbaran
    url_reverse = 'CDNX_invoicing_albaransaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a albaranes")
    msg_error_not_found = _('Sales order not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a albaran')
    context = SalesLines.create_document_from_another(pk, list_lines, MODEL_SOURCE, MODEL_FINAL, url_reverse, msg_error_relation, msg_error_not_found, msg_error_line_not_found, False)
    # If there was not any error
    if 'error' not in context:
        # Get albaran
        albaran = context['obj_final']
        # Reserve stock
        try:
            with transaction.atomic():
                # For each line
                for line in albaran.lines_sales.all():
                    if line.product_unique:
                        # It is a unique product
                        pus = [line.product_unique, ]
                    else:
                        # It is not a unique product, get all of them
                        pus = line.product_final.products_unique.filter(stock_real__gt=F('stock_locked'))
                    # Reserve as many as we can
                    quantity = line.quantity
                    for pu in pus:
                        # Check how many are free and lock as many as we need
                        available = pu.stock_real - pu.stock_locked
                        # Choose how many we are going to lock
                        to_lock = min(available, quantity)
                        # Mark as locked
                        pu.stock_locked += to_lock
                        pu.save()
                        # Count down from quantity
                        quantity -= to_lock
                        # When we are done, break bucle
                        if not quantity:
                            break
                    # If we are not done
                    if quantity:
                        # Fail: raising IOError aborts the atomic block, rolling
                        # back every stock lock made so far
                        raise IOError("Not enought products for line '{}'!".format(line))
        except IOError as e:
            # Remove all line's from albaran before failing
            for line in albaran.lines_sales.all():
                line.delete()
            # Remove albaran before failing
            albaran.delete()
            # Set error
            context = {}
            context['error'] = e
    # Return result
    return context
@staticmethod
def create_ticket_from_order(pk, list_lines):
    """Generate a sales ticket from the given order lines.

    An albaran is first created automatically for any line that still
    lacks one; both steps run inside a single database transaction.
    """
    source_model = SalesOrder
    target_model = SalesTicket
    list_url = 'CDNX_invoicing_ticketsaless_list'
    err_relation = _("Hay lineas asignadas a ticket")
    err_not_found = _('Sales order not found')
    err_line_not_found = _('Todas las lineas ya se han pasado a ticket')
    with transaction.atomic():
        SalesLines.create_albaran_automatic(pk, list_lines)
        return SalesLines.create_document_from_another(
            pk, list_lines,
            source_model, target_model, list_url,
            err_relation, err_not_found, err_line_not_found,
            False)
@staticmethod
def create_ticket_from_slot(slot_pk):
    """Create or update the sales ticket associated with a POS slot.

    Unpaid order lines of the slot are attached to an existing ticket
    (or a fresh one); if all lines are already paid, the existing ticket
    is simply looked up.

    :param slot_pk: primary key of the POS slot.
    :return: dict with keys ``error`` and ``obj_final`` (the ticket).
    """
    context = {
        "error": None,
        "obj_final": None,
    }
    # order lines that are not paid yet
    line_orders = SalesLines.objects.filter(
        order__budget__pos_slot__pk=slot_pk,
        order__payment__isnull=True,
        order__cash_movements__isnull=True,
        order__budget__removed=False,
        order__removed=False,
        removed=False
    )
    if line_orders:
        # create or update the ticket
        tickets = []
        for line in line_orders:
            if line.ticket:
                tickets.append(line.ticket)
        if len(set(tickets)) > 1:
            context['error'] = _(u'There are orders that are in several different tickets')
        else:
            if tickets:
                # lines already belong to one ticket: attach the missing ones
                with transaction.atomic():
                    ticket = SalesTicket.objects.get(pk=tickets[0], removed=False)
                    # There are already orders associated with a ticket
                    for line in line_orders:
                        if line.ticket is None:
                            # copy the *_order snapshot fields onto the ticket side
                            line.ticket = ticket
                            line.tax_ticket = line.tax_order
                            line.discount_ticket = line.discount_order
                            line.description_ticket = line.description_order
                            line.notes_ticket = line.notes_order
                            line.quantity_ticket = line.quantity_order
                            line.price_recommended_ticket = line.price_recommended_order
                            line.price_base_ticket = line.price_base_order
                            line.save()
                        else:
                            # already on the ticket: only sync quantity if it changed
                            if line.quantity_ticket != line.quantity_order:
                                line.quantity_ticket = line.quantity_order
                                line.save()
            else:
                # no ticket yet: create one and attach every line
                with transaction.atomic():
                    ticket = SalesTicket()
                    ticket.billing_series = BillingSeries.objects.filter(default=True).first()
                    ticket.customer = line_orders[0].order.customer
                    ticket.save()
                    for line in line_orders:
                        line.ticket = ticket
                        line.tax_ticket = line.tax_order
                        line.discount_ticket = line.discount_order
                        line.description_ticket = line.description_order
                        line.notes_ticket = line.notes_order
                        line.quantity_ticket = line.quantity_order
                        line.price_recommended_ticket = line.price_recommended_order
                        line.price_base_ticket = line.price_base_order
                        line.save()
            context['obj_final'] = ticket
    else:
        # every line is paid: just locate the ticket of the slot's last line
        # NOTE(review): line_order may be None when the slot has no lines at
        # all, which would make `.order` raise — confirm callers guarantee lines
        line_order = SalesLines.objects.filter(
            order__budget__pos_slot__pk=slot_pk,
            order__budget__removed=False,
            order__removed=False,
            removed=False,
        ).last()
        ticket = SalesTicket.objects.filter(
            customer=line_order.order.customer,
            lines_sales=line_order,
            lines_sales__removed=False,
            removed=False
        ).first()
        if ticket:
            context['obj_final'] = ticket
        else:
            context['error'] = _("Ticket don't found")
    return context
@staticmethod
def create_invoice_from_order(pk, list_lines):
    """Generate a sales invoice from the given order lines.

    An albaran is first created automatically for any line that still
    lacks one; both steps run inside a single database transaction.
    """
    source_model = SalesOrder
    target_model = SalesInvoice
    list_url = 'CDNX_invoicing_invoicesaless_list'
    err_relation = _("Hay lineas asignadas a facturas")
    err_not_found = _('Sales order not found')
    err_line_not_found = _('Todas las lineas ya se han pasado a facturas')
    with transaction.atomic():
        SalesLines.create_albaran_automatic(pk, list_lines)
        return SalesLines.create_document_from_another(
            pk, list_lines,
            source_model, target_model, list_url,
            err_relation, err_not_found, err_line_not_found,
            False)
@staticmethod
def create_ticket_from_albaran(pk, list_lines):
    """Generate a sales ticket from the given albaran lines.

    :param pk: primary key of the source ``SalesAlbaran``.
    :param list_lines: pks of the albaran lines to move into the ticket.
    :return: dict with ``url``/``obj_final`` on success, or ``error``.
    """
    # FIX: removed the unreachable legacy implementation that was kept as a
    # dead triple-quoted string after the return statement.
    MODEL_SOURCE = SalesAlbaran
    MODEL_FINAL = SalesTicket
    url_reverse = 'CDNX_invoicing_ticketsaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a ticket")
    msg_error_not_found = _('Sales albaran not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a ticket')
    return SalesLines.create_document_from_another(pk, list_lines,
                                                   MODEL_SOURCE, MODEL_FINAL, url_reverse,
                                                   msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                                   False)
@staticmethod
def create_invoice_from_albaran(pk, list_lines):
    """Generate a sales invoice from the given albaran lines.

    :param pk: primary key of the source ``SalesAlbaran``.
    :param list_lines: pks of the albaran lines to move into the invoice.
    :return: dict with ``url``/``obj_final`` on success, or ``error``.
    """
    # FIX: removed the unreachable legacy implementation that was kept as a
    # dead triple-quoted string after the return statement.
    MODEL_SOURCE = SalesAlbaran
    MODEL_FINAL = SalesInvoice
    url_reverse = 'CDNX_invoicing_invoicesaless_list'
    # type_doc
    msg_error_relation = _("Hay lineas asignadas a facturas")
    msg_error_not_found = _('Sales albaran not found')
    msg_error_line_not_found = _('Todas las lineas ya se han pasado a facturas')
    return SalesLines.create_document_from_another(pk, list_lines,
                                                   MODEL_SOURCE, MODEL_FINAL, url_reverse,
                                                   msg_error_relation, msg_error_not_found, msg_error_line_not_found,
                                                   False)
@staticmethod
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/views_purchases.py
|
LineAlbaranCreate.form_valid
|
python
|
def form_valid(self, form):
    """Validate and save a new purchase albaran line.

    NOTE(review): the unconditional ``raise Exception("revisar StorageBatch")``
    below is a developer placeholder, so everything after it (the feature
    special validation and the atomic save) is currently unreachable.
    """
    if self.__pk:
        # attach the line to its parent albaran
        obj = PurchasesAlbaran.objects.get(pk=self.__pk)
        self.request.albaran = obj
        form.instance.albaran = obj
    form.instance.validator_user = self.request.user
    raise Exception("revisar StorageBatch")
    # check whether the purchased product requires a special attribute value
    product_final = ProductFinal.objects.filter(pk=form.data['product']).first()
    feature_special_value = None
    if not product_final:
        errors = form._errors.setdefault("feature_special_value", ErrorList())
        errors.append(_("Product not selected"))
        return super(LineAlbaranCreate, self).form_invalid(form)
    elif product_final.product.feature_special:
        # feature-special information is mandatory for this product
        if 'feature_special_value' not in form.data or not form.data['feature_special_value']:
            errors = form._errors.setdefault("feature_special_value", ErrorList())
            errors.append(_("Product needs information of feature special"))
            return super(LineAlbaranCreate, self).form_invalid(form)
        else:
            # one value per line of the textarea, deduplicated
            feature_special_value = list(set(filter(None, form.data['feature_special_value'].split('\n'))))
            try:
                quantity = int(float(form.data['quantity']))
            except ValueError:
                errors = form._errors.setdefault("quantity", ErrorList())
                errors.append(_("Quantity is not valid"))
                return super(LineAlbaranCreate, self).form_invalid(form)
            if product_final.product.feature_special.unique:
                # when the feature special is flagged 'unique' there must be
                # exactly as many values as units
                if len(feature_special_value) != quantity:
                    errors = form._errors.setdefault("feature_special_value", ErrorList())
                    errors.append(_("Quantity and values of feature special not equals"))
                    return super(LineAlbaranCreate, self).form_invalid(form)
                # none of the given special values may already exist in the system
                elif ProductUnique.objects.filter(product_final=product_final, value__in=feature_special_value).exists():
                    errors = form._errors.setdefault("feature_special_value", ErrorList())
                    errors.append(_("Some value of feature special exists"))
                    return super(LineAlbaranCreate, self).form_invalid(form)
            elif len(feature_special_value) != 1:
                # non-unique feature special: all units share one value
                errors = form._errors.setdefault("feature_special_value", ErrorList())
                errors.append(_("The special feature must be unique for all products"))
                return super(LineAlbaranCreate, self).form_invalid(form)
    try:
        with transaction.atomic():
            # save line albaran
            result = super(LineAlbaranCreate, self).form_valid(form)
            raise Exception("Cambiar ProductStock por ProductUnique")
            # the old ProductStock implementation is kept below as dead text
            """
            if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
                # prepare stock
                ps = ProductStock()
                ps.product_final = product_final
                ps.line_albaran = self.object
                ps.batch = batch
                # save stock
                ps.quantity = self.object.quantity
                ps.save()
                if feature_special_value:
                    # prepare product feature special
                    if product_final.product.feature_special.unique:
                        pfs = ProductUnique()
                        pfs.product_final = product_final
                        # save product featureSpecial and stock
                        for fs in feature_special_value:
                            pfs.pk = None
                            pfs.value = fs
                            pfs.save()
                    else:
                        pfs = ProductUnique.objects.filter(
                            value=feature_special_value[0],
                            product_final=product_final
                        ).first()
                        if pfs:
                            pfs.stock_real += self.object.quantity
                        else:
                            pfs = ProductUnique()
                            pfs.product_final = product_final
                            pfs.value = feature_special_value[0]
                            pfs.stock_real = self.object.quantity
                        pfs.save()
                else:
                    # product unique by default
                    pfs = ProductUnique.objects.filter(product_final=product_final).first()
                    if not pfs:
                        pfs = ProductUnique()
                        pfs.product_final = product_final
                        pfs.stock_real = self.object.quantity
                    else:
                        pfs.stock_real += self.object.quantity
                    pfs.save()
            """
            return result
    except IntegrityError as e:
        errors = form._errors.setdefault("product", ErrorList())
        errors.append(_("Integrity Error: {}".format(e)))
        return super(LineAlbaranCreate, self).form_invalid(form)
|
batch = StorageBatch.objects.filter(pk=form.data['batch']).first()
if not batch:
errors = form._errors.setdefault("batch", ErrorList())
errors.append(_("Batch invalid"))
return super(LineAlbaranCreate, self).form_invalid(form)
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/views_purchases.py#L669-L780
| null |
class LineAlbaranCreate(GenLineAlbaranUrl, GenCreate):
    """Create view for purchase albaran lines."""

    model = PurchasesLineAlbaran
    form_class = LineAlbaranForm

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Remember the parent albaran's pk (if given) for later handlers.
        self.__pk = kwargs.get('pk')
        return super(LineAlbaranCreate, self).dispatch(*args, **kwargs)
|
codenerix/django-codenerix-invoicing
|
codenerix_invoicing/views_purchases.py
|
LineAlbaranUpdate.get_form
|
python
|
def get_form(self, form_class=None):
    """Build the line form.

    NOTE(review): the unconditional raise below is a developer placeholder
    ("change ProductStock to ProductUnique"); the trailing return is
    currently unreachable.
    """
    form = super(LineAlbaranUpdate, self).get_form(form_class)
    raise Exception("Cambiar ProductStock por ProductUnique")
    return form
|
ps = ProductStock.objects.filter(line_albaran=self.object).first()
if ps:
# initial field
form.fields['storage'].initial = ps.batch.zone.storage
form.fields['zone'].initial = ps.batch.zone
form.fields['batch'].initial = ps.batch
|
train
|
https://github.com/codenerix/django-codenerix-invoicing/blob/7db5c62f335f9215a8b308603848625208b48698/codenerix_invoicing/views_purchases.py#L796-L808
| null |
class LineAlbaranUpdate(GenLineAlbaranUrl, GenUpdate):
model = PurchasesLineAlbaran
form_class = LineAlbaranForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
self.__pk = kwargs.get('pk', None)
return super(LineAlbaranUpdate, self).dispatch(*args, **kwargs)
def form_valid(self, form):
with transaction.atomic():
old = PurchasesLineAlbaran.objects.get(pk=self.object.pk)
raise Exception("revisar StorageBatch")
"""
batch = StorageBatch.objects.filter(pk=form.data['batch']).first()
if batch is None:
errors = form._errors.setdefault("batch", ErrorList())
errors.append(_("Batch not selected"))
return super(LineAlbaranUpdate, self).form_invalid(form)
"""
status = form.data['status']
try:
quantity = float(form.data['quantity'])
except ValueError:
errors = form._errors.setdefault("quantity", ErrorList())
errors.append(_("Quantity is not valid."))
return super(LineAlbaranUpdate, self).form_invalid(form)
# comprueba si el producto comprado requiere un valor de atributo especial
product_final = ProductFinal.objects.filter(pk=form.data['product']).first()
if not product_final:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Product not selected"))
return super(LineAlbaranUpdate, self).form_invalid(form)
# FIN VALIDACION MINIMA
if product_final != old.product:
"""
comprobar q no se ha vendido el anterior producto
mirando si las cantidades coinciden y las caracteristicas especiales
"""
if product_final.product.feature_special:
# es obligatorio la informacion de caracteristicas especiales
if 'feature_special_value' not in form.data or not form.data['feature_special_value']:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Product needs information of feature special"))
return super(LineAlbaranUpdate, self).form_invalid(form)
try:
quantity = int(quantity)
except ValueError:
errors = form._errors.setdefault("quantity", ErrorList())
errors.append(_("Quantity is not valid"))
return super(LineAlbaranUpdate, self).form_invalid(form)
feature_special_value = list(set(filter(None, form.data['feature_special_value'].split('\n'))))
if product_final.product.feature_special.unique:
# mismo numero de caracteristicas que de cantidades
if len(feature_special_value) != quantity:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Quantity and values of feature special not equals"))
return super(LineAlbaranUpdate, self).form_invalid(form)
# no existen las caracteristicas especiales dadas de alta en el sistema
if ProductUnique.objects.filter(
product_final=product_final,
product_final__product__feature_special=product_final.product.feature_special,
value__in=feature_special_value
).exists():
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Some value of feature special exists"))
return super(LineAlbaranUpdate, self).form_invalid(form)
if old.product.product.feature_special:
"""
comprobamos que el stock sea el mismo para poder eliminarlo
"""
fs_value_old = list(set(filter(None, old.feature_special_value.split('\n'))))
if old.product.product.feature_special.unique:
if ProductUnique.objects.filter(
product_final=old.product,
product_final__product__feature_special=old.product.product.feature_special,
value__in=fs_value_old
).aggregate(
q=Sum('stock_real')
)['q'] != old.quantity:
errors = form._errors.setdefault("product", ErrorList())
errors.append(_("Products were bought, you can not change product"))
return super(LineAlbaranUpdate, self).form_invalid(form)
else:
if ProductUnique.objects.filter(
product_final=old.product,
product_final__product__feature_special=old.product.product.feature_special,
value=fs_value_old[0]
).values('stock_real').first()['stock_real'] < old.quantity:
errors = form._errors.setdefault("product", ErrorList())
errors.append(_("Products were bought, you can not change product"))
return super(LineAlbaranUpdate, self).form_invalid(form)
# guardar, borrar antiguo y dar de alta los nuevos
result = super(LineAlbaranUpdate, self).form_valid(form)
raise Exception("Cambiar ProductStock por ProductUnique")
"""
ProductStock.objects.filter(
line_albaran=self.object,
product_final=old.product,
product_final__product__feature_special=old.product.product.feature_special,
).delete()
if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# prepare stock
ps = ProductStock()
ps.line_albaran = self.object
ps.product_final = product_final
ps.batch = batch
ps.quantity = self.object.quantity
ps.save()
# prepare product feature special
pu = ProductUnique()
pu.product_final = product_final
if product_final.product.feature_special.unique:
# borra lo antiguo y crea lo nuevo
ProductUnique.objects.filter(
product_final=old.product,
product_final__product__feature_special=old.product.product.feature_special,
value__in=fs_value_old
).delete()
if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# save product featureSpecial and stock
for fs in feature_special_value:
pu.pk = None
pu.value = fs
pu.stock_real = 1
pu.save()
else:
if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# creamos o actualizamos el producto unico
product_unique = ProductUnique.objects.filter(
product_final=product_final,
value=feature_special_value[0]
).first()
if product_unique is None:
product_unique = pu
product_unique.stock_real = 0
product_unique.stock_real += self.object.quantity
product_unique.save()
# actualizo el producto unico del registro anterior
product_unique_old = old.products_unique.filter(
value=fs_value_old[0]
).first()
product_unique_old.stock_real -= old.quantity
product_unique_old.save()
"""
else:
# cuando el producto final anterior no tiene una caracteristica especial
"""
comprobamos que las cantidades que fueron introducidas en el albaran son las mismas que hay disponible
"""
product_unique = ProductUnique.objects.filter(
product_final=product_final,
value=None
).first()
if product_unique and old.quantity > product_unique.stock_real:
# error porque la cantidad a descontar es menor que la cantidad en stock
errors = form._errors.setdefault("product", ErrorList())
errors.append(_("Product were bought, you can not change product"))
return super(LineAlbaranUpdate, self).form_invalid(form)
else:
# guardar, borrar antiguo y dar de alta los nuevos
result = super(LineAlbaranUpdate, self).form_valid(form)
raise Exception("Cambiar ProductStock por ProductUnique")
"""
ProductStock.objects.filter(
Q(line_albaran=self.object, product_final=old.product)
).delete()
if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# prepare stock
ps = ProductStock()
ps.line_albaran = self.object
ps.product_final = product_final
ps.batch = batch
ps.quantity = self.object.quantity
ps.save()
if product_unique:
# actualizamos el stock quitandole al producto unico
# la cantidad del anterior registro
product_unique.stock_real -= old.quantity
product_unique.save()
# prepare product unique
pu = ProductUnique.objects.filter(
product_final=product_final,
value__in=feature_special_value
).first()
if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED and pu:
pu.stock_real -= self.object.quantity
pu.save()
else:
if pu is None:
pu = ProductUnique()
pu.product_final = product_final
if product_final.product.feature_special.unique:
# save product unique
for fs in feature_special_value:
pu.pk = None
pu.value = fs
pu.stock_real = 1
pu.save()
else:
pu.value = feature_special_value[0]
pu.stock_real += self.object.quantity
pu.save()
"""
# FIN if product_final.product.feature_special
if old.product.product.feature_special:
"""
si el nuevo producto no necesita de una caracteristicas especial y el antiguo si
comprobamos que el stock sea el mismo para poder eliminarlo
"""
raise Exception("Cambiar ProductStock por ProductUnique")
"""
if ProductStock.objects.filter(
line_albaran=self.object,
product_final=old.product,
feature_special=old.product.product.feature_special,
value__in=list(set(filter(None, old.feature_special_value.split('\n'))))
).aggregate(
q=Sum('quantity')
)['q'] != old.quantity:
errors = form._errors.setdefault("product", ErrorList())
errors.append(_("Products were bought, you can not change product"))
return super(LineAlbaranUpdate, self).form_invalid(form)
else:
# guardar y actualizar
result = super(LineAlbaranUpdate, self).form_valid(form)
# borra stock antiguo
ProductStock.objects.filter(
line_albaran=self.object,
product_final=old.product,
feature_special=old.product.product.feature_special,
value__in=list(set(filter(None, old.feature_special_value.split('\n'))))
).delete()
if self.object.status != PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# guarda nuevo stock
ps = ProductStock()
ps.product_final = product_final
ps.line_albaran = self.object
ps.batch = batch
ps.quantity = quantity
ps.save()
# borra las caracteristicas especiales
ProductUnique.objects.filter(
product_final=self.object.product,
feature_special=old.product.product.feature_special,
value__in=list(set(filter(None, old.feature_special_value.split('\n'))))
).delete()
"""
if product_final.product.feature_special is None and old.product.product.feature_special is None:
"""
comprobamos que las cantidades que fueron introducidas en el albaran son las mismas que hay disponible
"""
if ProductUnique.objects.filter(
product_final=old.product,
stock_real__lte=old.quantity
).exists():
errors = form._errors.setdefault("product", ErrorList())
errors.append(_("Product were bought, you can not change product"))
return super(LineInvoiceUpdate, self).form_invalid(form)
# guardar y actualizar
result = super(LineAlbaranUpdate, self).form_valid(form)
if status != old.status and status == PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# quitar los productos de stock
raise Exception("Cambiar ProductStock por ProductUnique")
"""
ProductStock.objects.filter(
Q(line_albaran=self.object, product_final=old.product)
).update(
batch=batch,
product=product_final,
quantity=0
)
"""
elif status != old.status and old.status == PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# crear los productos en stock
raise Exception("Cambiar ProductStock por ProductUnique")
"""
ps = ProductStock()
ps.product_final = product_final
ps.line_albaran = self.object
ps.batch = batch
ps.quantity = quantity
ps.save()
"""
else:
# actualizar stock
raise Exception("Cambiar ProductStock por ProductUnique")
"""
ProductStock.objects.filter(
Q(line_albaran=self.object, product_final=old.product)
).update(
batch=batch,
product=product_final,
quantity=quantity
)
"""
# actualizamos/creamos el nuevo registro
product_unique = ProductUnique.objects.filter(
product_final=product_final, value=None
).first()
if product_unique is None:
product_unique = ProductUnique()
product_unique.product_final = product_final
product_unique.value = None
product_unique.stock_real = 0
product_unique.stock_real += self.object.quantity
product_unique.save()
# actualizamos el registro anterior
product_unique_old = old.products_unique.get()
product_unique_old.stock_real -= old.quantity
product_unique_old.save()
elif product_final.product.feature_special:
"""
modificamos el mismo producto
"""
# es obligatorio la informacion de caracteristicas especiales
if 'feature_special_value' not in form.data or not form.data['feature_special_value']:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Product needs information of feature special"))
return super(LineAlbaranUpdate, self).form_invalid(form)
items_remove = None
items_new = None
feature_special_value = list(set(filter(None, form.data['feature_special_value'].split('\n'))))
fs_value_old = list(set(filter(None, old.feature_special_value.split('\n'))))
product_unique = None
if product_final.product.feature_special.unique:
# mismo numero de caracteristicas que de cantidades
if len(feature_special_value) != quantity:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Quantity and values of feature special not equals"))
return super(LineAlbaranUpdate, self).form_invalid(form)
else:
# no existen las caracteristicas especiales dadas de alta en el sistema
if ProductUnique.objects.filter(
product_final=product_final,
product_final__product__feature_special=product_final.product.feature_special,
value__in=feature_special_value
).exists():
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Some value of feature special exists"))
return super(LineAlbaranUpdate, self).form_invalid(form)
# se han modificado las caracteristicas especiales
if fs_value_old != feature_special_value:
items_new = []
# elementos eliminados
for x in feature_special_value:
if x not in fs_value_old:
items_new.append(x)
items_remove = []
# elementos nuevos
for x in fs_value_old:
if x not in feature_special_value:
items_remove.append(x)
if items_remove:
"""
comprobar que no se ha vendido el producto con la caracteristica que falta
para eso la cantidad del ProductUnique asociado debe ser mayor a 0
mostrar error
"""
if ProductUnique.objects.filter(
product_final=self.object.product,
value__in=items_remove,
stock_real__lte=0
).exists():
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Some features special can not be delete because it were bought"))
return super(LineAlbaranUpdate, self).form_invalid(form)
else:
"""
busco el producto unico
y actualizo la cantidad y el valor de la caracteristicas especial
"""
product_unique = ProductUnique.objects.filter(
product_final=old.product,
value__in=fs_value_old
).first()
if product_unique:
# compruebo que el stock sea positivo
if product_unique.stock_real + self.object.quantity - old.quantity <= 0:
errors = form._errors.setdefault("feature_special_value", ErrorList())
errors.append(_("Some features special can not be delete because it were bought"))
return super(LineAlbaranUpdate, self).form_invalid(form)
else:
errors = form._errors.setdefault("product", ErrorList())
errors.append(_("Product is not valid."))
return super(LineAlbaranUpdate, self).form_invalid(form)
result = super(LineAlbaranUpdate, self).form_valid(form)
# solo entrara si la caracteristica es NO unica
if product_unique:
if self.object.quantity != old.quantity:
product_unique.stock_real += self.object.quantity - old.quantity
product_unique.save()
if fs_value_old == feature_special_value:
product_unique.value = feature_special_value[0]
product_unique.save()
# solo entrara si la caracteristica es unica
if items_remove:
"""
una vez que se guarde, eliminar o poner a 0 el ProductStock asociado y el ProductUnique
"""
raise Exception("Cambiar ProductStock por ProductUnique")
"""
ProductStock.objects.filter(
line_albaran=self.object,
product_final=self.object.product,
product_final__product__feature_special=self.object.product.product.feature_special,
value__in=items_remove).delete()
ProductUnique.objects.filter(
product_final=self.object.product,
value__in=items_remove).delete()
"""
# solo entrara si la caracteristica es unica
if items_new:
"""
buscar si hay elementos nuevos y darlos de alta en ProductStock y ProductUnique
"""
# prepare stock
raise Exception("Cambiar ProductStock por ProductUnique")
"""
ps = ProductStock()
ps.product_final = product_final
ps.line_albaran = self.object
ps.batch = batch
# prepare product feature special
pu = ProductUnique()
pu.product_final = product_final
# save product featureSpecial and stock
for fs in items_new:
pu.pk = None
pu.value = fs
pu.save()
ps.pk = None
ps.value = fs
ps.quantity = 1
ps.save()
"""
else:
"""
comprobamos que las nuevas cantidades esten aun en stock
"""
raise Exception("Cambiar ProductStock por ProductUnique")
"""
if quantity < old.quantity:
if ProductStock.objects.filter(
line_albaran=self.object,
product_final=self.object.product,
quantity__lt=quantity
).exists():
errors = form._errors.setdefault("quantity", ErrorList())
errors.append(_("Quantity invalid, no stock"))
return super(LineAlbaranUpdate, self).form_invalid(form)
"""
result = super(LineAlbaranUpdate, self).form_valid(form)
raise Exception("Cambiar ProductStock por ProductUnique")
"""
if status != old.status and status == PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# quitar los productos de stock
ProductStock.objects.filter(
batch=batch,
line_albaran=self.object,
product_final=self.object.product).update(quantity=0)
elif status != old.status and old.status == PURCHASE_ALBARAN_LINE_STATUS_REJECTED:
# crear los productos en stock
ps = ProductStock()
ps.product_final = self.object.product
ps.line_albaran = self.object
ps.batch = batch
ps.quantity = quantity
ps.save()
else:
# actualizar stock
ProductStock.objects.filter(
batch=batch,
line_albaran=self.object,
product_final=self.object.product).update(quantity=quantity)
"""
return result
|
xtream1101/cutil
|
cutil/custom_terminal.py
|
CustomTerminal.cprint
|
python
|
def cprint(self, cstr):
    """Reprint *cstr* on the current terminal line.

    The string is padded with spaces up to the length of the previously
    printed string so leftovers are erased, then written with a carriage
    return so the next call overwrites it. The printed text is remembered
    for the next call's padding.
    """
    text = str(cstr)  # force it to be a string
    pad_to = len(self._prev_cstr)
    try:
        # ljust pads only when shorter than the previous line, exactly
        # like appending the missing number of spaces
        print(text.ljust(pad_to), end='\r')
        self._prev_cstr = text
    except UnicodeEncodeError:
        print('Processing...', end='\r')
        self._prev_cstr = 'Processing...'
|
Clear line, then reprint on same line
:param cstr: string to print on current line
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/custom_terminal.py#L30-L46
| null |
class CustomTerminal:
    """Terminal helper: single-line reprint plus a self-refreshing block display."""

    def __init__(self):
        self._prev_cstr = ''          # last string printed on the current line
        self._bprint_disable = False  # True stops the _bprint_display refresh loop
        self._proxy_list = []
        self._current_proxy = None
        self._custom_proxy = False
        self._apikey_list = []
        self._current_apikey = None
        # Block print display messages and values
        self._bprint_messages = None
        # Block print display order (only items listed here will be displayed)
        self._bprint_order = None

    ####
    # Terminal/display related functions
    ####
    def enable_bprint(self, bprint_msg=None, bprint_order=None):
        """Configure and start the block-print display loop.

        BUG FIX: the original used mutable default arguments and wrote to
        ``self.bprint_messages`` / ``self.bprint_disable`` (no underscore)
        while ``__init__`` initialised the underscored names, so the
        refresh loop crashed with AttributeError; attribute names are now
        consistent throughout the class.
        """
        # Block print display messages and values
        self._bprint_messages = {} if bprint_msg is None else bprint_msg
        # Block print display order (only items listed here will be displayed)
        self._bprint_order = [] if bprint_order is None else bprint_order
        # Start instance of block print
        self._bprint_display()

    def disable_bprint(self):
        """Stop the block-print refresh loop on its next tick."""
        self._bprint_disable = True

    def bprint(self, bmsg, line):
        """
        bprint: Block Print
        self._bprint_messages[line][0] is always the display text
        self._bprint_messages[line][1] is always the value
        """
        self._bprint_messages[line][1] = bmsg

    def _bprint_display(self):
        # Redraw the whole terminal with the ordered message block.
        self._bprint_messages['title'][1] = time.time()
        os.system('cls' if os.name == 'nt' else 'clear')
        for item in self._bprint_order:
            print(self._bprint_messages[item][0] + ": " + str(self._bprint_messages[item][1]))
        if self._bprint_disable is not True:
            # Re-arm the refresh timer (every half second).
            t_reload = threading.Timer(.5, self._bprint_display)
            t_reload.daemon = True
            t_reload.start()
|
xtream1101/cutil
|
cutil/database.py
|
_check_values
|
python
|
def _check_values(in_values):
out_values = []
for value in in_values:
# if isinstance(value, (dict, list)):
# out_values.append(json.dumps(value))
# else:
out_values.append(value)
return tuple(out_values)
|
Check if values need to be converted before they get mogrify'd
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/database.py#L10-L20
| null |
import sys
import copy
import json
import logging
from contextlib import contextmanager
logger = logging.getLogger(__name__)
class Database:
def __init__(self, db_config, table_raw=None, max_connections=10):
from psycopg2.pool import ThreadedConnectionPool
self.table_raw = table_raw
try:
# Set default port is port is not passed
if 'db_port' not in db_config:
db_config['db_port'] = 5432
self.pool = ThreadedConnectionPool(minconn=1,
maxconn=max_connections,
dsn="dbname={db_name} user={db_user} host={db_host} password={db_pass} port={db_port}"
.format(**db_config))
except Exception:
logger.exception("Error in db connection")
sys.exit(1)
logger.debug("Connected to database: {host}".format(host=db_config['db_host']))
@contextmanager
def getcursor(self, **kwargs):
conn = self.pool.getconn()
try:
yield conn.cursor(**kwargs)
conn.commit()
except Exception as e:
conn.rollback()
raise e.with_traceback(sys.exc_info()[2])
finally:
self.pool.putconn(conn)
def close(self):
self.pool.closeall()
def insert(self, table, data_list, return_cols='id'):
"""
Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and n cols)
for inserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
"""
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
# Make sure data_list has content
if len(data_list) == 0:
# No need to continue
return []
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# Do not return here, let the exception handle the error that will be thrown when the query runs
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
try:
with self.getcursor() as cur:
query = "INSERT INTO {table} ({fields}) VALUES {values} {return_cols}"\
.format(table=table,
fields='"{0}"'.format('", "'.join(data_list[0].keys())),
values=','.join(['%s'] * len(data_list)),
return_cols=return_cols,
)
values = []
for row in [tuple(v.values()) for v in data_list]:
values.append(_check_values(row))
query = cur.mogrify(query, values)
cur.execute(query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error inserting data")
logger.debug("Error inserting data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
def upsert(self, table, data_list, on_conflict_fields, on_conflict_action='update',
update_fields=None, return_cols='id'):
"""
Create a bulk upsert statement which is much faster (~6x in tests with 10k & 100k rows and n cols)
for upserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
"""
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
# Make sure data_list has content
if len(data_list) == 0:
# No need to continue
return []
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# TODO: raise some error here rather then returning None
return None
# Make sure on_conflict_fields is a list
if not isinstance(on_conflict_fields, list):
on_conflict_fields = [on_conflict_fields]
# Make sure on_conflict_fields has data
if len(on_conflict_fields) == 0 or on_conflict_fields[0] is None:
# No need to continue
logger.critical("Must pass in `on_conflict_fields` argument")
# TODO: raise some error here rather then returning None
return None
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
# Make sure update_fields is a list/valid
if on_conflict_action == 'update':
if not isinstance(update_fields, list):
update_fields = [update_fields]
# If noting is passed in, set `update_fields` to all (data_list-on_conflict_fields)
if len(update_fields) == 0 or update_fields[0] is None:
update_fields = list(set(data_list[0].keys()) - set(on_conflict_fields))
# If update_fields is empty here that could only mean that all fields are set as conflict_fields
if len(update_fields) == 0:
logger.critical("Not all the fields can be `on_conflict_fields` when doing an update")
# TODO: raise some error here rather then returning None
return None
# If everything is good to go with the update fields
fields_update_tmp = []
for key in data_list[0].keys():
fields_update_tmp.append('"{0}"="excluded"."{0}"'.format(key))
conflict_action_sql = 'UPDATE SET {update_fields}'\
.format(update_fields=', '.join(fields_update_tmp))
else:
# Do nothing on conflict
conflict_action_sql = 'NOTHING'
try:
with self.getcursor() as cur:
query = """INSERT INTO {table} ({insert_fields})
VALUES {values}
ON CONFLICT ({on_conflict_fields}) DO
{conflict_action_sql}
{return_cols}
""".format(table=table,
insert_fields='"{0}"'.format('","'.join(data_list[0].keys())),
values=','.join(['%s'] * len(data_list)),
on_conflict_fields=','.join(on_conflict_fields),
conflict_action_sql=conflict_action_sql,
return_cols=return_cols,
)
# Get all the values for each row and create a lists of lists
values = []
for row in [list(v.values()) for v in data_list]:
values.append(_check_values(row))
query = cur.mogrify(query, values)
cur.execute(query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error upserting data")
logger.debug("Error upserting data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
def update(self, table, data_list, matched_field=None, return_cols='id'):
"""
Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and 4 cols)
for inserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
"""
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
if matched_field is None:
# Assume the id field
logger.info("Matched field not defined, assuming the `id` field")
matched_field = 'id'
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
if len(data_list) == 0:
# No need to continue
return []
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# Do not return here, let the exception handle the error that will be thrown when the query runs
try:
with self.getcursor() as cur:
query_list = []
# TODO: change to return data from the database, not just what you passed in
return_list = []
for row in data_list:
if row.get(matched_field) is None:
logger.debug("Cannot update row. Missing field {field} in data {data}"
.format(field=matched_field, data=row))
logger.error("Cannot update row. Missing field {field} in data".format(field=matched_field))
continue
# Pull matched_value from data to be updated and remove that key
matched_value = row.get(matched_field)
del row[matched_field]
query = "UPDATE {table} SET {data} WHERE {matched_field}=%s {return_cols}"\
.format(table=table,
data=','.join("%s=%%s" % u for u in row.keys()),
matched_field=matched_field,
return_cols=return_cols
)
values = list(row.values())
values.append(matched_value)
values = _check_values(values)
query = cur.mogrify(query, values)
query_list.append(query)
return_list.append(matched_value)
finial_query = b';'.join(query_list)
cur.execute(finial_query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error updating data")
logger.debug("Error updating data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
|
xtream1101/cutil
|
cutil/database.py
|
Database.insert
|
python
|
def insert(self, table, data_list, return_cols='id'):
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
# Make sure data_list has content
if len(data_list) == 0:
# No need to continue
return []
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# Do not return here, let the exception handle the error that will be thrown when the query runs
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
try:
with self.getcursor() as cur:
query = "INSERT INTO {table} ({fields}) VALUES {values} {return_cols}"\
.format(table=table,
fields='"{0}"'.format('", "'.join(data_list[0].keys())),
values=','.join(['%s'] * len(data_list)),
return_cols=return_cols,
)
values = []
for row in [tuple(v.values()) for v in data_list]:
values.append(_check_values(row))
query = cur.mogrify(query, values)
cur.execute(query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error inserting data")
logger.debug("Error inserting data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
|
Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and n cols)
for inserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/database.py#L61-L114
|
[
"def _check_values(in_values):\n \"\"\" Check if values need to be converted before they get mogrify'd\n \"\"\"\n out_values = []\n for value in in_values:\n # if isinstance(value, (dict, list)):\n # out_values.append(json.dumps(value))\n # else:\n out_values.append(value)\n\n return tuple(out_values)\n"
] |
class Database:
def __init__(self, db_config, table_raw=None, max_connections=10):
from psycopg2.pool import ThreadedConnectionPool
self.table_raw = table_raw
try:
# Set default port is port is not passed
if 'db_port' not in db_config:
db_config['db_port'] = 5432
self.pool = ThreadedConnectionPool(minconn=1,
maxconn=max_connections,
dsn="dbname={db_name} user={db_user} host={db_host} password={db_pass} port={db_port}"
.format(**db_config))
except Exception:
logger.exception("Error in db connection")
sys.exit(1)
logger.debug("Connected to database: {host}".format(host=db_config['db_host']))
@contextmanager
def getcursor(self, **kwargs):
conn = self.pool.getconn()
try:
yield conn.cursor(**kwargs)
conn.commit()
except Exception as e:
conn.rollback()
raise e.with_traceback(sys.exc_info()[2])
finally:
self.pool.putconn(conn)
def close(self):
self.pool.closeall()
def upsert(self, table, data_list, on_conflict_fields, on_conflict_action='update',
update_fields=None, return_cols='id'):
"""
Create a bulk upsert statement which is much faster (~6x in tests with 10k & 100k rows and n cols)
for upserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
"""
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
# Make sure data_list has content
if len(data_list) == 0:
# No need to continue
return []
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# TODO: raise some error here rather then returning None
return None
# Make sure on_conflict_fields is a list
if not isinstance(on_conflict_fields, list):
on_conflict_fields = [on_conflict_fields]
# Make sure on_conflict_fields has data
if len(on_conflict_fields) == 0 or on_conflict_fields[0] is None:
# No need to continue
logger.critical("Must pass in `on_conflict_fields` argument")
# TODO: raise some error here rather then returning None
return None
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
# Make sure update_fields is a list/valid
if on_conflict_action == 'update':
if not isinstance(update_fields, list):
update_fields = [update_fields]
# If noting is passed in, set `update_fields` to all (data_list-on_conflict_fields)
if len(update_fields) == 0 or update_fields[0] is None:
update_fields = list(set(data_list[0].keys()) - set(on_conflict_fields))
# If update_fields is empty here that could only mean that all fields are set as conflict_fields
if len(update_fields) == 0:
logger.critical("Not all the fields can be `on_conflict_fields` when doing an update")
# TODO: raise some error here rather then returning None
return None
# If everything is good to go with the update fields
fields_update_tmp = []
for key in data_list[0].keys():
fields_update_tmp.append('"{0}"="excluded"."{0}"'.format(key))
conflict_action_sql = 'UPDATE SET {update_fields}'\
.format(update_fields=', '.join(fields_update_tmp))
else:
# Do nothing on conflict
conflict_action_sql = 'NOTHING'
try:
with self.getcursor() as cur:
query = """INSERT INTO {table} ({insert_fields})
VALUES {values}
ON CONFLICT ({on_conflict_fields}) DO
{conflict_action_sql}
{return_cols}
""".format(table=table,
insert_fields='"{0}"'.format('","'.join(data_list[0].keys())),
values=','.join(['%s'] * len(data_list)),
on_conflict_fields=','.join(on_conflict_fields),
conflict_action_sql=conflict_action_sql,
return_cols=return_cols,
)
# Get all the values for each row and create a lists of lists
values = []
for row in [list(v.values()) for v in data_list]:
values.append(_check_values(row))
query = cur.mogrify(query, values)
cur.execute(query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error upserting data")
logger.debug("Error upserting data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
def update(self, table, data_list, matched_field=None, return_cols='id'):
"""
Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and 4 cols)
for inserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
"""
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
if matched_field is None:
# Assume the id field
logger.info("Matched field not defined, assuming the `id` field")
matched_field = 'id'
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
if len(data_list) == 0:
# No need to continue
return []
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# Do not return here, let the exception handle the error that will be thrown when the query runs
try:
with self.getcursor() as cur:
query_list = []
# TODO: change to return data from the database, not just what you passed in
return_list = []
for row in data_list:
if row.get(matched_field) is None:
logger.debug("Cannot update row. Missing field {field} in data {data}"
.format(field=matched_field, data=row))
logger.error("Cannot update row. Missing field {field} in data".format(field=matched_field))
continue
# Pull matched_value from data to be updated and remove that key
matched_value = row.get(matched_field)
del row[matched_field]
query = "UPDATE {table} SET {data} WHERE {matched_field}=%s {return_cols}"\
.format(table=table,
data=','.join("%s=%%s" % u for u in row.keys()),
matched_field=matched_field,
return_cols=return_cols
)
values = list(row.values())
values.append(matched_value)
values = _check_values(values)
query = cur.mogrify(query, values)
query_list.append(query)
return_list.append(matched_value)
finial_query = b';'.join(query_list)
cur.execute(finial_query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error updating data")
logger.debug("Error updating data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
|
xtream1101/cutil
|
cutil/database.py
|
Database.upsert
|
python
|
def upsert(self, table, data_list, on_conflict_fields, on_conflict_action='update',
update_fields=None, return_cols='id'):
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
# Make sure data_list has content
if len(data_list) == 0:
# No need to continue
return []
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# TODO: raise some error here rather then returning None
return None
# Make sure on_conflict_fields is a list
if not isinstance(on_conflict_fields, list):
on_conflict_fields = [on_conflict_fields]
# Make sure on_conflict_fields has data
if len(on_conflict_fields) == 0 or on_conflict_fields[0] is None:
# No need to continue
logger.critical("Must pass in `on_conflict_fields` argument")
# TODO: raise some error here rather then returning None
return None
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
# Make sure update_fields is a list/valid
if on_conflict_action == 'update':
if not isinstance(update_fields, list):
update_fields = [update_fields]
# If noting is passed in, set `update_fields` to all (data_list-on_conflict_fields)
if len(update_fields) == 0 or update_fields[0] is None:
update_fields = list(set(data_list[0].keys()) - set(on_conflict_fields))
# If update_fields is empty here that could only mean that all fields are set as conflict_fields
if len(update_fields) == 0:
logger.critical("Not all the fields can be `on_conflict_fields` when doing an update")
# TODO: raise some error here rather then returning None
return None
# If everything is good to go with the update fields
fields_update_tmp = []
for key in data_list[0].keys():
fields_update_tmp.append('"{0}"="excluded"."{0}"'.format(key))
conflict_action_sql = 'UPDATE SET {update_fields}'\
.format(update_fields=', '.join(fields_update_tmp))
else:
# Do nothing on conflict
conflict_action_sql = 'NOTHING'
try:
with self.getcursor() as cur:
query = """INSERT INTO {table} ({insert_fields})
VALUES {values}
ON CONFLICT ({on_conflict_fields}) DO
{conflict_action_sql}
{return_cols}
""".format(table=table,
insert_fields='"{0}"'.format('","'.join(data_list[0].keys())),
values=','.join(['%s'] * len(data_list)),
on_conflict_fields=','.join(on_conflict_fields),
conflict_action_sql=conflict_action_sql,
return_cols=return_cols,
)
# Get all the values for each row and create a lists of lists
values = []
for row in [list(v.values()) for v in data_list]:
values.append(_check_values(row))
query = cur.mogrify(query, values)
cur.execute(query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error upserting data")
logger.debug("Error upserting data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
|
Create a bulk upsert statement which is much faster (~6x in tests with 10k & 100k rows and n cols)
for upserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
|
train
|
https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/database.py#L116-L211
|
[
"def _check_values(in_values):\n \"\"\" Check if values need to be converted before they get mogrify'd\n \"\"\"\n out_values = []\n for value in in_values:\n # if isinstance(value, (dict, list)):\n # out_values.append(json.dumps(value))\n # else:\n out_values.append(value)\n\n return tuple(out_values)\n"
] |
class Database:
def __init__(self, db_config, table_raw=None, max_connections=10):
from psycopg2.pool import ThreadedConnectionPool
self.table_raw = table_raw
try:
# Set default port is port is not passed
if 'db_port' not in db_config:
db_config['db_port'] = 5432
self.pool = ThreadedConnectionPool(minconn=1,
maxconn=max_connections,
dsn="dbname={db_name} user={db_user} host={db_host} password={db_pass} port={db_port}"
.format(**db_config))
except Exception:
logger.exception("Error in db connection")
sys.exit(1)
logger.debug("Connected to database: {host}".format(host=db_config['db_host']))
@contextmanager
def getcursor(self, **kwargs):
conn = self.pool.getconn()
try:
yield conn.cursor(**kwargs)
conn.commit()
except Exception as e:
conn.rollback()
raise e.with_traceback(sys.exc_info()[2])
finally:
self.pool.putconn(conn)
def close(self):
self.pool.closeall()
def insert(self, table, data_list, return_cols='id'):
"""
Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and n cols)
for inserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
"""
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
# Make sure data_list has content
if len(data_list) == 0:
# No need to continue
return []
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# Do not return here, let the exception handle the error that will be thrown when the query runs
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
try:
with self.getcursor() as cur:
query = "INSERT INTO {table} ({fields}) VALUES {values} {return_cols}"\
.format(table=table,
fields='"{0}"'.format('", "'.join(data_list[0].keys())),
values=','.join(['%s'] * len(data_list)),
return_cols=return_cols,
)
values = []
for row in [tuple(v.values()) for v in data_list]:
values.append(_check_values(row))
query = cur.mogrify(query, values)
cur.execute(query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error inserting data")
logger.debug("Error inserting data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
def update(self, table, data_list, matched_field=None, return_cols='id'):
"""
Create a bulk insert statement which is much faster (~2x in tests with 10k & 100k rows and 4 cols)
for inserting data then executemany()
TODO: Is there a limit of length the query can be? If so handle it.
"""
data_list = copy.deepcopy(data_list) # Create deepcopy so the original list does not get modified
if matched_field is None:
# Assume the id field
logger.info("Matched field not defined, assuming the `id` field")
matched_field = 'id'
# Make sure that `data_list` is a list
if not isinstance(data_list, list):
data_list = [data_list]
if len(data_list) == 0:
# No need to continue
return []
# Make sure return_cols is a list
if return_cols is None or len(return_cols) == 0 or return_cols[0] is None:
return_cols = ''
elif not isinstance(return_cols, list):
return_cols = [return_cols]
if len(return_cols) > 0:
return_cols = 'RETURNING ' + ','.join(return_cols)
# Data in the list must be dicts (just check the first one)
if not isinstance(data_list[0], dict):
logger.critical("Data must be a list of dicts")
# Do not return here, let the exception handle the error that will be thrown when the query runs
try:
with self.getcursor() as cur:
query_list = []
# TODO: change to return data from the database, not just what you passed in
return_list = []
for row in data_list:
if row.get(matched_field) is None:
logger.debug("Cannot update row. Missing field {field} in data {data}"
.format(field=matched_field, data=row))
logger.error("Cannot update row. Missing field {field} in data".format(field=matched_field))
continue
# Pull matched_value from data to be updated and remove that key
matched_value = row.get(matched_field)
del row[matched_field]
query = "UPDATE {table} SET {data} WHERE {matched_field}=%s {return_cols}"\
.format(table=table,
data=','.join("%s=%%s" % u for u in row.keys()),
matched_field=matched_field,
return_cols=return_cols
)
values = list(row.values())
values.append(matched_value)
values = _check_values(values)
query = cur.mogrify(query, values)
query_list.append(query)
return_list.append(matched_value)
finial_query = b';'.join(query_list)
cur.execute(finial_query)
try:
return cur.fetchall()
except Exception:
return None
except Exception as e:
logger.exception("Error updating data")
logger.debug("Error updating data: {data}".format(data=data_list))
raise e.with_traceback(sys.exc_info()[2])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.