repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
spotify/docker_interface | docker_interface/util.py | merge | python | def merge(x, y):
keys_x = set(x)
keys_y = set(y)
for key in keys_y - keys_x:
x[key] = y[key]
for key in keys_x & keys_y:
value_x = x[key]
value_y = y[key]
if isinstance(value_x, dict) and isinstance(value_y, dict):
x[key] = merge(value_x, value_y)
else:
if value_x != value_y:
raise ValueError
return x | Merge two dictionaries and raise an error for inconsistencies.
Parameters
----------
x : dict
dictionary x
y : dict
dictionary y
Returns
-------
x : dict
merged dictionary
Raises
------
ValueError
if `x` and `y` are inconsistent | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L181-L218 | [
"def merge(x, y):\n \"\"\"\n Merge two dictionaries and raise an error for inconsistencies.\n\n Parameters\n ----------\n x : dict\n dictionary x\n y : dict\n dictionary y\n\n Returns\n -------\n x : dict\n merged dictionary\n\n Raises\n ------\n ValueError\n if `x` and `y` are inconsistent\n \"\"\"\n keys_x = set(x)\n keys_y = set(y)\n\n for key in keys_y - keys_x:\n x[key] = y[key]\n\n for key in keys_x & keys_y:\n value_x = x[key]\n value_y = y[key]\n\n if isinstance(value_x, dict) and isinstance(value_y, dict):\n x[key] = merge(value_x, value_y)\n else:\n if value_x != value_y:\n raise ValueError\n\n return x\n"
] | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import socket
TYPES = {
'integer': int,
'string': str,
'number': float,
'boolean': bool,
'array': list,
}
def abspath(path, ref=None):
"""
Create an absolute path.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
path : str
absolute path
Raises
------
ValueError
if an absolute path cannot be constructed
"""
if ref:
path = os.path.join(ref, path)
if not os.path.isabs(path):
raise ValueError("expected an absolute path but got '%s'" % path)
return path
def split_path(path, ref=None):
"""
Split a path into its components.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
list : str
components of the path
"""
path = abspath(path, ref)
return path.strip(os.path.sep).split(os.path.sep)
def get_value(instance, path, ref=None):
"""
Get the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
Raises
------
KeyError
if `path` is not valid
TypeError
if a value along the `path` is not a list or dictionary
"""
for part in split_path(path, ref):
if isinstance(instance, list):
part = int(part)
elif not isinstance(instance, dict):
raise TypeError("expected `list` or `dict` but got `%s`" % instance)
try:
instance = instance[part]
except KeyError:
raise KeyError(abspath(path, ref))
return instance
def pop_value(instance, path, ref=None):
"""
Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
"""
head, tail = os.path.split(abspath(path, ref))
instance = get_value(instance, head)
if isinstance(instance, list):
tail = int(tail)
return instance.pop(tail)
def set_value(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
instance[tail] = value
def set_default(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
return instance.setdefault(tail, value)
def set_default_from_schema(instance, schema):
"""
Populate default values on an `instance` given a `schema`.
Parameters
----------
instance : dict
instance to populate default values for
schema : dict
JSON schema with default values
Returns
-------
instance : dict
instance with populated default values
"""
for name, property_ in schema.get('properties', {}).items():
# Set the defaults at this level of the schema
if 'default' in property_:
instance.setdefault(name, property_['default'])
# Descend one level if the property is an object
if 'properties' in property_:
set_default_from_schema(instance.setdefault(name, {}), property_)
return instance
def apply(instance, func, path=None):
"""
Apply `func` to all fundamental types of `instance`.
Parameters
----------
instance : dict
instance to apply functions to
func : callable
function with two arguments (instance, path) to apply to all fundamental types recursively
path : str
path in the document (defaults to '/')
Returns
-------
instance : dict
instance after applying `func` to fundamental types
"""
path = path or os.path.sep
if isinstance(instance, list):
return [apply(item, func, os.path.join(path, str(i))) for i, item in enumerate(instance)]
elif isinstance(instance, dict):
return {key: apply(value, func, os.path.join(path, key)) for key, value in instance.items()}
return func(instance, path)
def get_free_port(ports=None):
"""
Get a free port.
Parameters
----------
ports : iterable
ports to check (obtain a random port by default)
Returns
-------
port : int
a free port
"""
if ports is None:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
_socket.bind(('', 0))
_, port = _socket.getsockname()
return port
# Get ports from the specified list
for port in ports:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
try:
_socket.bind(('', port))
return port
except socket.error as ex:
if ex.errno not in (48, 98):
raise
raise RuntimeError("could not find a free port")
|
spotify/docker_interface | docker_interface/util.py | set_default_from_schema | python | def set_default_from_schema(instance, schema):
for name, property_ in schema.get('properties', {}).items():
# Set the defaults at this level of the schema
if 'default' in property_:
instance.setdefault(name, property_['default'])
# Descend one level if the property is an object
if 'properties' in property_:
set_default_from_schema(instance.setdefault(name, {}), property_)
return instance | Populate default values on an `instance` given a `schema`.
Parameters
----------
instance : dict
instance to populate default values for
schema : dict
JSON schema with default values
Returns
-------
instance : dict
instance with populated default values | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L221-L244 | [
"def set_default_from_schema(instance, schema):\n \"\"\"\n Populate default values on an `instance` given a `schema`.\n\n Parameters\n ----------\n instance : dict\n instance to populate default values for\n schema : dict\n JSON schema with default values\n\n Returns\n -------\n instance : dict\n instance with populated default values\n \"\"\"\n for name, property_ in schema.get('properties', {}).items():\n # Set the defaults at this level of the schema\n if 'default' in property_:\n instance.setdefault(name, property_['default'])\n # Descend one level if the property is an object\n if 'properties' in property_:\n set_default_from_schema(instance.setdefault(name, {}), property_)\n return instance\n"
] | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import socket
TYPES = {
'integer': int,
'string': str,
'number': float,
'boolean': bool,
'array': list,
}
def abspath(path, ref=None):
"""
Create an absolute path.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
path : str
absolute path
Raises
------
ValueError
if an absolute path cannot be constructed
"""
if ref:
path = os.path.join(ref, path)
if not os.path.isabs(path):
raise ValueError("expected an absolute path but got '%s'" % path)
return path
def split_path(path, ref=None):
"""
Split a path into its components.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
list : str
components of the path
"""
path = abspath(path, ref)
return path.strip(os.path.sep).split(os.path.sep)
def get_value(instance, path, ref=None):
"""
Get the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
Raises
------
KeyError
if `path` is not valid
TypeError
if a value along the `path` is not a list or dictionary
"""
for part in split_path(path, ref):
if isinstance(instance, list):
part = int(part)
elif not isinstance(instance, dict):
raise TypeError("expected `list` or `dict` but got `%s`" % instance)
try:
instance = instance[part]
except KeyError:
raise KeyError(abspath(path, ref))
return instance
def pop_value(instance, path, ref=None):
"""
Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
"""
head, tail = os.path.split(abspath(path, ref))
instance = get_value(instance, head)
if isinstance(instance, list):
tail = int(tail)
return instance.pop(tail)
def set_value(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
instance[tail] = value
def set_default(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
return instance.setdefault(tail, value)
def merge(x, y):
"""
Merge two dictionaries and raise an error for inconsistencies.
Parameters
----------
x : dict
dictionary x
y : dict
dictionary y
Returns
-------
x : dict
merged dictionary
Raises
------
ValueError
if `x` and `y` are inconsistent
"""
keys_x = set(x)
keys_y = set(y)
for key in keys_y - keys_x:
x[key] = y[key]
for key in keys_x & keys_y:
value_x = x[key]
value_y = y[key]
if isinstance(value_x, dict) and isinstance(value_y, dict):
x[key] = merge(value_x, value_y)
else:
if value_x != value_y:
raise ValueError
return x
def apply(instance, func, path=None):
"""
Apply `func` to all fundamental types of `instance`.
Parameters
----------
instance : dict
instance to apply functions to
func : callable
function with two arguments (instance, path) to apply to all fundamental types recursively
path : str
path in the document (defaults to '/')
Returns
-------
instance : dict
instance after applying `func` to fundamental types
"""
path = path or os.path.sep
if isinstance(instance, list):
return [apply(item, func, os.path.join(path, str(i))) for i, item in enumerate(instance)]
elif isinstance(instance, dict):
return {key: apply(value, func, os.path.join(path, key)) for key, value in instance.items()}
return func(instance, path)
def get_free_port(ports=None):
"""
Get a free port.
Parameters
----------
ports : iterable
ports to check (obtain a random port by default)
Returns
-------
port : int
a free port
"""
if ports is None:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
_socket.bind(('', 0))
_, port = _socket.getsockname()
return port
# Get ports from the specified list
for port in ports:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
try:
_socket.bind(('', port))
return port
except socket.error as ex:
if ex.errno not in (48, 98):
raise
raise RuntimeError("could not find a free port")
|
spotify/docker_interface | docker_interface/util.py | apply | python | def apply(instance, func, path=None):
path = path or os.path.sep
if isinstance(instance, list):
return [apply(item, func, os.path.join(path, str(i))) for i, item in enumerate(instance)]
elif isinstance(instance, dict):
return {key: apply(value, func, os.path.join(path, key)) for key, value in instance.items()}
return func(instance, path) | Apply `func` to all fundamental types of `instance`.
Parameters
----------
instance : dict
instance to apply functions to
func : callable
function with two arguments (instance, path) to apply to all fundamental types recursively
path : str
path in the document (defaults to '/')
Returns
-------
instance : dict
instance after applying `func` to fundamental types | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L247-L270 | null | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import socket
TYPES = {
'integer': int,
'string': str,
'number': float,
'boolean': bool,
'array': list,
}
def abspath(path, ref=None):
"""
Create an absolute path.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
path : str
absolute path
Raises
------
ValueError
if an absolute path cannot be constructed
"""
if ref:
path = os.path.join(ref, path)
if not os.path.isabs(path):
raise ValueError("expected an absolute path but got '%s'" % path)
return path
def split_path(path, ref=None):
"""
Split a path into its components.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
list : str
components of the path
"""
path = abspath(path, ref)
return path.strip(os.path.sep).split(os.path.sep)
def get_value(instance, path, ref=None):
"""
Get the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
Raises
------
KeyError
if `path` is not valid
TypeError
if a value along the `path` is not a list or dictionary
"""
for part in split_path(path, ref):
if isinstance(instance, list):
part = int(part)
elif not isinstance(instance, dict):
raise TypeError("expected `list` or `dict` but got `%s`" % instance)
try:
instance = instance[part]
except KeyError:
raise KeyError(abspath(path, ref))
return instance
def pop_value(instance, path, ref=None):
"""
Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
"""
head, tail = os.path.split(abspath(path, ref))
instance = get_value(instance, head)
if isinstance(instance, list):
tail = int(tail)
return instance.pop(tail)
def set_value(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
instance[tail] = value
def set_default(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
return instance.setdefault(tail, value)
def merge(x, y):
"""
Merge two dictionaries and raise an error for inconsistencies.
Parameters
----------
x : dict
dictionary x
y : dict
dictionary y
Returns
-------
x : dict
merged dictionary
Raises
------
ValueError
if `x` and `y` are inconsistent
"""
keys_x = set(x)
keys_y = set(y)
for key in keys_y - keys_x:
x[key] = y[key]
for key in keys_x & keys_y:
value_x = x[key]
value_y = y[key]
if isinstance(value_x, dict) and isinstance(value_y, dict):
x[key] = merge(value_x, value_y)
else:
if value_x != value_y:
raise ValueError
return x
def set_default_from_schema(instance, schema):
"""
Populate default values on an `instance` given a `schema`.
Parameters
----------
instance : dict
instance to populate default values for
schema : dict
JSON schema with default values
Returns
-------
instance : dict
instance with populated default values
"""
for name, property_ in schema.get('properties', {}).items():
# Set the defaults at this level of the schema
if 'default' in property_:
instance.setdefault(name, property_['default'])
# Descend one level if the property is an object
if 'properties' in property_:
set_default_from_schema(instance.setdefault(name, {}), property_)
return instance
def get_free_port(ports=None):
"""
Get a free port.
Parameters
----------
ports : iterable
ports to check (obtain a random port by default)
Returns
-------
port : int
a free port
"""
if ports is None:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
_socket.bind(('', 0))
_, port = _socket.getsockname()
return port
# Get ports from the specified list
for port in ports:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
try:
_socket.bind(('', port))
return port
except socket.error as ex:
if ex.errno not in (48, 98):
raise
raise RuntimeError("could not find a free port")
|
spotify/docker_interface | docker_interface/util.py | get_free_port | python | def get_free_port(ports=None):
if ports is None:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
_socket.bind(('', 0))
_, port = _socket.getsockname()
return port
# Get ports from the specified list
for port in ports:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
try:
_socket.bind(('', port))
return port
except socket.error as ex:
if ex.errno not in (48, 98):
raise
raise RuntimeError("could not find a free port") | Get a free port.
Parameters
----------
ports : iterable
ports to check (obtain a random port by default)
Returns
-------
port : int
a free port | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L273-L303 | null | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import socket
TYPES = {
'integer': int,
'string': str,
'number': float,
'boolean': bool,
'array': list,
}
def abspath(path, ref=None):
"""
Create an absolute path.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
path : str
absolute path
Raises
------
ValueError
if an absolute path cannot be constructed
"""
if ref:
path = os.path.join(ref, path)
if not os.path.isabs(path):
raise ValueError("expected an absolute path but got '%s'" % path)
return path
def split_path(path, ref=None):
"""
Split a path into its components.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
list : str
components of the path
"""
path = abspath(path, ref)
return path.strip(os.path.sep).split(os.path.sep)
def get_value(instance, path, ref=None):
"""
Get the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
Raises
------
KeyError
if `path` is not valid
TypeError
if a value along the `path` is not a list or dictionary
"""
for part in split_path(path, ref):
if isinstance(instance, list):
part = int(part)
elif not isinstance(instance, dict):
raise TypeError("expected `list` or `dict` but got `%s`" % instance)
try:
instance = instance[part]
except KeyError:
raise KeyError(abspath(path, ref))
return instance
def pop_value(instance, path, ref=None):
"""
Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
"""
head, tail = os.path.split(abspath(path, ref))
instance = get_value(instance, head)
if isinstance(instance, list):
tail = int(tail)
return instance.pop(tail)
def set_value(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
instance[tail] = value
def set_default(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
return instance.setdefault(tail, value)
def merge(x, y):
"""
Merge two dictionaries and raise an error for inconsistencies.
Parameters
----------
x : dict
dictionary x
y : dict
dictionary y
Returns
-------
x : dict
merged dictionary
Raises
------
ValueError
if `x` and `y` are inconsistent
"""
keys_x = set(x)
keys_y = set(y)
for key in keys_y - keys_x:
x[key] = y[key]
for key in keys_x & keys_y:
value_x = x[key]
value_y = y[key]
if isinstance(value_x, dict) and isinstance(value_y, dict):
x[key] = merge(value_x, value_y)
else:
if value_x != value_y:
raise ValueError
return x
def set_default_from_schema(instance, schema):
"""
Populate default values on an `instance` given a `schema`.
Parameters
----------
instance : dict
instance to populate default values for
schema : dict
JSON schema with default values
Returns
-------
instance : dict
instance with populated default values
"""
for name, property_ in schema.get('properties', {}).items():
# Set the defaults at this level of the schema
if 'default' in property_:
instance.setdefault(name, property_['default'])
# Descend one level if the property is an object
if 'properties' in property_:
set_default_from_schema(instance.setdefault(name, {}), property_)
return instance
def apply(instance, func, path=None):
"""
Apply `func` to all fundamental types of `instance`.
Parameters
----------
instance : dict
instance to apply functions to
func : callable
function with two arguments (instance, path) to apply to all fundamental types recursively
path : str
path in the document (defaults to '/')
Returns
-------
instance : dict
instance after applying `func` to fundamental types
"""
path = path or os.path.sep
if isinstance(instance, list):
return [apply(item, func, os.path.join(path, str(i))) for i, item in enumerate(instance)]
elif isinstance(instance, dict):
return {key: apply(value, func, os.path.join(path, key)) for key, value in instance.items()}
return func(instance, path)
|
spotify/docker_interface | docker_interface/docker_interface.py | build_parameter_parts | python | def build_parameter_parts(configuration, *parameters):
for parameter in parameters:
values = configuration.pop(parameter, [])
if values:
if not isinstance(values, list):
values = [values]
for value in values:
yield '--%s=%s' % (parameter, value) | Construct command parts for one or more parameters.
Parameters
----------
configuration : dict
configuration
parameters : list
list of parameters to create command line arguments for
Yields
------
argument : str
command line argument | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/docker_interface.py#L18-L40 | null | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def build_dict_parameter_parts(configuration, *parameters, **defaults):
"""
Construct command parts for one or more parameters, each of which constitutes an assignment of
the form `key=value`.
Parameters
----------
configuration : dict
configuration
parameters : list
list of parameters to create command line arguments for
defaults : dict
default values to use if a parameter is missing
Yields
------
argument : str
command line argument
"""
for parameter in parameters:
for key, value in configuration.pop(parameter, {}).items():
yield '--%s=%s=%s' % (parameter, key, value)
def build_docker_run_command(configuration):
"""
Translate a declarative docker `configuration` to a `docker run` command.
Parameters
----------
configuration : dict
configuration
Returns
-------
args : list
sequence of command line arguments to run a command in a container
"""
parts = configuration.pop('docker').split()
parts.append('run')
run = configuration.pop('run')
# Ensure all env-files have proper paths
if 'env-file' in run:
run['env-file'] = [os.path.join(configuration['workspace'], env_file)
for env_file in run['env-file']]
parts.extend(build_parameter_parts(
run, 'user', 'workdir', 'rm', 'interactive', 'tty', 'env-file', 'cpu-shares', 'name',
'network', 'label', 'memory', 'entrypoint', 'runtime', 'privileged', 'group-add'
))
# Add the mounts
# The following code requires docker >= 17.06
'''for mount in run.pop('mount', []):
if mount['type'] == 'bind':
mount['source'] = os.path.join(
configuration['workspace'], mount['source'])
parts.extend(['--mount', ",".join(["%s=%s" % item for item in mount.items()])])'''
# Add the mounts
for mount in run.pop('mount', []):
if mount['type'] == 'tmpfs':
raise RuntimeError('tmpfs-mounts are currently not supported via the mount ' +
'directive in docker_interface. Consider using the tmpfs ' +
'directive instead.')
if mount['type'] == 'bind':
mount['source'] = os.path.abspath(
os.path.join(configuration['workspace'], mount['source']))
vol_config = '--volume=%s:%s' % (mount['source'], mount['destination'])
if 'readonly' in mount and mount['readonly']:
vol_config += ':ro'
parts.append(vol_config)
# Set or forward environment variables
for key, value in run.pop('env', {}).items():
if value is None:
parts.append('--env=%s' % key)
else:
parts.append('--env=%s=%s' % (key, value))
parts.append('--env=DOCKER_INTERFACE=true')
# Forward ports
for publish in run.pop('publish', []):
parts.append('--publish=%s:%s:%s' % tuple([
publish.get(key, '') for key in "ip host container".split()]))
# Add temporary file systems
for tmpfs in run.pop('tmpfs', []):
destination = tmpfs['destination']
options = tmpfs.pop('options', [])
for key in ['mode', 'size']:
if key in tmpfs:
options.append('%s=%s' % (key, tmpfs[key]))
if options:
destination = "%s:%s" % (destination, ",".join(options))
parts.extend(['--tmpfs', destination])
parts.append(run.pop('image'))
parts.extend(run.pop('cmd', []))
return parts
def build_docker_build_command(configuration):
"""
Translate a declarative docker `configuration` to a `docker build` command.
Parameters
----------
configuration : dict
configuration
Returns
-------
args : list
sequence of command line arguments to build an image
"""
parts = configuration.pop('docker', 'docker').split()
parts.append('build')
build = configuration.pop('build')
build['path'] = os.path.join(configuration['workspace'], build['path'])
build['file'] = os.path.join(build['path'], build['file'])
parts.extend(build_parameter_parts(
build, 'tag', 'file', 'no-cache', 'quiet', 'cpu-shares', 'memory'))
parts.extend(build_dict_parameter_parts(build, 'build-arg'))
parts.append(build.pop('path'))
return parts
|
spotify/docker_interface | docker_interface/docker_interface.py | build_dict_parameter_parts | python | def build_dict_parameter_parts(configuration, *parameters, **defaults):
for parameter in parameters:
for key, value in configuration.pop(parameter, {}).items():
yield '--%s=%s=%s' % (parameter, key, value) | Construct command parts for one or more parameters, each of which constitutes an assignment of
the form `key=value`.
Parameters
----------
configuration : dict
configuration
parameters : list
list of parameters to create command line arguments for
defaults : dict
default values to use if a parameter is missing
Yields
------
argument : str
command line argument | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/docker_interface.py#L43-L64 | null | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def build_parameter_parts(configuration, *parameters):
"""
Construct command parts for one or more parameters.
Parameters
----------
configuration : dict
configuration
parameters : list
list of parameters to create command line arguments for
Yields
------
argument : str
command line argument
"""
for parameter in parameters:
values = configuration.pop(parameter, [])
if values:
if not isinstance(values, list):
values = [values]
for value in values:
yield '--%s=%s' % (parameter, value)
def build_docker_run_command(configuration):
"""
Translate a declarative docker `configuration` to a `docker run` command.
Parameters
----------
configuration : dict
configuration
Returns
-------
args : list
sequence of command line arguments to run a command in a container
"""
parts = configuration.pop('docker').split()
parts.append('run')
run = configuration.pop('run')
# Ensure all env-files have proper paths
if 'env-file' in run:
run['env-file'] = [os.path.join(configuration['workspace'], env_file)
for env_file in run['env-file']]
parts.extend(build_parameter_parts(
run, 'user', 'workdir', 'rm', 'interactive', 'tty', 'env-file', 'cpu-shares', 'name',
'network', 'label', 'memory', 'entrypoint', 'runtime', 'privileged', 'group-add'
))
# Add the mounts
# The following code requires docker >= 17.06
'''for mount in run.pop('mount', []):
if mount['type'] == 'bind':
mount['source'] = os.path.join(
configuration['workspace'], mount['source'])
parts.extend(['--mount', ",".join(["%s=%s" % item for item in mount.items()])])'''
# Add the mounts
for mount in run.pop('mount', []):
if mount['type'] == 'tmpfs':
raise RuntimeError('tmpfs-mounts are currently not supported via the mount ' +
'directive in docker_interface. Consider using the tmpfs ' +
'directive instead.')
if mount['type'] == 'bind':
mount['source'] = os.path.abspath(
os.path.join(configuration['workspace'], mount['source']))
vol_config = '--volume=%s:%s' % (mount['source'], mount['destination'])
if 'readonly' in mount and mount['readonly']:
vol_config += ':ro'
parts.append(vol_config)
# Set or forward environment variables
for key, value in run.pop('env', {}).items():
if value is None:
parts.append('--env=%s' % key)
else:
parts.append('--env=%s=%s' % (key, value))
parts.append('--env=DOCKER_INTERFACE=true')
# Forward ports
for publish in run.pop('publish', []):
parts.append('--publish=%s:%s:%s' % tuple([
publish.get(key, '') for key in "ip host container".split()]))
# Add temporary file systems
for tmpfs in run.pop('tmpfs', []):
destination = tmpfs['destination']
options = tmpfs.pop('options', [])
for key in ['mode', 'size']:
if key in tmpfs:
options.append('%s=%s' % (key, tmpfs[key]))
if options:
destination = "%s:%s" % (destination, ",".join(options))
parts.extend(['--tmpfs', destination])
parts.append(run.pop('image'))
parts.extend(run.pop('cmd', []))
return parts
def build_docker_build_command(configuration):
"""
Translate a declarative docker `configuration` to a `docker build` command.
Parameters
----------
configuration : dict
configuration
Returns
-------
args : list
sequence of command line arguments to build an image
"""
parts = configuration.pop('docker', 'docker').split()
parts.append('build')
build = configuration.pop('build')
build['path'] = os.path.join(configuration['workspace'], build['path'])
build['file'] = os.path.join(build['path'], build['file'])
parts.extend(build_parameter_parts(
build, 'tag', 'file', 'no-cache', 'quiet', 'cpu-shares', 'memory'))
parts.extend(build_dict_parameter_parts(build, 'build-arg'))
parts.append(build.pop('path'))
return parts
|
spotify/docker_interface | docker_interface/docker_interface.py | build_docker_run_command | python | def build_docker_run_command(configuration):
parts = configuration.pop('docker').split()
parts.append('run')
run = configuration.pop('run')
# Ensure all env-files have proper paths
if 'env-file' in run:
run['env-file'] = [os.path.join(configuration['workspace'], env_file)
for env_file in run['env-file']]
parts.extend(build_parameter_parts(
run, 'user', 'workdir', 'rm', 'interactive', 'tty', 'env-file', 'cpu-shares', 'name',
'network', 'label', 'memory', 'entrypoint', 'runtime', 'privileged', 'group-add'
))
# Add the mounts
# The following code requires docker >= 17.06
'''for mount in run.pop('mount', []):
if mount['type'] == 'bind':
mount['source'] = os.path.join(
configuration['workspace'], mount['source'])
parts.extend(['--mount', ",".join(["%s=%s" % item for item in mount.items()])])'''
# Add the mounts
for mount in run.pop('mount', []):
if mount['type'] == 'tmpfs':
raise RuntimeError('tmpfs-mounts are currently not supported via the mount ' +
'directive in docker_interface. Consider using the tmpfs ' +
'directive instead.')
if mount['type'] == 'bind':
mount['source'] = os.path.abspath(
os.path.join(configuration['workspace'], mount['source']))
vol_config = '--volume=%s:%s' % (mount['source'], mount['destination'])
if 'readonly' in mount and mount['readonly']:
vol_config += ':ro'
parts.append(vol_config)
# Set or forward environment variables
for key, value in run.pop('env', {}).items():
if value is None:
parts.append('--env=%s' % key)
else:
parts.append('--env=%s=%s' % (key, value))
parts.append('--env=DOCKER_INTERFACE=true')
# Forward ports
for publish in run.pop('publish', []):
parts.append('--publish=%s:%s:%s' % tuple([
publish.get(key, '') for key in "ip host container".split()]))
# Add temporary file systems
for tmpfs in run.pop('tmpfs', []):
destination = tmpfs['destination']
options = tmpfs.pop('options', [])
for key in ['mode', 'size']:
if key in tmpfs:
options.append('%s=%s' % (key, tmpfs[key]))
if options:
destination = "%s:%s" % (destination, ",".join(options))
parts.extend(['--tmpfs', destination])
parts.append(run.pop('image'))
parts.extend(run.pop('cmd', []))
return parts | Translate a declarative docker `configuration` to a `docker run` command.
Parameters
----------
configuration : dict
configuration
Returns
-------
args : list
sequence of command line arguments to run a command in a container | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/docker_interface.py#L67-L145 | [
"def build_parameter_parts(configuration, *parameters):\n \"\"\"\n Construct command parts for one or more parameters.\n\n Parameters\n ----------\n configuration : dict\n configuration\n parameters : list\n list of parameters to create command line arguments for\n\n Yields\n ------\n argument : str\n command line argument\n \"\"\"\n for parameter in parameters:\n values = configuration.pop(parameter, [])\n if values:\n if not isinstance(values, list):\n values = [values]\n for value in values:\n yield '--%s=%s' % (parameter, value)\n"
] | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def build_parameter_parts(configuration, *parameters):
"""
Construct command parts for one or more parameters.
Parameters
----------
configuration : dict
configuration
parameters : list
list of parameters to create command line arguments for
Yields
------
argument : str
command line argument
"""
for parameter in parameters:
values = configuration.pop(parameter, [])
if values:
if not isinstance(values, list):
values = [values]
for value in values:
yield '--%s=%s' % (parameter, value)
def build_dict_parameter_parts(configuration, *parameters, **defaults):
"""
Construct command parts for one or more parameters, each of which constitutes an assignment of
the form `key=value`.
Parameters
----------
configuration : dict
configuration
parameters : list
list of parameters to create command line arguments for
defaults : dict
default values to use if a parameter is missing
Yields
------
argument : str
command line argument
"""
for parameter in parameters:
for key, value in configuration.pop(parameter, {}).items():
yield '--%s=%s=%s' % (parameter, key, value)
def build_docker_build_command(configuration):
"""
Translate a declarative docker `configuration` to a `docker build` command.
Parameters
----------
configuration : dict
configuration
Returns
-------
args : list
sequence of command line arguments to build an image
"""
parts = configuration.pop('docker', 'docker').split()
parts.append('build')
build = configuration.pop('build')
build['path'] = os.path.join(configuration['workspace'], build['path'])
build['file'] = os.path.join(build['path'], build['file'])
parts.extend(build_parameter_parts(
build, 'tag', 'file', 'no-cache', 'quiet', 'cpu-shares', 'memory'))
parts.extend(build_dict_parameter_parts(build, 'build-arg'))
parts.append(build.pop('path'))
return parts
|
spotify/docker_interface | docker_interface/docker_interface.py | build_docker_build_command | python | def build_docker_build_command(configuration):
parts = configuration.pop('docker', 'docker').split()
parts.append('build')
build = configuration.pop('build')
build['path'] = os.path.join(configuration['workspace'], build['path'])
build['file'] = os.path.join(build['path'], build['file'])
parts.extend(build_parameter_parts(
build, 'tag', 'file', 'no-cache', 'quiet', 'cpu-shares', 'memory'))
parts.extend(build_dict_parameter_parts(build, 'build-arg'))
parts.append(build.pop('path'))
return parts | Translate a declarative docker `configuration` to a `docker build` command.
Parameters
----------
configuration : dict
configuration
Returns
-------
args : list
sequence of command line arguments to build an image | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/docker_interface.py#L148-L176 | [
"def build_parameter_parts(configuration, *parameters):\n \"\"\"\n Construct command parts for one or more parameters.\n\n Parameters\n ----------\n configuration : dict\n configuration\n parameters : list\n list of parameters to create command line arguments for\n\n Yields\n ------\n argument : str\n command line argument\n \"\"\"\n for parameter in parameters:\n values = configuration.pop(parameter, [])\n if values:\n if not isinstance(values, list):\n values = [values]\n for value in values:\n yield '--%s=%s' % (parameter, value)\n",
"def build_dict_parameter_parts(configuration, *parameters, **defaults):\n \"\"\"\n Construct command parts for one or more parameters, each of which constitutes an assignment of\n the form `key=value`.\n\n Parameters\n ----------\n configuration : dict\n configuration\n parameters : list\n list of parameters to create command line arguments for\n defaults : dict\n default values to use if a parameter is missing\n\n Yields\n ------\n argument : str\n command line argument\n \"\"\"\n for parameter in parameters:\n for key, value in configuration.pop(parameter, {}).items():\n yield '--%s=%s=%s' % (parameter, key, value)\n"
] | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def build_parameter_parts(configuration, *parameters):
"""
Construct command parts for one or more parameters.
Parameters
----------
configuration : dict
configuration
parameters : list
list of parameters to create command line arguments for
Yields
------
argument : str
command line argument
"""
for parameter in parameters:
values = configuration.pop(parameter, [])
if values:
if not isinstance(values, list):
values = [values]
for value in values:
yield '--%s=%s' % (parameter, value)
def build_dict_parameter_parts(configuration, *parameters, **defaults):
"""
Construct command parts for one or more parameters, each of which constitutes an assignment of
the form `key=value`.
Parameters
----------
configuration : dict
configuration
parameters : list
list of parameters to create command line arguments for
defaults : dict
default values to use if a parameter is missing
Yields
------
argument : str
command line argument
"""
for parameter in parameters:
for key, value in configuration.pop(parameter, {}).items():
yield '--%s=%s=%s' % (parameter, key, value)
def build_docker_run_command(configuration):
"""
Translate a declarative docker `configuration` to a `docker run` command.
Parameters
----------
configuration : dict
configuration
Returns
-------
args : list
sequence of command line arguments to run a command in a container
"""
parts = configuration.pop('docker').split()
parts.append('run')
run = configuration.pop('run')
# Ensure all env-files have proper paths
if 'env-file' in run:
run['env-file'] = [os.path.join(configuration['workspace'], env_file)
for env_file in run['env-file']]
parts.extend(build_parameter_parts(
run, 'user', 'workdir', 'rm', 'interactive', 'tty', 'env-file', 'cpu-shares', 'name',
'network', 'label', 'memory', 'entrypoint', 'runtime', 'privileged', 'group-add'
))
# Add the mounts
# The following code requires docker >= 17.06
'''for mount in run.pop('mount', []):
if mount['type'] == 'bind':
mount['source'] = os.path.join(
configuration['workspace'], mount['source'])
parts.extend(['--mount', ",".join(["%s=%s" % item for item in mount.items()])])'''
# Add the mounts
for mount in run.pop('mount', []):
if mount['type'] == 'tmpfs':
raise RuntimeError('tmpfs-mounts are currently not supported via the mount ' +
'directive in docker_interface. Consider using the tmpfs ' +
'directive instead.')
if mount['type'] == 'bind':
mount['source'] = os.path.abspath(
os.path.join(configuration['workspace'], mount['source']))
vol_config = '--volume=%s:%s' % (mount['source'], mount['destination'])
if 'readonly' in mount and mount['readonly']:
vol_config += ':ro'
parts.append(vol_config)
# Set or forward environment variables
for key, value in run.pop('env', {}).items():
if value is None:
parts.append('--env=%s' % key)
else:
parts.append('--env=%s=%s' % (key, value))
parts.append('--env=DOCKER_INTERFACE=true')
# Forward ports
for publish in run.pop('publish', []):
parts.append('--publish=%s:%s:%s' % tuple([
publish.get(key, '') for key in "ip host container".split()]))
# Add temporary file systems
for tmpfs in run.pop('tmpfs', []):
destination = tmpfs['destination']
options = tmpfs.pop('options', [])
for key in ['mode', 'size']:
if key in tmpfs:
options.append('%s=%s' % (key, tmpfs[key]))
if options:
destination = "%s:%s" % (destination, ",".join(options))
parts.extend(['--tmpfs', destination])
parts.append(run.pop('image'))
parts.extend(run.pop('cmd', []))
return parts
|
spotify/docker_interface | docker_interface/plugins/base.py | Plugin.add_argument | python | def add_argument(self, parser, path, name=None, schema=None, **kwargs):
schema = schema or self.SCHEMA
name = name or ('--%s' % os.path.basename(path))
self.arguments[name.strip('-')] = path
# Build a path to the help in the schema
path = util.split_path(path)
path = os.path.sep.join(
it.chain([os.path.sep], *zip(it.repeat("properties"), path)))
property_ = util.get_value(schema, path)
defaults = {
'choices': property_.get('enum'),
'help': property_.get('description')
}
if 'type' in property_:
defaults['type'] = util.TYPES[property_['type']]
defaults.update(kwargs)
return parser.add_argument(name, **defaults) | Add an argument to the `parser` based on a schema definition.
Parameters
----------
parser : argparse.ArgumentParser
parser to add an argument to
path : str
path in the configuration document to add an argument for
name : str or None
name of the command line parameter (defaults to the name in the schema)
schema : dict
JSON schema definition (defaults to the schema of the plugin)
Returns
-------
arg :
command line argument definition | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/plugins/base.py#L42-L77 | [
"def split_path(path, ref=None):\n \"\"\"\n Split a path into its components.\n\n Parameters\n ----------\n path : str\n absolute or relative path with respect to `ref`\n ref : str or None\n reference path if `path` is relative\n\n Returns\n -------\n list : str\n components of the path\n \"\"\"\n path = abspath(path, ref)\n return path.strip(os.path.sep).split(os.path.sep)\n",
"def get_value(instance, path, ref=None):\n \"\"\"\n Get the value from `instance` at the given `path`.\n\n Parameters\n ----------\n instance : dict or list\n instance from which to retrieve a value\n path : str\n path to retrieve a value from\n ref : str or None\n reference path if `path` is relative\n\n Returns\n -------\n value :\n value at `path` in `instance`\n\n Raises\n ------\n KeyError\n if `path` is not valid\n TypeError\n if a value along the `path` is not a list or dictionary\n \"\"\"\n for part in split_path(path, ref):\n if isinstance(instance, list):\n part = int(part)\n elif not isinstance(instance, dict):\n raise TypeError(\"expected `list` or `dict` but got `%s`\" % instance)\n try:\n instance = instance[part]\n except KeyError:\n raise KeyError(abspath(path, ref))\n return instance\n"
] | class Plugin:
"""
Abstract base class for plugins.
"""
ENABLED = True
SCHEMA = {}
ORDER = None
COMMANDS = None
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
self.arguments = {}
def add_arguments(self, parser):
"""
Add arguments to the parser.
Inheriting plugins should implement this method to add parameters to the command line
parser.
Parameters
----------
parser : argparse.ArgumentParser
parser to add arguments to
"""
pass
def apply(self, configuration, schema, args):
"""
Apply the plugin to the configuration.
Inheriting plugins should implement this method to add additional functionality.
Parameters
----------
configuration : dict
configuration
schema : dict
JSON schema
args : argparse.NameSpace
parsed command line arguments
Returns
-------
configuration : dict
updated configuration after applying the plugin
"""
# Set values from the command line
for name, path in self.arguments.items():
value = getattr(args, name.replace('-', '_'))
if value is not None:
util.set_value(configuration, path, value)
return configuration
@staticmethod
def load_plugins():
"""
        Load all available plugins.
Returns
-------
plugin_cls : dict
mapping from plugin names to plugin classes
"""
plugin_cls = {}
for entry_point in pkg_resources.iter_entry_points('docker_interface.plugins'):
cls = entry_point.load()
assert cls.COMMANDS is not None, \
"plugin '%s' does not define its commands" % entry_point.name
assert cls.ORDER is not None, \
"plugin '%s' does not define its priority" % entry_point.name
plugin_cls[entry_point.name] = cls
return plugin_cls
def cleanup(self):
"""
Tear down the plugin and clean up any resources used.
Inheriting plugins should implement this method to add additional functionality.
"""
pass
|
spotify/docker_interface | docker_interface/plugins/base.py | Plugin.apply | python | def apply(self, configuration, schema, args):
# Set values from the command line
for name, path in self.arguments.items():
value = getattr(args, name.replace('-', '_'))
if value is not None:
util.set_value(configuration, path, value)
return configuration | Apply the plugin to the configuration.
Inheriting plugins should implement this method to add additional functionality.
Parameters
----------
configuration : dict
configuration
schema : dict
JSON schema
args : argparse.NameSpace
parsed command line arguments
Returns
-------
configuration : dict
updated configuration after applying the plugin | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/plugins/base.py#L93-L119 | [
"def set_value(instance, path, value, ref=None):\n \"\"\"\n Set `value` on `instance` at the given `path` and create missing intermediate objects.\n\n Parameters\n ----------\n instance : dict or list\n instance from which to retrieve a value\n path : str\n path to retrieve a value from\n value :\n value to set\n ref : str or None\n reference path if `path` is relative\n \"\"\"\n *head, tail = split_path(path, ref)\n for part in head:\n instance = instance.setdefault(part, {})\n instance[tail] = value\n"
] | class Plugin:
"""
Abstract base class for plugins.
"""
ENABLED = True
SCHEMA = {}
ORDER = None
COMMANDS = None
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
self.arguments = {}
def add_argument(self, parser, path, name=None, schema=None, **kwargs):
"""
Add an argument to the `parser` based on a schema definition.
Parameters
----------
parser : argparse.ArgumentParser
parser to add an argument to
path : str
path in the configuration document to add an argument for
name : str or None
name of the command line parameter (defaults to the name in the schema)
schema : dict
JSON schema definition (defaults to the schema of the plugin)
Returns
-------
arg :
command line argument definition
"""
schema = schema or self.SCHEMA
name = name or ('--%s' % os.path.basename(path))
self.arguments[name.strip('-')] = path
# Build a path to the help in the schema
path = util.split_path(path)
path = os.path.sep.join(
it.chain([os.path.sep], *zip(it.repeat("properties"), path)))
property_ = util.get_value(schema, path)
defaults = {
'choices': property_.get('enum'),
'help': property_.get('description')
}
if 'type' in property_:
defaults['type'] = util.TYPES[property_['type']]
defaults.update(kwargs)
return parser.add_argument(name, **defaults)
def add_arguments(self, parser):
"""
Add arguments to the parser.
Inheriting plugins should implement this method to add parameters to the command line
parser.
Parameters
----------
parser : argparse.ArgumentParser
parser to add arguments to
"""
pass
@staticmethod
def load_plugins():
"""
        Load all available plugins.
Returns
-------
plugin_cls : dict
mapping from plugin names to plugin classes
"""
plugin_cls = {}
for entry_point in pkg_resources.iter_entry_points('docker_interface.plugins'):
cls = entry_point.load()
assert cls.COMMANDS is not None, \
"plugin '%s' does not define its commands" % entry_point.name
assert cls.ORDER is not None, \
"plugin '%s' does not define its priority" % entry_point.name
plugin_cls[entry_point.name] = cls
return plugin_cls
def cleanup(self):
"""
Tear down the plugin and clean up any resources used.
Inheriting plugins should implement this method to add additional functionality.
"""
pass
|
spotify/docker_interface | docker_interface/plugins/base.py | Plugin.load_plugins | python | def load_plugins():
plugin_cls = {}
for entry_point in pkg_resources.iter_entry_points('docker_interface.plugins'):
cls = entry_point.load()
assert cls.COMMANDS is not None, \
"plugin '%s' does not define its commands" % entry_point.name
assert cls.ORDER is not None, \
"plugin '%s' does not define its priority" % entry_point.name
plugin_cls[entry_point.name] = cls
    return plugin_cls | Load all available plugins.
Returns
-------
plugin_cls : dict
mapping from plugin names to plugin classes | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/plugins/base.py#L122-L139 | null | class Plugin:
"""
Abstract base class for plugins.
"""
ENABLED = True
SCHEMA = {}
ORDER = None
COMMANDS = None
def __init__(self):
self.logger = logging.getLogger(self.__class__.__name__)
self.arguments = {}
def add_argument(self, parser, path, name=None, schema=None, **kwargs):
"""
Add an argument to the `parser` based on a schema definition.
Parameters
----------
parser : argparse.ArgumentParser
parser to add an argument to
path : str
path in the configuration document to add an argument for
name : str or None
name of the command line parameter (defaults to the name in the schema)
schema : dict
JSON schema definition (defaults to the schema of the plugin)
Returns
-------
arg :
command line argument definition
"""
schema = schema or self.SCHEMA
name = name or ('--%s' % os.path.basename(path))
self.arguments[name.strip('-')] = path
# Build a path to the help in the schema
path = util.split_path(path)
path = os.path.sep.join(
it.chain([os.path.sep], *zip(it.repeat("properties"), path)))
property_ = util.get_value(schema, path)
defaults = {
'choices': property_.get('enum'),
'help': property_.get('description')
}
if 'type' in property_:
defaults['type'] = util.TYPES[property_['type']]
defaults.update(kwargs)
return parser.add_argument(name, **defaults)
def add_arguments(self, parser):
"""
Add arguments to the parser.
Inheriting plugins should implement this method to add parameters to the command line
parser.
Parameters
----------
parser : argparse.ArgumentParser
parser to add arguments to
"""
pass
def apply(self, configuration, schema, args):
"""
Apply the plugin to the configuration.
Inheriting plugins should implement this method to add additional functionality.
Parameters
----------
configuration : dict
configuration
schema : dict
JSON schema
args : argparse.NameSpace
parsed command line arguments
Returns
-------
configuration : dict
updated configuration after applying the plugin
"""
# Set values from the command line
for name, path in self.arguments.items():
value = getattr(args, name.replace('-', '_'))
if value is not None:
util.set_value(configuration, path, value)
return configuration
@staticmethod
def cleanup(self):
"""
Tear down the plugin and clean up any resources used.
Inheriting plugins should implement this method to add additional functionality.
"""
pass
|
spotify/docker_interface | docker_interface/plugins/base.py | ExecutePlugin.execute_command | python | def execute_command(self, parts, dry_run):
if dry_run:
self.logger.info("dry-run command '%s'", " ".join(map(str, parts)))
return 0
else: # pragma: no cover
self.logger.debug("executing command '%s'", " ".join(map(str, parts)))
status_code = os.spawnvpe(os.P_WAIT, parts[0], parts, os.environ)
if status_code:
self.logger.warning("command '%s' returned status code %d",
" ".join(map(str, parts)), status_code)
return status_code | Execute a command.
Parameters
----------
parts : list
Sequence of strings constituting a command.
dry_run : bool
Whether to just log the command instead of executing it.
Returns
-------
status : int
Status code of the executed command or 0 if `dry_run` is `True`. | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/plugins/base.py#L199-L224 | null | class ExecutePlugin(Plugin):
"""
Base class for plugins that execute shell commands.
Inheriting classes should define the method :code:`build_command` which takes a configuration
document as its only argument.
"""
def build_command(self, configuration):
"""
Construct a command and return its parts.
Parameters
----------
configuration : dict
configuration
Returns
-------
args : list
sequence of command line arguments
"""
raise NotImplementedError
def apply(self, configuration, schema, args):
super(ExecutePlugin, self).apply(configuration, schema, args)
parts = self.build_command(configuration)
if parts:
configuration['status-code'] = self.execute_command(parts, configuration['dry-run'])
else:
configuration['status-code'] = 0
return configuration
|
spotify/docker_interface | docker_interface/plugins/base.py | SubstitutionPlugin.substitute_variables | python | def substitute_variables(cls, configuration, value, ref):
if isinstance(value, str):
# Substitute all intra-document references
while True:
match = cls.REF_PATTERN.search(value)
if match is None:
break
path = os.path.join(os.path.dirname(ref), match.group('path'))
try:
value = value.replace(
match.group(0), str(util.get_value(configuration, path)))
except KeyError:
raise KeyError(path)
# Substitute all variable references
while True:
match = cls.VAR_PATTERN.search(value)
if match is None:
break
value = value.replace(
match.group(0),
str(util.get_value(cls.VARIABLES, match.group('path'), '/')))
return value | Substitute variables in `value` from `configuration` where any path reference is relative to
`ref`.
Parameters
----------
configuration : dict
configuration (required to resolve intra-document references)
value :
value to resolve substitutions for
ref : str
path to `value` in the `configuration`
Returns
-------
value :
value after substitution | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/plugins/base.py#L347-L387 | [
"def get_value(instance, path, ref=None):\n \"\"\"\n Get the value from `instance` at the given `path`.\n\n Parameters\n ----------\n instance : dict or list\n instance from which to retrieve a value\n path : str\n path to retrieve a value from\n ref : str or None\n reference path if `path` is relative\n\n Returns\n -------\n value :\n value at `path` in `instance`\n\n Raises\n ------\n KeyError\n if `path` is not valid\n TypeError\n if a value along the `path` is not a list or dictionary\n \"\"\"\n for part in split_path(path, ref):\n if isinstance(instance, list):\n part = int(part)\n elif not isinstance(instance, dict):\n raise TypeError(\"expected `list` or `dict` but got `%s`\" % instance)\n try:\n instance = instance[part]\n except KeyError:\n raise KeyError(abspath(path, ref))\n return instance\n"
] | class SubstitutionPlugin(Plugin):
"""
Substitute variables in strings.
String values in the configuration document may
* reference other parts of the configuration document using :code:`#{path}`, where :code:`path`
may be an absolute or relative path in the document.
* reference a variable using :code:`${path}`, where :code:`path` is assumed to be an absolute
path in the :code:`VARIABLES` class attribute of the plugin.
By default, the plugin provides environment variables using the :code:`env` prefix. For example,
a value could reference the user name on the host using :code:`${env/USER}`. Other plugins can
provide variables for substitution by extending the :code:`VARIABLES` class attribute and should
do so using a unique prefix.
"""
REF_PATTERN = re.compile(r'#\{(?P<path>.*?)\}')
VAR_PATTERN = re.compile(r'\$\{(?P<path>.*?)\}')
COMMANDS = 'all'
ORDER = 980
VARIABLES = {
'env': dict(os.environ)
}
@classmethod
def apply(self, configuration, schema, args):
super(SubstitutionPlugin, self).apply(configuration, schema, args)
return util.apply(configuration, ft.partial(self.substitute_variables, configuration))
|
spotify/docker_interface | docker_interface/plugins/user.py | UserPlugin.get_user_group | python | def get_user_group(self, user=None, group=None):
user = user or os.getuid()
# Convert the information we have obtained to a user object
try:
try:
user = pwd.getpwuid(int(user))
except ValueError:
user = pwd.getpwnam(user)
except KeyError as ex: # pragma: no cover
self.logger.fatal("could not resolve user: %s", ex)
raise
# Get the group
group = group or user.pw_gid
try:
try:
group = grp.getgrgid(int(group))
except ValueError:
group = grp.getgrnam(group)
except KeyError as ex: # pragma: no cover
self.logger.fatal("could not resolve group:%s", ex)
raise
return user, group | Get the user and group information.
Parameters
----------
user : str
User name or user id (default is the `os.getuid()`).
group : str
Group name or group id (default is the group of `user`).
Returns
-------
user : pwd.struct_passwd
User object.
group : grp.struct_group
Group object. | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/plugins/user.py#L60-L100 | null | class UserPlugin(Plugin):
"""
Share the host user id and group id with the container.
The plugin provides the following additional variables for substitution:
* :code:`user/name`: Name of the user on the host.
* :code:`user/uid`: User id of the user on the host.
* :code:`group/name`: Name of the user group on the host.
* :code:`group/gid`: Group id of the user group on the host.
"""
COMMANDS = ['run']
ORDER = 510
SCHEMA = {
"properties": {
"run": {
"properties": {
"user": util.get_value(
RunConfigurationPlugin.SCHEMA, '/properties/run/properties/user')
},
"additionalProperties": False
}
},
"additionalProperties": False
}
def __init__(self):
super(UserPlugin, self).__init__()
self.tempdir = None
def add_arguments(self, parser):
self.add_argument(parser, '/run/user')
def apply(self, configuration, schema, args):
# Do not call the super class because we want to do something more sophisticated with the
# arguments
user, group = self.get_user_group(*(args.user or '').split(':'))
SubstitutionPlugin.VARIABLES['user'] = {
'uid': user.pw_uid,
'name': user.pw_name,
}
SubstitutionPlugin.VARIABLES['group'] = {
'gid': group.gr_gid,
'name': group.gr_name,
}
util.set_value(configuration, '/run/user', "${user/uid}:${group/gid}")
# Create a temporary directory and copy the group and passwd files
if configuration['dry-run']:
self.logger.warning("cannot mount /etc/passwd and /etc/groups during dry-run")
else:
self.tempdir = tempfile.TemporaryDirectory(dir='/tmp')
name = uuid.uuid4().hex
# Create a docker image
image = util.get_value(configuration, '/run/image')
image = SubstitutionPlugin.substitute_variables(configuration, image, '/run')
status = subprocess.call([configuration['docker'], 'create', '--name', name, image, 'sh'])
if status:
raise RuntimeError(
"Could not create container from image '%s'. Did you run `di build`?" % image)
# Copy out the passwd and group files, mount them, and append the necessary information
for filename in ['passwd', 'group']:
path = os.path.join(self.tempdir.name, filename)
subprocess.check_call([
configuration['docker'], 'cp', '%s:/etc/%s' % (name, filename), path])
util.set_default(configuration, '/run/mount', []).append({
'type': 'bind',
'source': path,
'destination': '/etc/%s' % filename
})
with open(path, 'a') as fp:
variables = {
'user': user.pw_name,
'uid': user.pw_uid,
'group': group.gr_name,
'gid': group.gr_gid
}
if filename == 'passwd':
line = "%(user)s:x:%(uid)d:%(gid)d:%(user)s:/%(user)s:/bin/sh\n" % variables
else:
line = "%(group)s:x:%(gid)d:%(user)s\n" % variables
fp.write(line)
assert os.path.isfile(path)
# Destroy the container
subprocess.check_call(['docker', 'rm', name])
return configuration
def cleanup(self):
if self.tempdir:
self.tempdir.cleanup()
|
spotify/docker_interface | docker_interface/cli.py | entry_point | python | def entry_point(args=None, configuration=None):
# Parse basic information
parser = argparse.ArgumentParser('di')
base = BasePlugin()
base.add_arguments(parser)
args, remainder = parser.parse_known_args(args)
command = args.command
configuration = base.apply(configuration, None, args)
logger = logging.getLogger('di')
# Load all plugins and en/disable as desired
plugin_cls = Plugin.load_plugins()
plugins = configuration.get('plugins')
if isinstance(plugins, list):
plugins = [plugin_cls[name.lower()] for name in plugins]
else:
# Disable and enable specific plugins
if isinstance(plugins, dict):
try:
for name in plugins.get('enable', []):
plugin_cls[name.lower()].ENABLED = True
for name in plugins.get('disable', []):
plugin_cls[name.lower()].ENABLED = False
except KeyError as ex: # pragma: no cover
logger.fatal("could not resolve plugin %s. Available plugins: %s",
ex, ", ".join(plugin_cls))
raise SystemExit(2)
elif plugins is not None: # pragma: no cover
logger.fatal("'plugins' must be a `list`, `dict`, or `None` but got `%s`",
type(plugins))
raise SystemExit(2)
# Restrict plugins to enabled ones
plugins = list(sorted([cls() for cls in plugin_cls.values() if cls.ENABLED],
key=lambda x: x.ORDER))
# Construct the schema
schema = base.SCHEMA
for cls in plugin_cls.values():
schema = util.merge(schema, cls.SCHEMA)
# Ensure that the plugins are relevant to the command
plugins = [plugin for plugin in plugins
if plugin.COMMANDS == 'all' or command in plugin.COMMANDS]
parser = argparse.ArgumentParser('di %s' % command)
for plugin in plugins:
plugin.add_arguments(parser)
args = parser.parse_args(remainder)
# Apply defaults
util.set_default_from_schema(configuration, schema)
# Apply all the plugins in order
status_code = 0
logger.debug("configuration:\n%s", json.dumps(configuration, indent=4))
for plugin in plugins:
logger.debug("applying plugin '%s'", plugin)
try:
configuration = plugin.apply(configuration, schema, args)
assert configuration is not None, "plugin '%s' returned `None`" % plugin
except Exception as ex: # pragma: no cover
logger.exception("failed to apply plugin '%s': %s", plugin, ex)
message = "please rerun the command using `di --log-level debug` and file a new " \
"issue containing the output of the command here: https://github.com/" \
"spotify/docker_interface/issues/new"
logger.fatal("\033[%dm%s\033[0m", 31, message)
status_code = 3
break
logger.debug("configuration:\n%s", json.dumps(configuration, indent=4))
for plugin in reversed(plugins):
logger.debug("tearing down plugin '%s'", plugin)
plugin.cleanup()
status_code = configuration.get('status-code', status_code)
if status_code:
raise SystemExit(status_code) | Standard entry point for the docker interface CLI.
Parameters
----------
args : list or None
list of command line arguments or `None` to use `sys.argv`
configuration : dict
parsed configuration or `None` to load and build a configuration given the command line
arguments
Raises
------
SystemExit
if the configuration is malformed or the docker subprocesses returns a non-zero status code | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/cli.py#L23-L116 | [
"def merge(x, y):\n \"\"\"\n Merge two dictionaries and raise an error for inconsistencies.\n\n Parameters\n ----------\n x : dict\n dictionary x\n y : dict\n dictionary y\n\n Returns\n -------\n x : dict\n merged dictionary\n\n Raises\n ------\n ValueError\n if `x` and `y` are inconsistent\n \"\"\"\n keys_x = set(x)\n keys_y = set(y)\n\n for key in keys_y - keys_x:\n x[key] = y[key]\n\n for key in keys_x & keys_y:\n value_x = x[key]\n value_y = y[key]\n\n if isinstance(value_x, dict) and isinstance(value_y, dict):\n x[key] = merge(value_x, value_y)\n else:\n if value_x != value_y:\n raise ValueError\n\n return x\n",
"def set_default_from_schema(instance, schema):\n \"\"\"\n Populate default values on an `instance` given a `schema`.\n\n Parameters\n ----------\n instance : dict\n instance to populate default values for\n schema : dict\n JSON schema with default values\n\n Returns\n -------\n instance : dict\n instance with populated default values\n \"\"\"\n for name, property_ in schema.get('properties', {}).items():\n # Set the defaults at this level of the schema\n if 'default' in property_:\n instance.setdefault(name, property_['default'])\n # Descend one level if the property is an object\n if 'properties' in property_:\n set_default_from_schema(instance.setdefault(name, {}), property_)\n return instance\n",
"def load_plugins():\n \"\"\"\n Load all availabe plugins.\n\n Returns\n -------\n plugin_cls : dict\n mapping from plugin names to plugin classes\n \"\"\"\n plugin_cls = {}\n for entry_point in pkg_resources.iter_entry_points('docker_interface.plugins'):\n cls = entry_point.load()\n assert cls.COMMANDS is not None, \\\n \"plugin '%s' does not define its commands\" % entry_point.name\n assert cls.ORDER is not None, \\\n \"plugin '%s' does not define its priority\" % entry_point.name\n plugin_cls[entry_point.name] = cls\n return plugin_cls\n",
"def add_arguments(self, parser):\n parser.add_argument('--file', '-f', help='Configuration file.', default='di.yml')\n self.add_argument(parser, '/workspace')\n self.add_argument(parser, '/docker')\n self.add_argument(parser, '/log-level')\n self.add_argument(parser, '/dry-run')\n parser.add_argument('command', help='Docker interface command to execute.',\n choices=['run', 'build'])\n",
"def apply(self, configuration, schema, args):\n # Load the configuration\n if configuration is None and os.path.isfile(args.file):\n filename = os.path.abspath(args.file)\n with open(filename) as fp: # pylint: disable=invalid-name\n configuration = yaml.load(fp)\n self.logger.debug(\"loaded configuration from '%s'\", filename)\n dirname = os.path.dirname(filename)\n configuration['workspace'] = os.path.join(dirname, configuration.get('workspace', '.'))\n elif configuration is None:\n raise FileNotFoundError(\n \"missing configuration; could not find configuration file '%s'\" % args.file)\n\n configuration = super(BasePlugin, self).apply(configuration, schema, args)\n\n logging.basicConfig(level=configuration.get('log-level', 'info').upper())\n return configuration\n"
] | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
from .plugins import Plugin, BasePlugin
from . import util
|
vsergeev/python-periphery | periphery/led.py | LED.read | python | def read(self):
# Read value
try:
buf = os.read(self._fd, 8)
except OSError as e:
raise LEDError(e.errno, "Reading LED brightness: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror)
return int(buf) | Read the brightness of the LED.
Returns:
int: Current brightness.
Raises:
LEDError: if an I/O or OS error occurs. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/led.py#L78-L100 | null | class LED(object):
def __init__(self, name, brightness=None):
"""Instantiate an LED object and open the sysfs LED corresponding to
the specified name.
`brightness` can be a boolean for on/off, integer value for a specific
brightness, or None to preserve existing brightness. Default is
preserve existing brightness.
Args:
name (str): Linux led name.
brightness (bool, int, None): Initial brightness.
Returns:
LED: LED object.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `name` or `brightness` types are invalid.
ValueError: if `brightness` value is invalid.
"""
self._fd = None
self._name = None
self._max_brightness = None
self._open(name, brightness)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, name, brightness):
if not isinstance(name, str):
raise TypeError("Invalid name type, should be string.")
if not isinstance(brightness, (bool, int, type(None))):
raise TypeError("Invalid brightness type, should be bool, int, or None.")
led_path = "/sys/class/leds/%s" % name
if not os.path.isdir(led_path):
raise ValueError("LED %s not found!" % name)
# Read max brightness
try:
with open("/sys/class/leds/%s/max_brightness" % name, "r") as f_max_brightness:
max_brightness = int(f_max_brightness.read())
except IOError as e:
raise LEDError(e.errno, "Reading LED max brightness: " + e.strerror)
# Open brightness
try:
self._fd = os.open("/sys/class/leds/%s/brightness" % name, os.O_RDWR)
except OSError as e:
raise LEDError(e.errno, "Opening LED brightness: " + e.strerror)
self._max_brightness = max_brightness
self._name = name
# Set initial brightness
if brightness:
self.write(brightness)
# Methods
def write(self, brightness):
"""Set the brightness of the LED to `brightness`.
`brightness` can be a boolean for on/off, or integer value for a
specific brightness.
Args:
brightness (bool, int): Brightness value to set.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `brightness` type is not bool or int.
"""
if not isinstance(brightness, (bool, int)):
raise TypeError("Invalid brightness type, should be bool or int.")
if isinstance(brightness, bool):
brightness = self._max_brightness if brightness else 0
else:
if not 0 <= brightness <= self._max_brightness:
raise ValueError("Invalid brightness value, should be between 0 and %d." % self._max_brightness)
# Write value
try:
os.write(self._fd, b"%d\n" % brightness)
except OSError as e:
raise LEDError(e.errno, "Writing LED brightness: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror)
def close(self):
"""Close the sysfs LED.
Raises:
LEDError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise LEDError(e.errno, "Closing LED: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor for the underlying sysfs LED "brightness"
file of the LED object.
:type: int
"""
return self._fd
@property
def name(self):
"""Get the sysfs LED name.
:type: str
"""
return self._name
@property
def max_brightness(self):
"""Get the LED's max brightness.
:type: int
"""
return self._max_brightness
# Mutable properties
def _get_brightness(self):
# Read brightness
return self.read()
def _set_brightness(self, brightness):
return self.write(brightness)
brightness = property(_get_brightness, _set_brightness)
"""Get or set the LED's brightness.
Value can be a boolean for on/off, or integer value a for specific
brightness.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `brightness` type is not bool or int.
ValueError: if `brightness` value is invalid.
:type: int
"""
# String representation
def __str__(self):
return "LED %s (fd=%d, max_brightness=%d)" % (self._name, self._fd, self._max_brightness)
|
vsergeev/python-periphery | periphery/led.py | LED.write | python | def write(self, brightness):
if not isinstance(brightness, (bool, int)):
raise TypeError("Invalid brightness type, should be bool or int.")
if isinstance(brightness, bool):
brightness = self._max_brightness if brightness else 0
else:
if not 0 <= brightness <= self._max_brightness:
raise ValueError("Invalid brightness value, should be between 0 and %d." % self._max_brightness)
# Write value
try:
os.write(self._fd, b"%d\n" % brightness)
except OSError as e:
raise LEDError(e.errno, "Writing LED brightness: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror) | Set the brightness of the LED to `brightness`.
`brightness` can be a boolean for on/off, or integer value for a
specific brightness.
Args:
brightness (bool, int): Brightness value to set.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `brightness` type is not bool or int. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/led.py#L102-L135 | null | class LED(object):
def __init__(self, name, brightness=None):
"""Instantiate an LED object and open the sysfs LED corresponding to
the specified name.
`brightness` can be a boolean for on/off, integer value for a specific
brightness, or None to preserve existing brightness. Default is
preserve existing brightness.
Args:
name (str): Linux led name.
brightness (bool, int, None): Initial brightness.
Returns:
LED: LED object.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `name` or `brightness` types are invalid.
ValueError: if `brightness` value is invalid.
"""
self._fd = None
self._name = None
self._max_brightness = None
self._open(name, brightness)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, name, brightness):
if not isinstance(name, str):
raise TypeError("Invalid name type, should be string.")
if not isinstance(brightness, (bool, int, type(None))):
raise TypeError("Invalid brightness type, should be bool, int, or None.")
led_path = "/sys/class/leds/%s" % name
if not os.path.isdir(led_path):
raise ValueError("LED %s not found!" % name)
# Read max brightness
try:
with open("/sys/class/leds/%s/max_brightness" % name, "r") as f_max_brightness:
max_brightness = int(f_max_brightness.read())
except IOError as e:
raise LEDError(e.errno, "Reading LED max brightness: " + e.strerror)
# Open brightness
try:
self._fd = os.open("/sys/class/leds/%s/brightness" % name, os.O_RDWR)
except OSError as e:
raise LEDError(e.errno, "Opening LED brightness: " + e.strerror)
self._max_brightness = max_brightness
self._name = name
# Set initial brightness
if brightness:
self.write(brightness)
# Methods
def read(self):
"""Read the brightness of the LED.
Returns:
int: Current brightness.
Raises:
LEDError: if an I/O or OS error occurs.
"""
# Read value
try:
buf = os.read(self._fd, 8)
except OSError as e:
raise LEDError(e.errno, "Reading LED brightness: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror)
return int(buf)
def close(self):
"""Close the sysfs LED.
Raises:
LEDError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise LEDError(e.errno, "Closing LED: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor for the underlying sysfs LED "brightness"
file of the LED object.
:type: int
"""
return self._fd
@property
def name(self):
"""Get the sysfs LED name.
:type: str
"""
return self._name
@property
def max_brightness(self):
"""Get the LED's max brightness.
:type: int
"""
return self._max_brightness
# Mutable properties
def _get_brightness(self):
# Read brightness
return self.read()
def _set_brightness(self, brightness):
return self.write(brightness)
brightness = property(_get_brightness, _set_brightness)
"""Get or set the LED's brightness.
Value can be a boolean for on/off, or integer value a for specific
brightness.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `brightness` type is not bool or int.
ValueError: if `brightness` value is invalid.
:type: int
"""
# String representation
def __str__(self):
return "LED %s (fd=%d, max_brightness=%d)" % (self._name, self._fd, self._max_brightness)
|
vsergeev/python-periphery | periphery/led.py | LED.close | python | def close(self):
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise LEDError(e.errno, "Closing LED: " + e.strerror)
self._fd = None | Close the sysfs LED.
Raises:
LEDError: if an I/O or OS error occurs. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/led.py#L137-L152 | null | class LED(object):
def __init__(self, name, brightness=None):
"""Instantiate an LED object and open the sysfs LED corresponding to
the specified name.
`brightness` can be a boolean for on/off, integer value for a specific
brightness, or None to preserve existing brightness. Default is
preserve existing brightness.
Args:
name (str): Linux led name.
brightness (bool, int, None): Initial brightness.
Returns:
LED: LED object.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `name` or `brightness` types are invalid.
ValueError: if `brightness` value is invalid.
"""
self._fd = None
self._name = None
self._max_brightness = None
self._open(name, brightness)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, name, brightness):
if not isinstance(name, str):
raise TypeError("Invalid name type, should be string.")
if not isinstance(brightness, (bool, int, type(None))):
raise TypeError("Invalid brightness type, should be bool, int, or None.")
led_path = "/sys/class/leds/%s" % name
if not os.path.isdir(led_path):
raise ValueError("LED %s not found!" % name)
# Read max brightness
try:
with open("/sys/class/leds/%s/max_brightness" % name, "r") as f_max_brightness:
max_brightness = int(f_max_brightness.read())
except IOError as e:
raise LEDError(e.errno, "Reading LED max brightness: " + e.strerror)
# Open brightness
try:
self._fd = os.open("/sys/class/leds/%s/brightness" % name, os.O_RDWR)
except OSError as e:
raise LEDError(e.errno, "Opening LED brightness: " + e.strerror)
self._max_brightness = max_brightness
self._name = name
# Set initial brightness
if brightness:
self.write(brightness)
# Methods
def read(self):
"""Read the brightness of the LED.
Returns:
int: Current brightness.
Raises:
LEDError: if an I/O or OS error occurs.
"""
# Read value
try:
buf = os.read(self._fd, 8)
except OSError as e:
raise LEDError(e.errno, "Reading LED brightness: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror)
return int(buf)
def write(self, brightness):
"""Set the brightness of the LED to `brightness`.
`brightness` can be a boolean for on/off, or integer value for a
specific brightness.
Args:
brightness (bool, int): Brightness value to set.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `brightness` type is not bool or int.
"""
if not isinstance(brightness, (bool, int)):
raise TypeError("Invalid brightness type, should be bool or int.")
if isinstance(brightness, bool):
brightness = self._max_brightness if brightness else 0
else:
if not 0 <= brightness <= self._max_brightness:
raise ValueError("Invalid brightness value, should be between 0 and %d." % self._max_brightness)
# Write value
try:
os.write(self._fd, b"%d\n" % brightness)
except OSError as e:
raise LEDError(e.errno, "Writing LED brightness: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise LEDError(e.errno, "Rewinding LED brightness: " + e.strerror)
# Immutable properties
@property
def fd(self):
"""Get the file descriptor for the underlying sysfs LED "brightness"
file of the LED object.
:type: int
"""
return self._fd
@property
def name(self):
"""Get the sysfs LED name.
:type: str
"""
return self._name
@property
def max_brightness(self):
"""Get the LED's max brightness.
:type: int
"""
return self._max_brightness
# Mutable properties
def _get_brightness(self):
# Read brightness
return self.read()
def _set_brightness(self, brightness):
return self.write(brightness)
brightness = property(_get_brightness, _set_brightness)
"""Get or set the LED's brightness.
Value can be a boolean for on/off, or integer value a for specific
brightness.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `brightness` type is not bool or int.
ValueError: if `brightness` value is invalid.
:type: int
"""
# String representation
def __str__(self):
return "LED %s (fd=%d, max_brightness=%d)" % (self._name, self._fd, self._max_brightness)
|
vsergeev/python-periphery | periphery/i2c.py | I2C.transfer | python | def transfer(self, address, messages):
if not isinstance(messages, list):
raise TypeError("Invalid messages type, should be list of I2C.Message.")
elif len(messages) == 0:
raise ValueError("Invalid messages data, should be non-zero length.")
# Convert I2C.Message messages to _CI2CMessage messages
cmessages = (_CI2CMessage * len(messages))()
for i in range(len(messages)):
# Convert I2C.Message data to bytes
if isinstance(messages[i].data, bytes):
data = messages[i].data
elif isinstance(messages[i].data, bytearray):
data = bytes(messages[i].data)
elif isinstance(messages[i].data, list):
data = bytes(bytearray(messages[i].data))
cmessages[i].addr = address
cmessages[i].flags = messages[i].flags | (I2C._I2C_M_RD if messages[i].read else 0)
cmessages[i].len = len(data)
cmessages[i].buf = ctypes.cast(ctypes.create_string_buffer(data, len(data)), ctypes.POINTER(ctypes.c_ubyte))
# Prepare transfer structure
i2c_xfer = _CI2CIocTransfer()
i2c_xfer.nmsgs = len(cmessages)
i2c_xfer.msgs = cmessages
# Transfer
try:
fcntl.ioctl(self._fd, I2C._I2C_IOC_RDWR, i2c_xfer, False)
except IOError as e:
raise I2CError(e.errno, "I2C transfer: " + e.strerror)
# Update any read I2C.Message messages
for i in range(len(messages)):
if messages[i].read:
data = [cmessages[i].buf[j] for j in range(cmessages[i].len)]
# Convert read data to type used in I2C.Message messages
if isinstance(messages[i].data, list):
messages[i].data = data
elif isinstance(messages[i].data, bytearray):
messages[i].data = bytearray(data)
elif isinstance(messages[i].data, bytes):
messages[i].data = bytes(bytearray(data)) | Transfer `messages` to the specified I2C `address`. Modifies the
`messages` array with the results of any read transactions.
Args:
address (int): I2C address.
messages (list): list of I2C.Message messages.
Raises:
I2CError: if an I/O or OS error occurs.
TypeError: if `messages` type is not list.
ValueError: if `messages` length is zero, or if message data is not valid bytes. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/i2c.py#L93-L149 | null | class I2C(object):
# Constants scraped from <linux/i2c-dev.h> and <linux/i2c.h>
_I2C_IOC_FUNCS = 0x705
_I2C_IOC_RDWR = 0x707
_I2C_FUNC_I2C = 0x1
_I2C_M_TEN = 0x0010
_I2C_M_RD = 0x0001
_I2C_M_STOP = 0x8000
_I2C_M_NOSTART = 0x4000
_I2C_M_REV_DIR_ADDR = 0x2000
_I2C_M_IGNORE_NAK = 0x1000
_I2C_M_NO_RD_ACK = 0x0800
_I2C_M_RECV_LEN = 0x0400
def __init__(self, devpath):
"""Instantiate an I2C object and open the i2c-dev device at the
specified path.
Args:
devpath (str): i2c-dev device path.
Returns:
I2C: I2C object.
Raises:
I2CError: if an I/O or OS error occurs.
"""
self._fd = None
self._devpath = None
self._open(devpath)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, devpath):
# Open i2c device
try:
self._fd = os.open(devpath, os.O_RDWR)
except OSError as e:
raise I2CError(e.errno, "Opening I2C device: " + e.strerror)
self._devpath = devpath
# Query supported functions
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, I2C._I2C_IOC_FUNCS, buf, True)
except OSError as e:
self.close()
raise I2CError(e.errno, "Querying supported functions: " + e.strerror)
# Check that I2C_RDWR ioctl() is supported on this device
if (buf[0] & I2C._I2C_FUNC_I2C) == 0:
self.close()
raise I2CError(None, "I2C not supported on device %s." % devpath)
# Methods
def close(self):
"""Close the i2c-dev I2C device.
Raises:
I2CError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise I2CError(e.errno, "Closing I2C device: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor of the underlying i2c-dev device.
:type: int
"""
return self._fd
@property
def devpath(self):
"""Get the device path of the underlying i2c-dev device.
:type: str
"""
return self._devpath
# String representation
def __str__(self):
return "I2C (device=%s, fd=%d)" % (self.devpath, self.fd)
class Message:
def __init__(self, data, read=False, flags=0):
"""Instantiate an I2C Message object.
Args:
data (bytes, bytearray, list): a byte array or list of 8-bit
integers to write.
read (bool): specify this as a read message, where `data`
serves as placeholder bytes for the read.
flags (int): additional i2c-dev flags for this message.
Returns:
Message: Message object.
Raises:
TypeError: if `data`, `read`, or `flags` types are invalid.
"""
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, should be bytes, bytearray, or list.")
if not isinstance(read, bool):
raise TypeError("Invalid read type, should be boolean.")
if not isinstance(flags, int):
raise TypeError("Invalid flags type, should be integer.")
self.data = data
self.read = read
self.flags = flags
|
vsergeev/python-periphery | periphery/serial.py | Serial.read | python | def read(self, length, timeout=None):
data = b""
# Read length bytes if timeout is None
# Read up to length bytes if timeout is not None
while True:
if timeout is not None:
# Select
(rlist, _, _) = select.select([self._fd], [], [], timeout)
# If timeout
if self._fd not in rlist:
break
try:
data += os.read(self._fd, length - len(data))
except OSError as e:
raise SerialError(e.errno, "Reading serial port: " + e.strerror)
if len(data) == length:
break
return data | Read up to `length` number of bytes from the serial port with an
optional timeout.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking read, or negative or None for a blocking read that will
block until `length` number of bytes are read. Default is a blocking
read.
For a non-blocking or timeout-bound read, read() may return data whose
length is less than or equal to the requested length.
Args:
length (int): length in bytes.
timeout (int, float, None): timeout duration in seconds.
Returns:
bytes: data read.
Raises:
SerialError: if an I/O or OS error occurs. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/serial.py#L172-L216 | null | class Serial(object):
_DATABITS_TO_CFLAG = {
5: termios.CS5, 6: termios.CS6, 7: termios.CS7, 8: termios.CS8
}
_CFLAG_TO_DATABITS = {v: k for k, v in _DATABITS_TO_CFLAG.items()}
_BAUDRATE_TO_OSPEED = {
50: termios.B50, 75: termios.B75, 110: termios.B110, 134: termios.B134,
150: termios.B150, 200: termios.B200, 300: termios.B300,
600: termios.B600, 1200: termios.B1200, 1800: termios.B1800,
2400: termios.B2400, 4800: termios.B4800, 9600: termios.B9600,
19200: termios.B19200, 38400: termios.B38400, 57600: termios.B57600,
115200: termios.B115200, 230400: termios.B230400,
# Linux baudrates bits missing in termios module included below
460800: 0x1004, 500000: 0x1005, 576000: 0x1006,
921600: 0x1007, 1000000: 0x1008, 1152000: 0x1009,
1500000: 0x100A, 2000000: 0x100B, 2500000: 0x100C,
3000000: 0x100D, 3500000: 0x100E, 4000000: 0x100F,
}
_OSPEED_TO_BAUDRATE = {v: k for k, v in _BAUDRATE_TO_OSPEED.items()}
def __init__(self, devpath, baudrate, databits=8, parity="none", stopbits=1, xonxoff=False, rtscts=False):
"""Instantiate a Serial object and open the tty device at the specified
path with the specified baudrate, and the defaults of 8 data bits, no
parity, 1 stop bit, no software flow control (xonxoff), and no hardware
flow control (rtscts).
Args:
devpath (str): tty device path.
baudrate (int): baudrate.
databits (int): data bits, can be 5, 6, 7, 8.
parity (str): parity, can be "none", "even", "odd".
stopbits (int): stop bits, can be 1 or 2.
xonxoff (bool): software flow control.
rtscts (bool): hardware flow control.
Returns:
Serial: Serial object.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `devpath`, `baudrate`, `databits`, `parity`, `stopbits`, `xonxoff`, or `rtscts` types are invalid.
ValueError: if `baudrate`, `databits`, `parity`, or `stopbits` values are invalid.
"""
self._fd = None
self._devpath = None
self._open(devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts):
if not isinstance(devpath, str):
raise TypeError("Invalid devpath type, should be string.")
elif not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
elif not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif not isinstance(xonxoff, bool):
raise TypeError("Invalid xonxoff type, should be boolean.")
elif not isinstance(rtscts, bool):
raise TypeError("Invalid rtscts type, should be boolean.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Open tty
try:
self._fd = os.open(devpath, os.O_RDWR | os.O_NOCTTY)
except OSError as e:
raise SerialError(e.errno, "Opening serial port: " + e.strerror)
self._devpath = devpath
parity = parity.lower()
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = (0, 0, 0, 0, 0, 0, [0] * 32)
###
# iflag
# Ignore break characters
iflag = termios.IGNBRK
# Setup parity
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
# Setup xonxoff
if xonxoff:
iflag |= (termios.IXON | termios.IXOFF)
#######
# oflag
oflag = 0
#######
# lflag
lflag = 0
#######
# cflag
# Enable receiver, ignore modem control lines
cflag = (termios.CREAD | termios.CLOCAL)
# Setup data bits
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Setup parity
if parity == "even":
cflag |= termios.PARENB
elif parity == "odd":
cflag |= (termios.PARENB | termios.PARODD)
# Setup stop bits
if stopbits == 2:
cflag |= termios.CSTOPB
# Setup rtscts
if rtscts:
cflag |= termios.CRTSCTS
# Setup baud rate
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ispeed
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ospeed
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
# Methods
def write(self, data):
"""Write `data` to the serial port and return the number of bytes
written.
Args:
data (bytes, bytearray, list): a byte array or list of 8-bit integers to write.
Returns:
int: number of bytes written.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `data` type is invalid.
ValueError: if data is not valid bytes.
"""
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, should be bytes, bytearray, or list.")
if isinstance(data, list):
data = bytearray(data)
try:
return os.write(self._fd, data)
except OSError as e:
raise SerialError(e.errno, "Writing serial port: " + e.strerror)
def poll(self, timeout=None):
"""Poll for data available for reading from the serial port.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking poll, or negative or None for a blocking poll. Default is
a blocking poll.
Args:
timeout (int, float, None): timeout duration in seconds.
Returns:
bool: ``True`` if data is available for reading from the serial port, ``False`` if not.
"""
p = select.poll()
p.register(self._fd, select.POLLIN | select.POLLPRI)
events = p.poll(int(timeout * 1000))
if len(events) > 0:
return True
return False
def flush(self):
"""Flush the write buffer of the serial port, blocking until all bytes
are written.
Raises:
SerialError: if an I/O or OS error occurs.
"""
try:
termios.tcdrain(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Flushing serial port: " + e.strerror)
def input_waiting(self):
"""Query the number of bytes waiting to be read from the serial port.
Returns:
int: number of bytes waiting to be read.
Raises:
SerialError: if an I/O or OS error occurs.
"""
# Get input waiting
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, termios.TIOCINQ, buf, True)
except OSError as e:
raise SerialError(e.errno, "Querying input waiting: " + e.strerror)
return buf[0]
def output_waiting(self):
"""Query the number of bytes waiting to be written to the serial port.
Returns:
int: number of bytes waiting to be written.
Raises:
SerialError: if an I/O or OS error occurs.
"""
# Get input waiting
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, termios.TIOCOUTQ, buf, True)
except OSError as e:
raise SerialError(e.errno, "Querying output waiting: " + e.strerror)
return buf[0]
def close(self):
"""Close the tty device.
Raises:
SerialError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise SerialError(e.errno, "Closing serial port: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor of the underlying tty device.
:type: int
"""
return self._fd
@property
def devpath(self):
"""Get the device path of the underlying tty device.
:type: str
"""
return self._devpath
# Mutable properties
def _get_baudrate(self):
# Get tty attributes
try:
(_, _, _, _, _, ospeed, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if ospeed not in Serial._OSPEED_TO_BAUDRATE:
raise SerialError(None, "Unknown baud rate: ospeed 0x%x." % ospeed)
return Serial._OSPEED_TO_BAUDRATE[ospeed]
def _set_baudrate(self, baudrate):
if not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~(termios.CBAUD | termios.CBAUDEX)
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
baudrate = property(_get_baudrate, _set_baudrate)
"""Get or set the baudrate.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `baudrate` type is not int.
ValueError: if `baudrate` value is not supported.
:type: int
"""
def _get_databits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
cs = cflag & termios.CSIZE
if cs not in Serial._CFLAG_TO_DATABITS:
raise SerialError(None, "Unknown data bits setting: csize 0x%x." % cs)
return Serial._CFLAG_TO_DATABITS[cs]
def _set_databits(self, databits):
if not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSIZE
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
databits = property(_get_databits, _set_databits)
"""Get or set the data bits. Can be 5, 6, 7, 8.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `databits` type is not int.
ValueError: if `databits` value is invalid.
:type: int
"""
def _get_parity(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.PARENB) == 0:
return "none"
elif (cflag & termios.PARODD) == 0:
return "even"
else:
return "odd"
def _set_parity(self, parity):
if not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
parity = parity.lower()
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
iflag &= ~(termios.INPCK | termios.ISTRIP)
cflag &= ~(termios.PARENB | termios.PARODD)
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
cflag |= termios.PARENB
if parity == "odd":
cflag |= termios.PARODD
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
parity = property(_get_parity, _set_parity)
"""Get or set the parity. Can be "none", "even", "odd".
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `parity` type is not str.
ValueError: if `parity` value is invalid.
:type: str
"""
def _get_stopbits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.CSTOPB) != 0:
return 2
else:
return 1
def _set_stopbits(self, stopbits):
if not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSTOPB
if stopbits == 2:
cflag |= termios.CSTOPB
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
stopbits = property(_get_stopbits, _set_stopbits)
"""Get or set the stop bits. Can be 1 or 2.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `stopbits` type is not int.
ValueError: if `stopbits` value is invalid.
:type: int
"""
def _get_xonxoff(self):
# Get tty attributes
try:
(iflag, _, _, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (iflag & (termios.IXON | termios.IXOFF)) != 0:
return True
else:
return False
def _set_xonxoff(self, enabled):
if not isinstance(enabled, bool):
raise TypeError("Invalid enabled type, should be boolean.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
iflag &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
if enabled:
iflag |= (termios.IXON | termios.IXOFF)
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
xonxoff = property(_get_xonxoff, _set_xonxoff)
"""Get or set software flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `xonxoff` type is not bool.
:type: bool
"""
def _get_rtscts(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.CRTSCTS) != 0:
return True
else:
return False
def _set_rtscts(self, enabled):
if not isinstance(enabled, bool):
raise TypeError("Invalid enabled type, should be boolean.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag = ~termios.CRTSCTS
if enabled:
cflag |= termios.CRTSCTS
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
rtscts = property(_get_rtscts, _set_rtscts)
"""Get or set hardware flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `rtscts` type is not bool.
:type: bool
"""
# String representation
def __str__(self):
return "Serial (device=%s, fd=%d, baudrate=%d, databits=%d, parity=%s, stopbits=%d, xonxoff=%s, rtscts=%s)" % (self.devpath, self.fd, self.baudrate, self.databits, self.parity, self.stopbits, str(self.xonxoff), str(self.rtscts))
|
vsergeev/python-periphery | periphery/serial.py | Serial.write | python | def write(self, data):
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, should be bytes, bytearray, or list.")
if isinstance(data, list):
data = bytearray(data)
try:
return os.write(self._fd, data)
except OSError as e:
raise SerialError(e.errno, "Writing serial port: " + e.strerror) | Write `data` to the serial port and return the number of bytes
written.
Args:
data (bytes, bytearray, list): a byte array or list of 8-bit integers to write.
Returns:
int: number of bytes written.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `data` type is invalid.
ValueError: if data is not valid bytes. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/serial.py#L218-L243 | null | class Serial(object):
_DATABITS_TO_CFLAG = {
5: termios.CS5, 6: termios.CS6, 7: termios.CS7, 8: termios.CS8
}
_CFLAG_TO_DATABITS = {v: k for k, v in _DATABITS_TO_CFLAG.items()}
_BAUDRATE_TO_OSPEED = {
50: termios.B50, 75: termios.B75, 110: termios.B110, 134: termios.B134,
150: termios.B150, 200: termios.B200, 300: termios.B300,
600: termios.B600, 1200: termios.B1200, 1800: termios.B1800,
2400: termios.B2400, 4800: termios.B4800, 9600: termios.B9600,
19200: termios.B19200, 38400: termios.B38400, 57600: termios.B57600,
115200: termios.B115200, 230400: termios.B230400,
# Linux baudrates bits missing in termios module included below
460800: 0x1004, 500000: 0x1005, 576000: 0x1006,
921600: 0x1007, 1000000: 0x1008, 1152000: 0x1009,
1500000: 0x100A, 2000000: 0x100B, 2500000: 0x100C,
3000000: 0x100D, 3500000: 0x100E, 4000000: 0x100F,
}
_OSPEED_TO_BAUDRATE = {v: k for k, v in _BAUDRATE_TO_OSPEED.items()}
def __init__(self, devpath, baudrate, databits=8, parity="none", stopbits=1, xonxoff=False, rtscts=False):
"""Instantiate a Serial object and open the tty device at the specified
path with the specified baudrate, and the defaults of 8 data bits, no
parity, 1 stop bit, no software flow control (xonxoff), and no hardware
flow control (rtscts).
Args:
devpath (str): tty device path.
baudrate (int): baudrate.
databits (int): data bits, can be 5, 6, 7, 8.
parity (str): parity, can be "none", "even", "odd".
stopbits (int): stop bits, can be 1 or 2.
xonxoff (bool): software flow control.
rtscts (bool): hardware flow control.
Returns:
Serial: Serial object.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `devpath`, `baudrate`, `databits`, `parity`, `stopbits`, `xonxoff`, or `rtscts` types are invalid.
ValueError: if `baudrate`, `databits`, `parity`, or `stopbits` values are invalid.
"""
self._fd = None
self._devpath = None
self._open(devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts):
if not isinstance(devpath, str):
raise TypeError("Invalid devpath type, should be string.")
elif not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
elif not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif not isinstance(xonxoff, bool):
raise TypeError("Invalid xonxoff type, should be boolean.")
elif not isinstance(rtscts, bool):
raise TypeError("Invalid rtscts type, should be boolean.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Open tty
try:
self._fd = os.open(devpath, os.O_RDWR | os.O_NOCTTY)
except OSError as e:
raise SerialError(e.errno, "Opening serial port: " + e.strerror)
self._devpath = devpath
parity = parity.lower()
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = (0, 0, 0, 0, 0, 0, [0] * 32)
###
# iflag
# Ignore break characters
iflag = termios.IGNBRK
# Setup parity
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
# Setup xonxoff
if xonxoff:
iflag |= (termios.IXON | termios.IXOFF)
#######
# oflag
oflag = 0
#######
# lflag
lflag = 0
#######
# cflag
# Enable receiver, ignore modem control lines
cflag = (termios.CREAD | termios.CLOCAL)
# Setup data bits
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Setup parity
if parity == "even":
cflag |= termios.PARENB
elif parity == "odd":
cflag |= (termios.PARENB | termios.PARODD)
# Setup stop bits
if stopbits == 2:
cflag |= termios.CSTOPB
# Setup rtscts
if rtscts:
cflag |= termios.CRTSCTS
# Setup baud rate
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ispeed
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ospeed
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
# Methods
def read(self, length, timeout=None):
"""Read up to `length` number of bytes from the serial port with an
optional timeout.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking read, or negative or None for a blocking read that will
block until `length` number of bytes are read. Default is a blocking
read.
For a non-blocking or timeout-bound read, read() may return data whose
length is less than or equal to the requested length.
Args:
length (int): length in bytes.
timeout (int, float, None): timeout duration in seconds.
Returns:
bytes: data read.
Raises:
SerialError: if an I/O or OS error occurs.
"""
data = b""
# Read length bytes if timeout is None
# Read up to length bytes if timeout is not None
while True:
if timeout is not None:
# Select
(rlist, _, _) = select.select([self._fd], [], [], timeout)
# If timeout
if self._fd not in rlist:
break
try:
data += os.read(self._fd, length - len(data))
except OSError as e:
raise SerialError(e.errno, "Reading serial port: " + e.strerror)
if len(data) == length:
break
return data
def poll(self, timeout=None):
"""Poll for data available for reading from the serial port.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking poll, or negative or None for a blocking poll. Default is
a blocking poll.
Args:
timeout (int, float, None): timeout duration in seconds.
Returns:
bool: ``True`` if data is available for reading from the serial port, ``False`` if not.
"""
p = select.poll()
p.register(self._fd, select.POLLIN | select.POLLPRI)
events = p.poll(int(timeout * 1000))
if len(events) > 0:
return True
return False
def flush(self):
"""Flush the write buffer of the serial port, blocking until all bytes
are written.
Raises:
SerialError: if an I/O or OS error occurs.
"""
try:
termios.tcdrain(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Flushing serial port: " + e.strerror)
def input_waiting(self):
"""Query the number of bytes waiting to be read from the serial port.
Returns:
int: number of bytes waiting to be read.
Raises:
SerialError: if an I/O or OS error occurs.
"""
# Get input waiting
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, termios.TIOCINQ, buf, True)
except OSError as e:
raise SerialError(e.errno, "Querying input waiting: " + e.strerror)
return buf[0]
def output_waiting(self):
"""Query the number of bytes waiting to be written to the serial port.
Returns:
int: number of bytes waiting to be written.
Raises:
SerialError: if an I/O or OS error occurs.
"""
# Get input waiting
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, termios.TIOCOUTQ, buf, True)
except OSError as e:
raise SerialError(e.errno, "Querying output waiting: " + e.strerror)
return buf[0]
def close(self):
"""Close the tty device.
Raises:
SerialError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise SerialError(e.errno, "Closing serial port: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor of the underlying tty device.
:type: int
"""
return self._fd
@property
def devpath(self):
"""Get the device path of the underlying tty device.
:type: str
"""
return self._devpath
# Mutable properties
def _get_baudrate(self):
# Get tty attributes
try:
(_, _, _, _, _, ospeed, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if ospeed not in Serial._OSPEED_TO_BAUDRATE:
raise SerialError(None, "Unknown baud rate: ospeed 0x%x." % ospeed)
return Serial._OSPEED_TO_BAUDRATE[ospeed]
def _set_baudrate(self, baudrate):
if not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~(termios.CBAUD | termios.CBAUDEX)
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
baudrate = property(_get_baudrate, _set_baudrate)
"""Get or set the baudrate.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `baudrate` type is not int.
ValueError: if `baudrate` value is not supported.
:type: int
"""
def _get_databits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
cs = cflag & termios.CSIZE
if cs not in Serial._CFLAG_TO_DATABITS:
raise SerialError(None, "Unknown data bits setting: csize 0x%x." % cs)
return Serial._CFLAG_TO_DATABITS[cs]
def _set_databits(self, databits):
if not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSIZE
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
databits = property(_get_databits, _set_databits)
"""Get or set the data bits. Can be 5, 6, 7, 8.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `databits` type is not int.
ValueError: if `databits` value is invalid.
:type: int
"""
def _get_parity(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.PARENB) == 0:
return "none"
elif (cflag & termios.PARODD) == 0:
return "even"
else:
return "odd"
def _set_parity(self, parity):
if not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
parity = parity.lower()
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
iflag &= ~(termios.INPCK | termios.ISTRIP)
cflag &= ~(termios.PARENB | termios.PARODD)
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
cflag |= termios.PARENB
if parity == "odd":
cflag |= termios.PARODD
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
parity = property(_get_parity, _set_parity)
"""Get or set the parity. Can be "none", "even", "odd".
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `parity` type is not str.
ValueError: if `parity` value is invalid.
:type: str
"""
def _get_stopbits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.CSTOPB) != 0:
return 2
else:
return 1
def _set_stopbits(self, stopbits):
if not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSTOPB
if stopbits == 2:
cflag |= termios.CSTOPB
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
stopbits = property(_get_stopbits, _set_stopbits)
"""Get or set the stop bits. Can be 1 or 2.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `stopbits` type is not int.
ValueError: if `stopbits` value is invalid.
:type: int
"""
def _get_xonxoff(self):
# Get tty attributes
try:
(iflag, _, _, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (iflag & (termios.IXON | termios.IXOFF)) != 0:
return True
else:
return False
def _set_xonxoff(self, enabled):
if not isinstance(enabled, bool):
raise TypeError("Invalid enabled type, should be boolean.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
iflag &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
if enabled:
iflag |= (termios.IXON | termios.IXOFF)
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
xonxoff = property(_get_xonxoff, _set_xonxoff)
"""Get or set software flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `xonxoff` type is not bool.
:type: bool
"""
def _get_rtscts(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.CRTSCTS) != 0:
return True
else:
return False
def _set_rtscts(self, enabled):
if not isinstance(enabled, bool):
raise TypeError("Invalid enabled type, should be boolean.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag = ~termios.CRTSCTS
if enabled:
cflag |= termios.CRTSCTS
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
rtscts = property(_get_rtscts, _set_rtscts)
"""Get or set hardware flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `rtscts` type is not bool.
:type: bool
"""
# String representation
def __str__(self):
    """Return a human-readable summary of the current port configuration."""
    # Each property read queries the live tty state via termios.
    template = ("Serial (device={0}, fd={1:d}, baudrate={2:d}, databits={3:d}, "
                "parity={4}, stopbits={5:d}, xonxoff={6}, rtscts={7})")
    return template.format(self.devpath, self.fd, self.baudrate, self.databits,
                           self.parity, self.stopbits, self.xonxoff, self.rtscts)
|
vsergeev/python-periphery | periphery/serial.py | Serial.poll | python | def poll(self, timeout=None):
p = select.poll()
p.register(self._fd, select.POLLIN | select.POLLPRI)
events = p.poll(int(timeout * 1000))
if len(events) > 0:
return True
return False | Poll for data available for reading from the serial port.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking poll, or negative or None for a blocking poll. Default is
a blocking poll.
Args:
timeout (int, float, None): timeout duration in seconds.
Returns:
bool: ``True`` if data is available for reading from the serial port, ``False`` if not. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/serial.py#L245-L266 | null | class Serial(object):
_DATABITS_TO_CFLAG = {
5: termios.CS5, 6: termios.CS6, 7: termios.CS7, 8: termios.CS8
}
_CFLAG_TO_DATABITS = {v: k for k, v in _DATABITS_TO_CFLAG.items()}
_BAUDRATE_TO_OSPEED = {
50: termios.B50, 75: termios.B75, 110: termios.B110, 134: termios.B134,
150: termios.B150, 200: termios.B200, 300: termios.B300,
600: termios.B600, 1200: termios.B1200, 1800: termios.B1800,
2400: termios.B2400, 4800: termios.B4800, 9600: termios.B9600,
19200: termios.B19200, 38400: termios.B38400, 57600: termios.B57600,
115200: termios.B115200, 230400: termios.B230400,
# Linux baudrates bits missing in termios module included below
460800: 0x1004, 500000: 0x1005, 576000: 0x1006,
921600: 0x1007, 1000000: 0x1008, 1152000: 0x1009,
1500000: 0x100A, 2000000: 0x100B, 2500000: 0x100C,
3000000: 0x100D, 3500000: 0x100E, 4000000: 0x100F,
}
_OSPEED_TO_BAUDRATE = {v: k for k, v in _BAUDRATE_TO_OSPEED.items()}
def __init__(self, devpath, baudrate, databits=8, parity="none", stopbits=1, xonxoff=False, rtscts=False):
"""Instantiate a Serial object and open the tty device at the specified
path with the specified baudrate, and the defaults of 8 data bits, no
parity, 1 stop bit, no software flow control (xonxoff), and no hardware
flow control (rtscts).
Args:
devpath (str): tty device path.
baudrate (int): baudrate.
databits (int): data bits, can be 5, 6, 7, 8.
parity (str): parity, can be "none", "even", "odd".
stopbits (int): stop bits, can be 1 or 2.
xonxoff (bool): software flow control.
rtscts (bool): hardware flow control.
Returns:
Serial: Serial object.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `devpath`, `baudrate`, `databits`, `parity`, `stopbits`, `xonxoff`, or `rtscts` types are invalid.
ValueError: if `baudrate`, `databits`, `parity`, or `stopbits` values are invalid.
"""
self._fd = None
self._devpath = None
self._open(devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts):
if not isinstance(devpath, str):
raise TypeError("Invalid devpath type, should be string.")
elif not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
elif not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif not isinstance(xonxoff, bool):
raise TypeError("Invalid xonxoff type, should be boolean.")
elif not isinstance(rtscts, bool):
raise TypeError("Invalid rtscts type, should be boolean.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Open tty
try:
self._fd = os.open(devpath, os.O_RDWR | os.O_NOCTTY)
except OSError as e:
raise SerialError(e.errno, "Opening serial port: " + e.strerror)
self._devpath = devpath
parity = parity.lower()
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = (0, 0, 0, 0, 0, 0, [0] * 32)
###
# iflag
# Ignore break characters
iflag = termios.IGNBRK
# Setup parity
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
# Setup xonxoff
if xonxoff:
iflag |= (termios.IXON | termios.IXOFF)
#######
# oflag
oflag = 0
#######
# lflag
lflag = 0
#######
# cflag
# Enable receiver, ignore modem control lines
cflag = (termios.CREAD | termios.CLOCAL)
# Setup data bits
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Setup parity
if parity == "even":
cflag |= termios.PARENB
elif parity == "odd":
cflag |= (termios.PARENB | termios.PARODD)
# Setup stop bits
if stopbits == 2:
cflag |= termios.CSTOPB
# Setup rtscts
if rtscts:
cflag |= termios.CRTSCTS
# Setup baud rate
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ispeed
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ospeed
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
# Methods
def read(self, length, timeout=None):
    """Read up to `length` number of bytes from the serial port with an
    optional timeout.

    `timeout` can be positive for a timeout in seconds, 0 for a
    non-blocking read, or negative or None for a blocking read that will
    block until `length` number of bytes are read. Default is a blocking
    read.

    For a non-blocking or timeout-bound read, read() may return data whose
    length is less than or equal to the requested length.

    Args:
        length (int): length in bytes.
        timeout (int, float, None): timeout duration in seconds.

    Returns:
        bytes: data read.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    data = b""
    # Read length bytes if timeout is None
    # Read up to length bytes if timeout is not None
    while True:
        if timeout is not None:
            # Wait (up to `timeout`) for the fd to become readable.
            # NOTE(review): the full `timeout` is re-armed on every loop
            # iteration, so with a slow trickle of data the total wait can
            # exceed `timeout` -- confirm whether that is intended.
            (rlist, _, _) = select.select([self._fd], [], [], timeout)
            # If timeout expired without the fd becoming readable, stop.
            if self._fd not in rlist:
                break
        try:
            data += os.read(self._fd, length - len(data))
        except OSError as e:
            raise SerialError(e.errno, "Reading serial port: " + e.strerror)
        if len(data) == length:
            break
    return data
def write(self, data):
"""Write `data` to the serial port and return the number of bytes
written.
Args:
data (bytes, bytearray, list): a byte array or list of 8-bit integers to write.
Returns:
int: number of bytes written.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `data` type is invalid.
ValueError: if data is not valid bytes.
"""
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, should be bytes, bytearray, or list.")
if isinstance(data, list):
data = bytearray(data)
try:
return os.write(self._fd, data)
except OSError as e:
raise SerialError(e.errno, "Writing serial port: " + e.strerror)
def flush(self):
    """Flush the write buffer of the serial port, blocking until all bytes
    are written.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    # tcdrain() blocks until the kernel has transmitted all queued output.
    try:
        termios.tcdrain(self._fd)
    except termios.error as e:
        raise SerialError(e.errno, "Flushing serial port: " + e.strerror)
def input_waiting(self):
"""Query the number of bytes waiting to be read from the serial port.
Returns:
int: number of bytes waiting to be read.
Raises:
SerialError: if an I/O or OS error occurs.
"""
# Get input waiting
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, termios.TIOCINQ, buf, True)
except OSError as e:
raise SerialError(e.errno, "Querying input waiting: " + e.strerror)
return buf[0]
def output_waiting(self):
    """Query the number of bytes waiting to be written to the serial port.

    Returns:
        int: number of bytes waiting to be written.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    # Get output waiting: TIOCOUTQ fills a C unsigned int with the size of
    # the output queue.  (The original comment said "input" -- copy-paste typo.)
    buf = array.array('I', [0])
    try:
        fcntl.ioctl(self._fd, termios.TIOCOUTQ, buf, True)
    except OSError as e:
        raise SerialError(e.errno, "Querying output waiting: " + e.strerror)
    return buf[0]
def close(self):
    """Close the tty device.

    Idempotent: calling close() on an already-closed port is a no-op.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    if self._fd is None:
        return
    try:
        os.close(self._fd)
    except OSError as e:
        raise SerialError(e.errno, "Closing serial port: " + e.strerror)
    # Mark closed so __del__/__exit__ can call close() again safely.
    self._fd = None
# Immutable properties
@property
def fd(self):
    """Get the file descriptor of the underlying tty device.

    :type: int
    """
    return self._fd

@property
def devpath(self):
    """Get the device path of the underlying tty device.

    :type: str
    """
    return self._devpath
# Mutable properties
def _get_baudrate(self):
# Get tty attributes
try:
(_, _, _, _, _, ospeed, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if ospeed not in Serial._OSPEED_TO_BAUDRATE:
raise SerialError(None, "Unknown baud rate: ospeed 0x%x." % ospeed)
return Serial._OSPEED_TO_BAUDRATE[ospeed]
def _set_baudrate(self, baudrate):
if not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~(termios.CBAUD | termios.CBAUDEX)
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
baudrate = property(_get_baudrate, _set_baudrate)
"""Get or set the baudrate.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `baudrate` type is not int.
ValueError: if `baudrate` value is not supported.
:type: int
"""
def _get_databits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
cs = cflag & termios.CSIZE
if cs not in Serial._CFLAG_TO_DATABITS:
raise SerialError(None, "Unknown data bits setting: csize 0x%x." % cs)
return Serial._CFLAG_TO_DATABITS[cs]
def _set_databits(self, databits):
if not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSIZE
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
databits = property(_get_databits, _set_databits)
"""Get or set the data bits. Can be 5, 6, 7, 8.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `databits` type is not int.
ValueError: if `databits` value is invalid.
:type: int
"""
def _get_parity(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.PARENB) == 0:
return "none"
elif (cflag & termios.PARODD) == 0:
return "even"
else:
return "odd"
def _set_parity(self, parity):
if not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
parity = parity.lower()
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
iflag &= ~(termios.INPCK | termios.ISTRIP)
cflag &= ~(termios.PARENB | termios.PARODD)
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
cflag |= termios.PARENB
if parity == "odd":
cflag |= termios.PARODD
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
parity = property(_get_parity, _set_parity)
"""Get or set the parity. Can be "none", "even", "odd".
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `parity` type is not str.
ValueError: if `parity` value is invalid.
:type: str
"""
def _get_stopbits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.CSTOPB) != 0:
return 2
else:
return 1
def _set_stopbits(self, stopbits):
if not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSTOPB
if stopbits == 2:
cflag |= termios.CSTOPB
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
stopbits = property(_get_stopbits, _set_stopbits)
"""Get or set the stop bits. Can be 1 or 2.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `stopbits` type is not int.
ValueError: if `stopbits` value is invalid.
:type: int
"""
def _get_xonxoff(self):
# Get tty attributes
try:
(iflag, _, _, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (iflag & (termios.IXON | termios.IXOFF)) != 0:
return True
else:
return False
def _set_xonxoff(self, enabled):
if not isinstance(enabled, bool):
raise TypeError("Invalid enabled type, should be boolean.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
iflag &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
if enabled:
iflag |= (termios.IXON | termios.IXOFF)
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
xonxoff = property(_get_xonxoff, _set_xonxoff)
"""Get or set software flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `xonxoff` type is not bool.
:type: bool
"""
def _get_rtscts(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.CRTSCTS) != 0:
return True
else:
return False
def _set_rtscts(self, enabled):
if not isinstance(enabled, bool):
raise TypeError("Invalid enabled type, should be boolean.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag = ~termios.CRTSCTS
if enabled:
cflag |= termios.CRTSCTS
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
rtscts = property(_get_rtscts, _set_rtscts)
"""Get or set hardware flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `rtscts` type is not bool.
:type: bool
"""
# String representation
def __str__(self):
return "Serial (device=%s, fd=%d, baudrate=%d, databits=%d, parity=%s, stopbits=%d, xonxoff=%s, rtscts=%s)" % (self.devpath, self.fd, self.baudrate, self.databits, self.parity, self.stopbits, str(self.xonxoff), str(self.rtscts))
|
vsergeev/python-periphery | periphery/serial.py | Serial.flush | python | def flush(self):
try:
termios.tcdrain(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Flushing serial port: " + e.strerror) | Flush the write buffer of the serial port, blocking until all bytes
are written.
Raises:
SerialError: if an I/O or OS error occurs. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/serial.py#L268-L279 | null | class Serial(object):
_DATABITS_TO_CFLAG = {
5: termios.CS5, 6: termios.CS6, 7: termios.CS7, 8: termios.CS8
}
_CFLAG_TO_DATABITS = {v: k for k, v in _DATABITS_TO_CFLAG.items()}
_BAUDRATE_TO_OSPEED = {
50: termios.B50, 75: termios.B75, 110: termios.B110, 134: termios.B134,
150: termios.B150, 200: termios.B200, 300: termios.B300,
600: termios.B600, 1200: termios.B1200, 1800: termios.B1800,
2400: termios.B2400, 4800: termios.B4800, 9600: termios.B9600,
19200: termios.B19200, 38400: termios.B38400, 57600: termios.B57600,
115200: termios.B115200, 230400: termios.B230400,
# Linux baudrates bits missing in termios module included below
460800: 0x1004, 500000: 0x1005, 576000: 0x1006,
921600: 0x1007, 1000000: 0x1008, 1152000: 0x1009,
1500000: 0x100A, 2000000: 0x100B, 2500000: 0x100C,
3000000: 0x100D, 3500000: 0x100E, 4000000: 0x100F,
}
_OSPEED_TO_BAUDRATE = {v: k for k, v in _BAUDRATE_TO_OSPEED.items()}
def __init__(self, devpath, baudrate, databits=8, parity="none", stopbits=1, xonxoff=False, rtscts=False):
"""Instantiate a Serial object and open the tty device at the specified
path with the specified baudrate, and the defaults of 8 data bits, no
parity, 1 stop bit, no software flow control (xonxoff), and no hardware
flow control (rtscts).
Args:
devpath (str): tty device path.
baudrate (int): baudrate.
databits (int): data bits, can be 5, 6, 7, 8.
parity (str): parity, can be "none", "even", "odd".
stopbits (int): stop bits, can be 1 or 2.
xonxoff (bool): software flow control.
rtscts (bool): hardware flow control.
Returns:
Serial: Serial object.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `devpath`, `baudrate`, `databits`, `parity`, `stopbits`, `xonxoff`, or `rtscts` types are invalid.
ValueError: if `baudrate`, `databits`, `parity`, or `stopbits` values are invalid.
"""
self._fd = None
self._devpath = None
self._open(devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts):
if not isinstance(devpath, str):
raise TypeError("Invalid devpath type, should be string.")
elif not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
elif not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif not isinstance(xonxoff, bool):
raise TypeError("Invalid xonxoff type, should be boolean.")
elif not isinstance(rtscts, bool):
raise TypeError("Invalid rtscts type, should be boolean.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Open tty
try:
self._fd = os.open(devpath, os.O_RDWR | os.O_NOCTTY)
except OSError as e:
raise SerialError(e.errno, "Opening serial port: " + e.strerror)
self._devpath = devpath
parity = parity.lower()
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = (0, 0, 0, 0, 0, 0, [0] * 32)
###
# iflag
# Ignore break characters
iflag = termios.IGNBRK
# Setup parity
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
# Setup xonxoff
if xonxoff:
iflag |= (termios.IXON | termios.IXOFF)
#######
# oflag
oflag = 0
#######
# lflag
lflag = 0
#######
# cflag
# Enable receiver, ignore modem control lines
cflag = (termios.CREAD | termios.CLOCAL)
# Setup data bits
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Setup parity
if parity == "even":
cflag |= termios.PARENB
elif parity == "odd":
cflag |= (termios.PARENB | termios.PARODD)
# Setup stop bits
if stopbits == 2:
cflag |= termios.CSTOPB
# Setup rtscts
if rtscts:
cflag |= termios.CRTSCTS
# Setup baud rate
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ispeed
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ospeed
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
# Methods
def read(self, length, timeout=None):
"""Read up to `length` number of bytes from the serial port with an
optional timeout.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking read, or negative or None for a blocking read that will
block until `length` number of bytes are read. Default is a blocking
read.
For a non-blocking or timeout-bound read, read() may return data whose
length is less than or equal to the requested length.
Args:
length (int): length in bytes.
timeout (int, float, None): timeout duration in seconds.
Returns:
bytes: data read.
Raises:
SerialError: if an I/O or OS error occurs.
"""
data = b""
# Read length bytes if timeout is None
# Read up to length bytes if timeout is not None
while True:
if timeout is not None:
# Select
(rlist, _, _) = select.select([self._fd], [], [], timeout)
# If timeout
if self._fd not in rlist:
break
try:
data += os.read(self._fd, length - len(data))
except OSError as e:
raise SerialError(e.errno, "Reading serial port: " + e.strerror)
if len(data) == length:
break
return data
def write(self, data):
"""Write `data` to the serial port and return the number of bytes
written.
Args:
data (bytes, bytearray, list): a byte array or list of 8-bit integers to write.
Returns:
int: number of bytes written.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `data` type is invalid.
ValueError: if data is not valid bytes.
"""
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, should be bytes, bytearray, or list.")
if isinstance(data, list):
data = bytearray(data)
try:
return os.write(self._fd, data)
except OSError as e:
raise SerialError(e.errno, "Writing serial port: " + e.strerror)
def poll(self, timeout=None):
"""Poll for data available for reading from the serial port.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking poll, or negative or None for a blocking poll. Default is
a blocking poll.
Args:
timeout (int, float, None): timeout duration in seconds.
Returns:
bool: ``True`` if data is available for reading from the serial port, ``False`` if not.
"""
p = select.poll()
p.register(self._fd, select.POLLIN | select.POLLPRI)
events = p.poll(int(timeout * 1000))
if len(events) > 0:
return True
return False
def input_waiting(self):
"""Query the number of bytes waiting to be read from the serial port.
Returns:
int: number of bytes waiting to be read.
Raises:
SerialError: if an I/O or OS error occurs.
"""
# Get input waiting
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, termios.TIOCINQ, buf, True)
except OSError as e:
raise SerialError(e.errno, "Querying input waiting: " + e.strerror)
return buf[0]
def output_waiting(self):
"""Query the number of bytes waiting to be written to the serial port.
Returns:
int: number of bytes waiting to be written.
Raises:
SerialError: if an I/O or OS error occurs.
"""
# Get input waiting
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, termios.TIOCOUTQ, buf, True)
except OSError as e:
raise SerialError(e.errno, "Querying output waiting: " + e.strerror)
return buf[0]
def close(self):
"""Close the tty device.
Raises:
SerialError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise SerialError(e.errno, "Closing serial port: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor of the underlying tty device.
:type: int
"""
return self._fd
@property
def devpath(self):
"""Get the device path of the underlying tty device.
:type: str
"""
return self._devpath
# Mutable properties
def _get_baudrate(self):
# Get tty attributes
try:
(_, _, _, _, _, ospeed, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if ospeed not in Serial._OSPEED_TO_BAUDRATE:
raise SerialError(None, "Unknown baud rate: ospeed 0x%x." % ospeed)
return Serial._OSPEED_TO_BAUDRATE[ospeed]
def _set_baudrate(self, baudrate):
if not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~(termios.CBAUD | termios.CBAUDEX)
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
baudrate = property(_get_baudrate, _set_baudrate)
"""Get or set the baudrate.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `baudrate` type is not int.
ValueError: if `baudrate` value is not supported.
:type: int
"""
def _get_databits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
cs = cflag & termios.CSIZE
if cs not in Serial._CFLAG_TO_DATABITS:
raise SerialError(None, "Unknown data bits setting: csize 0x%x." % cs)
return Serial._CFLAG_TO_DATABITS[cs]
def _set_databits(self, databits):
if not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSIZE
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
databits = property(_get_databits, _set_databits)
"""Get or set the data bits. Can be 5, 6, 7, 8.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `databits` type is not int.
ValueError: if `databits` value is invalid.
:type: int
"""
def _get_parity(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.PARENB) == 0:
return "none"
elif (cflag & termios.PARODD) == 0:
return "even"
else:
return "odd"
def _set_parity(self, parity):
if not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
parity = parity.lower()
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
iflag &= ~(termios.INPCK | termios.ISTRIP)
cflag &= ~(termios.PARENB | termios.PARODD)
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
cflag |= termios.PARENB
if parity == "odd":
cflag |= termios.PARODD
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
parity = property(_get_parity, _set_parity)
"""Get or set the parity. Can be "none", "even", "odd".
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `parity` type is not str.
ValueError: if `parity` value is invalid.
:type: str
"""
def _get_stopbits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.CSTOPB) != 0:
return 2
else:
return 1
def _set_stopbits(self, stopbits):
if not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSTOPB
if stopbits == 2:
cflag |= termios.CSTOPB
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
stopbits = property(_get_stopbits, _set_stopbits)
"""Get or set the stop bits. Can be 1 or 2.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `stopbits` type is not int.
ValueError: if `stopbits` value is invalid.
:type: int
"""
def _get_xonxoff(self):
# Get tty attributes
try:
(iflag, _, _, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (iflag & (termios.IXON | termios.IXOFF)) != 0:
return True
else:
return False
def _set_xonxoff(self, enabled):
    """Enable or disable software flow control (XON/XOFF)."""
    if not isinstance(enabled, bool):
        raise TypeError("Invalid enabled type, should be boolean.")
    # Read the full attribute list so unrelated settings survive the update.
    try:
        (iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
    except termios.error as err:
        raise SerialError(err.errno, "Getting serial port attributes: " + err.strerror)
    # Drop every software flow control bit, then re-add the requested ones.
    iflag &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
    if enabled:
        iflag |= termios.IXON | termios.IXOFF
    try:
        termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
    except termios.error as err:
        raise SerialError(err.errno, "Setting serial port attributes: " + err.strerror)
# Read/write property; assigning a bool reconfigures the tty immediately
# (TCSANOW) via _set_xonxoff.
xonxoff = property(_get_xonxoff, _set_xonxoff)
"""Get or set software flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `xonxoff` type is not bool.
:type: bool
"""
def _get_rtscts(self):
    """Return True if hardware (RTS/CTS) flow control is enabled."""
    # Only cflag (index 2 of the termios attribute list) matters here.
    try:
        cflag = termios.tcgetattr(self._fd)[2]
    except termios.error as err:
        raise SerialError(err.errno, "Getting serial port attributes: " + err.strerror)
    return bool(cflag & termios.CRTSCTS)
def _set_rtscts(self, enabled):
    """Enable or disable hardware (RTS/CTS) flow control.

    Args:
        enabled (bool): True to enable, False to disable.

    Raises:
        SerialError: if an I/O or OS error occurs.
        TypeError: if `enabled` type is not bool.
    """
    if not isinstance(enabled, bool):
        raise TypeError("Invalid enabled type, should be boolean.")
    # Get tty attributes
    try:
        (iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
    except termios.error as e:
        raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
    # Modify tty attributes.
    # BUG FIX: the original did "cflag = ~termios.CRTSCTS" (plain assignment),
    # which threw away every other cflag setting (baudrate bits, data bits,
    # parity, CREAD/CLOCAL). Clear only the CRTSCTS bit, as the sibling
    # setters (_set_stopbits, _set_xonxoff, ...) do.
    cflag &= ~termios.CRTSCTS
    if enabled:
        cflag |= termios.CRTSCTS
    # Set tty attributes
    try:
        termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
    except termios.error as e:
        raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
# Read/write property; assigning a bool reconfigures the tty immediately
# (TCSANOW) via _set_rtscts.
rtscts = property(_get_rtscts, _set_rtscts)
"""Get or set hardware flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `rtscts` type is not bool.
:type: bool
"""
# String representation
def __str__(self):
    """Return a one-line human-readable summary of the port configuration."""
    # Built from the public properties, each of which re-reads the live
    # termios state, so the summary reflects the device's current settings.
    return "Serial (device=%s, fd=%d, baudrate=%d, databits=%d, parity=%s, stopbits=%d, xonxoff=%s, rtscts=%s)" % (self.devpath, self.fd, self.baudrate, self.databits, self.parity, self.stopbits, str(self.xonxoff), str(self.rtscts))
|
vsergeev/python-periphery | periphery/serial.py | Serial.input_waiting | python | def input_waiting(self):
# Get input waiting
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, termios.TIOCINQ, buf, True)
except OSError as e:
raise SerialError(e.errno, "Querying input waiting: " + e.strerror)
return buf[0] | Query the number of bytes waiting to be read from the serial port.
Returns:
int: number of bytes waiting to be read.
Raises:
SerialError: if an I/O or OS error occurs. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/serial.py#L281-L298 | null | class Serial(object):
_DATABITS_TO_CFLAG = {
5: termios.CS5, 6: termios.CS6, 7: termios.CS7, 8: termios.CS8
}
_CFLAG_TO_DATABITS = {v: k for k, v in _DATABITS_TO_CFLAG.items()}
_BAUDRATE_TO_OSPEED = {
50: termios.B50, 75: termios.B75, 110: termios.B110, 134: termios.B134,
150: termios.B150, 200: termios.B200, 300: termios.B300,
600: termios.B600, 1200: termios.B1200, 1800: termios.B1800,
2400: termios.B2400, 4800: termios.B4800, 9600: termios.B9600,
19200: termios.B19200, 38400: termios.B38400, 57600: termios.B57600,
115200: termios.B115200, 230400: termios.B230400,
# Linux baudrates bits missing in termios module included below
460800: 0x1004, 500000: 0x1005, 576000: 0x1006,
921600: 0x1007, 1000000: 0x1008, 1152000: 0x1009,
1500000: 0x100A, 2000000: 0x100B, 2500000: 0x100C,
3000000: 0x100D, 3500000: 0x100E, 4000000: 0x100F,
}
_OSPEED_TO_BAUDRATE = {v: k for k, v in _BAUDRATE_TO_OSPEED.items()}
def __init__(self, devpath, baudrate, databits=8, parity="none", stopbits=1, xonxoff=False, rtscts=False):
"""Instantiate a Serial object and open the tty device at the specified
path with the specified baudrate, and the defaults of 8 data bits, no
parity, 1 stop bit, no software flow control (xonxoff), and no hardware
flow control (rtscts).
Args:
devpath (str): tty device path.
baudrate (int): baudrate.
databits (int): data bits, can be 5, 6, 7, 8.
parity (str): parity, can be "none", "even", "odd".
stopbits (int): stop bits, can be 1 or 2.
xonxoff (bool): software flow control.
rtscts (bool): hardware flow control.
Returns:
Serial: Serial object.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `devpath`, `baudrate`, `databits`, `parity`, `stopbits`, `xonxoff`, or `rtscts` types are invalid.
ValueError: if `baudrate`, `databits`, `parity`, or `stopbits` values are invalid.
"""
self._fd = None
self._devpath = None
self._open(devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts)
def __del__(self):
self.close()
def __enter__(self):
    """Context manager entry.

    Returns:
        Serial: this object, so ``with Serial(...) as port:`` binds the port.
    """
    # BUG FIX: the original body was "pass", which made the context manager
    # yield None — "with Serial(...) as port:" bound port to None.
    return self
def __exit__(self, t, value, traceback):
self.close()
def _open(self, devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts):
if not isinstance(devpath, str):
raise TypeError("Invalid devpath type, should be string.")
elif not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
elif not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif not isinstance(xonxoff, bool):
raise TypeError("Invalid xonxoff type, should be boolean.")
elif not isinstance(rtscts, bool):
raise TypeError("Invalid rtscts type, should be boolean.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Open tty
try:
self._fd = os.open(devpath, os.O_RDWR | os.O_NOCTTY)
except OSError as e:
raise SerialError(e.errno, "Opening serial port: " + e.strerror)
self._devpath = devpath
parity = parity.lower()
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = (0, 0, 0, 0, 0, 0, [0] * 32)
###
# iflag
# Ignore break characters
iflag = termios.IGNBRK
# Setup parity
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
# Setup xonxoff
if xonxoff:
iflag |= (termios.IXON | termios.IXOFF)
#######
# oflag
oflag = 0
#######
# lflag
lflag = 0
#######
# cflag
# Enable receiver, ignore modem control lines
cflag = (termios.CREAD | termios.CLOCAL)
# Setup data bits
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Setup parity
if parity == "even":
cflag |= termios.PARENB
elif parity == "odd":
cflag |= (termios.PARENB | termios.PARODD)
# Setup stop bits
if stopbits == 2:
cflag |= termios.CSTOPB
# Setup rtscts
if rtscts:
cflag |= termios.CRTSCTS
# Setup baud rate
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ispeed
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ospeed
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
# Methods
def read(self, length, timeout=None):
"""Read up to `length` number of bytes from the serial port with an
optional timeout.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking read, or negative or None for a blocking read that will
block until `length` number of bytes are read. Default is a blocking
read.
For a non-blocking or timeout-bound read, read() may return data whose
length is less than or equal to the requested length.
Args:
length (int): length in bytes.
timeout (int, float, None): timeout duration in seconds.
Returns:
bytes: data read.
Raises:
SerialError: if an I/O or OS error occurs.
"""
data = b""
# Read length bytes if timeout is None
# Read up to length bytes if timeout is not None
while True:
if timeout is not None:
# Select
(rlist, _, _) = select.select([self._fd], [], [], timeout)
# If timeout
if self._fd not in rlist:
break
try:
data += os.read(self._fd, length - len(data))
except OSError as e:
raise SerialError(e.errno, "Reading serial port: " + e.strerror)
if len(data) == length:
break
return data
def write(self, data):
"""Write `data` to the serial port and return the number of bytes
written.
Args:
data (bytes, bytearray, list): a byte array or list of 8-bit integers to write.
Returns:
int: number of bytes written.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `data` type is invalid.
ValueError: if data is not valid bytes.
"""
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, should be bytes, bytearray, or list.")
if isinstance(data, list):
data = bytearray(data)
try:
return os.write(self._fd, data)
except OSError as e:
raise SerialError(e.errno, "Writing serial port: " + e.strerror)
def poll(self, timeout=None):
    """Poll for data available for reading from the serial port.

    `timeout` can be positive for a timeout in seconds, 0 for a
    non-blocking poll, or negative or None for a blocking poll. Default is
    a blocking poll.

    Args:
        timeout (int, float, None): timeout duration in seconds.

    Returns:
        bool: ``True`` if data is available for reading from the serial port, ``False`` if not.
    """
    p = select.poll()
    p.register(self._fd, select.POLLIN | select.POLLPRI)
    # BUG FIX: the docstring promises that None (the default) blocks, but the
    # original unconditionally computed int(timeout * 1000), raising
    # TypeError for None. poll() treats a None (or negative) millisecond
    # timeout as "block indefinitely".
    if timeout is not None and timeout >= 0:
        events = p.poll(int(timeout * 1000))
    else:
        events = p.poll(None)
    return len(events) > 0
def flush(self):
    """Drain the serial port's transmit buffer.

    Blocks until every queued output byte has actually been transmitted.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    try:
        termios.tcdrain(self._fd)
    except termios.error as err:
        raise SerialError(err.errno, "Flushing serial port: " + err.strerror)
def output_waiting(self):
    """Query the number of bytes waiting to be written to the serial port.

    Returns:
        int: number of bytes waiting to be written.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    # Ask the driver for the length of the *output* queue. (The original
    # comment here said "input", but TIOCOUTQ queries the output queue.)
    count = array.array('I', [0])
    try:
        fcntl.ioctl(self._fd, termios.TIOCOUTQ, count, True)
    except OSError as err:
        raise SerialError(err.errno, "Querying output waiting: " + err.strerror)
    return count[0]
def close(self):
"""Close the tty device.
Raises:
SerialError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise SerialError(e.errno, "Closing serial port: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor of the underlying tty device.
:type: int
"""
return self._fd
@property
def devpath(self):
"""Get the device path of the underlying tty device.
:type: str
"""
return self._devpath
# Mutable properties
def _get_baudrate(self):
# Get tty attributes
try:
(_, _, _, _, _, ospeed, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if ospeed not in Serial._OSPEED_TO_BAUDRATE:
raise SerialError(None, "Unknown baud rate: ospeed 0x%x." % ospeed)
return Serial._OSPEED_TO_BAUDRATE[ospeed]
def _set_baudrate(self, baudrate):
if not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~(termios.CBAUD | termios.CBAUDEX)
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
baudrate = property(_get_baudrate, _set_baudrate)
"""Get or set the baudrate.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `baudrate` type is not int.
ValueError: if `baudrate` value is not supported.
:type: int
"""
def _get_databits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
cs = cflag & termios.CSIZE
if cs not in Serial._CFLAG_TO_DATABITS:
raise SerialError(None, "Unknown data bits setting: csize 0x%x." % cs)
return Serial._CFLAG_TO_DATABITS[cs]
def _set_databits(self, databits):
if not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSIZE
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
databits = property(_get_databits, _set_databits)
"""Get or set the data bits. Can be 5, 6, 7, 8.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `databits` type is not int.
ValueError: if `databits` value is invalid.
:type: int
"""
def _get_parity(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.PARENB) == 0:
return "none"
elif (cflag & termios.PARODD) == 0:
return "even"
else:
return "odd"
def _set_parity(self, parity):
if not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
parity = parity.lower()
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
iflag &= ~(termios.INPCK | termios.ISTRIP)
cflag &= ~(termios.PARENB | termios.PARODD)
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
cflag |= termios.PARENB
if parity == "odd":
cflag |= termios.PARODD
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
parity = property(_get_parity, _set_parity)
"""Get or set the parity. Can be "none", "even", "odd".
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `parity` type is not str.
ValueError: if `parity` value is invalid.
:type: str
"""
def _get_stopbits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.CSTOPB) != 0:
return 2
else:
return 1
def _set_stopbits(self, stopbits):
if not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSTOPB
if stopbits == 2:
cflag |= termios.CSTOPB
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
stopbits = property(_get_stopbits, _set_stopbits)
"""Get or set the stop bits. Can be 1 or 2.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `stopbits` type is not int.
ValueError: if `stopbits` value is invalid.
:type: int
"""
def _get_xonxoff(self):
# Get tty attributes
try:
(iflag, _, _, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (iflag & (termios.IXON | termios.IXOFF)) != 0:
return True
else:
return False
def _set_xonxoff(self, enabled):
if not isinstance(enabled, bool):
raise TypeError("Invalid enabled type, should be boolean.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
iflag &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
if enabled:
iflag |= (termios.IXON | termios.IXOFF)
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
xonxoff = property(_get_xonxoff, _set_xonxoff)
"""Get or set software flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `xonxoff` type is not bool.
:type: bool
"""
def _get_rtscts(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.CRTSCTS) != 0:
return True
else:
return False
def _set_rtscts(self, enabled):
    """Enable or disable hardware (RTS/CTS) flow control.

    Args:
        enabled (bool): True to enable, False to disable.

    Raises:
        SerialError: if an I/O or OS error occurs.
        TypeError: if `enabled` type is not bool.
    """
    if not isinstance(enabled, bool):
        raise TypeError("Invalid enabled type, should be boolean.")
    # Get tty attributes
    try:
        (iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
    except termios.error as e:
        raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
    # Modify tty attributes.
    # BUG FIX: the original did "cflag = ~termios.CRTSCTS" (plain assignment),
    # which threw away every other cflag setting (baudrate bits, data bits,
    # parity, CREAD/CLOCAL). Clear only the CRTSCTS bit, as the sibling
    # setters (_set_stopbits, _set_xonxoff, ...) do.
    cflag &= ~termios.CRTSCTS
    if enabled:
        cflag |= termios.CRTSCTS
    # Set tty attributes
    try:
        termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
    except termios.error as e:
        raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
rtscts = property(_get_rtscts, _set_rtscts)
"""Get or set hardware flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `rtscts` type is not bool.
:type: bool
"""
# String representation
def __str__(self):
return "Serial (device=%s, fd=%d, baudrate=%d, databits=%d, parity=%s, stopbits=%d, xonxoff=%s, rtscts=%s)" % (self.devpath, self.fd, self.baudrate, self.databits, self.parity, self.stopbits, str(self.xonxoff), str(self.rtscts))
|
vsergeev/python-periphery | periphery/serial.py | Serial.output_waiting | python | def output_waiting(self):
# Get input waiting
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, termios.TIOCOUTQ, buf, True)
except OSError as e:
raise SerialError(e.errno, "Querying output waiting: " + e.strerror)
return buf[0] | Query the number of bytes waiting to be written to the serial port.
Returns:
int: number of bytes waiting to be written.
Raises:
SerialError: if an I/O or OS error occurs. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/serial.py#L300-L317 | null | class Serial(object):
_DATABITS_TO_CFLAG = {
5: termios.CS5, 6: termios.CS6, 7: termios.CS7, 8: termios.CS8
}
_CFLAG_TO_DATABITS = {v: k for k, v in _DATABITS_TO_CFLAG.items()}
_BAUDRATE_TO_OSPEED = {
50: termios.B50, 75: termios.B75, 110: termios.B110, 134: termios.B134,
150: termios.B150, 200: termios.B200, 300: termios.B300,
600: termios.B600, 1200: termios.B1200, 1800: termios.B1800,
2400: termios.B2400, 4800: termios.B4800, 9600: termios.B9600,
19200: termios.B19200, 38400: termios.B38400, 57600: termios.B57600,
115200: termios.B115200, 230400: termios.B230400,
# Linux baudrates bits missing in termios module included below
460800: 0x1004, 500000: 0x1005, 576000: 0x1006,
921600: 0x1007, 1000000: 0x1008, 1152000: 0x1009,
1500000: 0x100A, 2000000: 0x100B, 2500000: 0x100C,
3000000: 0x100D, 3500000: 0x100E, 4000000: 0x100F,
}
_OSPEED_TO_BAUDRATE = {v: k for k, v in _BAUDRATE_TO_OSPEED.items()}
def __init__(self, devpath, baudrate, databits=8, parity="none", stopbits=1, xonxoff=False, rtscts=False):
"""Instantiate a Serial object and open the tty device at the specified
path with the specified baudrate, and the defaults of 8 data bits, no
parity, 1 stop bit, no software flow control (xonxoff), and no hardware
flow control (rtscts).
Args:
devpath (str): tty device path.
baudrate (int): baudrate.
databits (int): data bits, can be 5, 6, 7, 8.
parity (str): parity, can be "none", "even", "odd".
stopbits (int): stop bits, can be 1 or 2.
xonxoff (bool): software flow control.
rtscts (bool): hardware flow control.
Returns:
Serial: Serial object.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `devpath`, `baudrate`, `databits`, `parity`, `stopbits`, `xonxoff`, or `rtscts` types are invalid.
ValueError: if `baudrate`, `databits`, `parity`, or `stopbits` values are invalid.
"""
self._fd = None
self._devpath = None
self._open(devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, devpath, baudrate, databits, parity, stopbits, xonxoff, rtscts):
if not isinstance(devpath, str):
raise TypeError("Invalid devpath type, should be string.")
elif not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
elif not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif not isinstance(stopbits, int):
raise TypeError("Invalid stop bits type, should be integer.")
elif not isinstance(xonxoff, bool):
raise TypeError("Invalid xonxoff type, should be boolean.")
elif not isinstance(rtscts, bool):
raise TypeError("Invalid rtscts type, should be boolean.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
elif stopbits not in [1, 2]:
raise ValueError("Invalid stop bits, can be 1, 2.")
# Open tty
try:
self._fd = os.open(devpath, os.O_RDWR | os.O_NOCTTY)
except OSError as e:
raise SerialError(e.errno, "Opening serial port: " + e.strerror)
self._devpath = devpath
parity = parity.lower()
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = (0, 0, 0, 0, 0, 0, [0] * 32)
###
# iflag
# Ignore break characters
iflag = termios.IGNBRK
# Setup parity
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
# Setup xonxoff
if xonxoff:
iflag |= (termios.IXON | termios.IXOFF)
#######
# oflag
oflag = 0
#######
# lflag
lflag = 0
#######
# cflag
# Enable receiver, ignore modem control lines
cflag = (termios.CREAD | termios.CLOCAL)
# Setup data bits
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Setup parity
if parity == "even":
cflag |= termios.PARENB
elif parity == "odd":
cflag |= (termios.PARENB | termios.PARODD)
# Setup stop bits
if stopbits == 2:
cflag |= termios.CSTOPB
# Setup rtscts
if rtscts:
cflag |= termios.CRTSCTS
# Setup baud rate
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ispeed
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
########
# ospeed
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
# Methods
def read(self, length, timeout=None):
"""Read up to `length` number of bytes from the serial port with an
optional timeout.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking read, or negative or None for a blocking read that will
block until `length` number of bytes are read. Default is a blocking
read.
For a non-blocking or timeout-bound read, read() may return data whose
length is less than or equal to the requested length.
Args:
length (int): length in bytes.
timeout (int, float, None): timeout duration in seconds.
Returns:
bytes: data read.
Raises:
SerialError: if an I/O or OS error occurs.
"""
data = b""
# Read length bytes if timeout is None
# Read up to length bytes if timeout is not None
while True:
if timeout is not None:
# Select
(rlist, _, _) = select.select([self._fd], [], [], timeout)
# If timeout
if self._fd not in rlist:
break
try:
data += os.read(self._fd, length - len(data))
except OSError as e:
raise SerialError(e.errno, "Reading serial port: " + e.strerror)
if len(data) == length:
break
return data
def write(self, data):
"""Write `data` to the serial port and return the number of bytes
written.
Args:
data (bytes, bytearray, list): a byte array or list of 8-bit integers to write.
Returns:
int: number of bytes written.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `data` type is invalid.
ValueError: if data is not valid bytes.
"""
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, should be bytes, bytearray, or list.")
if isinstance(data, list):
data = bytearray(data)
try:
return os.write(self._fd, data)
except OSError as e:
raise SerialError(e.errno, "Writing serial port: " + e.strerror)
def poll(self, timeout=None):
"""Poll for data available for reading from the serial port.
`timeout` can be positive for a timeout in seconds, 0 for a
non-blocking poll, or negative or None for a blocking poll. Default is
a blocking poll.
Args:
timeout (int, float, None): timeout duration in seconds.
Returns:
bool: ``True`` if data is available for reading from the serial port, ``False`` if not.
"""
p = select.poll()
p.register(self._fd, select.POLLIN | select.POLLPRI)
events = p.poll(int(timeout * 1000))
if len(events) > 0:
return True
return False
def flush(self):
"""Flush the write buffer of the serial port, blocking until all bytes
are written.
Raises:
SerialError: if an I/O or OS error occurs.
"""
try:
termios.tcdrain(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Flushing serial port: " + e.strerror)
def input_waiting(self):
    """Query the number of bytes waiting to be read from the serial port.

    Returns:
        int: number of bytes waiting to be read.

    Raises:
        SerialError: if an I/O or OS error occurs.
    """
    # TIOCINQ reports the length of the driver's input queue.
    count = array.array('I', [0])
    try:
        fcntl.ioctl(self._fd, termios.TIOCINQ, count, True)
    except OSError as err:
        raise SerialError(err.errno, "Querying input waiting: " + err.strerror)
    return count[0]
def close(self):
"""Close the tty device.
Raises:
SerialError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise SerialError(e.errno, "Closing serial port: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor of the underlying tty device.
:type: int
"""
return self._fd
@property
def devpath(self):
"""Get the device path of the underlying tty device.
:type: str
"""
return self._devpath
# Mutable properties
def _get_baudrate(self):
# Get tty attributes
try:
(_, _, _, _, _, ospeed, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if ospeed not in Serial._OSPEED_TO_BAUDRATE:
raise SerialError(None, "Unknown baud rate: ospeed 0x%x." % ospeed)
return Serial._OSPEED_TO_BAUDRATE[ospeed]
def _set_baudrate(self, baudrate):
if not isinstance(baudrate, int):
raise TypeError("Invalid baud rate type, should be integer.")
if baudrate not in Serial._BAUDRATE_TO_OSPEED:
raise ValueError("Unknown baud rate %d." % baudrate)
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~(termios.CBAUD | termios.CBAUDEX)
cflag |= Serial._BAUDRATE_TO_OSPEED[baudrate]
ispeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
ospeed = Serial._BAUDRATE_TO_OSPEED[baudrate]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
baudrate = property(_get_baudrate, _set_baudrate)
"""Get or set the baudrate.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `baudrate` type is not int.
ValueError: if `baudrate` value is not supported.
:type: int
"""
def _get_databits(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
cs = cflag & termios.CSIZE
if cs not in Serial._CFLAG_TO_DATABITS:
raise SerialError(None, "Unknown data bits setting: csize 0x%x." % cs)
return Serial._CFLAG_TO_DATABITS[cs]
def _set_databits(self, databits):
if not isinstance(databits, int):
raise TypeError("Invalid data bits type, should be integer.")
elif databits not in [5, 6, 7, 8]:
raise ValueError("Invalid data bits, can be 5, 6, 7, 8.")
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
cflag &= ~termios.CSIZE
cflag |= Serial._DATABITS_TO_CFLAG[databits]
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
databits = property(_get_databits, _set_databits)
"""Get or set the data bits. Can be 5, 6, 7, 8.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `databits` type is not int.
ValueError: if `databits` value is invalid.
:type: int
"""
def _get_parity(self):
# Get tty attributes
try:
(_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
if (cflag & termios.PARENB) == 0:
return "none"
elif (cflag & termios.PARODD) == 0:
return "even"
else:
return "odd"
def _set_parity(self, parity):
if not isinstance(parity, str):
raise TypeError("Invalid parity type, should be string.")
elif parity.lower() not in ["none", "even", "odd"]:
raise ValueError("Invalid parity, can be: \"none\", \"even\", \"odd\".")
parity = parity.lower()
# Get tty attributes
try:
(iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
except termios.error as e:
raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)
# Modify tty attributes
iflag &= ~(termios.INPCK | termios.ISTRIP)
cflag &= ~(termios.PARENB | termios.PARODD)
if parity != "none":
iflag |= (termios.INPCK | termios.ISTRIP)
cflag |= termios.PARENB
if parity == "odd":
cflag |= termios.PARODD
# Set tty attributes
try:
termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
except termios.error as e:
raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
parity = property(_get_parity, _set_parity)
"""Get or set the parity. Can be "none", "even", "odd".
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `parity` type is not str.
ValueError: if `parity` value is invalid.
:type: str
"""
def _get_stopbits(self):
    # Return the number of stop bits (1 or 2) from the tty control flags.
    # Get tty attributes
    try:
        (_, _, cflag, _, _, _, _) = termios.tcgetattr(self._fd)
    except termios.error as e:
        raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)

    # CSTOPB set means two stop bits; clear means one.
    if (cflag & termios.CSTOPB) != 0:
        return 2
    else:
        return 1

def _set_stopbits(self, stopbits):
    # Configure the number of stop bits (1 or 2) on the tty.
    # Raises SerialError on tcgetattr/tcsetattr failure, TypeError /
    # ValueError on invalid arguments.
    if not isinstance(stopbits, int):
        raise TypeError("Invalid stop bits type, should be integer.")
    elif stopbits not in [1, 2]:
        raise ValueError("Invalid stop bits, can be 1, 2.")

    # Get tty attributes
    try:
        (iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
    except termios.error as e:
        raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)

    # Modify tty attributes: clear CSTOPB (one stop bit), set it for two.
    cflag &= ~termios.CSTOPB
    if stopbits == 2:
        cflag |= termios.CSTOPB

    # Set tty attributes
    try:
        termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
    except termios.error as e:
        raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)

stopbits = property(_get_stopbits, _set_stopbits)
"""Get or set the stop bits. Can be 1 or 2.
Raises:
    SerialError: if an I/O or OS error occurs.
    TypeError: if `stopbits` type is not int.
    ValueError: if `stopbits` value is invalid.
:type: int
"""
def _get_xonxoff(self):
    # Return True if XON/XOFF software flow control is enabled.
    # Get tty attributes
    try:
        (iflag, _, _, _, _, _, _) = termios.tcgetattr(self._fd)
    except termios.error as e:
        raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)

    # Either IXON (output) or IXOFF (input) set counts as enabled.
    if (iflag & (termios.IXON | termios.IXOFF)) != 0:
        return True
    else:
        return False

def _set_xonxoff(self, enabled):
    # Enable or disable XON/XOFF software flow control on the tty.
    # Raises SerialError on tcgetattr/tcsetattr failure, TypeError if
    # `enabled` is not a bool.
    if not isinstance(enabled, bool):
        raise TypeError("Invalid enabled type, should be boolean.")

    # Get tty attributes
    try:
        (iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
    except termios.error as e:
        raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)

    # Modify tty attributes: clear all software flow control bits first
    # (including IXANY), then re-enable IXON/IXOFF if requested.
    iflag &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
    if enabled:
        iflag |= (termios.IXON | termios.IXOFF)

    # Set tty attributes
    try:
        termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
    except termios.error as e:
        raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)

xonxoff = property(_get_xonxoff, _set_xonxoff)
"""Get or set software flow control.
Raises:
    SerialError: if an I/O or OS error occurs.
    TypeError: if `xonxoff` type is not bool.
:type: bool
"""
def _get_rtscts(self):
    """Return True if RTS/CTS hardware flow control is enabled.

    Raises:
        SerialError: if reading the tty attributes fails.
    """
    try:
        attrs = termios.tcgetattr(self._fd)
    except termios.error as e:
        raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)

    # CRTSCTS set in the control flags means hardware flow control is on.
    return bool(attrs[2] & termios.CRTSCTS)
def _set_rtscts(self, enabled):
    # Enable or disable RTS/CTS hardware flow control on the tty.
    #
    # enabled (bool): True to enable, False to disable.
    # Raises SerialError on tcgetattr/tcsetattr failure, TypeError if
    # `enabled` is not a bool.
    if not isinstance(enabled, bool):
        raise TypeError("Invalid enabled type, should be boolean.")

    # Get tty attributes
    try:
        (iflag, oflag, cflag, lflag, ispeed, ospeed, cc) = termios.tcgetattr(self._fd)
    except termios.error as e:
        raise SerialError(e.errno, "Getting serial port attributes: " + e.strerror)

    # Modify tty attributes.
    # BUG FIX: the original did `cflag = ~termios.CRTSCTS`, which replaced
    # the whole control-flag word and clobbered character size, parity,
    # stop bits, etc. Only the CRTSCTS bit must be cleared here.
    cflag &= ~termios.CRTSCTS
    if enabled:
        cflag |= termios.CRTSCTS

    # Set tty attributes
    try:
        termios.tcsetattr(self._fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
    except termios.error as e:
        raise SerialError(e.errno, "Setting serial port attributes: " + e.strerror)
rtscts = property(_get_rtscts, _set_rtscts)
"""Get or set hardware flow control.
Raises:
SerialError: if an I/O or OS error occurs.
TypeError: if `rtscts` type is not bool.
:type: bool
"""
# String representation
def __str__(self):
    """Return a human-readable summary of the serial port settings."""
    fields = (self.devpath, self.fd, self.baudrate, self.databits,
              self.parity, self.stopbits, str(self.xonxoff), str(self.rtscts))
    return ("Serial (device=%s, fd=%d, baudrate=%d, databits=%d, "
            "parity=%s, stopbits=%d, xonxoff=%s, rtscts=%s)" % fields)
|
def read(self):
    """Read the current logical level of the GPIO.

    Returns:
        bool: ``True`` for high state, ``False`` for low state.

    Raises:
        GPIOError: if an I/O or OS error occurs.
    """
    try:
        buf = os.read(self._fd, 2)
    except OSError as e:
        raise GPIOError(e.errno, "Reading GPIO: " + e.strerror)

    # Seek back to the start so the next access sees the value byte.
    try:
        os.lseek(self._fd, 0, os.SEEK_SET)
    except OSError as e:
        raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)

    first = buf[0]
    if first == b"1"[0]:
        return True
    if first == b"0"[0]:
        return False
    raise GPIOError(None, "Unknown GPIO value: \"%s\"" % first)
Returns:
bool: ``True`` for high state, ``False`` for low state.
Raises:
GPIOError: if an I/O or OS error occurs. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/gpio.py#L103-L130 | null | class GPIO(object):
# Number of retries to check for successful GPIO export
GPIO_EXPORT_STAT_RETRIES = 10
# Delay between check for GPIO export (100ms)
GPIO_EXPORT_STAT_DELAY = 0.1
def __init__(self, pin, direction="preserve"):
"""Instantiate a GPIO object and open the sysfs GPIO corresponding to
the specified pin, with the specified direction.
`direction` can be "in" for input; "out" for output, initialized to
low; "high" for output, initialized to high; "low" for output,
initialized to low, or "preserve" for preserving existing direction.
Default is "preserve".
Args:
pin (int): Linux pin number.
direction (str): pin direction, can be "in", "out", "high", "low",
or "preserve".
Returns:
GPIO: GPIO object.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `pin` or `direction` types are invalid.
ValueError: if `direction` value is invalid.
TimeoutError: if waiting for GPIO export times out.
"""
self._fd = None
self._pin = None
self._open(pin, direction)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, pin, direction):
    # Export (if needed), configure, and open the sysfs GPIO value file.
    #
    # pin (int): Linux GPIO number; direction (str): "in", "out", "high",
    # "low", or "preserve". Raises GPIOError / TypeError / ValueError /
    # TimeoutError as documented on __init__.
    if not isinstance(pin, int):
        raise TypeError("Invalid pin type, should be integer.")
    if not isinstance(direction, str):
        raise TypeError("Invalid direction type, should be string.")
    if direction.lower() not in ["in", "out", "high", "low", "preserve"]:
        raise ValueError("Invalid direction, can be: \"in\", \"out\", \"high\", \"low\", \"preserve\".")

    gpio_path = "/sys/class/gpio/gpio%d" % pin

    if not os.path.isdir(gpio_path):
        # Export the pin
        try:
            with open("/sys/class/gpio/export", "w") as f_export:
                f_export.write("%d\n" % pin)
        except IOError as e:
            raise GPIOError(e.errno, "Exporting GPIO: " + e.strerror)

        # Loop until GPIO is exported: the kernel creates the sysfs
        # directory asynchronously, so retry with a short delay.
        exported = False
        for i in range(GPIO.GPIO_EXPORT_STAT_RETRIES):
            if os.path.isdir(gpio_path):
                exported = True
                break

            time.sleep(GPIO.GPIO_EXPORT_STAT_DELAY)

        if not exported:
            raise TimeoutError("Exporting GPIO: waiting for '%s' timed out" % gpio_path)

    # Write direction, if it's not to be preserved
    direction = direction.lower()
    if direction != "preserve":
        try:
            with open("/sys/class/gpio/gpio%d/direction" % pin, "w") as f_direction:
                f_direction.write(direction + "\n")
        except IOError as e:
            raise GPIOError(e.errno, "Setting GPIO direction: " + e.strerror)

    # Open the value file read/write; kept open for the object's lifetime.
    try:
        self._fd = os.open("/sys/class/gpio/gpio%d/value" % pin, os.O_RDWR)
    except OSError as e:
        raise GPIOError(e.errno, "Opening GPIO: " + e.strerror)

    self._pin = pin
# Methods
def write(self, value):
    """Set the state of the GPIO to `value`.

    Args:
        value (bool): ``True`` for high state, ``False`` for low state.

    Raises:
        GPIOError: if an I/O or OS error occurs.
        TypeError: if `value` type is not bool.
    """
    if not isinstance(value, bool):
        raise TypeError("Invalid value type, should be bool.")

    data = b"1\n" if value else b"0\n"

    try:
        os.write(self._fd, data)
    except OSError as e:
        raise GPIOError(e.errno, "Writing GPIO: " + e.strerror)

    # Seek back to the start so the next access sees the value byte.
    try:
        os.lseek(self._fd, 0, os.SEEK_SET)
    except OSError as e:
        raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)
def poll(self, timeout=None):
    """Poll a GPIO for the edge event configured with the .edge property.

    `timeout` can be a positive number for a timeout in seconds, 0 for a
    non-blocking poll, or negative or None for a blocking poll. Defaults to
    blocking poll.

    Args:
        timeout (int, float, None): timeout duration in seconds.

    Returns:
        bool: ``True`` if an edge event occurred, ``False`` on timeout.

    Raises:
        GPIOError: if an I/O or OS error occurs.
        TypeError: if `timeout` type is not None or int.
    """
    if not isinstance(timeout, (int, float, type(None))):
        raise TypeError("Invalid timeout type, should be integer, float, or None.")

    # Setup epoll.
    # BUG FIX: close the epoll object on every exit path; the original
    # leaked one epoll file descriptor per poll() call.
    p = select.epoll()
    try:
        p.register(self._fd, select.EPOLLIN | select.EPOLLET | select.EPOLLPRI)

        # Poll twice, as first call returns with current state
        for _ in range(2):
            events = p.poll(timeout)

            # If GPIO edge interrupt occurred
            if events:
                # Rewind
                try:
                    os.lseek(self._fd, 0, os.SEEK_SET)
                except OSError as e:
                    raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)

                return True
    finally:
        p.close()

    return False
def close(self):
    """Close the sysfs GPIO value file descriptor.

    Safe to call when already closed.

    Raises:
        GPIOError: if an I/O or OS error occurs.
    """
    fd = self._fd
    if fd is None:
        return

    try:
        os.close(fd)
    except OSError as e:
        raise GPIOError(e.errno, "Closing GPIO: " + e.strerror)

    # Mark closed only after the descriptor was released successfully.
    self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor for the underlying sysfs GPIO "value" file
of the GPIO object.
:type: int
"""
return self._fd
@property
def pin(self):
"""Get the sysfs GPIO pin number.
:type: int
"""
return self._pin
@property
def supports_interrupts(self):
"""Get whether or not this GPIO supports edge interrupts, configurable
with the .edge property.
:type: bool
"""
return os.path.isfile("/sys/class/gpio/gpio%d/edge" % self._pin)
# Mutable properties
def _get_direction(self):
# Read direction
try:
with open("/sys/class/gpio/gpio%d/direction" % self._pin, "r") as f_direction:
direction = f_direction.read()
except IOError as e:
raise GPIOError(e.errno, "Getting GPIO direction: " + e.strerror)
return direction.strip()
def _set_direction(self, direction):
if not isinstance(direction, str):
raise TypeError("Invalid direction type, should be string.")
if direction.lower() not in ["in", "out", "high", "low"]:
raise ValueError("Invalid direction, can be: \"in\", \"out\", \"high\", \"low\".")
# Write direction
try:
direction = direction.lower()
with open("/sys/class/gpio/gpio%d/direction" % self._pin, "w") as f_direction:
f_direction.write(direction + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO direction: " + e.strerror)
direction = property(_get_direction, _set_direction)
"""Get or set the GPIO's direction. Can be "in", "out", "high", "low".
Direction "in" is input; "out" is output, initialized to low; "high" is
output, initialized to high; and "low" is output, initialized to low.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `direction` type is not str.
ValueError: if `direction` value is invalid.
:type: str
"""
def _get_edge(self):
# Read edge
try:
with open("/sys/class/gpio/gpio%d/edge" % self._pin, "r") as f_edge:
edge = f_edge.read()
except IOError as e:
raise GPIOError(e.errno, "Getting GPIO edge: " + e.strerror)
return edge.strip()
def _set_edge(self, edge):
if not isinstance(edge, str):
raise TypeError("Invalid edge type, should be string.")
if edge.lower() not in ["none", "rising", "falling", "both"]:
raise ValueError("Invalid edge, can be: \"none\", \"rising\", \"falling\", \"both\".")
# Write edge
try:
edge = edge.lower()
with open("/sys/class/gpio/gpio%d/edge" % self._pin, "w") as f_edge:
f_edge.write(edge + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO edge: " + e.strerror)
edge = property(_get_edge, _set_edge)
"""Get or set the GPIO's interrupt edge. Can be "none", "rising", "falling", "both".
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `edge` type is not str.
ValueError: if `edge` value is invalid.
:type: str
"""
# String representation
def __str__(self):
if self.supports_interrupts:
return "GPIO %d (fd=%d, direction=%s, supports interrupts, edge=%s)" % (self._pin, self._fd, self.direction, self.edge)
return "GPIO %d (fd=%d, direction=%s, no interrupts)" % (self._pin, self._fd, self.direction)
|
def write(self, value):
    """Set the state of the GPIO to `value`.

    Args:
        value (bool): ``True`` for high state, ``False`` for low state.

    Raises:
        GPIOError: if an I/O or OS error occurs.
        TypeError: if `value` type is not bool.
    """
    if not isinstance(value, bool):
        raise TypeError("Invalid value type, should be bool.")

    data = b"1\n" if value else b"0\n"

    try:
        os.write(self._fd, data)
    except OSError as e:
        raise GPIOError(e.errno, "Writing GPIO: " + e.strerror)

    # Seek back to the start so the next access sees the value byte.
    try:
        os.lseek(self._fd, 0, os.SEEK_SET)
    except OSError as e:
        raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)
Args:
value (bool): ``True`` for high state, ``False`` for low state.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `value` type is not bool. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/gpio.py#L132-L159 | null | class GPIO(object):
# Number of retries to check for successful GPIO export
GPIO_EXPORT_STAT_RETRIES = 10
# Delay between check for GPIO export (100ms)
GPIO_EXPORT_STAT_DELAY = 0.1
def __init__(self, pin, direction="preserve"):
"""Instantiate a GPIO object and open the sysfs GPIO corresponding to
the specified pin, with the specified direction.
`direction` can be "in" for input; "out" for output, initialized to
low; "high" for output, initialized to high; "low" for output,
initialized to low, or "preserve" for preserving existing direction.
Default is "preserve".
Args:
pin (int): Linux pin number.
direction (str): pin direction, can be "in", "out", "high", "low",
or "preserve".
Returns:
GPIO: GPIO object.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `pin` or `direction` types are invalid.
ValueError: if `direction` value is invalid.
TimeoutError: if waiting for GPIO export times out.
"""
self._fd = None
self._pin = None
self._open(pin, direction)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, pin, direction):
if not isinstance(pin, int):
raise TypeError("Invalid pin type, should be integer.")
if not isinstance(direction, str):
raise TypeError("Invalid direction type, should be string.")
if direction.lower() not in ["in", "out", "high", "low", "preserve"]:
raise ValueError("Invalid direction, can be: \"in\", \"out\", \"high\", \"low\", \"preserve\".")
gpio_path = "/sys/class/gpio/gpio%d" % pin
if not os.path.isdir(gpio_path):
# Export the pin
try:
with open("/sys/class/gpio/export", "w") as f_export:
f_export.write("%d\n" % pin)
except IOError as e:
raise GPIOError(e.errno, "Exporting GPIO: " + e.strerror)
# Loop until GPIO is exported
exported = False
for i in range(GPIO.GPIO_EXPORT_STAT_RETRIES):
if os.path.isdir(gpio_path):
exported = True
break
time.sleep(GPIO.GPIO_EXPORT_STAT_DELAY)
if not exported:
raise TimeoutError("Exporting GPIO: waiting for '%s' timed out" % gpio_path)
# Write direction, if it's not to be preserved
direction = direction.lower()
if direction != "preserve":
try:
with open("/sys/class/gpio/gpio%d/direction" % pin, "w") as f_direction:
f_direction.write(direction + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO direction: " + e.strerror)
# Open value
try:
self._fd = os.open("/sys/class/gpio/gpio%d/value" % pin, os.O_RDWR)
except OSError as e:
raise GPIOError(e.errno, "Opening GPIO: " + e.strerror)
self._pin = pin
# Methods
def read(self):
"""Read the state of the GPIO.
Returns:
bool: ``True`` for high state, ``False`` for low state.
Raises:
GPIOError: if an I/O or OS error occurs.
"""
# Read value
try:
buf = os.read(self._fd, 2)
except OSError as e:
raise GPIOError(e.errno, "Reading GPIO: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)
if buf[0] == b"0"[0]:
return False
elif buf[0] == b"1"[0]:
return True
raise GPIOError(None, "Unknown GPIO value: \"%s\"" % buf[0])
def poll(self, timeout=None):
    """Poll a GPIO for the edge event configured with the .edge property.

    `timeout` can be a positive number for a timeout in seconds, 0 for a
    non-blocking poll, or negative or None for a blocking poll. Defaults to
    blocking poll.

    Args:
        timeout (int, float, None): timeout duration in seconds.

    Returns:
        bool: ``True`` if an edge event occurred, ``False`` on timeout.

    Raises:
        GPIOError: if an I/O or OS error occurs.
        TypeError: if `timeout` type is not None or int.
    """
    if not isinstance(timeout, (int, float, type(None))):
        raise TypeError("Invalid timeout type, should be integer, float, or None.")

    # Setup epoll.
    # BUG FIX: close the epoll object on every exit path; the original
    # leaked one epoll file descriptor per poll() call.
    p = select.epoll()
    try:
        p.register(self._fd, select.EPOLLIN | select.EPOLLET | select.EPOLLPRI)

        # Poll twice, as first call returns with current state
        for _ in range(2):
            events = p.poll(timeout)

            # If GPIO edge interrupt occurred
            if events:
                # Rewind
                try:
                    os.lseek(self._fd, 0, os.SEEK_SET)
                except OSError as e:
                    raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)

                return True
    finally:
        p.close()

    return False
def close(self):
"""Close the sysfs GPIO.
Raises:
GPIOError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise GPIOError(e.errno, "Closing GPIO: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor for the underlying sysfs GPIO "value" file
of the GPIO object.
:type: int
"""
return self._fd
@property
def pin(self):
"""Get the sysfs GPIO pin number.
:type: int
"""
return self._pin
@property
def supports_interrupts(self):
"""Get whether or not this GPIO supports edge interrupts, configurable
with the .edge property.
:type: bool
"""
return os.path.isfile("/sys/class/gpio/gpio%d/edge" % self._pin)
# Mutable properties
def _get_direction(self):
# Read direction
try:
with open("/sys/class/gpio/gpio%d/direction" % self._pin, "r") as f_direction:
direction = f_direction.read()
except IOError as e:
raise GPIOError(e.errno, "Getting GPIO direction: " + e.strerror)
return direction.strip()
def _set_direction(self, direction):
if not isinstance(direction, str):
raise TypeError("Invalid direction type, should be string.")
if direction.lower() not in ["in", "out", "high", "low"]:
raise ValueError("Invalid direction, can be: \"in\", \"out\", \"high\", \"low\".")
# Write direction
try:
direction = direction.lower()
with open("/sys/class/gpio/gpio%d/direction" % self._pin, "w") as f_direction:
f_direction.write(direction + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO direction: " + e.strerror)
direction = property(_get_direction, _set_direction)
"""Get or set the GPIO's direction. Can be "in", "out", "high", "low".
Direction "in" is input; "out" is output, initialized to low; "high" is
output, initialized to high; and "low" is output, initialized to low.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `direction` type is not str.
ValueError: if `direction` value is invalid.
:type: str
"""
def _get_edge(self):
# Read edge
try:
with open("/sys/class/gpio/gpio%d/edge" % self._pin, "r") as f_edge:
edge = f_edge.read()
except IOError as e:
raise GPIOError(e.errno, "Getting GPIO edge: " + e.strerror)
return edge.strip()
def _set_edge(self, edge):
if not isinstance(edge, str):
raise TypeError("Invalid edge type, should be string.")
if edge.lower() not in ["none", "rising", "falling", "both"]:
raise ValueError("Invalid edge, can be: \"none\", \"rising\", \"falling\", \"both\".")
# Write edge
try:
edge = edge.lower()
with open("/sys/class/gpio/gpio%d/edge" % self._pin, "w") as f_edge:
f_edge.write(edge + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO edge: " + e.strerror)
edge = property(_get_edge, _set_edge)
"""Get or set the GPIO's interrupt edge. Can be "none", "rising", "falling", "both".
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `edge` type is not str.
ValueError: if `edge` value is invalid.
:type: str
"""
# String representation
def __str__(self):
if self.supports_interrupts:
return "GPIO %d (fd=%d, direction=%s, supports interrupts, edge=%s)" % (self._pin, self._fd, self.direction, self.edge)
return "GPIO %d (fd=%d, direction=%s, no interrupts)" % (self._pin, self._fd, self.direction)
|
def poll(self, timeout=None):
    """Poll a GPIO for the edge event configured with the .edge property.

    `timeout` can be a positive number for a timeout in seconds, 0 for a
    non-blocking poll, or negative or None for a blocking poll. Defaults to
    blocking poll.

    Args:
        timeout (int, float, None): timeout duration in seconds.

    Returns:
        bool: ``True`` if an edge event occurred, ``False`` on timeout.

    Raises:
        GPIOError: if an I/O or OS error occurs.
        TypeError: if `timeout` type is not None or int.
    """
    if not isinstance(timeout, (int, float, type(None))):
        raise TypeError("Invalid timeout type, should be integer, float, or None.")

    # Setup epoll.
    # BUG FIX: close the epoll object on every exit path; the original
    # leaked one epoll file descriptor per poll() call.
    p = select.epoll()
    try:
        p.register(self._fd, select.EPOLLIN | select.EPOLLET | select.EPOLLPRI)

        # Poll twice, as first call returns with current state
        for _ in range(2):
            events = p.poll(timeout)

            # If GPIO edge interrupt occurred
            if events:
                # Rewind
                try:
                    os.lseek(self._fd, 0, os.SEEK_SET)
                except OSError as e:
                    raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)

                return True
    finally:
        p.close()

    return False
`timeout` can be a positive number for a timeout in seconds, 0 for a
non-blocking poll, or negative or None for a blocking poll. Defaults to
blocking poll.
Args:
timeout (int, float, None): timeout duration in seconds.
Returns:
bool: ``True`` if an edge event occurred, ``False`` on timeout.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `timeout` type is not None or int. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/gpio.py#L161-L200 | null | class GPIO(object):
# Number of retries to check for successful GPIO export
GPIO_EXPORT_STAT_RETRIES = 10
# Delay between check for GPIO export (100ms)
GPIO_EXPORT_STAT_DELAY = 0.1
def __init__(self, pin, direction="preserve"):
"""Instantiate a GPIO object and open the sysfs GPIO corresponding to
the specified pin, with the specified direction.
`direction` can be "in" for input; "out" for output, initialized to
low; "high" for output, initialized to high; "low" for output,
initialized to low, or "preserve" for preserving existing direction.
Default is "preserve".
Args:
pin (int): Linux pin number.
direction (str): pin direction, can be "in", "out", "high", "low",
or "preserve".
Returns:
GPIO: GPIO object.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `pin` or `direction` types are invalid.
ValueError: if `direction` value is invalid.
TimeoutError: if waiting for GPIO export times out.
"""
self._fd = None
self._pin = None
self._open(pin, direction)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, pin, direction):
if not isinstance(pin, int):
raise TypeError("Invalid pin type, should be integer.")
if not isinstance(direction, str):
raise TypeError("Invalid direction type, should be string.")
if direction.lower() not in ["in", "out", "high", "low", "preserve"]:
raise ValueError("Invalid direction, can be: \"in\", \"out\", \"high\", \"low\", \"preserve\".")
gpio_path = "/sys/class/gpio/gpio%d" % pin
if not os.path.isdir(gpio_path):
# Export the pin
try:
with open("/sys/class/gpio/export", "w") as f_export:
f_export.write("%d\n" % pin)
except IOError as e:
raise GPIOError(e.errno, "Exporting GPIO: " + e.strerror)
# Loop until GPIO is exported
exported = False
for i in range(GPIO.GPIO_EXPORT_STAT_RETRIES):
if os.path.isdir(gpio_path):
exported = True
break
time.sleep(GPIO.GPIO_EXPORT_STAT_DELAY)
if not exported:
raise TimeoutError("Exporting GPIO: waiting for '%s' timed out" % gpio_path)
# Write direction, if it's not to be preserved
direction = direction.lower()
if direction != "preserve":
try:
with open("/sys/class/gpio/gpio%d/direction" % pin, "w") as f_direction:
f_direction.write(direction + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO direction: " + e.strerror)
# Open value
try:
self._fd = os.open("/sys/class/gpio/gpio%d/value" % pin, os.O_RDWR)
except OSError as e:
raise GPIOError(e.errno, "Opening GPIO: " + e.strerror)
self._pin = pin
# Methods
def read(self):
"""Read the state of the GPIO.
Returns:
bool: ``True`` for high state, ``False`` for low state.
Raises:
GPIOError: if an I/O or OS error occurs.
"""
# Read value
try:
buf = os.read(self._fd, 2)
except OSError as e:
raise GPIOError(e.errno, "Reading GPIO: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)
if buf[0] == b"0"[0]:
return False
elif buf[0] == b"1"[0]:
return True
raise GPIOError(None, "Unknown GPIO value: \"%s\"" % buf[0])
def write(self, value):
"""Set the state of the GPIO to `value`.
Args:
value (bool): ``True`` for high state, ``False`` for low state.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `value` type is not bool.
"""
if not isinstance(value, bool):
raise TypeError("Invalid value type, should be bool.")
# Write value
try:
if value:
os.write(self._fd, b"1\n")
else:
os.write(self._fd, b"0\n")
except OSError as e:
raise GPIOError(e.errno, "Writing GPIO: " + e.strerror)
# Rewind
try:
os.lseek(self._fd, 0, os.SEEK_SET)
except OSError as e:
raise GPIOError(e.errno, "Rewinding GPIO: " + e.strerror)
def close(self):
"""Close the sysfs GPIO.
Raises:
GPIOError: if an I/O or OS error occurs.
"""
if self._fd is None:
return
try:
os.close(self._fd)
except OSError as e:
raise GPIOError(e.errno, "Closing GPIO: " + e.strerror)
self._fd = None
# Immutable properties
@property
def fd(self):
"""Get the file descriptor for the underlying sysfs GPIO "value" file
of the GPIO object.
:type: int
"""
return self._fd
@property
def pin(self):
"""Get the sysfs GPIO pin number.
:type: int
"""
return self._pin
@property
def supports_interrupts(self):
"""Get whether or not this GPIO supports edge interrupts, configurable
with the .edge property.
:type: bool
"""
return os.path.isfile("/sys/class/gpio/gpio%d/edge" % self._pin)
# Mutable properties
def _get_direction(self):
# Read direction
try:
with open("/sys/class/gpio/gpio%d/direction" % self._pin, "r") as f_direction:
direction = f_direction.read()
except IOError as e:
raise GPIOError(e.errno, "Getting GPIO direction: " + e.strerror)
return direction.strip()
def _set_direction(self, direction):
if not isinstance(direction, str):
raise TypeError("Invalid direction type, should be string.")
if direction.lower() not in ["in", "out", "high", "low"]:
raise ValueError("Invalid direction, can be: \"in\", \"out\", \"high\", \"low\".")
# Write direction
try:
direction = direction.lower()
with open("/sys/class/gpio/gpio%d/direction" % self._pin, "w") as f_direction:
f_direction.write(direction + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO direction: " + e.strerror)
direction = property(_get_direction, _set_direction)
"""Get or set the GPIO's direction. Can be "in", "out", "high", "low".
Direction "in" is input; "out" is output, initialized to low; "high" is
output, initialized to high; and "low" is output, initialized to low.
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `direction` type is not str.
ValueError: if `direction` value is invalid.
:type: str
"""
def _get_edge(self):
# Read edge
try:
with open("/sys/class/gpio/gpio%d/edge" % self._pin, "r") as f_edge:
edge = f_edge.read()
except IOError as e:
raise GPIOError(e.errno, "Getting GPIO edge: " + e.strerror)
return edge.strip()
def _set_edge(self, edge):
if not isinstance(edge, str):
raise TypeError("Invalid edge type, should be string.")
if edge.lower() not in ["none", "rising", "falling", "both"]:
raise ValueError("Invalid edge, can be: \"none\", \"rising\", \"falling\", \"both\".")
# Write edge
try:
edge = edge.lower()
with open("/sys/class/gpio/gpio%d/edge" % self._pin, "w") as f_edge:
f_edge.write(edge + "\n")
except IOError as e:
raise GPIOError(e.errno, "Setting GPIO edge: " + e.strerror)
edge = property(_get_edge, _set_edge)
"""Get or set the GPIO's interrupt edge. Can be "none", "rising", "falling", "both".
Raises:
GPIOError: if an I/O or OS error occurs.
TypeError: if `edge` type is not str.
ValueError: if `edge` value is invalid.
:type: str
"""
# String representation
def __str__(self):
if self.supports_interrupts:
return "GPIO %d (fd=%d, direction=%s, supports interrupts, edge=%s)" % (self._pin, self._fd, self.direction, self.edge)
return "GPIO %d (fd=%d, direction=%s, no interrupts)" % (self._pin, self._fd, self.direction)
|
def read32(self, offset):
    """Read 32-bits from the specified `offset` in bytes, relative to the
    base physical address of the MMIO region.

    Args:
        offset (int, long): offset from base physical address, in bytes.

    Returns:
        int: 32-bit value read.

    Raises:
        TypeError: if `offset` type is invalid.
        ValueError: if `offset` is out of bounds.
    """
    # NOTE(review): `long` here is presumably a Python 2 compatibility
    # alias defined elsewhere in this module — confirm against the file
    # header.
    if not isinstance(offset, (int, long)):
        raise TypeError("Invalid offset type, should be integer.")

    offset = self._adjust_offset(offset)
    self._validate_offset(offset, 4)
    # "=L": native byte order, standard-size 4-byte unsigned integer.
    return struct.unpack("=L", self.mapping[offset:offset + 4])[0]
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 32-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/mmio.py#L84-L104 | [
"def _adjust_offset(self, offset):\n return offset + (self._physaddr - self._aligned_physaddr)\n",
"def _validate_offset(self, offset, length):\n if (offset + length) > self._aligned_size:\n raise ValueError(\"Offset out of bounds.\")\n"
] | class MMIO(object):
def __init__(self, physaddr, size):
"""Instantiate an MMIO object and map the region of physical memory
specified by the address base `physaddr` and size `size` in bytes.
Args:
physaddr (int, long): base physical address of memory region.
size (int, long): size of memory region.
Returns:
MMIO: MMIO object.
Raises:
MMIOError: if an I/O or OS error occurs.
TypeError: if `physaddr` or `size` types are invalid.
"""
self.mapping = None
self._open(physaddr, size)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, physaddr, size):
if not isinstance(physaddr, (int, long)):
raise TypeError("Invalid physaddr type, should be integer.")
if not isinstance(size, (int, long)):
raise TypeError("Invalid size type, should be integer.")
pagesize = os.sysconf(os.sysconf_names['SC_PAGESIZE'])
self._physaddr = physaddr
self._size = size
self._aligned_physaddr = physaddr - (physaddr % pagesize)
self._aligned_size = size + (physaddr - self._aligned_physaddr)
try:
fd = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
except OSError as e:
raise MMIOError(e.errno, "Opening /dev/mem: " + e.strerror)
try:
self.mapping = mmap.mmap(fd, self._aligned_size, flags=mmap.MAP_SHARED, prot=(mmap.PROT_READ | mmap.PROT_WRITE), offset=self._aligned_physaddr)
except OSError as e:
raise MMIOError(e.errno, "Mapping /dev/mem: " + e.strerror)
try:
os.close(fd)
except OSError as e:
raise MMIOError(e.errno, "Closing /dev/mem: " + e.strerror)
# Methods
def _adjust_offset(self, offset):
return offset + (self._physaddr - self._aligned_physaddr)
def _validate_offset(self, offset, length):
if (offset + length) > self._aligned_size:
raise ValueError("Offset out of bounds.")
def read16(self, offset):
"""Read 16-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 16-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
return struct.unpack("=H", self.mapping[offset:offset + 2])[0]
def read8(self, offset):
"""Read 8-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 8-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
return struct.unpack("B", self.mapping[offset:offset + 1])[0]
def read(self, offset, length):
"""Read a string of bytes from the specified `offset` in bytes,
relative to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
length (int): number of bytes to read.
Returns:
bytes: bytes read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, length)
return bytes(self.mapping[offset:offset + length])
def write32(self, offset, value):
"""Write 32-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 32-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffffffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
self.mapping[offset:offset + 4] = struct.pack("=L", value)
def write16(self, offset, value):
"""Write 16-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 16-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
self.mapping[offset:offset + 2] = struct.pack("=H", value)
def write8(self, offset, value):
"""Write 8-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 8-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
self.mapping[offset:offset + 1] = struct.pack("B", value)
def write(self, offset, data):
"""Write a string of bytes to the specified `offset` in bytes, relative
to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
data (bytes, bytearray, list): a byte array or list of 8-bit
integers to write.
Raises:
TypeError: if `offset` or `data` type are invalid.
ValueError: if `offset` is out of bounds, or if data is not valid bytes.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, expected bytes, bytearray, or list.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, len(data))
data = bytes(bytearray(data))
self.mapping[offset:offset + len(data)] = data
def close(self):
"""Unmap the MMIO object's mapped physical memory."""
if self.mapping is None:
return
self.mapping.close()
self.mapping = None
self._fd = None
# Immutable properties
@property
def base(self):
"""Get the base physical address of the MMIO region.
:type: int
"""
return self._physaddr
@property
def size(self):
"""Get the mapping size of the MMIO region.
:type: int
"""
return self._size
@property
def pointer(self):
"""Get a ctypes void pointer to the memory mapped region.
:type: ctypes.c_void_p
"""
return ctypes.cast(ctypes.pointer(ctypes.c_uint8.from_buffer(self.mapping, 0)), ctypes.c_void_p)
# String representation
def __str__(self):
return "MMIO 0x%08x (size=%d)" % (self.base, self.size)
|
vsergeev/python-periphery | periphery/mmio.py | MMIO.read | python | def read(self, offset, length):
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, length)
return bytes(self.mapping[offset:offset + length]) | Read a string of bytes from the specified `offset` in bytes,
relative to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
length (int): number of bytes to read.
Returns:
bytes: bytes read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/mmio.py#L150-L171 | [
"def _adjust_offset(self, offset):\n return offset + (self._physaddr - self._aligned_physaddr)\n",
"def _validate_offset(self, offset, length):\n if (offset + length) > self._aligned_size:\n raise ValueError(\"Offset out of bounds.\")\n"
] | class MMIO(object):
def __init__(self, physaddr, size):
"""Instantiate an MMIO object and map the region of physical memory
specified by the address base `physaddr` and size `size` in bytes.
Args:
physaddr (int, long): base physical address of memory region.
size (int, long): size of memory region.
Returns:
MMIO: MMIO object.
Raises:
MMIOError: if an I/O or OS error occurs.
TypeError: if `physaddr` or `size` types are invalid.
"""
self.mapping = None
self._open(physaddr, size)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, physaddr, size):
if not isinstance(physaddr, (int, long)):
raise TypeError("Invalid physaddr type, should be integer.")
if not isinstance(size, (int, long)):
raise TypeError("Invalid size type, should be integer.")
pagesize = os.sysconf(os.sysconf_names['SC_PAGESIZE'])
self._physaddr = physaddr
self._size = size
self._aligned_physaddr = physaddr - (physaddr % pagesize)
self._aligned_size = size + (physaddr - self._aligned_physaddr)
try:
fd = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
except OSError as e:
raise MMIOError(e.errno, "Opening /dev/mem: " + e.strerror)
try:
self.mapping = mmap.mmap(fd, self._aligned_size, flags=mmap.MAP_SHARED, prot=(mmap.PROT_READ | mmap.PROT_WRITE), offset=self._aligned_physaddr)
except OSError as e:
raise MMIOError(e.errno, "Mapping /dev/mem: " + e.strerror)
try:
os.close(fd)
except OSError as e:
raise MMIOError(e.errno, "Closing /dev/mem: " + e.strerror)
# Methods
def _adjust_offset(self, offset):
return offset + (self._physaddr - self._aligned_physaddr)
def _validate_offset(self, offset, length):
if (offset + length) > self._aligned_size:
raise ValueError("Offset out of bounds.")
def read32(self, offset):
"""Read 32-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 32-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
return struct.unpack("=L", self.mapping[offset:offset + 4])[0]
def read16(self, offset):
"""Read 16-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 16-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
return struct.unpack("=H", self.mapping[offset:offset + 2])[0]
def read8(self, offset):
"""Read 8-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 8-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
return struct.unpack("B", self.mapping[offset:offset + 1])[0]
def write32(self, offset, value):
"""Write 32-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 32-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffffffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
self.mapping[offset:offset + 4] = struct.pack("=L", value)
def write16(self, offset, value):
"""Write 16-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 16-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
self.mapping[offset:offset + 2] = struct.pack("=H", value)
def write8(self, offset, value):
"""Write 8-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 8-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
self.mapping[offset:offset + 1] = struct.pack("B", value)
def write(self, offset, data):
"""Write a string of bytes to the specified `offset` in bytes, relative
to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
data (bytes, bytearray, list): a byte array or list of 8-bit
integers to write.
Raises:
TypeError: if `offset` or `data` type are invalid.
ValueError: if `offset` is out of bounds, or if data is not valid bytes.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, expected bytes, bytearray, or list.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, len(data))
data = bytes(bytearray(data))
self.mapping[offset:offset + len(data)] = data
def close(self):
"""Unmap the MMIO object's mapped physical memory."""
if self.mapping is None:
return
self.mapping.close()
self.mapping = None
self._fd = None
# Immutable properties
@property
def base(self):
"""Get the base physical address of the MMIO region.
:type: int
"""
return self._physaddr
@property
def size(self):
"""Get the mapping size of the MMIO region.
:type: int
"""
return self._size
@property
def pointer(self):
"""Get a ctypes void pointer to the memory mapped region.
:type: ctypes.c_void_p
"""
return ctypes.cast(ctypes.pointer(ctypes.c_uint8.from_buffer(self.mapping, 0)), ctypes.c_void_p)
# String representation
def __str__(self):
return "MMIO 0x%08x (size=%d)" % (self.base, self.size)
|
vsergeev/python-periphery | periphery/mmio.py | MMIO.write8 | python | def write8(self, offset, value):
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
self.mapping[offset:offset + 1] = struct.pack("B", value) | Write 8-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 8-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/mmio.py#L221-L243 | [
"def _adjust_offset(self, offset):\n return offset + (self._physaddr - self._aligned_physaddr)\n",
"def _validate_offset(self, offset, length):\n if (offset + length) > self._aligned_size:\n raise ValueError(\"Offset out of bounds.\")\n"
] | class MMIO(object):
def __init__(self, physaddr, size):
"""Instantiate an MMIO object and map the region of physical memory
specified by the address base `physaddr` and size `size` in bytes.
Args:
physaddr (int, long): base physical address of memory region.
size (int, long): size of memory region.
Returns:
MMIO: MMIO object.
Raises:
MMIOError: if an I/O or OS error occurs.
TypeError: if `physaddr` or `size` types are invalid.
"""
self.mapping = None
self._open(physaddr, size)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, physaddr, size):
if not isinstance(physaddr, (int, long)):
raise TypeError("Invalid physaddr type, should be integer.")
if not isinstance(size, (int, long)):
raise TypeError("Invalid size type, should be integer.")
pagesize = os.sysconf(os.sysconf_names['SC_PAGESIZE'])
self._physaddr = physaddr
self._size = size
self._aligned_physaddr = physaddr - (physaddr % pagesize)
self._aligned_size = size + (physaddr - self._aligned_physaddr)
try:
fd = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
except OSError as e:
raise MMIOError(e.errno, "Opening /dev/mem: " + e.strerror)
try:
self.mapping = mmap.mmap(fd, self._aligned_size, flags=mmap.MAP_SHARED, prot=(mmap.PROT_READ | mmap.PROT_WRITE), offset=self._aligned_physaddr)
except OSError as e:
raise MMIOError(e.errno, "Mapping /dev/mem: " + e.strerror)
try:
os.close(fd)
except OSError as e:
raise MMIOError(e.errno, "Closing /dev/mem: " + e.strerror)
# Methods
def _adjust_offset(self, offset):
return offset + (self._physaddr - self._aligned_physaddr)
def _validate_offset(self, offset, length):
if (offset + length) > self._aligned_size:
raise ValueError("Offset out of bounds.")
def read32(self, offset):
"""Read 32-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 32-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
return struct.unpack("=L", self.mapping[offset:offset + 4])[0]
def read16(self, offset):
"""Read 16-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 16-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
return struct.unpack("=H", self.mapping[offset:offset + 2])[0]
def read8(self, offset):
"""Read 8-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 8-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
return struct.unpack("B", self.mapping[offset:offset + 1])[0]
def read(self, offset, length):
"""Read a string of bytes from the specified `offset` in bytes,
relative to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
length (int): number of bytes to read.
Returns:
bytes: bytes read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, length)
return bytes(self.mapping[offset:offset + length])
def write32(self, offset, value):
"""Write 32-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 32-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffffffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
self.mapping[offset:offset + 4] = struct.pack("=L", value)
def write16(self, offset, value):
"""Write 16-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 16-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
self.mapping[offset:offset + 2] = struct.pack("=H", value)
def write(self, offset, data):
"""Write a string of bytes to the specified `offset` in bytes, relative
to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
data (bytes, bytearray, list): a byte array or list of 8-bit
integers to write.
Raises:
TypeError: if `offset` or `data` type are invalid.
ValueError: if `offset` is out of bounds, or if data is not valid bytes.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, expected bytes, bytearray, or list.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, len(data))
data = bytes(bytearray(data))
self.mapping[offset:offset + len(data)] = data
def close(self):
"""Unmap the MMIO object's mapped physical memory."""
if self.mapping is None:
return
self.mapping.close()
self.mapping = None
self._fd = None
# Immutable properties
@property
def base(self):
"""Get the base physical address of the MMIO region.
:type: int
"""
return self._physaddr
@property
def size(self):
"""Get the mapping size of the MMIO region.
:type: int
"""
return self._size
@property
def pointer(self):
"""Get a ctypes void pointer to the memory mapped region.
:type: ctypes.c_void_p
"""
return ctypes.cast(ctypes.pointer(ctypes.c_uint8.from_buffer(self.mapping, 0)), ctypes.c_void_p)
# String representation
def __str__(self):
return "MMIO 0x%08x (size=%d)" % (self.base, self.size)
|
vsergeev/python-periphery | periphery/mmio.py | MMIO.write | python | def write(self, offset, data):
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, expected bytes, bytearray, or list.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, len(data))
data = bytes(bytearray(data))
self.mapping[offset:offset + len(data)] = data | Write a string of bytes to the specified `offset` in bytes, relative
to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
data (bytes, bytearray, list): a byte array or list of 8-bit
integers to write.
Raises:
TypeError: if `offset` or `data` type are invalid.
ValueError: if `offset` is out of bounds, or if data is not valid bytes. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/mmio.py#L245-L268 | [
"def _adjust_offset(self, offset):\n return offset + (self._physaddr - self._aligned_physaddr)\n",
"def _validate_offset(self, offset, length):\n if (offset + length) > self._aligned_size:\n raise ValueError(\"Offset out of bounds.\")\n"
] | class MMIO(object):
def __init__(self, physaddr, size):
"""Instantiate an MMIO object and map the region of physical memory
specified by the address base `physaddr` and size `size` in bytes.
Args:
physaddr (int, long): base physical address of memory region.
size (int, long): size of memory region.
Returns:
MMIO: MMIO object.
Raises:
MMIOError: if an I/O or OS error occurs.
TypeError: if `physaddr` or `size` types are invalid.
"""
self.mapping = None
self._open(physaddr, size)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, physaddr, size):
if not isinstance(physaddr, (int, long)):
raise TypeError("Invalid physaddr type, should be integer.")
if not isinstance(size, (int, long)):
raise TypeError("Invalid size type, should be integer.")
pagesize = os.sysconf(os.sysconf_names['SC_PAGESIZE'])
self._physaddr = physaddr
self._size = size
self._aligned_physaddr = physaddr - (physaddr % pagesize)
self._aligned_size = size + (physaddr - self._aligned_physaddr)
try:
fd = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
except OSError as e:
raise MMIOError(e.errno, "Opening /dev/mem: " + e.strerror)
try:
self.mapping = mmap.mmap(fd, self._aligned_size, flags=mmap.MAP_SHARED, prot=(mmap.PROT_READ | mmap.PROT_WRITE), offset=self._aligned_physaddr)
except OSError as e:
raise MMIOError(e.errno, "Mapping /dev/mem: " + e.strerror)
try:
os.close(fd)
except OSError as e:
raise MMIOError(e.errno, "Closing /dev/mem: " + e.strerror)
# Methods
def _adjust_offset(self, offset):
return offset + (self._physaddr - self._aligned_physaddr)
def _validate_offset(self, offset, length):
if (offset + length) > self._aligned_size:
raise ValueError("Offset out of bounds.")
def read32(self, offset):
"""Read 32-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 32-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
return struct.unpack("=L", self.mapping[offset:offset + 4])[0]
def read16(self, offset):
"""Read 16-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 16-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
return struct.unpack("=H", self.mapping[offset:offset + 2])[0]
def read8(self, offset):
"""Read 8-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 8-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
return struct.unpack("B", self.mapping[offset:offset + 1])[0]
def read(self, offset, length):
"""Read a string of bytes from the specified `offset` in bytes,
relative to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
length (int): number of bytes to read.
Returns:
bytes: bytes read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, length)
return bytes(self.mapping[offset:offset + length])
def write32(self, offset, value):
"""Write 32-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 32-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffffffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
self.mapping[offset:offset + 4] = struct.pack("=L", value)
def write16(self, offset, value):
"""Write 16-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 16-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
self.mapping[offset:offset + 2] = struct.pack("=H", value)
def write8(self, offset, value):
"""Write 8-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 8-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
self.mapping[offset:offset + 1] = struct.pack("B", value)
def close(self):
"""Unmap the MMIO object's mapped physical memory."""
if self.mapping is None:
return
self.mapping.close()
self.mapping = None
self._fd = None
# Immutable properties
@property
def base(self):
"""Get the base physical address of the MMIO region.
:type: int
"""
return self._physaddr
@property
def size(self):
"""Get the mapping size of the MMIO region.
:type: int
"""
return self._size
@property
def pointer(self):
"""Get a ctypes void pointer to the memory mapped region.
:type: ctypes.c_void_p
"""
return ctypes.cast(ctypes.pointer(ctypes.c_uint8.from_buffer(self.mapping, 0)), ctypes.c_void_p)
# String representation
def __str__(self):
return "MMIO 0x%08x (size=%d)" % (self.base, self.size)
|
vsergeev/python-periphery | periphery/mmio.py | MMIO.close | python | def close(self):
if self.mapping is None:
return
self.mapping.close()
self.mapping = None
self._fd = None | Unmap the MMIO object's mapped physical memory. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/mmio.py#L270-L278 | null | class MMIO(object):
def __init__(self, physaddr, size):
"""Instantiate an MMIO object and map the region of physical memory
specified by the address base `physaddr` and size `size` in bytes.
Args:
physaddr (int, long): base physical address of memory region.
size (int, long): size of memory region.
Returns:
MMIO: MMIO object.
Raises:
MMIOError: if an I/O or OS error occurs.
TypeError: if `physaddr` or `size` types are invalid.
"""
self.mapping = None
self._open(physaddr, size)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, physaddr, size):
if not isinstance(physaddr, (int, long)):
raise TypeError("Invalid physaddr type, should be integer.")
if not isinstance(size, (int, long)):
raise TypeError("Invalid size type, should be integer.")
pagesize = os.sysconf(os.sysconf_names['SC_PAGESIZE'])
self._physaddr = physaddr
self._size = size
self._aligned_physaddr = physaddr - (physaddr % pagesize)
self._aligned_size = size + (physaddr - self._aligned_physaddr)
try:
fd = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
except OSError as e:
raise MMIOError(e.errno, "Opening /dev/mem: " + e.strerror)
try:
self.mapping = mmap.mmap(fd, self._aligned_size, flags=mmap.MAP_SHARED, prot=(mmap.PROT_READ | mmap.PROT_WRITE), offset=self._aligned_physaddr)
except OSError as e:
raise MMIOError(e.errno, "Mapping /dev/mem: " + e.strerror)
try:
os.close(fd)
except OSError as e:
raise MMIOError(e.errno, "Closing /dev/mem: " + e.strerror)
# Methods
def _adjust_offset(self, offset):
return offset + (self._physaddr - self._aligned_physaddr)
def _validate_offset(self, offset, length):
if (offset + length) > self._aligned_size:
raise ValueError("Offset out of bounds.")
def read32(self, offset):
"""Read 32-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 32-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
return struct.unpack("=L", self.mapping[offset:offset + 4])[0]
def read16(self, offset):
"""Read 16-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 16-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
return struct.unpack("=H", self.mapping[offset:offset + 2])[0]
def read8(self, offset):
"""Read 8-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 8-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
return struct.unpack("B", self.mapping[offset:offset + 1])[0]
def read(self, offset, length):
"""Read a string of bytes from the specified `offset` in bytes,
relative to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
length (int): number of bytes to read.
Returns:
bytes: bytes read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, length)
return bytes(self.mapping[offset:offset + length])
def write32(self, offset, value):
"""Write 32-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 32-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffffffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
self.mapping[offset:offset + 4] = struct.pack("=L", value)
def write16(self, offset, value):
"""Write 16-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 16-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
self.mapping[offset:offset + 2] = struct.pack("=H", value)
def write8(self, offset, value):
"""Write 8-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 8-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
self.mapping[offset:offset + 1] = struct.pack("B", value)
def write(self, offset, data):
"""Write a string of bytes to the specified `offset` in bytes, relative
to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
data (bytes, bytearray, list): a byte array or list of 8-bit
integers to write.
Raises:
TypeError: if `offset` or `data` type are invalid.
ValueError: if `offset` is out of bounds, or if data is not valid bytes.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, expected bytes, bytearray, or list.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, len(data))
data = bytes(bytearray(data))
self.mapping[offset:offset + len(data)] = data
# Immutable properties
@property
def base(self):
    """Base physical address of the MMIO region.

    :type: int
    """
    return self._physaddr

@property
def size(self):
    """Requested mapping size of the MMIO region, in bytes.

    :type: int
    """
    return self._size

@property
def pointer(self):
    """ctypes void pointer to the memory-mapped region.

    NOTE(review): this points at offset 0 of the page-aligned mapping, not
    at `base` itself — the (base - aligned base) delta is not applied here.
    Confirm that callers expect the aligned start.

    :type: ctypes.c_void_p
    """
    first_byte = ctypes.c_uint8.from_buffer(self.mapping, 0)
    return ctypes.cast(ctypes.pointer(first_byte), ctypes.c_void_p)

# String representation
def __str__(self):
    """Printable summary of the mapping."""
    return "MMIO 0x%08x (size=%d)" % (self.base, self.size)
|
vsergeev/python-periphery | periphery/mmio.py | MMIO.pointer | python | def pointer(self):
return ctypes.cast(ctypes.pointer(ctypes.c_uint8.from_buffer(self.mapping, 0)), ctypes.c_void_p) | Get a ctypes void pointer to the memory mapped region.
:type: ctypes.c_void_p | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/mmio.py#L299-L304 | null | class MMIO(object):
def __init__(self, physaddr, size):
"""Instantiate an MMIO object and map the region of physical memory
specified by the address base `physaddr` and size `size` in bytes.
Args:
physaddr (int, long): base physical address of memory region.
size (int, long): size of memory region.
Returns:
MMIO: MMIO object.
Raises:
MMIOError: if an I/O or OS error occurs.
TypeError: if `physaddr` or `size` types are invalid.
"""
self.mapping = None
self._open(physaddr, size)
def __del__(self):
    """Best-effort cleanup when the object is garbage collected."""
    self.close()

def __enter__(self):
    # Context-manager entry must return the managed object; the original
    # bare ``pass`` returned None, so ``with MMIO(...) as mmio:`` bound
    # `mmio` to None.
    return self

def __exit__(self, t, value, traceback):
    """Release the mapping on context exit; returns None, so exceptions propagate."""
    self.close()
def _open(self, physaddr, size):
if not isinstance(physaddr, (int, long)):
raise TypeError("Invalid physaddr type, should be integer.")
if not isinstance(size, (int, long)):
raise TypeError("Invalid size type, should be integer.")
pagesize = os.sysconf(os.sysconf_names['SC_PAGESIZE'])
self._physaddr = physaddr
self._size = size
self._aligned_physaddr = physaddr - (physaddr % pagesize)
self._aligned_size = size + (physaddr - self._aligned_physaddr)
try:
fd = os.open("/dev/mem", os.O_RDWR | os.O_SYNC)
except OSError as e:
raise MMIOError(e.errno, "Opening /dev/mem: " + e.strerror)
try:
self.mapping = mmap.mmap(fd, self._aligned_size, flags=mmap.MAP_SHARED, prot=(mmap.PROT_READ | mmap.PROT_WRITE), offset=self._aligned_physaddr)
except OSError as e:
raise MMIOError(e.errno, "Mapping /dev/mem: " + e.strerror)
try:
os.close(fd)
except OSError as e:
raise MMIOError(e.errno, "Closing /dev/mem: " + e.strerror)
# Methods
def _adjust_offset(self, offset):
return offset + (self._physaddr - self._aligned_physaddr)
def _validate_offset(self, offset, length):
if (offset + length) > self._aligned_size:
raise ValueError("Offset out of bounds.")
def read32(self, offset):
"""Read 32-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 32-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
return struct.unpack("=L", self.mapping[offset:offset + 4])[0]
def read16(self, offset):
"""Read 16-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 16-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
return struct.unpack("=H", self.mapping[offset:offset + 2])[0]
def read8(self, offset):
"""Read 8-bits from the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
Returns:
int: 8-bit value read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
return struct.unpack("B", self.mapping[offset:offset + 1])[0]
def read(self, offset, length):
"""Read a string of bytes from the specified `offset` in bytes,
relative to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
length (int): number of bytes to read.
Returns:
bytes: bytes read.
Raises:
TypeError: if `offset` type is invalid.
ValueError: if `offset` is out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, length)
return bytes(self.mapping[offset:offset + length])
def write32(self, offset, value):
"""Write 32-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 32-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffffffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 4)
self.mapping[offset:offset + 4] = struct.pack("=L", value)
def write16(self, offset, value):
"""Write 16-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 16-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xffff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 2)
self.mapping[offset:offset + 2] = struct.pack("=H", value)
def write8(self, offset, value):
"""Write 8-bits to the specified `offset` in bytes, relative to the
base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
value (int, long): 8-bit value to write.
Raises:
TypeError: if `offset` or `value` type are invalid.
ValueError: if `offset` or `value` are out of bounds.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(value, (int, long)):
raise TypeError("Invalid value type, should be integer.")
if value < 0 or value > 0xff:
raise ValueError("Value out of bounds.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, 1)
self.mapping[offset:offset + 1] = struct.pack("B", value)
def write(self, offset, data):
"""Write a string of bytes to the specified `offset` in bytes, relative
to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
data (bytes, bytearray, list): a byte array or list of 8-bit
integers to write.
Raises:
TypeError: if `offset` or `data` type are invalid.
ValueError: if `offset` is out of bounds, or if data is not valid bytes.
"""
if not isinstance(offset, (int, long)):
raise TypeError("Invalid offset type, should be integer.")
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, expected bytes, bytearray, or list.")
offset = self._adjust_offset(offset)
self._validate_offset(offset, len(data))
data = bytes(bytearray(data))
self.mapping[offset:offset + len(data)] = data
def close(self):
    """Unmap the MMIO object's mapped physical memory.

    Safe to call more than once; subsequent calls are no-ops.
    """
    mapping = self.mapping
    if mapping is not None:
        mapping.close()
        self.mapping = None
        self._fd = None
# Immutable properties
@property
def base(self):
"""Get the base physical address of the MMIO region.
:type: int
"""
return self._physaddr
@property
def size(self):
"""Get the mapping size of the MMIO region.
:type: int
"""
return self._size
# String representation
def __str__(self):
    """Return a printable summary of the mapping.

    The stray ``@property`` that preceded this method (left dangling after
    the ``size`` property above) was removed: it would have decorated
    ``__str__`` itself, turning it into a property descriptor and breaking
    ``str(mmio)`` / ``print(mmio)``.
    """
    return "MMIO 0x%08x (size=%d)" % (self.base, self.size)
|
vsergeev/python-periphery | periphery/spi.py | SPI.transfer | python | def transfer(self, data):
if not isinstance(data, (bytes, bytearray, list)):
raise TypeError("Invalid data type, should be bytes, bytearray, or list.")
# Create mutable array
try:
buf = array.array('B', data)
except OverflowError:
raise ValueError("Invalid data bytes.")
buf_addr, buf_len = buf.buffer_info()
# Prepare transfer structure
spi_xfer = _CSpiIocTransfer()
spi_xfer.tx_buf = buf_addr
spi_xfer.rx_buf = buf_addr
spi_xfer.len = buf_len
# Transfer
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_MESSAGE_1, spi_xfer)
except OSError as e:
raise SPIError(e.errno, "SPI transfer: " + e.strerror)
# Return shifted out data with the same type as shifted in data
if isinstance(data, bytes):
return bytes(bytearray(buf))
elif isinstance(data, bytearray):
return bytearray(buf)
elif isinstance(data, list):
return buf.tolist() | Shift out `data` and return shifted in data.
Args:
data (bytes, bytearray, list): a byte array or list of 8-bit integers to shift out.
Returns:
bytes, bytearray, list: data shifted in.
Raises:
SPIError: if an I/O or OS error occurs.
TypeError: if `data` type is invalid.
ValueError: if data is not valid bytes. | train | https://github.com/vsergeev/python-periphery/blob/ff4d535691a1747a76962a3d077d96d224308611/periphery/spi.py#L131-L175 | null | class SPI(object):
# Constants scraped from <linux/spi/spidev.h>
_SPI_CPHA = 0x1
_SPI_CPOL = 0x2
_SPI_LSB_FIRST = 0x8
_SPI_IOC_WR_MODE = 0x40016b01
_SPI_IOC_RD_MODE = 0x80016b01
_SPI_IOC_WR_MAX_SPEED_HZ = 0x40046b04
_SPI_IOC_RD_MAX_SPEED_HZ = 0x80046b04
_SPI_IOC_WR_BITS_PER_WORD = 0x40016b03
_SPI_IOC_RD_BITS_PER_WORD = 0x80016b03
_SPI_IOC_MESSAGE_1 = 0x40206b00
def __init__(self, devpath, mode, max_speed, bit_order="msb", bits_per_word=8, extra_flags=0):
"""Instantiate a SPI object and open the spidev device at the specified
path with the specified SPI mode, max speed in hertz, and the defaults
of "msb" bit order and 8 bits per word.
Args:
devpath (str): spidev device path.
mode (int): SPI mode, can be 0, 1, 2, 3.
max_speed (int, float): maximum speed in Hertz.
bit_order (str): bit order, can be "msb" or "lsb".
bits_per_word (int): bits per word.
extra_flags (int): extra spidev flags to be bitwise-ORed with the SPI mode.
Returns:
SPI: SPI object.
Raises:
SPIError: if an I/O or OS error occurs.
TypeError: if `devpath`, `mode`, `max_speed`, `bit_order`, `bits_per_word`, or `extra_flags` types are invalid.
ValueError: if `mode`, `bit_order`, `bits_per_word`, or `extra_flags` values are invalid.
"""
self._fd = None
self._devpath = None
self._open(devpath, mode, max_speed, bit_order, bits_per_word, extra_flags)
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, t, value, traceback):
self.close()
def _open(self, devpath, mode, max_speed, bit_order, bits_per_word, extra_flags):
if not isinstance(devpath, str):
raise TypeError("Invalid devpath type, should be string.")
elif not isinstance(mode, int):
raise TypeError("Invalid mode type, should be integer.")
elif not isinstance(max_speed, (int, float)):
raise TypeError("Invalid max_speed type, should be integer or float.")
elif not isinstance(bit_order, str):
raise TypeError("Invalid bit_order type, should be string.")
elif not isinstance(bits_per_word, int):
raise TypeError("Invalid bits_per_word type, should be integer.")
elif not isinstance(extra_flags, int):
raise TypeError("Invalid extra_flags type, should be integer.")
if mode not in [0, 1, 2, 3]:
raise ValueError("Invalid mode, can be 0, 1, 2, 3.")
elif bit_order.lower() not in ["msb", "lsb"]:
raise ValueError("Invalid bit_order, can be \"msb\" or \"lsb\".")
elif bits_per_word < 0 or bits_per_word > 255:
raise ValueError("Invalid bits_per_word, must be 0-255.")
elif extra_flags < 0 or extra_flags > 255:
raise ValueError("Invalid extra_flags, must be 0-255.")
# Open spidev
try:
self._fd = os.open(devpath, os.O_RDWR)
except OSError as e:
raise SPIError(e.errno, "Opening SPI device: " + e.strerror)
self._devpath = devpath
bit_order = bit_order.lower()
# Set mode, bit order, extra flags
buf = array.array("B", [mode | (SPI._SPI_LSB_FIRST if bit_order == "lsb" else 0) | extra_flags])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_MODE, buf, False)
except OSError as e:
raise SPIError(e.errno, "Setting SPI mode: " + e.strerror)
# Set max speed
buf = array.array("I", [int(max_speed)])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_MAX_SPEED_HZ, buf, False)
except OSError as e:
raise SPIError(e.errno, "Setting SPI max speed: " + e.strerror)
# Set bits per word
buf = array.array("B", [bits_per_word])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_BITS_PER_WORD, buf, False)
except OSError as e:
raise SPIError(e.errno, "Setting SPI bits per word: " + e.strerror)
# Methods
def close(self):
    """Close the spidev SPI device.

    Safe to call repeatedly; a second call is a no-op.

    Raises:
        SPIError: if an I/O or OS error occurs.
    """
    fd = self._fd
    if fd is None:
        return
    try:
        os.close(fd)
    except OSError as e:
        raise SPIError(e.errno, "Closing SPI device: " + e.strerror)
    self._fd = None
# Immutable properties
@property
def fd(self):
    """File descriptor of the underlying spidev device (read-only).

    :type: int
    """
    return self._fd

@property
def devpath(self):
    """Device path of the underlying spidev device (read-only).

    :type: str
    """
    return self._devpath
# Mutable properties
def _get_mode(self):
buf = array.array('B', [0])
# Get mode
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_RD_MODE, buf, True)
except OSError as e:
raise SPIError(e.errno, "Getting SPI mode: " + e.strerror)
return buf[0] & 0x3
def _set_mode(self, mode):
if not isinstance(mode, int):
raise TypeError("Invalid mode type, should be integer.")
if mode not in [0, 1, 2, 3]:
raise ValueError("Invalid mode, can be 0, 1, 2, 3.")
# Read-modify-write mode, because the mode contains bits for other settings
# Get mode
buf = array.array('B', [0])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_RD_MODE, buf, True)
except OSError as e:
raise SPIError(e.errno, "Getting SPI mode: " + e.strerror)
buf[0] = (buf[0] & ~(SPI._SPI_CPOL | SPI._SPI_CPHA)) | mode
# Set mode
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_MODE, buf, False)
except OSError as e:
raise SPIError(e.errno, "Setting SPI mode: " + e.strerror)
mode = property(_get_mode, _set_mode)
"""Get or set the SPI mode. Can be 0, 1, 2, 3.
Raises:
SPIError: if an I/O or OS error occurs.
TypeError: if `mode` type is not int.
ValueError: if `mode` value is invalid.
:type: int
"""
def _get_max_speed(self):
# Get max speed
buf = array.array('I', [0])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_RD_MAX_SPEED_HZ, buf, True)
except OSError as e:
raise SPIError(e.errno, "Getting SPI max speed: " + e.strerror)
return buf[0]
def _set_max_speed(self, max_speed):
if not isinstance(max_speed, (int, float)):
raise TypeError("Invalid max_speed type, should be integer or float.")
# Set max speed
buf = array.array('I', [int(max_speed)])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_MAX_SPEED_HZ, buf, False)
except OSError as e:
raise SPIError(e.errno, "Setting SPI max speed: " + e.strerror)
max_speed = property(_get_max_speed, _set_max_speed)
"""Get or set the maximum speed in Hertz.
Raises:
SPIError: if an I/O or OS error occurs.
TypeError: if `max_speed` type is not int or float.
:type: int, float
"""
def _get_bit_order(self):
# Get mode
buf = array.array('B', [0])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_RD_MODE, buf, True)
except OSError as e:
raise SPIError(e.errno, "Getting SPI mode: " + e.strerror)
if (buf[0] & SPI._SPI_LSB_FIRST) > 0:
return "lsb"
return "msb"
def _set_bit_order(self, bit_order):
if not isinstance(bit_order, str):
raise TypeError("Invalid bit_order type, should be string.")
elif bit_order.lower() not in ["msb", "lsb"]:
raise ValueError("Invalid bit_order, can be \"msb\" or \"lsb\".")
# Read-modify-write mode, because the mode contains bits for other settings
# Get mode
buf = array.array('B', [0])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_RD_MODE, buf, True)
except OSError as e:
raise SPIError(e.errno, "Getting SPI mode: " + e.strerror)
bit_order = bit_order.lower()
buf[0] = (buf[0] & ~SPI._SPI_LSB_FIRST) | (SPI._SPI_LSB_FIRST if bit_order == "lsb" else 0)
# Set mode
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_MODE, buf, False)
except OSError as e:
raise SPIError(e.errno, "Setting SPI mode: " + e.strerror)
bit_order = property(_get_bit_order, _set_bit_order)
"""Get or set the SPI bit order. Can be "msb" or "lsb".
Raises:
SPIError: if an I/O or OS error occurs.
TypeError: if `bit_order` type is not str.
ValueError: if `bit_order` value is invalid.
:type: str
"""
def _get_bits_per_word(self):
# Get bits per word
buf = array.array('B', [0])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_RD_BITS_PER_WORD, buf, True)
except OSError as e:
raise SPIError(e.errno, "Getting SPI bits per word: " + e.strerror)
return buf[0]
def _set_bits_per_word(self, bits_per_word):
if not isinstance(bits_per_word, int):
raise TypeError("Invalid bits_per_word type, should be integer.")
if bits_per_word < 0 or bits_per_word > 255:
raise ValueError("Invalid bits_per_word, must be 0-255.")
# Set bits per word
buf = array.array('B', [bits_per_word])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_BITS_PER_WORD, buf, False)
except OSError as e:
raise SPIError(e.errno, "Setting SPI bits per word: " + e.strerror)
bits_per_word = property(_get_bits_per_word, _set_bits_per_word)
"""Get or set the SPI bits per word.
Raises:
SPIError: if an I/O or OS error occurs.
TypeError: if `bits_per_word` type is not int.
ValueError: if `bits_per_word` value is invalid.
:type: int
"""
def _get_extra_flags(self):
# Get mode
buf = array.array('B', [0])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_RD_MODE, buf, True)
except OSError as e:
raise SPIError(e.errno, "Getting SPI mode: " + e.strerror)
return buf[0] & ~(SPI._SPI_LSB_FIRST | SPI._SPI_CPHA | SPI._SPI_CPOL)
def _set_extra_flags(self, extra_flags):
if not isinstance(extra_flags, int):
raise TypeError("Invalid extra_flags type, should be integer.")
if extra_flags < 0 or extra_flags > 255:
raise ValueError("Invalid extra_flags, must be 0-255.")
# Read-modify-write mode, because the mode contains bits for other settings
# Get mode
buf = array.array('B', [0])
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_RD_MODE, buf, True)
except OSError as e:
raise SPIError(e.errno, "Getting SPI mode: " + e.strerror)
buf[0] = (buf[0] & (SPI._SPI_LSB_FIRST | SPI._SPI_CPHA | SPI._SPI_CPOL)) | extra_flags
# Set mode
try:
fcntl.ioctl(self._fd, SPI._SPI_IOC_WR_MODE, buf, False)
except OSError as e:
raise SPIError(e.errno, "Setting SPI mode: " + e.strerror)
extra_flags = property(_get_extra_flags, _set_extra_flags)
"""Get or set the spidev extra flags. Extra flags are bitwise-ORed with the SPI mode.
Raises:
SPIError: if an I/O or OS error occurs.
TypeError: if `extra_flags` type is not int.
ValueError: if `extra_flags` value is invalid.
:type: int
"""
# String representation
def __str__(self):
return "SPI (device=%s, fd=%d, mode=%s, max_speed=%d, bit_order=%s, bits_per_word=%d, extra_flags=0x%02x)" % (self.devpath, self.fd, self.mode, self.max_speed, self.bit_order, self.bits_per_word, self.extra_flags)
|
piglei/uwsgi-sloth | uwsgi_sloth/structures.py | ValuesAggregation.merge_with | python | def merge_with(self, other):
result = ValuesAggregation()
result.total = self.total + other.total
result.count = self.count + other.count
result.min = min(self.min, other.min)
result.max = max(self.max, other.max)
return result | Merge this ``ValuesAggregation`` with another one | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/structures.py#L35-L42 | null | class ValuesAggregation(object):
"""For response time analyze"""
def __init__(self, values=()):
    """Initialize an empty aggregation, optionally folding in `values`.

    The original default was the mutable literal ``[]``; it was never
    mutated here, but an immutable empty tuple is the safe idiom and is
    fully backward compatible (the default is only iterated).
    """
    self.min = None    # smallest value seen, None until the first add
    self.max = None    # largest value seen, None until the first add
    self.total = 0
    self.count = 0
    # Fold in any initial values
    for value in values:
        self.add_value(value)
def add_value(self, value):
    """Fold one value into the running min/max/total/count."""
    self.count += 1
    self.total += value
    # None means "no values seen yet", so the first value sets both bounds.
    if self.max is None or self.max < value:
        self.max = value
    if self.min is None or self.min > value:
        self.min = value
def add_values(self, values):
    """Fold every value of an iterable into the aggregation."""
    for item in values:
        self.add_value(item)
@property
def avg(self):
    """Arithmetic mean of all values seen so far; 0 when none were added."""
    count = self.count
    if count:
        return self.total / float(count)
    return 0
def get_result(self):
    """Return a summary dict with the 'min', 'max' and 'avg' keys."""
    return dict(min=self.min, max=self.max, avg=self.avg)
|
piglei/uwsgi-sloth | uwsgi_sloth/utils.py | parse_url_rules | python | def parse_url_rules(urls_fp):
url_rules = []
for line in urls_fp:
re_url = line.strip()
if re_url:
url_rules.append({'str': re_url, 're': re.compile(re_url)})
return url_rules | URL rules from given fp | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/utils.py#L5-L12 | null | # -*- coding: utf-8 -*-
import os
import re
def makedir_if_none_exists(d):
    """Create directory `d` (including missing parents) if it does not exist."""
    if not os.path.exists(d):
        os.makedirs(d)

def total_seconds(td):
    """Return a timedelta's total seconds as a float.

    Backport of ``timedelta.total_seconds()`` for old Python versions; works
    with any object exposing ``days``, ``seconds`` and ``microseconds``.
    """
    total_usecs = (td.seconds + td.days * 24 * 3600) * 1e6 + td.microseconds
    return total_usecs / 1e6
def force_bytes(s, encoding='utf-8', errors='strict'):
"""A function turns "s" into bytes object, similar to django.utils.encoding.force_bytes
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s.encode(encoding, errors)
def force_text(s, encoding='utf-8', errors='strict'):
"""A function turns "s" into text type, similar to django.utils.encoding.force_text
"""
if issubclass(type(s), str):
return s
try:
if isinstance(s, bytes):
s = str(s, encoding, errors)
else:
s = str(s)
except UnicodeDecodeError as e:
raise DjangoUnicodeDecodeError(s, *e.args)
return s |
piglei/uwsgi-sloth | uwsgi_sloth/utils.py | force_bytes | python | def force_bytes(s, encoding='utf-8', errors='strict'):
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s.encode(encoding, errors) | A function turns "s" into bytes object, similar to django.utils.encoding.force_bytes | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/utils.py#L25-L35 | null | # -*- coding: utf-8 -*-
import os
import re
def parse_url_rules(urls_fp):
    """Read URL regex rules from `urls_fp` (a file object or iterable of lines).

    Lines that are blank after stripping are skipped. Each rule is returned
    as a dict holding the raw pattern under 'str' and the compiled regular
    expression under 're'.
    """
    rules = []
    for raw_line in urls_fp:
        pattern = raw_line.strip()
        if not pattern:
            continue
        rules.append({'str': pattern, 're': re.compile(pattern)})
    return rules
def makedir_if_none_exists(d):
if not os.path.exists(d):
os.makedirs(d)
def total_seconds(td):
"""Return timedelta's total seconds"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
def force_text(s, encoding='utf-8', errors='strict'):
    """Turn `s` into text (str), similar to django.utils.encoding.force_text.

    Bytes are decoded with `encoding`/`errors`; str passes through unchanged;
    anything else goes through ``str()``.
    """
    if issubclass(type(s), str):
        return s
    try:
        if isinstance(s, bytes):
            return str(s, encoding, errors)
        return str(s)
    except UnicodeDecodeError as e:
        # NOTE(review): DjangoUnicodeDecodeError is not defined anywhere in
        # this module, so hitting this branch raises NameError instead —
        # verify against the Django helper this was copied from.
        raise DjangoUnicodeDecodeError(s, *e.args)
piglei/uwsgi-sloth | uwsgi_sloth/utils.py | force_text | python | def force_text(s, encoding='utf-8', errors='strict'):
if issubclass(type(s), str):
return s
try:
if isinstance(s, bytes):
s = str(s, encoding, errors)
else:
s = str(s)
except UnicodeDecodeError as e:
raise DjangoUnicodeDecodeError(s, *e.args)
return s | A function turns "s" into text type, similar to django.utils.encoding.force_text | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/utils.py#L38-L50 | null | # -*- coding: utf-8 -*-
import os
import re
def parse_url_rules(urls_fp):
"""URL rules from given fp"""
url_rules = []
for line in urls_fp:
re_url = line.strip()
if re_url:
url_rules.append({'str': re_url, 're': re.compile(re_url)})
return url_rules
def makedir_if_none_exists(d):
if not os.path.exists(d):
os.makedirs(d)
def total_seconds(td):
"""Return timedelta's total seconds"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
def force_bytes(s, encoding='utf-8', errors='strict'):
    """Turn `s` into bytes, similar to django.utils.encoding.force_bytes.

    Existing bytes are returned as-is for the common utf-8 case and
    transcoded (assuming utf-8 input) for any other target encoding;
    text is simply encoded.
    """
    if isinstance(s, bytes):
        # Fast path: already bytes in the requested encoding.
        if encoding == 'utf-8':
            return s
        return s.decode('utf-8', errors).encode(encoding, errors)
    return s.encode(encoding, errors)
|
piglei/uwsgi-sloth | uwsgi_sloth/models.py | merge_urls_data_to | python | def merge_urls_data_to(to, food={}):
if not to:
to.update(food)
for url, data in food.items():
if url not in to:
to[url] = data
else:
to[url] = to[url].merge_with(data) | Merge urls data | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/models.py#L53-L62 | null | # -*- coding: utf-8 -*-
"""Data models functions"""
import os
import logging
import pickle
logger = logging.getLogger(__name__)
class SavePoint(object):
    """Pickle-backed save point tracking the last processed datetime."""

    default_file_name = 'savepoint.pickle'

    def __init__(self, db_dir):
        # Load existing state from <db_dir>/savepoint.pickle, if present.
        self.db_file_path = os.path.join(db_dir, self.default_file_name)
        self.data = {}
        if os.path.exists(self.db_file_path):
            with open(self.db_file_path, 'rb') as fh:
                self.data = pickle.load(fh)

    def set_last_datetime(self, datetime):
        self.data['last_datetime'] = datetime

    def get_last_datetime(self):
        """Return the stored datetime, or None if nothing was saved yet."""
        return self.data.get('last_datetime')

    def save(self):
        """Persist the save point back to disk."""
        logger.info('SavePoint value change to %s' % self.get_last_datetime())
        with open(self.db_file_path, 'wb') as fh:
            pickle.dump(self.data, fh)
class RequestsData(object):
    """Pickle-backed store for one day's analyzed request data."""

    def __init__(self, date, db_dir):
        # Each day's data lives in its own <db_dir>/<date>.pickle file.
        self.date = date
        self.db_file_path = os.path.join(db_dir, '%s.pickle' % date)
        self.data = {}
        if os.path.exists(self.db_file_path):
            with open(self.db_file_path, 'rb') as fh:
                self.data = pickle.load(fh)

    def save(self):
        """Persist the data dict to this date's pickle file."""
        with open(self.db_file_path, 'wb') as fh:
            pickle.dump(self.data, fh)
# Utils for requests data
def merge_requests_data_to(to, food={}):
"""Merge a small analyzed result to a big one, this function will modify the
original ``to``"""
if not to:
to.update(food)
to['requests_counter']['normal'] += food['requests_counter']['normal']
to['requests_counter']['slow'] += food['requests_counter']['slow']
to['total_slow_duration'] += food['total_slow_duration']
for group_name, urls in food['data_details'].items():
if group_name not in to['data_details']:
to['data_details'][group_name] = urls
else:
to_urls = to['data_details'][group_name]
to_urls['duration_agr_data'] = to_urls['duration_agr_data'].merge_with(
urls['duration_agr_data'])
# Merge urls data
merge_urls_data_to(to_urls['urls'], urls['urls'])
|
def merge_requests_data_to(to, food=None):
    """Merge a small analyzed result *food* into a big one *to*.

    This function modifies the original ``to`` in place: request counters
    and total slow duration are summed, and per-group url aggregations
    are merged via :func:`merge_urls_data_to`.

    :param to: accumulated requests data dict, modified in place.
    :param food: freshly analyzed data of the same structure
        (``requests_counter``, ``total_slow_duration``, ``data_details``).
    """
    # NOTE: ``food`` used to be a mutable default argument (``{}``).
    if not food:
        return
    if not to:
        # Target is empty: adopt the new data wholesale.  Return here —
        # without it the additions below would run on the very same
        # nested objects and double-count everything.
        to.update(food)
        return

    to['requests_counter']['normal'] += food['requests_counter']['normal']
    to['requests_counter']['slow'] += food['requests_counter']['slow']
    to['total_slow_duration'] += food['total_slow_duration']

    for group_name, urls in food['data_details'].items():
        if group_name not in to['data_details']:
            to['data_details'][group_name] = urls
        else:
            to_urls = to['data_details'][group_name]
            to_urls['duration_agr_data'] = to_urls['duration_agr_data'].merge_with(
                urls['duration_agr_data'])
            # Merge per-url aggregations
            merge_urls_data_to(to_urls['urls'], urls['urls'])
original ``to`` | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/models.py#L65-L84 | [
"def merge_urls_data_to(to, food={}):\n \"\"\"Merge urls data\"\"\"\n if not to:\n to.update(food)\n\n for url, data in food.items():\n if url not in to:\n to[url] = data\n else:\n to[url] = to[url].merge_with(data)\n"
] | # -*- coding: utf-8 -*-
"""Data models functions"""
import os
import logging
import pickle
logger = logging.getLogger(__name__)
class SavePoint(object):
"""Model: SavePoint"""
default_file_name = 'savepoint.pickle'
def __init__(self, db_dir):
self.db_file_path = os.path.join(db_dir, self.default_file_name)
if os.path.exists(self.db_file_path):
with open(self.db_file_path, 'rb') as fp:
self.data = pickle.load(fp)
else:
self.data = {}
def set_last_datetime(self, datetime):
self.data['last_datetime'] = datetime
def get_last_datetime(self):
return self.data.get('last_datetime')
def save(self):
logger.info('SavePoint value change to %s' % self.get_last_datetime())
with open(self.db_file_path, 'wb') as fp:
pickle.dump(self.data, fp)
class RequestsData(object):
"""Model: RequestsData"""
def __init__(self, date, db_dir):
self.date = date
self.db_file_path = os.path.join(db_dir, '%s.pickle' % date)
if os.path.exists(self.db_file_path):
with open(self.db_file_path, 'rb') as fp:
self.data = pickle.load(fp)
else:
self.data = {}
def save(self):
with open(self.db_file_path, 'wb') as fp:
pickle.dump(self.data, fp)
# Utils for requests data
def merge_urls_data_to(to, food={}):
"""Merge urls data"""
if not to:
to.update(food)
for url, data in food.items():
if url not in to:
to[url] = data
else:
to[url] = to[url].merge_with(data)
|
def format_data(raw_data, limit_per_url_group=LIMIT_PER_URL_GROUP, limit_url_groups=LIMIT_URL_GROUPS):
    """Format data from LogAnalyzer for render purpose.

    :param raw_data: result dict produced by ``LogAnalyzer.get_data``.
    :param limit_per_url_group: max number of urls kept per url group.
    :param limit_url_groups: max number of url groups kept.
    :returns: a deep copy of *raw_data* where ``data_details`` becomes a
        sorted, truncated list of ``(group, detail)`` pairs and a
        ``slow_rate`` percentage string is added.
    """
    data = copy.deepcopy(raw_data)

    # Inside each group, keep only the ``limit_per_url_group`` slowest urls.
    # (Idiom cleanup: removed the redundant list()/iter() wrappers left
    # over from a 2to3 conversion.)
    for detail in data['data_details'].values():
        detail['urls'] = sorted(detail['urls'].items(),
                                key=lambda item: item[1].total,
                                reverse=True)[:limit_per_url_group]

    # Keep only the ``limit_url_groups`` groups with the largest total time.
    data_details = sorted(data['data_details'].items(),
                          key=lambda item: item[1]['duration_agr_data'].total,
                          reverse=True)[:limit_url_groups]

    normal_count = data['requests_counter']['normal']
    if normal_count:
        slow_rate = format(data['requests_counter']['slow'] / float(normal_count), '.2%')
    else:
        # No requests at all — there is no meaningful rate to show.
        slow_rate = '-'
    data.update({
        'slow_rate': slow_rate,
        'data_details': data_details,
    })
    return data
"""Analyzer for uwsgi log"""
import re
import copy
import datetime
from uwsgi_sloth.utils import total_seconds
from uwsgi_sloth.structures import ValuesAggregation
from uwsgi_sloth.settings import FILTER_METHODS, FILTER_STATUS, LIMIT_URL_GROUPS, \
LIMIT_PER_URL_GROUP, ROOT, REALTIME_UPDATE_INTERVAL
class UWSGILogParser(object):
"""Parser for uwsgi log file, support only default log format:
log format: "[pid: 27011|app: 0|req: 16858/537445] 58.251.73.227 () {40 vars in 1030 bytes} \
[Tue Apr 29 00:13:10 2014] POST /trips/2387949771/add_waypoint/ => \
generated 1053 bytes in 2767 msecs (HTTP/1.1 200) 4 headers in 282 bytes \
(1 switches on core 0)"
Returns:
~~~~~~~~
An dict of parsed log result.
"""
DATETIME_FORMAT = '%a %b %d %H:%M:%S %Y'
RE_LOG_LINE = re.compile(r'''}\ \[(?P<datetime>.*?)\]\ (?P<request_method>POST|GET|DELETE|PUT|PATCH)\s
(?P<request_uri>[^ ]*?)\ =>\ generated\ (?:.*?)\ in\ (?P<resp_msecs>\d+)\ msecs\s
\(HTTP/[\d.]+\ (?P<resp_status>\d+)\)''', re.VERBOSE)
def __init__(self):
pass
def parse(self, line):
matched = self.RE_LOG_LINE.search(line)
if matched:
matched_dict = matched.groupdict()
method = matched_dict['request_method']
status = matched_dict['resp_status']
if not method in FILTER_METHODS or status not in FILTER_STATUS:
return
url = matched_dict['request_uri'].replace('//', '/')
url_path = url.split('?')[0]
resp_time = int(matched_dict['resp_msecs'])
request_datetime = datetime.datetime.strptime(matched_dict['datetime'],
self.DATETIME_FORMAT)
return {
'method': method,
'url': url,
'url_path': url_path,
'resp_time': resp_time,
'status': status,
'request_datetime': request_datetime
}
return
class URLClassifier(object):
"""A simple url classifier, current rules:
- replacing sequential digits part by '(\d+)'
"""
RE_SIMPLIFY_URL = re.compile(r'(?<=/)\d+(/|$)')
def __init__(self, user_defined_rules=[]):
self.user_defined_rules = user_defined_rules
def classify(self, url_path):
"""Classify an url"""
for dict_api_url in self.user_defined_rules:
api_url = dict_api_url['str']
re_api_url = dict_api_url['re']
if re_api_url.match(url_path[1:]):
return api_url
return self.RE_SIMPLIFY_URL.sub(r'(\\d+)/', url_path)
class LogAnalyzer(object):
"""Log analyzer"""
def __init__(self, url_classifier=None, min_msecs=200, start_from_datetime=None):
self.data = {}
self.requests_counter = {'normal': 0, 'slow': 0}
self.total_slow_duration = 0
self.min_msecs = min_msecs
self.start_from_datetime = start_from_datetime
self.datetime_range = [None, None]
self.url_classifier = url_classifier or URLClassifier()
self.log_parser = UWSGILogParser()
def analyze_line(self, line):
line = line.strip()
result = self.log_parser.parse(line)
# Ignore invalid log
if not result:
return
if self.start_from_datetime and result['request_datetime'] <= self.start_from_datetime:
return
self.requests_counter['normal'] += 1
if not self.datetime_range[0]:
self.datetime_range[0] = result['request_datetime']
self.datetime_range[1] = result['request_datetime']
if result['resp_time'] < self.min_msecs:
return
resp_time = result['resp_time']
# Use url_classifier to classify url
matched_url_rule = self.url_classifier.classify(result['url_path'])
big_d = self.data.setdefault((result['method'], matched_url_rule), {
'urls': {},
'duration_agr_data': ValuesAggregation(),
})
big_d['duration_agr_data'].add_value(resp_time)
big_d['urls'].setdefault(result['url'], ValuesAggregation()).add_value(resp_time)
self.requests_counter['slow'] += 1
self.total_slow_duration += resp_time
def get_data(self):
return {
'requests_counter': self.requests_counter,
'total_slow_duration': self.total_slow_duration,
'datetime_range': self.datetime_range,
'data_details': self.data
}
class RealtimeLogAnalyzer(object):
"""Log analyzer for realtime support"""
default_data = {
'requests_counter': {'normal': 0, 'slow': 0},
'total_slow_duration': 0,
'data_details': {}
}
def __init__(self, url_classifier=None, min_msecs=200, start_from_datetime=None):
self.data = {}
self.min_msecs = min_msecs
self.start_from_datetime = start_from_datetime
self.last_analyzed_datetime = None
self.url_classifier = url_classifier or URLClassifier()
self.log_parser = UWSGILogParser()
def analyze_line(self, line):
line = line.strip()
result = self.log_parser.parse(line)
# Ignore invalid log
if not result:
return
if self.start_from_datetime and result['request_datetime'] <= self.start_from_datetime:
return
request_datetime = result['request_datetime']
self.last_analyzed_datetime = request_datetime
groups = self.get_result_group_names(request_datetime)
if not groups:
return
for group in groups:
if group not in self.data:
self.data[group] = copy.deepcopy(self.default_data)
for group in groups:
self.data[group]['requests_counter']['normal'] += 1
if result['resp_time'] < self.min_msecs:
return
resp_time = result['resp_time']
# Use url_classifier to classify url
matched_url_rule = self.url_classifier.classify(result['url_path'])
for group in groups:
big_d = self.data[group]['data_details'].setdefault((result['method'], matched_url_rule), {
'urls': {},
'duration_agr_data': ValuesAggregation(),
})
big_d['duration_agr_data'].add_value(resp_time)
big_d['urls'].setdefault(result['url'], ValuesAggregation()).add_value(resp_time)
self.data[group]['requests_counter']['slow'] += 1
self.data[group]['total_slow_duration'] += resp_time
def get_result_group_names(self, request_datetime):
"""Only today/yesterday/last interval are valid datetime"""
request_date = request_datetime.date()
today = datetime.date.today()
yesterday = datetime.date.today() - datetime.timedelta(days=1)
result = []
if total_seconds(datetime.datetime.now() - request_datetime) < REALTIME_UPDATE_INTERVAL:
result.append('last_interval')
if request_date == today:
result.append(today.isoformat())
elif request_date == yesterday:
result.append(yesterday.isoformat())
return result
def get_data(self, key=None):
if key:
return self.data.get(key, self.default_data)
return self.data
def clean_data_by_key(self, key):
try:
del self.data[key]
except KeyError:
pass
|
piglei/uwsgi-sloth | uwsgi_sloth/analyzer.py | URLClassifier.classify | python | def classify(self, url_path):
for dict_api_url in self.user_defined_rules:
api_url = dict_api_url['str']
re_api_url = dict_api_url['re']
if re_api_url.match(url_path[1:]):
return api_url
return self.RE_SIMPLIFY_URL.sub(r'(\\d+)/', url_path) | Classify an url | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/analyzer.py#L69-L77 | null | class URLClassifier(object):
"""A simple url classifier, current rules:
- replacing sequential digits part by '(\d+)'
"""
RE_SIMPLIFY_URL = re.compile(r'(?<=/)\d+(/|$)')
def __init__(self, user_defined_rules=[]):
self.user_defined_rules = user_defined_rules
|
def get_result_group_names(self, request_datetime):
    """Return the data-group keys a request belongs to.

    Only today/yesterday/last interval are valid datetime groups;
    anything older yields an empty list.

    :param request_datetime: ``datetime`` the request was logged at.
    :returns: list of group names (possibly empty).
    """
    request_date = request_datetime.date()
    # Call ``today()`` once so the two date comparisons cannot straddle
    # midnight and disagree with each other (the original called it twice).
    today = datetime.date.today()
    yesterday = today - datetime.timedelta(days=1)
    result = []
    # Requests newer than one update interval also feed the realtime page.
    if total_seconds(datetime.datetime.now() - request_datetime) < REALTIME_UPDATE_INTERVAL:
        result.append('last_interval')
    if request_date == today:
        result.append(today.isoformat())
    elif request_date == yesterday:
        result.append(yesterday.isoformat())
    return result
"def total_seconds(td):\n \"\"\"Return timedelta's total seconds\"\"\"\n return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6\n"
] | class RealtimeLogAnalyzer(object):
"""Log analyzer for realtime support"""
default_data = {
'requests_counter': {'normal': 0, 'slow': 0},
'total_slow_duration': 0,
'data_details': {}
}
def __init__(self, url_classifier=None, min_msecs=200, start_from_datetime=None):
self.data = {}
self.min_msecs = min_msecs
self.start_from_datetime = start_from_datetime
self.last_analyzed_datetime = None
self.url_classifier = url_classifier or URLClassifier()
self.log_parser = UWSGILogParser()
def analyze_line(self, line):
line = line.strip()
result = self.log_parser.parse(line)
# Ignore invalid log
if not result:
return
if self.start_from_datetime and result['request_datetime'] <= self.start_from_datetime:
return
request_datetime = result['request_datetime']
self.last_analyzed_datetime = request_datetime
groups = self.get_result_group_names(request_datetime)
if not groups:
return
for group in groups:
if group not in self.data:
self.data[group] = copy.deepcopy(self.default_data)
for group in groups:
self.data[group]['requests_counter']['normal'] += 1
if result['resp_time'] < self.min_msecs:
return
resp_time = result['resp_time']
# Use url_classifier to classify url
matched_url_rule = self.url_classifier.classify(result['url_path'])
for group in groups:
big_d = self.data[group]['data_details'].setdefault((result['method'], matched_url_rule), {
'urls': {},
'duration_agr_data': ValuesAggregation(),
})
big_d['duration_agr_data'].add_value(resp_time)
big_d['urls'].setdefault(result['url'], ValuesAggregation()).add_value(resp_time)
self.data[group]['requests_counter']['slow'] += 1
self.data[group]['total_slow_duration'] += resp_time
def get_data(self, key=None):
if key:
return self.data.get(key, self.default_data)
return self.data
def clean_data_by_key(self, key):
try:
del self.data[key]
except KeyError:
pass
|
def load_subcommand(subparsers):
    """Load this subcommand: register ``echo_conf`` on *subparsers*."""
    # Renamed local from ``parser_analyze`` — that name was a copy-paste
    # leftover from the analyze command and misleading here.
    parser_echo_conf = subparsers.add_parser('echo_conf', help='Echo sample configuration file')
    parser_echo_conf.set_defaults(func=echo_conf)
from __future__ import print_function
import pkg_resources
from uwsgi_sloth.utils import force_text
def echo_conf(args):
print(force_text(pkg_resources.resource_string('uwsgi_sloth', "sample.conf")))
|
def analyze_log(fp, configs, url_rules):
    """Analyze log file.

    Feeds every line of *fp* through a ``LogAnalyzer`` configured with the
    given url rules and slow-request threshold, and returns its data dict.
    """
    classifier = URLClassifier(url_rules)
    analyzer = LogAnalyzer(url_classifier=classifier,
                           min_msecs=configs.min_msecs)
    for raw_line in fp:
        analyzer.analyze_line(raw_line)
    return analyzer.get_data()
"def analyze_line(self, line):\n line = line.strip()\n result = self.log_parser.parse(line)\n # Ignore invalid log\n if not result:\n return\n if self.start_from_datetime and result['request_datetime'] <= self.start_from_datetime:\n return\n\n self.requests_counter['normal'] += 1\n\n if not self.datetime_range[0]:\n self.datetime_range[0] = result['request_datetime']\n self.datetime_range[1] = result['request_datetime']\n if result['resp_time'] < self.min_msecs:\n return\n\n resp_time = result['resp_time']\n\n # Use url_classifier to classify url\n matched_url_rule = self.url_classifier.classify(result['url_path'])\n\n big_d = self.data.setdefault((result['method'], matched_url_rule), {\n 'urls': {},\n 'duration_agr_data': ValuesAggregation(),\n })\n\n big_d['duration_agr_data'].add_value(resp_time)\n big_d['urls'].setdefault(result['url'], ValuesAggregation()).add_value(resp_time)\n\n self.requests_counter['slow'] += 1\n self.total_slow_duration += resp_time\n",
"def get_data(self):\n return {\n 'requests_counter': self.requests_counter,\n 'total_slow_duration': self.total_slow_duration,\n 'datetime_range': self.datetime_range,\n 'data_details': self.data\n }\n"
] | # -*- coding: utf-8 -*-
import sys
import time
import logging
import argparse
from uwsgi_sloth.settings import LIMIT_URL_GROUPS, LIMIT_PER_URL_GROUP
from uwsgi_sloth.analyzer import URLClassifier, LogAnalyzer, format_data
from uwsgi_sloth.template import render_template
from uwsgi_sloth.utils import parse_url_rules
logger = logging.getLogger('uwsgi_sloth.analyze')
def analyze(args):
# Get custom url rules
url_rules = []
if args.url_file:
url_rules = parse_url_rules(args.url_file)
logger.info('Analyzing log file "%s"...' % args.filepath.name)
start_time = time.time()
data = analyze_log(args.filepath, args, url_rules)
data = format_data(data, args.limit_per_url_group, args.limit_url_groups)
data.update({
'domain': args.domain,
'input_filename': args.filepath.name,
'min_duration': args.min_msecs,
})
# Pre-process data
html_data = render_template('report.html', data)
args.output.write(html_data)
args.output.close()
logger.info('Finished in %.2f seconds.' % (time.time() - start_time))
def load_subcommand(subparsers):
"""Load this subcommand
"""
parser_analyze = subparsers.add_parser('analyze', help='Analyze uwsgi log to get report')
parser_analyze.add_argument('-f', '--filepath', type=argparse.FileType('r'), dest='filepath',
help='Path of uwsgi log file', required=True)
parser_analyze.add_argument('--output', dest="output", type=argparse.FileType('w'), default=sys.stdout,
help='HTML report file path')
parser_analyze.add_argument('--min-msecs', dest="min_msecs", type=int, default=200,
help='Request serve time lower than this value will not be counted, default: 200')
parser_analyze.add_argument('--domain', dest="domain", type=str, required=False,
help='Make url in report become a hyper-link by settings a domain')
parser_analyze.add_argument('--url-file', dest="url_file", type=argparse.FileType('r'), required=False,
help='Customized url rules in regular expression')
parser_analyze.add_argument('--limit-url-groups', dest="limit_url_groups", type=int, required=False,
default=LIMIT_URL_GROUPS, help='Number of url groups considered, default: 200')
parser_analyze.add_argument('--limit-per-url-group', dest="limit_per_url_group", type=int,
required=False, default=LIMIT_PER_URL_GROUP,
help='Number of urls per group considered, default: 20')
parser_analyze.set_defaults(func=analyze)
|
def load_subcommand(subparsers):
    """Load this subcommand: register ``analyze`` and its options."""
    parser = subparsers.add_parser('analyze', help='Analyze uwsgi log to get report')
    parser.add_argument('-f', '--filepath', type=argparse.FileType('r'), dest='filepath',
                        help='Path of uwsgi log file', required=True)
    parser.add_argument('--output', dest="output", type=argparse.FileType('w'), default=sys.stdout,
                        help='HTML report file path')
    parser.add_argument('--min-msecs', dest="min_msecs", type=int, default=200,
                        help='Request serve time lower than this value will not be counted, default: 200')
    parser.add_argument('--domain', dest="domain", type=str, required=False,
                        help='Make url in report become a hyper-link by settings a domain')
    parser.add_argument('--url-file', dest="url_file", type=argparse.FileType('r'), required=False,
                        help='Customized url rules in regular expression')
    parser.add_argument('--limit-url-groups', dest="limit_url_groups", type=int, required=False,
                        default=LIMIT_URL_GROUPS, help='Number of url groups considered, default: 200')
    parser.add_argument('--limit-per-url-group', dest="limit_per_url_group", type=int,
                        required=False, default=LIMIT_PER_URL_GROUP,
                        help='Number of urls per group considered, default: 20')
    parser.set_defaults(func=analyze)
import sys
import time
import logging
import argparse
from uwsgi_sloth.settings import LIMIT_URL_GROUPS, LIMIT_PER_URL_GROUP
from uwsgi_sloth.analyzer import URLClassifier, LogAnalyzer, format_data
from uwsgi_sloth.template import render_template
from uwsgi_sloth.utils import parse_url_rules
logger = logging.getLogger('uwsgi_sloth.analyze')
def analyze_log(fp, configs, url_rules):
"""Analyze log file"""
url_classifier = URLClassifier(url_rules)
analyzer = LogAnalyzer(url_classifier=url_classifier, min_msecs=configs.min_msecs)
for line in fp:
analyzer.analyze_line(line)
return analyzer.get_data()
def analyze(args):
# Get custom url rules
url_rules = []
if args.url_file:
url_rules = parse_url_rules(args.url_file)
logger.info('Analyzing log file "%s"...' % args.filepath.name)
start_time = time.time()
data = analyze_log(args.filepath, args, url_rules)
data = format_data(data, args.limit_per_url_group, args.limit_url_groups)
data.update({
'domain': args.domain,
'input_filename': args.filepath.name,
'min_duration': args.min_msecs,
})
# Pre-process data
html_data = render_template('report.html', data)
args.output.write(html_data)
args.output.close()
logger.info('Finished in %.2f seconds.' % (time.time() - start_time))
|
def update_html_symlink(html_dir):
    """Maintain symlinks "today.html" and "yesterday.html" in *html_dir*.

    Each symlink points at the per-day report file ``day_<date>.html``;
    any existing link is replaced.

    :param html_dir: directory holding the rendered HTML reports.
    """
    # Compute ``today`` once so both dates stay consistent even when this
    # runs exactly at midnight (the original called today() twice).
    today = datetime.date.today()
    yesterday = today - datetime.timedelta(days=1)
    for from_date, alias_name in ((today, 'today.html'),
                                  (yesterday, 'yesterday.html')):
        from_date_file_path = os.path.join(html_dir, 'day_%s.html' % from_date)
        symlink_path = os.path.join(html_dir, alias_name)
        # Remove a stale link first: os.symlink refuses to overwrite.
        try:
            os.unlink(symlink_path)
        except OSError:
            pass
        os.symlink(from_date_file_path, symlink_path)
"""Start uwsgi-sloth workers"""
import os
import signal
import argparse
import datetime
from configobj import ConfigObj
from uwsgi_sloth.analyzer import format_data, RealtimeLogAnalyzer, URLClassifier
from uwsgi_sloth.tailer import Tailer, no_new_line
from uwsgi_sloth.template import render_template
from uwsgi_sloth.utils import makedir_if_none_exists, total_seconds, parse_url_rules
from uwsgi_sloth.models import merge_requests_data_to, RequestsData, SavePoint
from uwsgi_sloth.settings import REALTIME_UPDATE_INTERVAL, DEFAULT_MIN_MSECS
import logging
logger = logging.getLogger('uwsgi_sloth')
class HTMLRender(object):
"""helper for render HTML"""
def __init__(self, html_dir, domain=None):
self.html_dir = html_dir
self.domain = domain
def render_requests_data_to_html(self, data, file_name, context={}):
"""Render to HTML file"""
file_path = os.path.join(self.html_dir, file_name)
logger.info('Rendering HTML file %s...' % file_path)
data = format_data(data)
data.update(context)
data.update(domain=self.domain)
with open(file_path, 'w') as fp:
fp.write(render_template('realtime.html', data))
def start(args):
# Load config file
config = ConfigObj(infile=args.config.name)
data_dir = config['data_dir']
uwsgi_log_path = config['uwsgi_log_path']
min_msecs = int(config.get('min_msecs', DEFAULT_MIN_MSECS))
url_file = config.get('url_file')
# Load custom url rules
url_rules = []
if url_file:
with open(url_file, 'r') as fp:
url_rules = parse_url_rules(fp)
html_dir = os.path.join(data_dir, 'html')
db_dir = os.path.join(data_dir, 'data')
makedir_if_none_exists(html_dir)
makedir_if_none_exists(db_dir)
save_point = SavePoint(db_dir)
last_log_datetime = save_point.get_last_datetime() or \
(datetime.datetime.now() - datetime.timedelta(seconds=REALTIME_UPDATE_INTERVAL))
logger.info('Start from last savepoint, last_log_datetime: %s' % last_log_datetime)
last_update_datetime = None
url_classifier = URLClassifier(user_defined_rules=url_rules)
analyzer = RealtimeLogAnalyzer(url_classifier=url_classifier, min_msecs=min_msecs,
start_from_datetime=last_log_datetime)
file_tailer = Tailer(uwsgi_log_path)
html_render = HTMLRender(html_dir, domain=config.get('domain'))
# Listen INT/TERM signal
def gracefully_exit(*args):
logger.info('Sinal received, exit.')
file_tailer.stop_follow()
signal.signal(signal.SIGINT, gracefully_exit)
for line in file_tailer:
# Analyze line
if line != no_new_line:
analyzer.analyze_line(line)
now = datetime.datetime.now()
if not file_tailer.trailing:
continue
if last_update_datetime and \
total_seconds(now - last_update_datetime) < REALTIME_UPDATE_INTERVAL:
continue
# Render HTML file when:
# - file_tailer reaches end of file.
# - last_update_datetime if over one `interval` from now
# Render latest interval HTML file
html_render.render_requests_data_to_html(analyzer.get_data('last_interval'),
'latest_5mins.html', context={'datetime_range': 'Last 5 minutes'})
analyzer.clean_data_by_key('last_interval')
for date in list(analyzer.data.keys()):
day_requests_data = RequestsData(date, db_dir)
merge_requests_data_to(day_requests_data.data, analyzer.get_data(date))
# Render to HTML file
html_render.render_requests_data_to_html(day_requests_data.data,
'day_%s.html' % date, context={'datetime_range': date})
# Save data to pickle file
day_requests_data.save()
# Reset Everything
analyzer.clean_data_by_key(date)
update_html_symlink(html_dir)
last_update_datetime = now
if analyzer.last_analyzed_datetime:
save_point.set_last_datetime(analyzer.last_analyzed_datetime)
save_point.save()
def load_subcommand(subparsers):
"""Load this subcommand"""
parser_start = subparsers.add_parser('start', help='Start uwsgi-sloth process for realtime analyzing.')
parser_start.add_argument('-c', '--config', type=argparse.FileType('r'), dest='config',
help='uwsgi-sloth config file, use "uwsgi-sloth echo_conf" for a default one', required=True)
parser_start.set_defaults(func=start)
|
def load_subcommand(subparsers):
    """Load this subcommand: register ``start`` and its config option."""
    parser = subparsers.add_parser('start', help='Start uwsgi-sloth process for realtime analyzing.')
    parser.add_argument('-c', '--config', type=argparse.FileType('r'), dest='config',
                        help='uwsgi-sloth config file, use "uwsgi-sloth echo_conf" for a default one', required=True)
    parser.set_defaults(func=start)
"""Start uwsgi-sloth workers"""
import os
import signal
import argparse
import datetime
from configobj import ConfigObj
from uwsgi_sloth.analyzer import format_data, RealtimeLogAnalyzer, URLClassifier
from uwsgi_sloth.tailer import Tailer, no_new_line
from uwsgi_sloth.template import render_template
from uwsgi_sloth.utils import makedir_if_none_exists, total_seconds, parse_url_rules
from uwsgi_sloth.models import merge_requests_data_to, RequestsData, SavePoint
from uwsgi_sloth.settings import REALTIME_UPDATE_INTERVAL, DEFAULT_MIN_MSECS
import logging
logger = logging.getLogger('uwsgi_sloth')
class HTMLRender(object):
"""helper for render HTML"""
def __init__(self, html_dir, domain=None):
self.html_dir = html_dir
self.domain = domain
def render_requests_data_to_html(self, data, file_name, context={}):
"""Render to HTML file"""
file_path = os.path.join(self.html_dir, file_name)
logger.info('Rendering HTML file %s...' % file_path)
data = format_data(data)
data.update(context)
data.update(domain=self.domain)
with open(file_path, 'w') as fp:
fp.write(render_template('realtime.html', data))
def update_html_symlink(html_dir):
""""Maintail symlink: "today.html", "yesterday.html" """
today = datetime.date.today()
yesterday = datetime.date.today() - datetime.timedelta(days=1)
for from_date, alias_name in (
(today, 'today.html'), (yesterday, 'yesterday.html')):
from_date_file_path = os.path.join(html_dir, 'day_%s.html' % from_date)
symlink_path = os.path.join(html_dir, alias_name)
try:
os.unlink(symlink_path)
except OSError:
pass
os.symlink(from_date_file_path, symlink_path)
def start(args):
# Load config file
config = ConfigObj(infile=args.config.name)
data_dir = config['data_dir']
uwsgi_log_path = config['uwsgi_log_path']
min_msecs = int(config.get('min_msecs', DEFAULT_MIN_MSECS))
url_file = config.get('url_file')
# Load custom url rules
url_rules = []
if url_file:
with open(url_file, 'r') as fp:
url_rules = parse_url_rules(fp)
html_dir = os.path.join(data_dir, 'html')
db_dir = os.path.join(data_dir, 'data')
makedir_if_none_exists(html_dir)
makedir_if_none_exists(db_dir)
save_point = SavePoint(db_dir)
last_log_datetime = save_point.get_last_datetime() or \
(datetime.datetime.now() - datetime.timedelta(seconds=REALTIME_UPDATE_INTERVAL))
logger.info('Start from last savepoint, last_log_datetime: %s' % last_log_datetime)
last_update_datetime = None
url_classifier = URLClassifier(user_defined_rules=url_rules)
analyzer = RealtimeLogAnalyzer(url_classifier=url_classifier, min_msecs=min_msecs,
start_from_datetime=last_log_datetime)
file_tailer = Tailer(uwsgi_log_path)
html_render = HTMLRender(html_dir, domain=config.get('domain'))
# Listen INT/TERM signal
def gracefully_exit(*args):
logger.info('Sinal received, exit.')
file_tailer.stop_follow()
signal.signal(signal.SIGINT, gracefully_exit)
for line in file_tailer:
# Analyze line
if line != no_new_line:
analyzer.analyze_line(line)
now = datetime.datetime.now()
if not file_tailer.trailing:
continue
if last_update_datetime and \
total_seconds(now - last_update_datetime) < REALTIME_UPDATE_INTERVAL:
continue
# Render HTML file when:
# - file_tailer reaches end of file.
# - last_update_datetime if over one `interval` from now
# Render latest interval HTML file
html_render.render_requests_data_to_html(analyzer.get_data('last_interval'),
'latest_5mins.html', context={'datetime_range': 'Last 5 minutes'})
analyzer.clean_data_by_key('last_interval')
for date in list(analyzer.data.keys()):
day_requests_data = RequestsData(date, db_dir)
merge_requests_data_to(day_requests_data.data, analyzer.get_data(date))
# Render to HTML file
html_render.render_requests_data_to_html(day_requests_data.data,
'day_%s.html' % date, context={'datetime_range': date})
# Save data to pickle file
day_requests_data.save()
# Reset Everything
analyzer.clean_data_by_key(date)
update_html_symlink(html_dir)
last_update_datetime = now
if analyzer.last_analyzed_datetime:
save_point.set_last_datetime(analyzer.last_analyzed_datetime)
save_point.save()
|
def render_requests_data_to_html(self, data, file_name, context={}):
    """Render to HTML file.

    Formats *data*, merges in *context* and the configured domain, and
    writes the rendered realtime template to *file_name* in ``html_dir``.
    """
    target = os.path.join(self.html_dir, file_name)
    logger.info('Rendering HTML file %s...' % target)
    formatted = format_data(data)
    formatted.update(context)
    formatted.update(domain=self.domain)
    with open(target, 'w') as out:
        out.write(render_template('realtime.html', formatted))
"def format_data(raw_data, limit_per_url_group=LIMIT_PER_URL_GROUP, limit_url_groups=LIMIT_URL_GROUPS):\n \"\"\"Fomat data from LogAnalyzer for render purpose\"\"\"\n data = copy.deepcopy(raw_data)\n for k, v in list(data['data_details'].items()):\n # Only reserve first ``limit_per_url_group`` items\n v['urls'] = sorted(list(v['urls'].items()), key=lambda k_v: k_v[1].total,\n reverse=True)[:limit_per_url_group]\n\n data_details = sorted(iter(data['data_details'].items()),\n key=lambda k_v1: k_v1[1][\"duration_agr_data\"].total, \n reverse=True)[:limit_url_groups]\n\n if data['requests_counter']['normal']:\n slow_rate = format(data['requests_counter']['slow'] / \\\n float(data['requests_counter']['normal']), '.2%')\n else:\n slow_rate = '-'\n data.update({\n 'slow_rate': slow_rate,\n 'data_details': data_details,\n })\n return data\n",
"def render_template(template_name, context={}):\n template = env.get_template(template_name)\n context.update(\n SETTINGS=settings,\n now=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n version='.'.join(map(str, __VERSION__)))\n return template.render(**context)\n"
] | class HTMLRender(object):
"""helper for render HTML"""
def __init__(self, html_dir, domain=None):
# Directory into which rendered HTML files will be written.
self.html_dir = html_dir
# Optional site domain injected into rendered pages (None to omit).
self.domain = domain
|
piglei/uwsgi-sloth | uwsgi_sloth/tailer.py | Tailer.seek_line_forward | python | def seek_line_forward(self):
pos = start_pos = self.file.tell()
bytes_read, read_str = self.read(self.read_size)
start = 0
if bytes_read and read_str[0] in self.line_terminators:
# The first charachter is a line terminator, don't count this one
start += 1
while bytes_read > 0:
# Scan forwards, counting the newlines in this bufferfull
i = start
while i < bytes_read:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i += 1
pos += self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None | \
Searches forward from the current file position for a line terminator
and seeks to the character after it. | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/tailer.py#L53-L81 | [
"def seek(self, pos, whence=0):\n self.file.seek(pos, whence)\n",
"def read(self, read_size=None):\n if read_size:\n read_str = self.file.read(read_size)\n else:\n read_str = self.file.read()\n\n return len(read_str), read_str\n"
] | class Tailer(object):
"""Implements tailing and heading functionality like GNU tail and head
commands.
"""
line_terminators = ('\r\n', '\n', '\r')
DEFAULT_BLOCK_SIZE = 4096
MAX_UNCHANGED_STATS = 5
def __init__(self, file, read_size=DEFAULT_BLOCK_SIZE, end=False):
# Accept either an open file object or a path string; paths are opened
# in text mode.  NOTE(review): when a path is given this object owns
# the handle, so close() should eventually be called -- TODO confirm.
if isinstance(file, str):
file = open(file, 'r')
# Flag polled by follow(); stop_follow() sets it to end iteration.
self.should_stop_follow = False
# Buffer size used by the seek/scan helpers (seek_line & friends).
self.read_size = read_size
self.file = file
# Remember where we started, for callers that want to rewind.
self.start_pos = self.file.tell()
# Optionally begin tailing from the end of the file (like `tail -f`).
if end:
self.seek_end()
def splitlines(self, data):
    """Split *data* on any recognized line terminator (CRLF, LF, CR)."""
    # Build an alternation pattern from the terminator tuple; CRLF is
    # listed first so it is consumed as a single separator.
    terminator_re = '|'.join(self.line_terminators)
    return re.split(terminator_re, data)
def seek_end(self):
    """Position the read cursor at the very end of the file."""
    # Equivalent to self.seek(0, 2): whence=2 means relative to EOF.
    self.file.seek(0, 2)
def seek(self, pos, whence=0):
# Thin wrapper over file.seek (whence: 0=start, 1=current, 2=end).
self.file.seek(pos, whence)
def read(self, read_size=None):
    """Read up to *read_size* characters (or the remainder of the file
    when read_size is falsy) and return a ``(length, data)`` tuple."""
    # Truthiness check deliberately mirrors the original: a read_size of
    # 0 (or None) falls through to reading the whole remainder.
    data = self.file.read(read_size) if read_size else self.file.read()
    return len(data), data
def seek_line(self):
"""\
Searches backwards from the current file position for a line terminator
and seeks to the character after it.

Returns the new file position, or None when the start of the file is
reached without finding one (the position is then left at offset 0).
"""
pos = end_pos = self.file.tell()
read_size = self.read_size
# Clamp the first backward step so we never seek before offset 0.
if pos > read_size:
pos -= read_size
else:
pos = 0
read_size = end_pos
self.seek(pos)
bytes_read, read_str = self.read(read_size)
if bytes_read and read_str[-1] in self.line_terminators:
# The last character is a line terminator, don't count this one
bytes_read -= 1
if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf: skip both characters of the terminator pair
bytes_read -= 1
while bytes_read > 0:
# Scan backward, counting the newlines in this bufferfull
i = bytes_read - 1
while i >= 0:
if read_str[i] in self.line_terminators:
# Found one: position just after it and report where we are.
self.seek(pos + i + 1)
return self.file.tell()
i -= 1
if pos == 0 or pos - self.read_size < 0:
# Not enough lines in the buffer, send the whole file
self.seek(0)
return None
# Step one buffer further back and keep scanning.
pos -= self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def tail(self, lines=10):
    """Return up to the last *lines* lines of the file, GNU-tail style."""
    self.seek_end()
    end_pos = self.file.tell()

    # Walk backwards one line boundary at a time until we have enough
    # lines or seek_line() reports it hit the start of the file.
    for _ in range(lines):
        if not self.seek_line():
            break

    # Read forward to the old end; the -1 drops the trailing terminator
    # character (assumes the file ends with a newline, as the original did).
    data = self.file.read(end_pos - self.file.tell() - 1)
    return self.splitlines(data) if data else []
def head(self, lines=10):
    """Return up to the first *lines* lines of the file, GNU-head style."""
    self.seek(0)

    # Advance one line boundary at a time until enough lines are covered
    # or seek_line_forward() runs out of terminators.
    for _ in range(lines):
        if not self.seek_line_forward():
            break

    end_pos = self.file.tell()
    self.seek(0)
    # The -1 drops the final terminator character of the covered region.
    data = self.file.read(end_pos - 1)
    return self.splitlines(data) if data else []
def follow(self, delay=1.0):
    """\
    Iterator generator that returns lines as data is added to the file.

    Yields each complete new line with its terminator stripped; yields the
    module-level ``no_new_line`` sentinel when no new data is available,
    then sleeps *delay* seconds before polling again.
    Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
    """
    # TODO: Handle log file rotation
    self.trailing = True
    unchanged_stats = 0

    while not self.should_stop_follow:
        where = self.file.tell()
        line = self.file.readline()
        if line:
            if self.trailing and line in self.line_terminators:
                # This is just the line terminator added to the end of the file
                # before a new line, ignore.
                self.trailing = False
                continue

            if line[-1] in self.line_terminators:
                line = line[:-1]
                # BUG FIX: the old check compared the one-character slice
                # ``line[-1:]`` against the two-character string '\r\n',
                # which can never match.  After stripping the trailing
                # '\n', a CRLF-terminated line now ends with '\r' -- strip
                # that too (mirrors the correct two-char check in
                # seek_line, which uses read_str[-2:]).
                if line[-1:] == '\r' and '\r\n' in self.line_terminators:
                    # found crlf
                    line = line[:-1]

            self.trailing = False
            unchanged_stats = 0
            yield line
        else:
            self.trailing = True
            self.seek(where)
            yield no_new_line

            # Try to catch up rotated log file
            unchanged_stats += 1
            if unchanged_stats >= self.MAX_UNCHANGED_STATS and \
                    where != os.stat(self.file.name).st_size:
                logger.info('Reopen log file because file may has been rotated.')
                self.reopen_file()
            time.sleep(delay)
def reopen_file(self):
# Re-open the file by name so a rotated/replaced log file is picked up.
self.file = open(self.file.name, 'r')
def stop_follow(self):
# Signal follow() to exit its polling loop at the next iteration.
self.should_stop_follow = True
def __iter__(self):
# Iterating a Tailer is equivalent to follow() with the default delay.
return self.follow()
def close(self):
# Close the underlying file handle.
self.file.close()
|
piglei/uwsgi-sloth | uwsgi_sloth/tailer.py | Tailer.seek_line | python | def seek_line(self):
pos = end_pos = self.file.tell()
read_size = self.read_size
if pos > read_size:
pos -= read_size
else:
pos = 0
read_size = end_pos
self.seek(pos)
bytes_read, read_str = self.read(read_size)
if bytes_read and read_str[-1] in self.line_terminators:
# The last charachter is a line terminator, don't count this one
bytes_read -= 1
if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf
bytes_read -= 1
while bytes_read > 0:
# Scan backward, counting the newlines in this bufferfull
i = bytes_read - 1
while i >= 0:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i -= 1
if pos == 0 or pos - self.read_size < 0:
# Not enought lines in the buffer, send the whole file
self.seek(0)
return None
pos -= self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None | \
Searches backwards from the current file position for a line terminator
and seeks to the character after it. | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/tailer.py#L83-L128 | [
"def seek(self, pos, whence=0):\n self.file.seek(pos, whence)\n",
"def read(self, read_size=None):\n if read_size:\n read_str = self.file.read(read_size)\n else:\n read_str = self.file.read()\n\n return len(read_str), read_str\n"
] | class Tailer(object):
"""Implements tailing and heading functionality like GNU tail and head
commands.
"""
line_terminators = ('\r\n', '\n', '\r')
DEFAULT_BLOCK_SIZE = 4096
MAX_UNCHANGED_STATS = 5
def __init__(self, file, read_size=DEFAULT_BLOCK_SIZE, end=False):
if isinstance(file, str):
file = open(file, 'r')
self.should_stop_follow = False
self.read_size = read_size
self.file = file
self.start_pos = self.file.tell()
if end:
self.seek_end()
def splitlines(self, data):
return re.split('|'.join(self.line_terminators), data)
def seek_end(self):
self.seek(0, 2)
def seek(self, pos, whence=0):
self.file.seek(pos, whence)
def read(self, read_size=None):
if read_size:
read_str = self.file.read(read_size)
else:
read_str = self.file.read()
return len(read_str), read_str
def seek_line_forward(self):
    """\
    Searches forward from the current file position for a line terminator
    and seeks to the character after it.

    Returns the new file position, or None if no terminator is found
    before EOF.
    """
    pos = self.file.tell()  # dropped the unused `start_pos` alias
    bytes_read, read_str = self.read(self.read_size)

    start = 0
    if bytes_read and read_str[0] in self.line_terminators:
        # The first character is a line terminator, don't count this one
        start += 1

    while bytes_read > 0:
        # Scan forwards, looking for a terminator in this bufferfull
        for i in range(start, bytes_read):
            if read_str[i] in self.line_terminators:
                self.seek(pos + i + 1)
                return self.file.tell()

        # BUG FIX: `start` must only skip the leading terminator of the
        # *first* buffer; previously a stale start=1 leaked into later
        # buffers and could skip a real terminator at their index 0.
        start = 0
        pos += self.read_size
        self.seek(pos)
        bytes_read, read_str = self.read(self.read_size)

    return None
def tail(self, lines=10):
"""\
Return the last lines of the file.
"""
self.seek_end()
end_pos = self.file.tell()
for i in range(lines):
if not self.seek_line():
break
data = self.file.read(end_pos - self.file.tell() - 1)
if data:
return self.splitlines(data)
else:
return []
def head(self, lines=10):
"""\
Return the top lines of the file.
"""
self.seek(0)
for i in range(lines):
if not self.seek_line_forward():
break
end_pos = self.file.tell()
self.seek(0)
data = self.file.read(end_pos - 1)
if data:
return self.splitlines(data)
else:
return []
def follow(self, delay=1.0):
"""\
Iterator generator that returns lines as data is added to the file.
Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
"""
# TODO: Handle log file rotation
self.trailing = True
unchanged_stats = 0
while not self.should_stop_follow:
where = self.file.tell()
line = self.file.readline()
if line:
if self.trailing and line in self.line_terminators:
# This is just the line terminator added to the end of the file
# before a new line, ignore.
self.trailing = False
continue
if line[-1] in self.line_terminators:
line = line[:-1]
if line[-1:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf
line = line[:-1]
self.trailing = False
unchanged_stats = 0
yield line
else:
self.trailing = True
self.seek(where)
yield no_new_line
# Try to catch up rotated log file
unchanged_stats += 1
if unchanged_stats >= self.MAX_UNCHANGED_STATS and \
where != os.stat(self.file.name).st_size:
logger.info('Reopen log file because file may has been rotated.')
self.reopen_file()
time.sleep(delay)
def reopen_file(self):
self.file = open(self.file.name, 'r')
def stop_follow(self):
self.should_stop_follow = True
def __iter__(self):
return self.follow()
def close(self):
self.file.close()
|
piglei/uwsgi-sloth | uwsgi_sloth/tailer.py | Tailer.tail | python | def tail(self, lines=10):
self.seek_end()
end_pos = self.file.tell()
for i in range(lines):
if not self.seek_line():
break
data = self.file.read(end_pos - self.file.tell() - 1)
if data:
return self.splitlines(data)
else:
return [] | \
Return the last lines of the file. | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/tailer.py#L130-L145 | [
"def splitlines(self, data):\n return re.split('|'.join(self.line_terminators), data)\n",
"def seek_end(self):\n self.seek(0, 2)\n",
"def seek_line(self):\n \"\"\"\\\n Searches backwards from the current file position for a line terminator\n and seeks to the charachter after it.\n \"\"\"\n pos = end_pos = self.file.tell()\n\n read_size = self.read_size\n if pos > read_size:\n pos -= read_size\n else:\n pos = 0\n read_size = end_pos\n\n self.seek(pos)\n\n bytes_read, read_str = self.read(read_size)\n\n if bytes_read and read_str[-1] in self.line_terminators:\n # The last charachter is a line terminator, don't count this one\n bytes_read -= 1\n\n if read_str[-2:] == '\\r\\n' and '\\r\\n' in self.line_terminators:\n # found crlf\n bytes_read -= 1\n\n while bytes_read > 0: \n # Scan backward, counting the newlines in this bufferfull\n i = bytes_read - 1\n while i >= 0:\n if read_str[i] in self.line_terminators:\n self.seek(pos + i + 1)\n return self.file.tell()\n i -= 1\n\n if pos == 0 or pos - self.read_size < 0:\n # Not enought lines in the buffer, send the whole file\n self.seek(0)\n return None\n\n pos -= self.read_size\n self.seek(pos)\n\n bytes_read, read_str = self.read(self.read_size)\n\n return None\n"
] | class Tailer(object):
"""Implements tailing and heading functionality like GNU tail and head
commands.
"""
line_terminators = ('\r\n', '\n', '\r')
DEFAULT_BLOCK_SIZE = 4096
MAX_UNCHANGED_STATS = 5
def __init__(self, file, read_size=DEFAULT_BLOCK_SIZE, end=False):
if isinstance(file, str):
file = open(file, 'r')
self.should_stop_follow = False
self.read_size = read_size
self.file = file
self.start_pos = self.file.tell()
if end:
self.seek_end()
def splitlines(self, data):
return re.split('|'.join(self.line_terminators), data)
def seek_end(self):
self.seek(0, 2)
def seek(self, pos, whence=0):
self.file.seek(pos, whence)
def read(self, read_size=None):
if read_size:
read_str = self.file.read(read_size)
else:
read_str = self.file.read()
return len(read_str), read_str
def seek_line_forward(self):
"""\
Searches forward from the current file position for a line terminator
and seeks to the charachter after it.
"""
pos = start_pos = self.file.tell()
bytes_read, read_str = self.read(self.read_size)
start = 0
if bytes_read and read_str[0] in self.line_terminators:
# The first charachter is a line terminator, don't count this one
start += 1
while bytes_read > 0:
# Scan forwards, counting the newlines in this bufferfull
i = start
while i < bytes_read:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i += 1
pos += self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def seek_line(self):
"""\
Searches backwards from the current file position for a line terminator
and seeks to the charachter after it.
"""
pos = end_pos = self.file.tell()
read_size = self.read_size
if pos > read_size:
pos -= read_size
else:
pos = 0
read_size = end_pos
self.seek(pos)
bytes_read, read_str = self.read(read_size)
if bytes_read and read_str[-1] in self.line_terminators:
# The last charachter is a line terminator, don't count this one
bytes_read -= 1
if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf
bytes_read -= 1
while bytes_read > 0:
# Scan backward, counting the newlines in this bufferfull
i = bytes_read - 1
while i >= 0:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i -= 1
if pos == 0 or pos - self.read_size < 0:
# Not enought lines in the buffer, send the whole file
self.seek(0)
return None
pos -= self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def head(self, lines=10):
"""\
Return the top lines of the file.
"""
self.seek(0)
for i in range(lines):
if not self.seek_line_forward():
break
end_pos = self.file.tell()
self.seek(0)
data = self.file.read(end_pos - 1)
if data:
return self.splitlines(data)
else:
return []
def follow(self, delay=1.0):
"""\
Iterator generator that returns lines as data is added to the file.
Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
"""
# TODO: Handle log file rotation
self.trailing = True
unchanged_stats = 0
while not self.should_stop_follow:
where = self.file.tell()
line = self.file.readline()
if line:
if self.trailing and line in self.line_terminators:
# This is just the line terminator added to the end of the file
# before a new line, ignore.
self.trailing = False
continue
if line[-1] in self.line_terminators:
line = line[:-1]
if line[-1:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf
line = line[:-1]
self.trailing = False
unchanged_stats = 0
yield line
else:
self.trailing = True
self.seek(where)
yield no_new_line
# Try to catch up rotated log file
unchanged_stats += 1
if unchanged_stats >= self.MAX_UNCHANGED_STATS and \
where != os.stat(self.file.name).st_size:
logger.info('Reopen log file because file may has been rotated.')
self.reopen_file()
time.sleep(delay)
def reopen_file(self):
self.file = open(self.file.name, 'r')
def stop_follow(self):
self.should_stop_follow = True
def __iter__(self):
return self.follow()
def close(self):
self.file.close()
|
piglei/uwsgi-sloth | uwsgi_sloth/tailer.py | Tailer.head | python | def head(self, lines=10):
self.seek(0)
for i in range(lines):
if not self.seek_line_forward():
break
end_pos = self.file.tell()
self.seek(0)
data = self.file.read(end_pos - 1)
if data:
return self.splitlines(data)
else:
return [] | \
Return the top lines of the file. | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/tailer.py#L147-L165 | [
"def splitlines(self, data):\n return re.split('|'.join(self.line_terminators), data)\n",
"def seek(self, pos, whence=0):\n self.file.seek(pos, whence)\n",
"def seek_line_forward(self):\n \"\"\"\\\n Searches forward from the current file position for a line terminator\n and seeks to the charachter after it.\n \"\"\"\n pos = start_pos = self.file.tell()\n\n bytes_read, read_str = self.read(self.read_size)\n\n start = 0\n if bytes_read and read_str[0] in self.line_terminators:\n # The first charachter is a line terminator, don't count this one\n start += 1\n\n while bytes_read > 0: \n # Scan forwards, counting the newlines in this bufferfull\n i = start\n while i < bytes_read:\n if read_str[i] in self.line_terminators:\n self.seek(pos + i + 1)\n return self.file.tell()\n i += 1\n\n pos += self.read_size\n self.seek(pos)\n\n bytes_read, read_str = self.read(self.read_size)\n\n return None\n"
] | class Tailer(object):
"""Implements tailing and heading functionality like GNU tail and head
commands.
"""
line_terminators = ('\r\n', '\n', '\r')
DEFAULT_BLOCK_SIZE = 4096
MAX_UNCHANGED_STATS = 5
def __init__(self, file, read_size=DEFAULT_BLOCK_SIZE, end=False):
if isinstance(file, str):
file = open(file, 'r')
self.should_stop_follow = False
self.read_size = read_size
self.file = file
self.start_pos = self.file.tell()
if end:
self.seek_end()
def splitlines(self, data):
return re.split('|'.join(self.line_terminators), data)
def seek_end(self):
self.seek(0, 2)
def seek(self, pos, whence=0):
self.file.seek(pos, whence)
def read(self, read_size=None):
if read_size:
read_str = self.file.read(read_size)
else:
read_str = self.file.read()
return len(read_str), read_str
def seek_line_forward(self):
"""\
Searches forward from the current file position for a line terminator
and seeks to the charachter after it.
"""
pos = start_pos = self.file.tell()
bytes_read, read_str = self.read(self.read_size)
start = 0
if bytes_read and read_str[0] in self.line_terminators:
# The first charachter is a line terminator, don't count this one
start += 1
while bytes_read > 0:
# Scan forwards, counting the newlines in this bufferfull
i = start
while i < bytes_read:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i += 1
pos += self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def seek_line(self):
"""\
Searches backwards from the current file position for a line terminator
and seeks to the charachter after it.
"""
pos = end_pos = self.file.tell()
read_size = self.read_size
if pos > read_size:
pos -= read_size
else:
pos = 0
read_size = end_pos
self.seek(pos)
bytes_read, read_str = self.read(read_size)
if bytes_read and read_str[-1] in self.line_terminators:
# The last charachter is a line terminator, don't count this one
bytes_read -= 1
if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf
bytes_read -= 1
while bytes_read > 0:
# Scan backward, counting the newlines in this bufferfull
i = bytes_read - 1
while i >= 0:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i -= 1
if pos == 0 or pos - self.read_size < 0:
# Not enought lines in the buffer, send the whole file
self.seek(0)
return None
pos -= self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def tail(self, lines=10):
"""\
Return the last lines of the file.
"""
self.seek_end()
end_pos = self.file.tell()
for i in range(lines):
if not self.seek_line():
break
data = self.file.read(end_pos - self.file.tell() - 1)
if data:
return self.splitlines(data)
else:
return []
def head(self, lines=10):
"""\
Return the top lines of the file.
"""
self.seek(0)
for i in range(lines):
if not self.seek_line_forward():
break
end_pos = self.file.tell()
self.seek(0)
data = self.file.read(end_pos - 1)
if data:
return self.splitlines(data)
else:
return []
def follow(self, delay=1.0):
"""\
Iterator generator that returns lines as data is added to the file.
Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
"""
# TODO: Handle log file rotation
self.trailing = True
unchanged_stats = 0
while not self.should_stop_follow:
where = self.file.tell()
line = self.file.readline()
if line:
if self.trailing and line in self.line_terminators:
# This is just the line terminator added to the end of the file
# before a new line, ignore.
self.trailing = False
continue
if line[-1] in self.line_terminators:
line = line[:-1]
if line[-1:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf
line = line[:-1]
self.trailing = False
unchanged_stats = 0
yield line
else:
self.trailing = True
self.seek(where)
yield no_new_line
# Try to catch up rotated log file
unchanged_stats += 1
if unchanged_stats >= self.MAX_UNCHANGED_STATS and \
where != os.stat(self.file.name).st_size:
logger.info('Reopen log file because file may has been rotated.')
self.reopen_file()
time.sleep(delay)
def reopen_file(self):
self.file = open(self.file.name, 'r')
def stop_follow(self):
self.should_stop_follow = True
def __iter__(self):
return self.follow()
def close(self):
self.file.close()
|
piglei/uwsgi-sloth | uwsgi_sloth/tailer.py | Tailer.follow | python | def follow(self, delay=1.0):
# TODO: Handle log file rotation
self.trailing = True
unchanged_stats = 0
while not self.should_stop_follow:
where = self.file.tell()
line = self.file.readline()
if line:
if self.trailing and line in self.line_terminators:
# This is just the line terminator added to the end of the file
# before a new line, ignore.
self.trailing = False
continue
if line[-1] in self.line_terminators:
line = line[:-1]
if line[-1:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf
line = line[:-1]
self.trailing = False
unchanged_stats = 0
yield line
else:
self.trailing = True
self.seek(where)
yield no_new_line
# Try to catch up rotated log file
unchanged_stats += 1
if unchanged_stats >= self.MAX_UNCHANGED_STATS and \
where != os.stat(self.file.name).st_size:
logger.info('Reopen log file because file may has been rotated.')
self.reopen_file()
time.sleep(delay) | \
Iterator generator that returns lines as data is added to the file.
Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035 | train | https://github.com/piglei/uwsgi-sloth/blob/2834ac5ed17d89ca5f19151c649ac610f6f37bd1/uwsgi_sloth/tailer.py#L167-L207 | [
"def seek(self, pos, whence=0):\n self.file.seek(pos, whence)\n",
"def reopen_file(self):\n self.file = open(self.file.name, 'r')\n"
] | class Tailer(object):
"""Implements tailing and heading functionality like GNU tail and head
commands.
"""
line_terminators = ('\r\n', '\n', '\r')
DEFAULT_BLOCK_SIZE = 4096
MAX_UNCHANGED_STATS = 5
def __init__(self, file, read_size=DEFAULT_BLOCK_SIZE, end=False):
if isinstance(file, str):
file = open(file, 'r')
self.should_stop_follow = False
self.read_size = read_size
self.file = file
self.start_pos = self.file.tell()
if end:
self.seek_end()
def splitlines(self, data):
return re.split('|'.join(self.line_terminators), data)
def seek_end(self):
self.seek(0, 2)
def seek(self, pos, whence=0):
self.file.seek(pos, whence)
def read(self, read_size=None):
if read_size:
read_str = self.file.read(read_size)
else:
read_str = self.file.read()
return len(read_str), read_str
def seek_line_forward(self):
"""\
Searches forward from the current file position for a line terminator
and seeks to the charachter after it.
"""
pos = start_pos = self.file.tell()
bytes_read, read_str = self.read(self.read_size)
start = 0
if bytes_read and read_str[0] in self.line_terminators:
# The first charachter is a line terminator, don't count this one
start += 1
while bytes_read > 0:
# Scan forwards, counting the newlines in this bufferfull
i = start
while i < bytes_read:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i += 1
pos += self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def seek_line(self):
"""\
Searches backwards from the current file position for a line terminator
and seeks to the charachter after it.
"""
pos = end_pos = self.file.tell()
read_size = self.read_size
if pos > read_size:
pos -= read_size
else:
pos = 0
read_size = end_pos
self.seek(pos)
bytes_read, read_str = self.read(read_size)
if bytes_read and read_str[-1] in self.line_terminators:
# The last charachter is a line terminator, don't count this one
bytes_read -= 1
if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf
bytes_read -= 1
while bytes_read > 0:
# Scan backward, counting the newlines in this bufferfull
i = bytes_read - 1
while i >= 0:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i -= 1
if pos == 0 or pos - self.read_size < 0:
# Not enought lines in the buffer, send the whole file
self.seek(0)
return None
pos -= self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def tail(self, lines=10):
"""\
Return the last lines of the file.
"""
self.seek_end()
end_pos = self.file.tell()
for i in range(lines):
if not self.seek_line():
break
data = self.file.read(end_pos - self.file.tell() - 1)
if data:
return self.splitlines(data)
else:
return []
def head(self, lines=10):
"""\
Return the top lines of the file.
"""
self.seek(0)
for i in range(lines):
if not self.seek_line_forward():
break
end_pos = self.file.tell()
self.seek(0)
data = self.file.read(end_pos - 1)
if data:
return self.splitlines(data)
else:
return []
def follow(self, delay=1.0):
"""\
Iterator generator that returns lines as data is added to the file.
Based on: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/157035
"""
# TODO: Handle log file rotation
self.trailing = True
unchanged_stats = 0
while not self.should_stop_follow:
where = self.file.tell()
line = self.file.readline()
if line:
if self.trailing and line in self.line_terminators:
# This is just the line terminator added to the end of the file
# before a new line, ignore.
self.trailing = False
continue
if line[-1] in self.line_terminators:
line = line[:-1]
if line[-1:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf
line = line[:-1]
self.trailing = False
unchanged_stats = 0
yield line
else:
self.trailing = True
self.seek(where)
yield no_new_line
# Try to catch up rotated log file
unchanged_stats += 1
if unchanged_stats >= self.MAX_UNCHANGED_STATS and \
where != os.stat(self.file.name).st_size:
logger.info('Reopen log file because file may has been rotated.')
self.reopen_file()
time.sleep(delay)
def reopen_file(self):
self.file = open(self.file.name, 'r')
def stop_follow(self):
self.should_stop_follow = True
def __iter__(self):
return self.follow()
def close(self):
self.file.close()
|
ambitioninc/django-manager-utils | settings.py | configure_settings | python | def configure_settings():
if not settings.configured:
# Determine the database settings depending on if a test_db var is set in CI mode or not
test_db = os.environ.get('DB', None)
if test_db is None:
db_config = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'ambition',
'USER': 'ambition',
'PASSWORD': 'ambition',
'HOST': 'db'
}
elif test_db == 'postgres':
db_config = {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'postgres',
'NAME': 'manager_utils',
}
elif test_db == 'sqlite':
db_config = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'manager_utils',
}
else:
raise RuntimeError('Unsupported test DB {0}'.format(test_db))
settings.configure(
TEST_RUNNER='django_nose.NoseTestSuiteRunner',
NOSE_ARGS=['--nocapture', '--nologcapture', '--verbosity=1'],
MIDDLEWARE_CLASSES={},
DATABASES={
'default': db_config,
},
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'manager_utils',
'manager_utils.tests',
),
ROOT_URLCONF='manager_utils.urls',
DEBUG=False,
) | Configures settings for manage.py and for run_tests.py. | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/settings.py#L6-L52 | null | import os
from django.conf import settings
|
ambitioninc/django-manager-utils | manager_utils/upsert2.py | _get_update_fields | python | def _get_update_fields(model, uniques, to_update):
fields = {
field.attname: field
for field in model._meta.fields
}
if to_update is None:
to_update = [
field.attname for field in model._meta.fields
]
to_update = [
attname for attname in to_update
if (attname not in uniques
and not getattr(fields[attname], 'auto_now_add', False)
and not fields[attname].auto_created)
]
return to_update | Get the fields to be updated in an upsert.
Always exclude auto_now_add, auto_created fields, and unique fields in an update | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/upsert2.py#L38-L61 | null | """
The new interface for manager utils upsert
"""
from collections import namedtuple
from django.db import connection, models
from django.utils import timezone
class UpsertResult(list):
    """
    Returned by the upsert operation.

    Wraps a list of upserted rows and exposes lazy generator properties
    that filter the results by their upsert status (``status_``):
    created, updated, untouched, and deleted elements.
    """

    def _by_status(self, status):
        # Lazily yield the elements whose status code matches.
        return (item for item in self if item.status_ == status)

    @property
    def created(self):
        """Elements inserted by the upsert (status 'c')."""
        return self._by_status('c')

    @property
    def updated(self):
        """Elements updated by the upsert (status 'u')."""
        return self._by_status('u')

    @property
    def untouched(self):
        """Elements left unchanged by the upsert (status 'n')."""
        return self._by_status('n')

    @property
    def deleted(self):
        """Elements deleted by the upsert (status 'd')."""
        return self._by_status('d')
def _quote(field):
return '"{0}"'.format(field)
def _fill_auto_fields(model, values):
    """
    Given a list of models, fill in auto_now and auto_now_add fields
    for upserts. Since django manager utils passes Django's ORM, these
    values have to be automatically constructed.

    Returns the same ``values`` list with the auto fields set to a single
    shared "now" timestamp.
    """
    now = timezone.now()
    auto_attnames = [
        field.attname
        for field in model._meta.fields
        if getattr(field, 'auto_now', False) or getattr(field, 'auto_now_add', False)
    ]
    for obj in values:
        for attname in auto_attnames:
            setattr(obj, attname, now)
    return values
def _sort_by_unique_fields(model, model_objs, unique_fields):
    """
    Sort a list of models by their unique fields.

    Sorting models in an upsert greatly reduces the chances of deadlock
    when doing concurrent upserts.
    """
    fields = [
        f for f in model._meta.fields
        if f.attname in unique_fields
    ]

    def key(obj):
        # Compare on DB-prepared values so ordering matches what the
        # database would see.
        return tuple(
            f.get_db_prep_save(getattr(obj, f.attname), connection)
            for f in fields
        )

    return sorted(model_objs, key=key)
def _get_values_for_row(model_obj, all_fields):
    """Convert one model instance's field values to DB-prepared values."""
    # Use attname (not name) so fields with custom db_column names work.
    return [
        f.get_db_prep_save(getattr(model_obj, f.attname), connection)
        for f in all_fields
    ]
def _get_values_for_rows(model_objs, all_fields):
    """Build the VALUES placeholder rows and flat argument list for an
    upsert. The first row carries explicit ``::type`` casts so postgres
    can infer the column types; later rows use bare placeholders."""
    row_values = []
    sql_args = []
    for index, obj in enumerate(model_objs):
        sql_args.extend(_get_values_for_row(obj, all_fields))
        if index:
            placeholders = ['%s'] * len(all_fields)
        else:
            placeholders = [
                '%s::{0}'.format(f.db_type(connection)) for f in all_fields
            ]
        row_values.append('({0})'.format(', '.join(placeholders)))
    return row_values, sql_args
def _get_return_fields_sql(returning, return_status=False, alias=None):
    """Build the comma-separated RETURNING column list, optionally
    alias-qualified, optionally appending the created/updated status
    expression (xmax == 0 means the row was freshly inserted)."""
    if alias:
        parts = ['{0}.{1}'.format(alias, _quote(field)) for field in returning]
    else:
        parts = [_quote(field) for field in returning]
    return_fields_sql = ', '.join(parts)

    if return_status:
        return_fields_sql += ', CASE WHEN xmax = 0 THEN \'c\' ELSE \'u\' END AS status_'

    return return_fields_sql
def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
                    ignore_duplicate_updates=True, return_untouched=False):
    """
    Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)

    INSERT INTO table_name (field1, field2)
    VALUES (1, 'two')
    ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;

    Returns a ``(sql, sql_args)`` pair suitable for cursor.execute().
    """
    model = queryset.model
    # Use all fields except pk unless the uniqueness constraint is the pk field
    all_fields = [
        field for field in model._meta.fields
        if field.column != model._meta.pk.name or not field.auto_created
    ]
    all_field_names = [field.column for field in all_fields]
    # returning=True is shorthand for "return every column of the model"
    returning = returning if returning is not True else [f.column for f in model._meta.fields]
    all_field_names_sql = ', '.join([_quote(field) for field in all_field_names])
    # Convert field names to db column names
    unique_fields = [
        model._meta.get_field(unique_field)
        for unique_field in unique_fields
    ]
    update_fields = [
        model._meta.get_field(update_field)
        for update_field in update_fields
    ]
    unique_field_names_sql = ', '.join([
        _quote(field.column) for field in unique_fields
    ])
    # SET clause: assign each updatable column from the proposed (EXCLUDED) row
    update_fields_sql = ', '.join([
        '{0} = EXCLUDED.{0}'.format(_quote(field.column))
        for field in update_fields
    ])
    row_values, sql_args = _get_values_for_rows(model_objs, all_fields)
    return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else ''
    ignore_duplicates_sql = ''
    if ignore_duplicate_updates:
        # Skip the UPDATE entirely when the stored values already equal the
        # incoming ones (IS DISTINCT FROM handles NULLs correctly)
        ignore_duplicates_sql = (
            ' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) '
        ).format(
            update_fields_sql=', '.join(
                '{0}.{1}'.format(model._meta.db_table, _quote(field.column))
                for field in update_fields
            ),
            excluded_update_fields_sql=', '.join(
                'EXCLUDED.' + _quote(field.column)
                for field in update_fields
            )
        )
    on_conflict = (
        'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING'
    )
    if return_untouched:
        # Tag each VALUES row with a temp id so insertion order is preserved
        # inside the CTE
        row_values_sql = ', '.join([
            '(\'{0}\', {1})'.format(i, row_value[1:-1])
            for i, row_value in enumerate(row_values)
        ])
        # CTE form: insert from input_rows, then UNION in the rows that
        # already existed unchanged (status 'n'); DISTINCT ON keeps one row
        # per pk, preferring the insert/update result over the untouched one.
        sql = (
            ' WITH input_rows("temp_id_", {all_field_names_sql}) AS ('
            ' VALUES {row_values_sql}'
            ' ), ins AS ( '
            ' INSERT INTO {table_name} ({all_field_names_sql})'
            ' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_'
            ' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
            ' )'
            ' SELECT DISTINCT ON ({table_pk_name}) * FROM ('
            ' SELECT status_, {return_fields_sql}'
            ' FROM ins'
            ' UNION ALL'
            ' SELECT \'n\' AS status_, {aliased_return_fields_sql}'
            ' FROM input_rows'
            ' JOIN {table_name} c USING ({unique_field_names_sql})'
            ' ) as results'
            ' ORDER BY results."{table_pk_name}", CASE WHEN(status_ = \'n\') THEN 1 ELSE 0 END;'
        ).format(
            all_field_names_sql=all_field_names_sql,
            row_values_sql=row_values_sql,
            table_name=model._meta.db_table,
            unique_field_names_sql=unique_field_names_sql,
            on_conflict=on_conflict,
            return_sql=return_sql,
            table_pk_name=model._meta.pk.name,
            return_fields_sql=_get_return_fields_sql(returning),
            aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c')
        )
    else:
        # Simple form: one INSERT ... ON CONFLICT statement
        row_values_sql = ', '.join(row_values)
        sql = (
            ' INSERT INTO {table_name} ({all_field_names_sql})'
            ' VALUES {row_values_sql}'
            ' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
        ).format(
            table_name=model._meta.db_table,
            all_field_names_sql=all_field_names_sql,
            row_values_sql=row_values_sql,
            unique_field_names_sql=unique_field_names_sql,
            on_conflict=on_conflict,
            return_sql=return_sql
        )
    return sql, sql_args
def _fetch(
    queryset, model_objs, unique_fields, update_fields, returning, sync,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Perform the upsert and do an optional sync operation.

    Executes the generated SQL, wraps each returned row in a namedtuple
    (carrying a ``status_`` of 'c'/'u'/'n'), and, when ``sync`` is True,
    deletes queryset rows absent from the upsert (reported with
    status 'd').
    """
    model = queryset.model
    # Syncing (and reporting untouched rows) requires the pk in the
    # RETURNING clause so results can be matched back to existing rows
    if (return_untouched or sync) and returning is not True:
        returning = set(returning) if returning else set()
        returning.add(model._meta.pk.name)
    upserted = []
    deleted = []
    # We must return untouched rows when doing a sync operation
    return_untouched = True if sync else return_untouched
    if model_objs:
        sql, sql_args = _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
                                        ignore_duplicate_updates=ignore_duplicate_updates,
                                        return_untouched=return_untouched)
        with connection.cursor() as cursor:
            cursor.execute(sql, sql_args)
            if cursor.description:
                # Wrap each returned row in a namedtuple keyed by column name
                nt_result = namedtuple('Result', [col[0] for col in cursor.description])
                upserted = [nt_result(*row) for row in cursor.fetchall()]
    pk_field = model._meta.pk.name
    if sync:
        # Delete every row of the queryset that was not touched by the upsert
        orig_ids = queryset.values_list(pk_field, flat=True)
        deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}
        model.objects.filter(pk__in=deleted).delete()
    nt_deleted_result = namedtuple('DeletedResult', [model._meta.pk.name, 'status_'])
    return UpsertResult(
        upserted + [nt_deleted_result(**{pk_field: d, 'status_': 'd'}) for d in deleted]
    )
def upsert(
    queryset, model_objs, unique_fields,
    update_fields=None, returning=False, sync=False,
    ignore_duplicate_updates=True,
    return_untouched=False
):
    """
    Perform a bulk upsert on a table, optionally syncing the results.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
        model_objs (List[Model]): A list of Django models to sync. All models in this list
            will be bulk upserted and any models not in the table (or queryset) will be deleted
            if sync=True.
        unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
            model must have a unique constraint on these fields
        update_fields (List[str], default=None): A list of fields to update whenever objects
            already exist. If an empty list is provided, it is equivalent to doing a bulk
            insert on the objects that don't exist. If `None`, all fields will be updated.
        returning (bool|List[str]): If True, returns all fields. If a list, only returns
            fields in the list
        sync (bool, default=False): Perform a sync operation on the queryset
        ignore_duplicate_updates (bool, default=True): Don't perform an update if the row is
            a duplicate.
        return_untouched (bool, default=False): Return untouched rows by the operation

    Returns:
        UpsertResult: The rows affected, each tagged with a ``status_`` of
        'c' (created), 'u' (updated), 'n' (untouched), or 'd' (deleted).
    """
    # Accept either a model class or a queryset; normalize to a queryset
    queryset = queryset if isinstance(queryset, models.QuerySet) else queryset.objects.all()
    model = queryset.model
    # Populate automatically generated fields in the rows like date times
    _fill_auto_fields(model, model_objs)
    # Sort the rows to reduce the chances of deadlock during concurrent upserts
    model_objs = _sort_by_unique_fields(model, model_objs, unique_fields)
    update_fields = _get_update_fields(model, unique_fields, update_fields)
    return _fetch(queryset, model_objs, unique_fields, update_fields, returning, sync,
                  ignore_duplicate_updates=ignore_duplicate_updates,
                  return_untouched=return_untouched)
|
ambitioninc/django-manager-utils | manager_utils/upsert2.py | _fill_auto_fields | python | def _fill_auto_fields(model, values):
auto_field_names = [
f.attname
for f in model._meta.fields
if getattr(f, 'auto_now', False) or getattr(f, 'auto_now_add', False)
]
now = timezone.now()
for value in values:
for f in auto_field_names:
setattr(value, f, now)
return values | Given a list of models, fill in auto_now and auto_now_add fields
for upserts. Since django manager utils passes Django's ORM, these values
have to be automatically constructed | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/upsert2.py#L64-L80 | null | """
The new interface for manager utils upsert
"""
from collections import namedtuple
from django.db import connection, models
from django.utils import timezone
class UpsertResult(list):
    """
    Returned by the upsert operation.

    Wraps a list of result rows and exposes generator properties that
    filter them by their ``status_`` flag: created ('c'), updated ('u'),
    untouched ('n'), and deleted ('d').
    """
    def _by_status(self, code):
        # Lazily yield only the rows whose status_ matches ``code``
        return (row for row in self if row.status_ == code)

    @property
    def created(self):
        return self._by_status('c')

    @property
    def updated(self):
        return self._by_status('u')

    @property
    def untouched(self):
        return self._by_status('n')

    @property
    def deleted(self):
        return self._by_status('d')
def _quote(field):
return '"{0}"'.format(field)
def _get_update_fields(model, uniques, to_update):
"""
Get the fields to be updated in an upsert.
Always exclude auto_now_add, auto_created fields, and unique fields in an update
"""
fields = {
field.attname: field
for field in model._meta.fields
}
if to_update is None:
to_update = [
field.attname for field in model._meta.fields
]
to_update = [
attname for attname in to_update
if (attname not in uniques
and not getattr(fields[attname], 'auto_now_add', False)
and not fields[attname].auto_created)
]
return to_update
def _sort_by_unique_fields(model, model_objs, unique_fields):
"""
Sort a list of models by their unique fields.
Sorting models in an upsert greatly reduces the chances of deadlock
when doing concurrent upserts
"""
unique_fields = [
field for field in model._meta.fields
if field.attname in unique_fields
]
def sort_key(model_obj):
return tuple(
field.get_db_prep_save(getattr(model_obj, field.attname),
connection)
for field in unique_fields
)
return sorted(model_objs, key=sort_key)
def _get_values_for_row(model_obj, all_fields):
return [
# Convert field value to db value
# Use attname here to support fields with custom db_column names
field.get_db_prep_save(getattr(model_obj, field.attname), connection)
for field in all_fields
]
def _get_values_for_rows(model_objs, all_fields):
row_values = []
sql_args = []
for i, model_obj in enumerate(model_objs):
sql_args.extend(_get_values_for_row(model_obj, all_fields))
if i == 0:
row_values.append('({0})'.format(
', '.join(['%s::{0}'.format(f.db_type(connection)) for f in all_fields]))
)
else:
row_values.append('({0})'.format(', '.join(['%s'] * len(all_fields))))
return row_values, sql_args
def _get_return_fields_sql(returning, return_status=False, alias=None):
if alias:
return_fields_sql = ', '.join('{0}.{1}'.format(alias, _quote(field)) for field in returning)
else:
return_fields_sql = ', '.join(_quote(field) for field in returning)
if return_status:
return_fields_sql += ', CASE WHEN xmax = 0 THEN \'c\' ELSE \'u\' END AS status_'
return return_fields_sql
def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=True, return_untouched=False):
"""
Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;
"""
model = queryset.model
# Use all fields except pk unless the uniqueness constraint is the pk field
all_fields = [
field for field in model._meta.fields
if field.column != model._meta.pk.name or not field.auto_created
]
all_field_names = [field.column for field in all_fields]
returning = returning if returning is not True else [f.column for f in model._meta.fields]
all_field_names_sql = ', '.join([_quote(field) for field in all_field_names])
# Convert field names to db column names
unique_fields = [
model._meta.get_field(unique_field)
for unique_field in unique_fields
]
update_fields = [
model._meta.get_field(update_field)
for update_field in update_fields
]
unique_field_names_sql = ', '.join([
_quote(field.column) for field in unique_fields
])
update_fields_sql = ', '.join([
'{0} = EXCLUDED.{0}'.format(_quote(field.column))
for field in update_fields
])
row_values, sql_args = _get_values_for_rows(model_objs, all_fields)
return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else ''
ignore_duplicates_sql = ''
if ignore_duplicate_updates:
ignore_duplicates_sql = (
' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) '
).format(
update_fields_sql=', '.join(
'{0}.{1}'.format(model._meta.db_table, _quote(field.column))
for field in update_fields
),
excluded_update_fields_sql=', '.join(
'EXCLUDED.' + _quote(field.column)
for field in update_fields
)
)
on_conflict = (
'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING'
)
if return_untouched:
row_values_sql = ', '.join([
'(\'{0}\', {1})'.format(i, row_value[1:-1])
for i, row_value in enumerate(row_values)
])
sql = (
' WITH input_rows("temp_id_", {all_field_names_sql}) AS ('
' VALUES {row_values_sql}'
' ), ins AS ( '
' INSERT INTO {table_name} ({all_field_names_sql})'
' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
' )'
' SELECT DISTINCT ON ({table_pk_name}) * FROM ('
' SELECT status_, {return_fields_sql}'
' FROM ins'
' UNION ALL'
' SELECT \'n\' AS status_, {aliased_return_fields_sql}'
' FROM input_rows'
' JOIN {table_name} c USING ({unique_field_names_sql})'
' ) as results'
' ORDER BY results."{table_pk_name}", CASE WHEN(status_ = \'n\') THEN 1 ELSE 0 END;'
).format(
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
table_name=model._meta.db_table,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql,
table_pk_name=model._meta.pk.name,
return_fields_sql=_get_return_fields_sql(returning),
aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c')
)
else:
row_values_sql = ', '.join(row_values)
sql = (
' INSERT INTO {table_name} ({all_field_names_sql})'
' VALUES {row_values_sql}'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
).format(
table_name=model._meta.db_table,
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql
)
return sql, sql_args
def _fetch(
queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=True, return_untouched=False
):
"""
Perfom the upsert and do an optional sync operation
"""
model = queryset.model
if (return_untouched or sync) and returning is not True:
returning = set(returning) if returning else set()
returning.add(model._meta.pk.name)
upserted = []
deleted = []
# We must return untouched rows when doing a sync operation
return_untouched = True if sync else return_untouched
if model_objs:
sql, sql_args = _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
with connection.cursor() as cursor:
cursor.execute(sql, sql_args)
if cursor.description:
nt_result = namedtuple('Result', [col[0] for col in cursor.description])
upserted = [nt_result(*row) for row in cursor.fetchall()]
pk_field = model._meta.pk.name
if sync:
orig_ids = queryset.values_list(pk_field, flat=True)
deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}
model.objects.filter(pk__in=deleted).delete()
nt_deleted_result = namedtuple('DeletedResult', [model._meta.pk.name, 'status_'])
return UpsertResult(
upserted + [nt_deleted_result(**{pk_field: d, 'status_': 'd'}) for d in deleted]
)
def upsert(
queryset, model_objs, unique_fields,
update_fields=None, returning=False, sync=False,
ignore_duplicate_updates=True,
return_untouched=False
):
"""
Perform a bulk upsert on a table, optionally syncing the results.
Args:
queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
model_objs (List[Model]): A list of Django models to sync. All models in this list
will be bulk upserted and any models not in the table (or queryset) will be deleted
if sync=True.
unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
model must have a unique constraint on these fields
update_fields (List[str], default=None): A list of fields to update whenever objects
already exist. If an empty list is provided, it is equivalent to doing a bulk
insert on the objects that don't exist. If `None`, all fields will be updated.
returning (bool|List[str]): If True, returns all fields. If a list, only returns
fields in the list
sync (bool, default=False): Perform a sync operation on the queryset
ignore_duplicate_updates (bool, default=False): Don't perform an update if the row is
a duplicate.
return_untouched (bool, default=False): Return untouched rows by the operation
"""
queryset = queryset if isinstance(queryset, models.QuerySet) else queryset.objects.all()
model = queryset.model
# Populate automatically generated fields in the rows like date times
_fill_auto_fields(model, model_objs)
# Sort the rows to reduce the chances of deadlock during concurrent upserts
model_objs = _sort_by_unique_fields(model, model_objs, unique_fields)
update_fields = _get_update_fields(model, unique_fields, update_fields)
return _fetch(queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
|
ambitioninc/django-manager-utils | manager_utils/upsert2.py | _sort_by_unique_fields | python | def _sort_by_unique_fields(model, model_objs, unique_fields):
unique_fields = [
field for field in model._meta.fields
if field.attname in unique_fields
]
def sort_key(model_obj):
return tuple(
field.get_db_prep_save(getattr(model_obj, field.attname),
connection)
for field in unique_fields
)
return sorted(model_objs, key=sort_key) | Sort a list of models by their unique fields.
Sorting models in an upsert greatly reduces the chances of deadlock
when doing concurrent upserts | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/upsert2.py#L83-L101 | null | """
The new interface for manager utils upsert
"""
from collections import namedtuple
from django.db import connection, models
from django.utils import timezone
class UpsertResult(list):
"""
Returned by the upsert operation.
Wraps a list and provides properties to access created, updated,
untouched, and deleted elements
"""
@property
def created(self):
return (i for i in self if i.status_ == 'c')
@property
def updated(self):
return (i for i in self if i.status_ == 'u')
@property
def untouched(self):
return (i for i in self if i.status_ == 'n')
@property
def deleted(self):
return (i for i in self if i.status_ == 'd')
def _quote(field):
return '"{0}"'.format(field)
def _get_update_fields(model, uniques, to_update):
"""
Get the fields to be updated in an upsert.
Always exclude auto_now_add, auto_created fields, and unique fields in an update
"""
fields = {
field.attname: field
for field in model._meta.fields
}
if to_update is None:
to_update = [
field.attname for field in model._meta.fields
]
to_update = [
attname for attname in to_update
if (attname not in uniques
and not getattr(fields[attname], 'auto_now_add', False)
and not fields[attname].auto_created)
]
return to_update
def _fill_auto_fields(model, values):
"""
Given a list of models, fill in auto_now and auto_now_add fields
for upserts. Since django manager utils passes Django's ORM, these values
have to be automatically constructed
"""
auto_field_names = [
f.attname
for f in model._meta.fields
if getattr(f, 'auto_now', False) or getattr(f, 'auto_now_add', False)
]
now = timezone.now()
for value in values:
for f in auto_field_names:
setattr(value, f, now)
return values
def _get_values_for_row(model_obj, all_fields):
return [
# Convert field value to db value
# Use attname here to support fields with custom db_column names
field.get_db_prep_save(getattr(model_obj, field.attname), connection)
for field in all_fields
]
def _get_values_for_rows(model_objs, all_fields):
row_values = []
sql_args = []
for i, model_obj in enumerate(model_objs):
sql_args.extend(_get_values_for_row(model_obj, all_fields))
if i == 0:
row_values.append('({0})'.format(
', '.join(['%s::{0}'.format(f.db_type(connection)) for f in all_fields]))
)
else:
row_values.append('({0})'.format(', '.join(['%s'] * len(all_fields))))
return row_values, sql_args
def _get_return_fields_sql(returning, return_status=False, alias=None):
if alias:
return_fields_sql = ', '.join('{0}.{1}'.format(alias, _quote(field)) for field in returning)
else:
return_fields_sql = ', '.join(_quote(field) for field in returning)
if return_status:
return_fields_sql += ', CASE WHEN xmax = 0 THEN \'c\' ELSE \'u\' END AS status_'
return return_fields_sql
def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=True, return_untouched=False):
"""
Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;
"""
model = queryset.model
# Use all fields except pk unless the uniqueness constraint is the pk field
all_fields = [
field for field in model._meta.fields
if field.column != model._meta.pk.name or not field.auto_created
]
all_field_names = [field.column for field in all_fields]
returning = returning if returning is not True else [f.column for f in model._meta.fields]
all_field_names_sql = ', '.join([_quote(field) for field in all_field_names])
# Convert field names to db column names
unique_fields = [
model._meta.get_field(unique_field)
for unique_field in unique_fields
]
update_fields = [
model._meta.get_field(update_field)
for update_field in update_fields
]
unique_field_names_sql = ', '.join([
_quote(field.column) for field in unique_fields
])
update_fields_sql = ', '.join([
'{0} = EXCLUDED.{0}'.format(_quote(field.column))
for field in update_fields
])
row_values, sql_args = _get_values_for_rows(model_objs, all_fields)
return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else ''
ignore_duplicates_sql = ''
if ignore_duplicate_updates:
ignore_duplicates_sql = (
' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) '
).format(
update_fields_sql=', '.join(
'{0}.{1}'.format(model._meta.db_table, _quote(field.column))
for field in update_fields
),
excluded_update_fields_sql=', '.join(
'EXCLUDED.' + _quote(field.column)
for field in update_fields
)
)
on_conflict = (
'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING'
)
if return_untouched:
row_values_sql = ', '.join([
'(\'{0}\', {1})'.format(i, row_value[1:-1])
for i, row_value in enumerate(row_values)
])
sql = (
' WITH input_rows("temp_id_", {all_field_names_sql}) AS ('
' VALUES {row_values_sql}'
' ), ins AS ( '
' INSERT INTO {table_name} ({all_field_names_sql})'
' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
' )'
' SELECT DISTINCT ON ({table_pk_name}) * FROM ('
' SELECT status_, {return_fields_sql}'
' FROM ins'
' UNION ALL'
' SELECT \'n\' AS status_, {aliased_return_fields_sql}'
' FROM input_rows'
' JOIN {table_name} c USING ({unique_field_names_sql})'
' ) as results'
' ORDER BY results."{table_pk_name}", CASE WHEN(status_ = \'n\') THEN 1 ELSE 0 END;'
).format(
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
table_name=model._meta.db_table,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql,
table_pk_name=model._meta.pk.name,
return_fields_sql=_get_return_fields_sql(returning),
aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c')
)
else:
row_values_sql = ', '.join(row_values)
sql = (
' INSERT INTO {table_name} ({all_field_names_sql})'
' VALUES {row_values_sql}'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
).format(
table_name=model._meta.db_table,
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql
)
return sql, sql_args
def _fetch(
queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=True, return_untouched=False
):
"""
Perfom the upsert and do an optional sync operation
"""
model = queryset.model
if (return_untouched or sync) and returning is not True:
returning = set(returning) if returning else set()
returning.add(model._meta.pk.name)
upserted = []
deleted = []
# We must return untouched rows when doing a sync operation
return_untouched = True if sync else return_untouched
if model_objs:
sql, sql_args = _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
with connection.cursor() as cursor:
cursor.execute(sql, sql_args)
if cursor.description:
nt_result = namedtuple('Result', [col[0] for col in cursor.description])
upserted = [nt_result(*row) for row in cursor.fetchall()]
pk_field = model._meta.pk.name
if sync:
orig_ids = queryset.values_list(pk_field, flat=True)
deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}
model.objects.filter(pk__in=deleted).delete()
nt_deleted_result = namedtuple('DeletedResult', [model._meta.pk.name, 'status_'])
return UpsertResult(
upserted + [nt_deleted_result(**{pk_field: d, 'status_': 'd'}) for d in deleted]
)
def upsert(
queryset, model_objs, unique_fields,
update_fields=None, returning=False, sync=False,
ignore_duplicate_updates=True,
return_untouched=False
):
"""
Perform a bulk upsert on a table, optionally syncing the results.
Args:
queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
model_objs (List[Model]): A list of Django models to sync. All models in this list
will be bulk upserted and any models not in the table (or queryset) will be deleted
if sync=True.
unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
model must have a unique constraint on these fields
update_fields (List[str], default=None): A list of fields to update whenever objects
already exist. If an empty list is provided, it is equivalent to doing a bulk
insert on the objects that don't exist. If `None`, all fields will be updated.
returning (bool|List[str]): If True, returns all fields. If a list, only returns
fields in the list
sync (bool, default=False): Perform a sync operation on the queryset
ignore_duplicate_updates (bool, default=False): Don't perform an update if the row is
a duplicate.
return_untouched (bool, default=False): Return untouched rows by the operation
"""
queryset = queryset if isinstance(queryset, models.QuerySet) else queryset.objects.all()
model = queryset.model
# Populate automatically generated fields in the rows like date times
_fill_auto_fields(model, model_objs)
# Sort the rows to reduce the chances of deadlock during concurrent upserts
model_objs = _sort_by_unique_fields(model, model_objs, unique_fields)
update_fields = _get_update_fields(model, unique_fields, update_fields)
return _fetch(queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
|
ambitioninc/django-manager-utils | manager_utils/upsert2.py | _get_upsert_sql | python | def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=True, return_untouched=False):
model = queryset.model
# Use all fields except pk unless the uniqueness constraint is the pk field
all_fields = [
field for field in model._meta.fields
if field.column != model._meta.pk.name or not field.auto_created
]
all_field_names = [field.column for field in all_fields]
returning = returning if returning is not True else [f.column for f in model._meta.fields]
all_field_names_sql = ', '.join([_quote(field) for field in all_field_names])
# Convert field names to db column names
unique_fields = [
model._meta.get_field(unique_field)
for unique_field in unique_fields
]
update_fields = [
model._meta.get_field(update_field)
for update_field in update_fields
]
unique_field_names_sql = ', '.join([
_quote(field.column) for field in unique_fields
])
update_fields_sql = ', '.join([
'{0} = EXCLUDED.{0}'.format(_quote(field.column))
for field in update_fields
])
row_values, sql_args = _get_values_for_rows(model_objs, all_fields)
return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else ''
ignore_duplicates_sql = ''
if ignore_duplicate_updates:
ignore_duplicates_sql = (
' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) '
).format(
update_fields_sql=', '.join(
'{0}.{1}'.format(model._meta.db_table, _quote(field.column))
for field in update_fields
),
excluded_update_fields_sql=', '.join(
'EXCLUDED.' + _quote(field.column)
for field in update_fields
)
)
on_conflict = (
'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING'
)
if return_untouched:
row_values_sql = ', '.join([
'(\'{0}\', {1})'.format(i, row_value[1:-1])
for i, row_value in enumerate(row_values)
])
sql = (
' WITH input_rows("temp_id_", {all_field_names_sql}) AS ('
' VALUES {row_values_sql}'
' ), ins AS ( '
' INSERT INTO {table_name} ({all_field_names_sql})'
' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
' )'
' SELECT DISTINCT ON ({table_pk_name}) * FROM ('
' SELECT status_, {return_fields_sql}'
' FROM ins'
' UNION ALL'
' SELECT \'n\' AS status_, {aliased_return_fields_sql}'
' FROM input_rows'
' JOIN {table_name} c USING ({unique_field_names_sql})'
' ) as results'
' ORDER BY results."{table_pk_name}", CASE WHEN(status_ = \'n\') THEN 1 ELSE 0 END;'
).format(
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
table_name=model._meta.db_table,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql,
table_pk_name=model._meta.pk.name,
return_fields_sql=_get_return_fields_sql(returning),
aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c')
)
else:
row_values_sql = ', '.join(row_values)
sql = (
' INSERT INTO {table_name} ({all_field_names_sql})'
' VALUES {row_values_sql}'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
).format(
table_name=model._meta.db_table,
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql
)
return sql, sql_args | Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2; | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/upsert2.py#L141-L249 | null | """
The new interface for manager utils upsert
"""
from collections import namedtuple
from django.db import connection, models
from django.utils import timezone
class UpsertResult(list):
"""
Returned by the upsert operation.
Wraps a list and provides properties to access created, updated,
untouched, and deleted elements
"""
@property
def created(self):
return (i for i in self if i.status_ == 'c')
@property
def updated(self):
return (i for i in self if i.status_ == 'u')
@property
def untouched(self):
return (i for i in self if i.status_ == 'n')
@property
def deleted(self):
return (i for i in self if i.status_ == 'd')
def _quote(field):
return '"{0}"'.format(field)
def _get_update_fields(model, uniques, to_update):
"""
Get the fields to be updated in an upsert.
Always exclude auto_now_add, auto_created fields, and unique fields in an update
"""
fields = {
field.attname: field
for field in model._meta.fields
}
if to_update is None:
to_update = [
field.attname for field in model._meta.fields
]
to_update = [
attname for attname in to_update
if (attname not in uniques
and not getattr(fields[attname], 'auto_now_add', False)
and not fields[attname].auto_created)
]
return to_update
def _fill_auto_fields(model, values):
"""
Given a list of models, fill in auto_now and auto_now_add fields
for upserts. Since django manager utils passes Django's ORM, these values
have to be automatically constructed
"""
auto_field_names = [
f.attname
for f in model._meta.fields
if getattr(f, 'auto_now', False) or getattr(f, 'auto_now_add', False)
]
now = timezone.now()
for value in values:
for f in auto_field_names:
setattr(value, f, now)
return values
def _sort_by_unique_fields(model, model_objs, unique_fields):
"""
Sort a list of models by their unique fields.
Sorting models in an upsert greatly reduces the chances of deadlock
when doing concurrent upserts
"""
unique_fields = [
field for field in model._meta.fields
if field.attname in unique_fields
]
def sort_key(model_obj):
return tuple(
field.get_db_prep_save(getattr(model_obj, field.attname),
connection)
for field in unique_fields
)
return sorted(model_objs, key=sort_key)
def _get_values_for_row(model_obj, all_fields):
return [
# Convert field value to db value
# Use attname here to support fields with custom db_column names
field.get_db_prep_save(getattr(model_obj, field.attname), connection)
for field in all_fields
]
def _get_values_for_rows(model_objs, all_fields):
row_values = []
sql_args = []
for i, model_obj in enumerate(model_objs):
sql_args.extend(_get_values_for_row(model_obj, all_fields))
if i == 0:
row_values.append('({0})'.format(
', '.join(['%s::{0}'.format(f.db_type(connection)) for f in all_fields]))
)
else:
row_values.append('({0})'.format(', '.join(['%s'] * len(all_fields))))
return row_values, sql_args
def _get_return_fields_sql(returning, return_status=False, alias=None):
if alias:
return_fields_sql = ', '.join('{0}.{1}'.format(alias, _quote(field)) for field in returning)
else:
return_fields_sql = ', '.join(_quote(field) for field in returning)
if return_status:
return_fields_sql += ', CASE WHEN xmax = 0 THEN \'c\' ELSE \'u\' END AS status_'
return return_fields_sql
def _fetch(
    queryset, model_objs, unique_fields, update_fields, returning, sync,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Perform the upsert and do an optional sync operation.

    Executes the SQL produced by ``_get_upsert_sql`` and wraps each returned
    row in a namedtuple. When ``sync`` is set, rows of *queryset* that were
    not part of the upsert are deleted and reported with status 'd'.
    """
    model = queryset.model
    # The pk must be in RETURNING so upserted rows can be diffed against the
    # queryset below (both return_untouched and sync depend on it).
    if (return_untouched or sync) and returning is not True:
        returning = set(returning) if returning else set()
        returning.add(model._meta.pk.name)
    upserted = []
    deleted = []
    # We must return untouched rows when doing a sync operation
    return_untouched = True if sync else return_untouched

    if model_objs:
        sql, sql_args = _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
                                        ignore_duplicate_updates=ignore_duplicate_updates,
                                        return_untouched=return_untouched)

        with connection.cursor() as cursor:
            cursor.execute(sql, sql_args)
            # cursor.description is only populated when a RETURNING clause ran
            if cursor.description:
                nt_result = namedtuple('Result', [col[0] for col in cursor.description])
                upserted = [nt_result(*row) for row in cursor.fetchall()]

    pk_field = model._meta.pk.name
    if sync:
        # Anything in the queryset that the upsert did not touch is stale
        orig_ids = queryset.values_list(pk_field, flat=True)
        deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}
        model.objects.filter(pk__in=deleted).delete()

    nt_deleted_result = namedtuple('DeletedResult', [model._meta.pk.name, 'status_'])
    return UpsertResult(
        upserted + [nt_deleted_result(**{pk_field: d, 'status_': 'd'}) for d in deleted]
    )
def upsert(
    queryset, model_objs, unique_fields,
    update_fields=None, returning=False, sync=False,
    ignore_duplicate_updates=True,
    return_untouched=False
):
    """
    Perform a bulk upsert on a table, optionally syncing the results.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
        model_objs (List[Model]): A list of Django models to sync. All models in this list
            will be bulk upserted and any models not in the table (or queryset) will be deleted
            if sync=True.
        unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
            model must have a unique constraint on these fields
        update_fields (List[str], default=None): A list of fields to update whenever objects
            already exist. If an empty list is provided, it is equivalent to doing a bulk
            insert on the objects that don't exist. If `None`, all fields will be updated.
        returning (bool|List[str]): If True, returns all fields. If a list, only returns
            fields in the list
        sync (bool, default=False): Perform a sync operation on the queryset
        ignore_duplicate_updates (bool, default=True): Don't perform an update if the row is
            a duplicate.
        return_untouched (bool, default=False): Return untouched rows by the operation

    Returns:
        UpsertResult: the rows touched by the operation, each carrying a
        ``status_`` of 'c' (created), 'u' (updated), 'n' (untouched) or 'd' (deleted).
    """
    # Accept either a model class or a queryset.
    queryset = queryset if isinstance(queryset, models.QuerySet) else queryset.objects.all()
    model = queryset.model

    # Populate automatically generated fields in the rows like date times
    _fill_auto_fields(model, model_objs)

    # Sort the rows to reduce the chances of deadlock during concurrent upserts
    model_objs = _sort_by_unique_fields(model, model_objs, unique_fields)

    update_fields = _get_update_fields(model, unique_fields, update_fields)

    return _fetch(queryset, model_objs, unique_fields, update_fields, returning, sync,
                  ignore_duplicate_updates=ignore_duplicate_updates,
                  return_untouched=return_untouched)
|
ambitioninc/django-manager-utils | manager_utils/upsert2.py | _fetch | python | def _fetch(
queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=True, return_untouched=False
):
model = queryset.model
if (return_untouched or sync) and returning is not True:
returning = set(returning) if returning else set()
returning.add(model._meta.pk.name)
upserted = []
deleted = []
# We must return untouched rows when doing a sync operation
return_untouched = True if sync else return_untouched
if model_objs:
sql, sql_args = _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
with connection.cursor() as cursor:
cursor.execute(sql, sql_args)
if cursor.description:
nt_result = namedtuple('Result', [col[0] for col in cursor.description])
upserted = [nt_result(*row) for row in cursor.fetchall()]
pk_field = model._meta.pk.name
if sync:
orig_ids = queryset.values_list(pk_field, flat=True)
deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}
model.objects.filter(pk__in=deleted).delete()
nt_deleted_result = namedtuple('DeletedResult', [model._meta.pk.name, 'status_'])
return UpsertResult(
upserted + [nt_deleted_result(**{pk_field: d, 'status_': 'd'}) for d in deleted]
) | Perfom the upsert and do an optional sync operation | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/upsert2.py#L252-L288 | [
"def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,\n ignore_duplicate_updates=True, return_untouched=False):\n \"\"\"\n Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)\n INSERT INTO table_name (field1, field2)\n VALUES (1, 'two')\n ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;\n \"\"\"\n model = queryset.model\n\n # Use all fields except pk unless the uniqueness constraint is the pk field\n all_fields = [\n field for field in model._meta.fields\n if field.column != model._meta.pk.name or not field.auto_created\n ]\n\n all_field_names = [field.column for field in all_fields]\n returning = returning if returning is not True else [f.column for f in model._meta.fields]\n all_field_names_sql = ', '.join([_quote(field) for field in all_field_names])\n\n # Convert field names to db column names\n unique_fields = [\n model._meta.get_field(unique_field)\n for unique_field in unique_fields\n ]\n update_fields = [\n model._meta.get_field(update_field)\n for update_field in update_fields\n ]\n\n unique_field_names_sql = ', '.join([\n _quote(field.column) for field in unique_fields\n ])\n update_fields_sql = ', '.join([\n '{0} = EXCLUDED.{0}'.format(_quote(field.column))\n for field in update_fields\n ])\n\n row_values, sql_args = _get_values_for_rows(model_objs, all_fields)\n\n return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else ''\n ignore_duplicates_sql = ''\n if ignore_duplicate_updates:\n ignore_duplicates_sql = (\n ' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) '\n ).format(\n update_fields_sql=', '.join(\n '{0}.{1}'.format(model._meta.db_table, _quote(field.column))\n for field in update_fields\n ),\n excluded_update_fields_sql=', '.join(\n 'EXCLUDED.' 
+ _quote(field.column)\n for field in update_fields\n )\n )\n\n on_conflict = (\n 'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING'\n )\n\n if return_untouched:\n row_values_sql = ', '.join([\n '(\\'{0}\\', {1})'.format(i, row_value[1:-1])\n for i, row_value in enumerate(row_values)\n ])\n sql = (\n ' WITH input_rows(\"temp_id_\", {all_field_names_sql}) AS ('\n ' VALUES {row_values_sql}'\n ' ), ins AS ( '\n ' INSERT INTO {table_name} ({all_field_names_sql})'\n ' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_'\n ' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'\n ' )'\n ' SELECT DISTINCT ON ({table_pk_name}) * FROM ('\n ' SELECT status_, {return_fields_sql}'\n ' FROM ins'\n ' UNION ALL'\n ' SELECT \\'n\\' AS status_, {aliased_return_fields_sql}'\n ' FROM input_rows'\n ' JOIN {table_name} c USING ({unique_field_names_sql})'\n ' ) as results'\n ' ORDER BY results.\"{table_pk_name}\", CASE WHEN(status_ = \\'n\\') THEN 1 ELSE 0 END;'\n ).format(\n all_field_names_sql=all_field_names_sql,\n row_values_sql=row_values_sql,\n table_name=model._meta.db_table,\n unique_field_names_sql=unique_field_names_sql,\n on_conflict=on_conflict,\n return_sql=return_sql,\n table_pk_name=model._meta.pk.name,\n return_fields_sql=_get_return_fields_sql(returning),\n aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c')\n )\n else:\n row_values_sql = ', '.join(row_values)\n sql = (\n ' INSERT INTO {table_name} ({all_field_names_sql})'\n ' VALUES {row_values_sql}'\n ' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'\n ).format(\n table_name=model._meta.db_table,\n all_field_names_sql=all_field_names_sql,\n row_values_sql=row_values_sql,\n unique_field_names_sql=unique_field_names_sql,\n on_conflict=on_conflict,\n return_sql=return_sql\n )\n\n return sql, sql_args\n"
] | """
The new interface for manager utils upsert
"""
from collections import namedtuple
from django.db import connection, models
from django.utils import timezone
class UpsertResult(list):
"""
Returned by the upsert operation.
Wraps a list and provides properties to access created, updated,
untouched, and deleted elements
"""
@property
def created(self):
return (i for i in self if i.status_ == 'c')
@property
def updated(self):
return (i for i in self if i.status_ == 'u')
@property
def untouched(self):
return (i for i in self if i.status_ == 'n')
@property
def deleted(self):
return (i for i in self if i.status_ == 'd')
def _quote(field):
return '"{0}"'.format(field)
def _get_update_fields(model, uniques, to_update):
"""
Get the fields to be updated in an upsert.
Always exclude auto_now_add, auto_created fields, and unique fields in an update
"""
fields = {
field.attname: field
for field in model._meta.fields
}
if to_update is None:
to_update = [
field.attname for field in model._meta.fields
]
to_update = [
attname for attname in to_update
if (attname not in uniques
and not getattr(fields[attname], 'auto_now_add', False)
and not fields[attname].auto_created)
]
return to_update
def _fill_auto_fields(model, values):
"""
Given a list of models, fill in auto_now and auto_now_add fields
for upserts. Since django manager utils passes Django's ORM, these values
have to be automatically constructed
"""
auto_field_names = [
f.attname
for f in model._meta.fields
if getattr(f, 'auto_now', False) or getattr(f, 'auto_now_add', False)
]
now = timezone.now()
for value in values:
for f in auto_field_names:
setattr(value, f, now)
return values
def _sort_by_unique_fields(model, model_objs, unique_fields):
"""
Sort a list of models by their unique fields.
Sorting models in an upsert greatly reduces the chances of deadlock
when doing concurrent upserts
"""
unique_fields = [
field for field in model._meta.fields
if field.attname in unique_fields
]
def sort_key(model_obj):
return tuple(
field.get_db_prep_save(getattr(model_obj, field.attname),
connection)
for field in unique_fields
)
return sorted(model_objs, key=sort_key)
def _get_values_for_row(model_obj, all_fields):
return [
# Convert field value to db value
# Use attname here to support fields with custom db_column names
field.get_db_prep_save(getattr(model_obj, field.attname), connection)
for field in all_fields
]
def _get_values_for_rows(model_objs, all_fields):
row_values = []
sql_args = []
for i, model_obj in enumerate(model_objs):
sql_args.extend(_get_values_for_row(model_obj, all_fields))
if i == 0:
row_values.append('({0})'.format(
', '.join(['%s::{0}'.format(f.db_type(connection)) for f in all_fields]))
)
else:
row_values.append('({0})'.format(', '.join(['%s'] * len(all_fields))))
return row_values, sql_args
def _get_return_fields_sql(returning, return_status=False, alias=None):
if alias:
return_fields_sql = ', '.join('{0}.{1}'.format(alias, _quote(field)) for field in returning)
else:
return_fields_sql = ', '.join(_quote(field) for field in returning)
if return_status:
return_fields_sql += ', CASE WHEN xmax = 0 THEN \'c\' ELSE \'u\' END AS status_'
return return_fields_sql
def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
                    ignore_duplicate_updates=True, return_untouched=False):
    """
    Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)

    INSERT INTO table_name (field1, field2)
    VALUES (1, 'two')
    ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;

    Returns a ``(sql, sql_args)`` pair suitable for ``cursor.execute``.
    """
    model = queryset.model

    # Use all fields except pk unless the uniqueness constraint is the pk field
    # NOTE(review): this compares the column name against the pk *name*; the
    # two differ when the pk declares a custom db_column -- confirm intended.
    all_fields = [
        field for field in model._meta.fields
        if field.column != model._meta.pk.name or not field.auto_created
    ]

    all_field_names = [field.column for field in all_fields]
    # returning=True means "return every column of the model".
    returning = returning if returning is not True else [f.column for f in model._meta.fields]
    all_field_names_sql = ', '.join([_quote(field) for field in all_field_names])

    # Convert field names to db column names
    unique_fields = [
        model._meta.get_field(unique_field)
        for unique_field in unique_fields
    ]
    update_fields = [
        model._meta.get_field(update_field)
        for update_field in update_fields
    ]

    unique_field_names_sql = ', '.join([
        _quote(field.column) for field in unique_fields
    ])
    update_fields_sql = ', '.join([
        '{0} = EXCLUDED.{0}'.format(_quote(field.column))
        for field in update_fields
    ])

    row_values, sql_args = _get_values_for_rows(model_objs, all_fields)

    return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else ''
    ignore_duplicates_sql = ''
    if ignore_duplicate_updates:
        # Skip the UPDATE entirely when the incoming values match the stored
        # row, so xmax stays 0 and the row reads as untouched, not updated.
        ignore_duplicates_sql = (
            ' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) '
        ).format(
            update_fields_sql=', '.join(
                '{0}.{1}'.format(model._meta.db_table, _quote(field.column))
                for field in update_fields
            ),
            excluded_update_fields_sql=', '.join(
                'EXCLUDED.' + _quote(field.column)
                for field in update_fields
            )
        )

    on_conflict = (
        'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING'
    )

    if return_untouched:
        # Untouched rows are invisible to RETURNING, so route the insert
        # through a CTE: rows present in input_rows but absent from the
        # insert's RETURNING set are re-selected with status 'n'.
        row_values_sql = ', '.join([
            '(\'{0}\', {1})'.format(i, row_value[1:-1])
            for i, row_value in enumerate(row_values)
        ])
        sql = (
            ' WITH input_rows("temp_id_", {all_field_names_sql}) AS ('
            ' VALUES {row_values_sql}'
            ' ), ins AS ( '
            ' INSERT INTO {table_name} ({all_field_names_sql})'
            ' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_'
            ' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
            ' )'
            ' SELECT DISTINCT ON ({table_pk_name}) * FROM ('
            ' SELECT status_, {return_fields_sql}'
            ' FROM ins'
            ' UNION ALL'
            ' SELECT \'n\' AS status_, {aliased_return_fields_sql}'
            ' FROM input_rows'
            ' JOIN {table_name} c USING ({unique_field_names_sql})'
            ' ) as results'
            ' ORDER BY results."{table_pk_name}", CASE WHEN(status_ = \'n\') THEN 1 ELSE 0 END;'
        ).format(
            all_field_names_sql=all_field_names_sql,
            row_values_sql=row_values_sql,
            table_name=model._meta.db_table,
            unique_field_names_sql=unique_field_names_sql,
            on_conflict=on_conflict,
            return_sql=return_sql,
            table_pk_name=model._meta.pk.name,
            return_fields_sql=_get_return_fields_sql(returning),
            aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c')
        )
    else:
        # Simple single-statement upsert.
        row_values_sql = ', '.join(row_values)
        sql = (
            ' INSERT INTO {table_name} ({all_field_names_sql})'
            ' VALUES {row_values_sql}'
            ' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
        ).format(
            table_name=model._meta.db_table,
            all_field_names_sql=all_field_names_sql,
            row_values_sql=row_values_sql,
            unique_field_names_sql=unique_field_names_sql,
            on_conflict=on_conflict,
            return_sql=return_sql
        )

    return sql, sql_args
def upsert(
queryset, model_objs, unique_fields,
update_fields=None, returning=False, sync=False,
ignore_duplicate_updates=True,
return_untouched=False
):
"""
Perform a bulk upsert on a table, optionally syncing the results.
Args:
queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
model_objs (List[Model]): A list of Django models to sync. All models in this list
will be bulk upserted and any models not in the table (or queryset) will be deleted
if sync=True.
unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
model must have a unique constraint on these fields
update_fields (List[str], default=None): A list of fields to update whenever objects
already exist. If an empty list is provided, it is equivalent to doing a bulk
insert on the objects that don't exist. If `None`, all fields will be updated.
returning (bool|List[str]): If True, returns all fields. If a list, only returns
fields in the list
sync (bool, default=False): Perform a sync operation on the queryset
ignore_duplicate_updates (bool, default=False): Don't perform an update if the row is
a duplicate.
return_untouched (bool, default=False): Return untouched rows by the operation
"""
queryset = queryset if isinstance(queryset, models.QuerySet) else queryset.objects.all()
model = queryset.model
# Populate automatically generated fields in the rows like date times
_fill_auto_fields(model, model_objs)
# Sort the rows to reduce the chances of deadlock during concurrent upserts
model_objs = _sort_by_unique_fields(model, model_objs, unique_fields)
update_fields = _get_update_fields(model, unique_fields, update_fields)
return _fetch(queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
|
ambitioninc/django-manager-utils | manager_utils/upsert2.py | upsert | python | def upsert(
queryset, model_objs, unique_fields,
update_fields=None, returning=False, sync=False,
ignore_duplicate_updates=True,
return_untouched=False
):
queryset = queryset if isinstance(queryset, models.QuerySet) else queryset.objects.all()
model = queryset.model
# Populate automatically generated fields in the rows like date times
_fill_auto_fields(model, model_objs)
# Sort the rows to reduce the chances of deadlock during concurrent upserts
model_objs = _sort_by_unique_fields(model, model_objs, unique_fields)
update_fields = _get_update_fields(model, unique_fields, update_fields)
return _fetch(queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched) | Perform a bulk upsert on a table, optionally syncing the results.
Args:
queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
model_objs (List[Model]): A list of Django models to sync. All models in this list
will be bulk upserted and any models not in the table (or queryset) will be deleted
if sync=True.
unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
model must have a unique constraint on these fields
update_fields (List[str], default=None): A list of fields to update whenever objects
already exist. If an empty list is provided, it is equivalent to doing a bulk
insert on the objects that don't exist. If `None`, all fields will be updated.
returning (bool|List[str]): If True, returns all fields. If a list, only returns
fields in the list
sync (bool, default=False): Perform a sync operation on the queryset
ignore_duplicate_updates (bool, default=False): Don't perform an update if the row is
a duplicate.
return_untouched (bool, default=False): Return untouched rows by the operation | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/upsert2.py#L291-L329 | [
"def _get_update_fields(model, uniques, to_update):\n \"\"\"\n Get the fields to be updated in an upsert.\n\n Always exclude auto_now_add, auto_created fields, and unique fields in an update\n \"\"\"\n fields = {\n field.attname: field\n for field in model._meta.fields\n }\n\n if to_update is None:\n to_update = [\n field.attname for field in model._meta.fields\n ]\n\n to_update = [\n attname for attname in to_update\n if (attname not in uniques\n and not getattr(fields[attname], 'auto_now_add', False)\n and not fields[attname].auto_created)\n ]\n\n return to_update\n",
"def _fill_auto_fields(model, values):\n \"\"\"\n Given a list of models, fill in auto_now and auto_now_add fields\n for upserts. Since django manager utils passes Django's ORM, these values\n have to be automatically constructed\n \"\"\"\n auto_field_names = [\n f.attname\n for f in model._meta.fields\n if getattr(f, 'auto_now', False) or getattr(f, 'auto_now_add', False)\n ]\n now = timezone.now()\n for value in values:\n for f in auto_field_names:\n setattr(value, f, now)\n\n return values\n",
"def _sort_by_unique_fields(model, model_objs, unique_fields):\n \"\"\"\n Sort a list of models by their unique fields.\n\n Sorting models in an upsert greatly reduces the chances of deadlock\n when doing concurrent upserts\n \"\"\"\n unique_fields = [\n field for field in model._meta.fields\n if field.attname in unique_fields\n ]\n\n def sort_key(model_obj):\n return tuple(\n field.get_db_prep_save(getattr(model_obj, field.attname),\n connection)\n for field in unique_fields\n )\n return sorted(model_objs, key=sort_key)\n",
"def _fetch(\n queryset, model_objs, unique_fields, update_fields, returning, sync,\n ignore_duplicate_updates=True, return_untouched=False\n):\n \"\"\"\n Perfom the upsert and do an optional sync operation\n \"\"\"\n model = queryset.model\n if (return_untouched or sync) and returning is not True:\n returning = set(returning) if returning else set()\n returning.add(model._meta.pk.name)\n upserted = []\n deleted = []\n # We must return untouched rows when doing a sync operation\n return_untouched = True if sync else return_untouched\n\n if model_objs:\n sql, sql_args = _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,\n ignore_duplicate_updates=ignore_duplicate_updates,\n return_untouched=return_untouched)\n\n with connection.cursor() as cursor:\n cursor.execute(sql, sql_args)\n if cursor.description:\n nt_result = namedtuple('Result', [col[0] for col in cursor.description])\n upserted = [nt_result(*row) for row in cursor.fetchall()]\n\n pk_field = model._meta.pk.name\n if sync:\n orig_ids = queryset.values_list(pk_field, flat=True)\n deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}\n model.objects.filter(pk__in=deleted).delete()\n\n nt_deleted_result = namedtuple('DeletedResult', [model._meta.pk.name, 'status_'])\n return UpsertResult(\n upserted + [nt_deleted_result(**{pk_field: d, 'status_': 'd'}) for d in deleted]\n )\n"
] | """
The new interface for manager utils upsert
"""
from collections import namedtuple
from django.db import connection, models
from django.utils import timezone
class UpsertResult(list):
"""
Returned by the upsert operation.
Wraps a list and provides properties to access created, updated,
untouched, and deleted elements
"""
@property
def created(self):
return (i for i in self if i.status_ == 'c')
@property
def updated(self):
return (i for i in self if i.status_ == 'u')
@property
def untouched(self):
return (i for i in self if i.status_ == 'n')
@property
def deleted(self):
return (i for i in self if i.status_ == 'd')
def _quote(field):
return '"{0}"'.format(field)
def _get_update_fields(model, uniques, to_update):
"""
Get the fields to be updated in an upsert.
Always exclude auto_now_add, auto_created fields, and unique fields in an update
"""
fields = {
field.attname: field
for field in model._meta.fields
}
if to_update is None:
to_update = [
field.attname for field in model._meta.fields
]
to_update = [
attname for attname in to_update
if (attname not in uniques
and not getattr(fields[attname], 'auto_now_add', False)
and not fields[attname].auto_created)
]
return to_update
def _fill_auto_fields(model, values):
"""
Given a list of models, fill in auto_now and auto_now_add fields
for upserts. Since django manager utils passes Django's ORM, these values
have to be automatically constructed
"""
auto_field_names = [
f.attname
for f in model._meta.fields
if getattr(f, 'auto_now', False) or getattr(f, 'auto_now_add', False)
]
now = timezone.now()
for value in values:
for f in auto_field_names:
setattr(value, f, now)
return values
def _sort_by_unique_fields(model, model_objs, unique_fields):
"""
Sort a list of models by their unique fields.
Sorting models in an upsert greatly reduces the chances of deadlock
when doing concurrent upserts
"""
unique_fields = [
field for field in model._meta.fields
if field.attname in unique_fields
]
def sort_key(model_obj):
return tuple(
field.get_db_prep_save(getattr(model_obj, field.attname),
connection)
for field in unique_fields
)
return sorted(model_objs, key=sort_key)
def _get_values_for_row(model_obj, all_fields):
return [
# Convert field value to db value
# Use attname here to support fields with custom db_column names
field.get_db_prep_save(getattr(model_obj, field.attname), connection)
for field in all_fields
]
def _get_values_for_rows(model_objs, all_fields):
row_values = []
sql_args = []
for i, model_obj in enumerate(model_objs):
sql_args.extend(_get_values_for_row(model_obj, all_fields))
if i == 0:
row_values.append('({0})'.format(
', '.join(['%s::{0}'.format(f.db_type(connection)) for f in all_fields]))
)
else:
row_values.append('({0})'.format(', '.join(['%s'] * len(all_fields))))
return row_values, sql_args
def _get_return_fields_sql(returning, return_status=False, alias=None):
if alias:
return_fields_sql = ', '.join('{0}.{1}'.format(alias, _quote(field)) for field in returning)
else:
return_fields_sql = ', '.join(_quote(field) for field in returning)
if return_status:
return_fields_sql += ', CASE WHEN xmax = 0 THEN \'c\' ELSE \'u\' END AS status_'
return return_fields_sql
def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=True, return_untouched=False):
"""
Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;
"""
model = queryset.model
# Use all fields except pk unless the uniqueness constraint is the pk field
all_fields = [
field for field in model._meta.fields
if field.column != model._meta.pk.name or not field.auto_created
]
all_field_names = [field.column for field in all_fields]
returning = returning if returning is not True else [f.column for f in model._meta.fields]
all_field_names_sql = ', '.join([_quote(field) for field in all_field_names])
# Convert field names to db column names
unique_fields = [
model._meta.get_field(unique_field)
for unique_field in unique_fields
]
update_fields = [
model._meta.get_field(update_field)
for update_field in update_fields
]
unique_field_names_sql = ', '.join([
_quote(field.column) for field in unique_fields
])
update_fields_sql = ', '.join([
'{0} = EXCLUDED.{0}'.format(_quote(field.column))
for field in update_fields
])
row_values, sql_args = _get_values_for_rows(model_objs, all_fields)
return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else ''
ignore_duplicates_sql = ''
if ignore_duplicate_updates:
ignore_duplicates_sql = (
' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) '
).format(
update_fields_sql=', '.join(
'{0}.{1}'.format(model._meta.db_table, _quote(field.column))
for field in update_fields
),
excluded_update_fields_sql=', '.join(
'EXCLUDED.' + _quote(field.column)
for field in update_fields
)
)
on_conflict = (
'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING'
)
if return_untouched:
row_values_sql = ', '.join([
'(\'{0}\', {1})'.format(i, row_value[1:-1])
for i, row_value in enumerate(row_values)
])
sql = (
' WITH input_rows("temp_id_", {all_field_names_sql}) AS ('
' VALUES {row_values_sql}'
' ), ins AS ( '
' INSERT INTO {table_name} ({all_field_names_sql})'
' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
' )'
' SELECT DISTINCT ON ({table_pk_name}) * FROM ('
' SELECT status_, {return_fields_sql}'
' FROM ins'
' UNION ALL'
' SELECT \'n\' AS status_, {aliased_return_fields_sql}'
' FROM input_rows'
' JOIN {table_name} c USING ({unique_field_names_sql})'
' ) as results'
' ORDER BY results."{table_pk_name}", CASE WHEN(status_ = \'n\') THEN 1 ELSE 0 END;'
).format(
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
table_name=model._meta.db_table,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql,
table_pk_name=model._meta.pk.name,
return_fields_sql=_get_return_fields_sql(returning),
aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c')
)
else:
row_values_sql = ', '.join(row_values)
sql = (
' INSERT INTO {table_name} ({all_field_names_sql})'
' VALUES {row_values_sql}'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
).format(
table_name=model._meta.db_table,
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql
)
return sql, sql_args
def _fetch(
queryset, model_objs, unique_fields, update_fields, returning, sync,
ignore_duplicate_updates=True, return_untouched=False
):
"""
Perfom the upsert and do an optional sync operation
"""
model = queryset.model
if (return_untouched or sync) and returning is not True:
returning = set(returning) if returning else set()
returning.add(model._meta.pk.name)
upserted = []
deleted = []
# We must return untouched rows when doing a sync operation
return_untouched = True if sync else return_untouched
if model_objs:
sql, sql_args = _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
with connection.cursor() as cursor:
cursor.execute(sql, sql_args)
if cursor.description:
nt_result = namedtuple('Result', [col[0] for col in cursor.description])
upserted = [nt_result(*row) for row in cursor.fetchall()]
pk_field = model._meta.pk.name
if sync:
orig_ids = queryset.values_list(pk_field, flat=True)
deleted = set(orig_ids) - {getattr(r, pk_field) for r in upserted}
model.objects.filter(pk__in=deleted).delete()
nt_deleted_result = namedtuple('DeletedResult', [model._meta.pk.name, 'status_'])
return UpsertResult(
upserted + [nt_deleted_result(**{pk_field: d, 'status_': 'd'}) for d in deleted]
)
|
ambitioninc/django-manager-utils | manager_utils/manager_utils.py | _get_upserts_distinct | python | def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
# Keep track of the created models
created_models = []
# If we created new models query for them
if model_objs_created:
created_models.extend(
queryset.extra(
where=['({unique_fields_sql}) in %s'.format(
unique_fields_sql=', '.join(unique_fields)
)],
params=[
tuple([
tuple([
getattr(model_obj, field)
for field in unique_fields
])
for model_obj in model_objs_created
])
]
)
)
# Return the models
return model_objs_updated, created_models | Given a list of model objects that were updated and model objects that were created,
fetch the pks of the newly created models and return the two lists in a tuple | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/manager_utils.py#L37-L66 | null | import itertools
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs
post_bulk_operation = Signal(providing_args=['model'])
def id_dict(queryset):
    """Build a mapping from primary key to model instance.

    :param queryset: Any iterable of model objects (queryset or manager).
    :rtype: dict
    :returns: ``{obj.pk: obj}`` for every object in *queryset*.
    """
    keyed = {}
    for model_obj in queryset:
        keyed[model_obj.pk] = model_obj
    return keyed
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
    """Return every model object touched by an upsert as one flat list.

    Delegates to ``_get_upserts_distinct`` to re-fetch the bulk-created rows
    (django cannot return bulk_create pks) and concatenates the updated and
    created lists into a single result.
    """
    parts = _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields)
    return parts[0] + parts[1]
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def _get_prepped_model_field(model_obj, field):
    """Return the db-prepared value of ``field`` on ``model_obj``.

    Resolves the field object through the model's meta options and runs the
    raw attribute value through ``get_db_prep_save`` for the active
    connection.
    """
    field_obj = model_obj._meta.get_field(field)
    raw_value = getattr(model_obj, field_obj.attname)
    return field_obj.get_db_prep_save(raw_value, connection)
def bulk_upsert(
    queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
    sync=False, native=False
):
    """
    Perform a bulk update or insert on a list of model objects.

    Each object in ``model_objs`` is matched against the queryset using the
    values of ``unique_fields``.  Matched objects are updated with the values
    from ``update_fields``; unmatched objects are bulk created.  If
    ``update_fields`` is not provided, this reduces to a bulk_create of the
    non-extant objects.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: Model instances to upsert against the queryset.
    :type unique_fields: list of str
    :param unique_fields: Fields used to decide whether an object in
        ``model_objs`` matches an existing row in the queryset.
    :type update_fields: list of str
    :param update_fields: Fields copied from the incoming objects onto the
        matched rows. If None, only a bulk create of non-extant objects is
        performed.
    :type return_upserts: bool
    :param return_upserts: If True, return all upserted models in one list.
        Requires an extra query to fetch the bulk-created rows.
    :type return_upserts_distinct: bool
    :param return_upserts_distinct: If True, return an
        ``(updated_models, created_models)`` pair instead of a single list.
        Requires an extra query to fetch the bulk-created rows. Not supported
        together with ``native``.
    :type sync: bool
    :param sync: If True, delete every pre-existing row of the queryset that
        was not matched, so the queryset ends up mirroring ``model_objs``.
    :type native: bool
    :param native: If True, use the postgres insert-on-conflict (upsert)
        code path instead of the separate update/create passes.
    :signals: Emits :data:`post_bulk_operation` when a bulk_update or a
        bulk_create occurs.
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []
    if native:
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        return_value = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            # Delete every pre-existing row the native upsert did not touch
            orig_ids = frozenset(queryset.values_list('pk', flat=True))
            queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return return_value
    # Create a look up table for all of the objects in the queryset keyed on the unique_fields
    extant_model_objs = {
        tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj
        for extant_model_obj in queryset
    }
    # Find all of the objects to update and all of the objects to create
    model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, extant_model_objs)
    # Find all objects in the queryset that will not be updated. These will be deleted if the sync option is
    # True
    if sync:
        model_objs_to_update_set = frozenset(model_objs_to_update)
        model_objs_to_delete = [
            model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set
        ]
        if model_objs_to_delete:
            queryset.filter(pk__in=model_objs_to_delete).delete()
    # Apply bulk updates and creates
    if update_fields:
        bulk_update(queryset, model_objs_to_update, update_fields)
    queryset.bulk_create(model_objs_to_create)
    # Optionally return the bulk upserted values
    if return_upserts_distinct:
        # return a list of lists, the first being the updated models, the second being the newly created objects
        return _get_upserts_distinct(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
    if return_upserts:
        return _get_upserts(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
def bulk_upsert2(
    queryset, model_objs, unique_fields, update_fields=None, returning=False,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Perform a bulk update or insert using postgres ``INSERT ... ON CONFLICT``.

    Matches all objects in the queryset with the objs provided using the
    field values in ``unique_fields``; matched rows are updated with
    ``update_fields`` and unmatched objects are inserted.  If
    ``update_fields`` is not provided, all fields are updated.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the
            collection to upsert into.
        model_objs (List[Model]): Django models to upsert.
        unique_fields (List[str]): Fields that define the uniqueness of the
            model. The model must have a unique constraint on these fields.
        update_fields (List[str], default=None): Fields to update whenever
            objects already exist. An empty list is equivalent to a bulk
            insert of the objects that don't exist. ``None`` updates all
            fields.
        returning (bool|List[str]): If ``True``, returns all fields. If a
            list, only returns fields in the list.
        ignore_duplicate_updates (bool, default=True): Skip updating a row
            when every update field already holds the incoming value.
        return_untouched (bool, default=False): Also return rows that the
            upsert operation did not touch.

    Returns:
        UpsertResult: A list of results if ``returning`` is not ``False``;
        ``created``, ``updated``, and ``untouched`` results are available as
        properties of the result.
    """
    results = upsert2.upsert(queryset, model_objs, unique_fields,
                             update_fields=update_fields, returning=returning,
                             ignore_duplicate_updates=ignore_duplicate_updates,
                             return_untouched=return_untouched)
    # Mirror the non-native code path: notify listeners that a bulk op ran
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return results
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
    """
    Perform a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.

    This function calls :func:`bulk_upsert` underneath the hood with
    ``sync=True``, so rows not matched by ``model_objs`` are deleted.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: The models to sync.
    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an
        object in objs matches a model from the queryset.
    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as
        fields when updating existing models. If None, this function will
        only perform a bulk create for model_objs that do not currently
        exist in the database.
    :param kwargs: Additional keyword arguments forwarded to
        :func:`bulk_upsert` (e.g. ``native=True`` to use the postgres
        insert-on-conflict path, or the ``return_upserts`` flags).
    :returns: Whatever :func:`bulk_upsert` returns for the given flags.
    """
    return bulk_upsert(queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs)
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
    """
    Perform a sync operation on a queryset using the postgres-native upsert,
    making the contents of the queryset match the contents of model_objs.

    Note: the definition of a sync requires that untouched rows are returned
    from the upsert operation; there is no way to turn off returning
    untouched rows in a sync.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the
            collection to sync.
        model_objs (List[Model]): Models to sync. All models in this list
            will be bulk upserted and any rows not in the list are deleted.
        unique_fields (List[str]): Fields that define the uniqueness of the
            model. The model must have a unique constraint on these fields.
        update_fields (List[str], default=None): Fields to update on
            existing rows. An empty list means insert-only; ``None`` updates
            all fields.
        returning (bool|List[str]): If True, returns all fields. If a list,
            only returns fields in the list.
        ignore_duplicate_updates (bool, default=True): Skip updating a row
            when every update field already holds the incoming value.

    Returns:
        UpsertResult: A list of results if ``returning`` is not ``False``;
        ``created``, ``updated``, ``untouched``, and ``deleted`` results are
        available as properties of the result.
    """
    results = upsert2.upsert(queryset, model_objs, unique_fields,
                             update_fields=update_fields, returning=returning, sync=True,
                             ignore_duplicate_updates=ignore_duplicate_updates)
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return results
def get_or_none(queryset, **query_params):
    """Fetch a single object matching ``query_params``, or None when absent.

    :param query_params: Lookup parameters forwarded to ``queryset.get``.
    :returns: The matching model object, or ``None`` when no row matches.
    """
    try:
        return queryset.get(**query_params)
    except queryset.model.DoesNotExist:
        return None
def single(queryset):
    """
    Assumes that this model only has one element in the table and returns it.
    If the table has more than one or no value, an exception is raised.

    :returns: The only model object in the queryset.
    :raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
        error when the object does not exist or a
        :class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
        error when there is more than one object. Both are propagated from
        ``queryset.get()``.
    """
    return queryset.get()
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.

    Builds a single ``UPDATE ... SET ... FROM (VALUES ...)`` statement that
    joins the provided rows to the table on the primary key, so every row is
    updated in one round trip.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: Model objects that have been modified in memory.
    :param fields_to_update: Names of the fields to write. Only these fields
        will be updated; all others are left untouched.
    :signals: Emits a post_bulk_operation signal when completed.
    """
    # Add the pk to the value fields so we can join
    value_fields = [manager.model._meta.pk.attname] + fields_to_update
    # Build the row values
    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]
    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return
    # Create a map of db types
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]
    # Build the value fields sql
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )
    # Build the set sql
    update_fields_sql = ', '.join([
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    ])
    # Build the values sql. Explicit ::type casts are emitted only for the
    # first VALUES row (row_number == 0) and only for non-pk columns (i != 0)
    # -- presumably postgres infers the remaining rows' types from the first;
    # verify against the postgres VALUES documentation.
    values_sql = ', '.join([
        '({0})'.format(
            ', '.join([
                '%s::{0}'.format(
                    db_types[i]
                ) if not row_number and i else '%s'
                for i, _ in enumerate(row)
            ])
        )
        for row_number, row in enumerate(row_values)
    ])
    # Start building the query
    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )
    # Combine all the row values
    update_sql_params = list(itertools.chain(*row_values))
    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)
    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Perform an update on an object or an insert if the object does not exist.

    :type defaults: dict
    :param defaults: Values set only when the object is created; ignored
        when the object already exists. The caller's dict is never mutated.
    :type updates: dict
    :param updates: Values written to the object whenever it already exists
        and differs. They also override ``defaults`` when the object is
        inserted.
    :param kwargs: Arguments identifying the object, used in the same manner
        as Django's ``get_or_create``.
    :returns: A tuple ``(obj, created)`` where ``created`` is True if the
        object was inserted, False otherwise.
    """
    # Work on a copy so the caller's ``defaults`` dict is never mutated
    # (the original code called defaults.update(...) on the caller's dict).
    creation_defaults = dict(defaults) if defaults else {}
    # Override any defaults with updates
    creation_defaults.update(updates or {})

    # Do a get or create
    obj, created = manager.get_or_create(defaults=creation_defaults, **kwargs)

    # Only hit the db again if an existing object actually changed
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        # Pass an explicit list of field names to update_fields
        obj.save(update_fields=list(updates))
    return obj, created
class ManagerUtilsQuerySet(QuerySet):
    """
    Defines the methods in the manager utils that can also be applied to querysets.

    Each method delegates to the module-level function of the same name with
    ``self`` as the queryset, so chained querysets keep the extra behavior.
    """
    def id_dict(self):
        """Return a dict of this queryset's objects keyed on primary key."""
        return id_dict(self)

    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Bulk update-or-insert ``model_objs`` against this queryset; see :func:`bulk_upsert`."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts, native=native
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Postgres-native bulk upsert of ``model_objs``; see :func:`bulk_upsert2`."""
        return bulk_upsert2(self, model_objs, unique_fields,
                            update_fields=update_fields, returning=returning,
                            ignore_duplicate_updates=ignore_duplicate_updates,
                            return_untouched=return_untouched)

    def bulk_create(self, *args, **kwargs):
        """
        Overrides Django's bulk_create function to emit a post_bulk_operation signal when bulk_create
        is finished.
        """
        ret_val = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Make this queryset's contents match ``model_objs``; see :func:`sync`."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Postgres-native sync of this queryset to ``model_objs``; see :func:`sync2`."""
        return sync2(self, model_objs, unique_fields, update_fields=update_fields, returning=returning,
                     ignore_duplicate_updates=ignore_duplicate_updates)

    def get_or_none(self, **query_params):
        """Return the object matching ``query_params`` or None when absent."""
        return get_or_none(self, **query_params)

    def single(self):
        """Return the queryset's only object; raises when there are zero or many."""
        return single(self)

    def update(self, **kwargs):
        """
        Overrides Django's update method to emit a post_bulk_operation signal when it completes.
        """
        ret_val = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
class ManagerUtilsMixin(object):
    """
    A mixin that can be used by django model managers. It provides additional functionality on top
    of the regular Django Manager class.

    Each method delegates to the module-level function of the same name,
    applied to ``self.get_queryset()``.
    """
    def get_queryset(self):
        """Return a ManagerUtilsQuerySet so chained querysets keep the extra methods."""
        return ManagerUtilsQuerySet(self.model)

    def id_dict(self):
        """Return a dict of all objects keyed on primary key."""
        return id_dict(self.get_queryset())

    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
            native=False):
        """Bulk update-or-insert ``model_objs``; see :func:`bulk_upsert`."""
        return bulk_upsert(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts,
            return_upserts_distinct=return_upserts_distinct, native=native)

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Postgres-native bulk upsert; see :func:`bulk_upsert2`."""
        return bulk_upsert2(
            self.get_queryset(), model_objs, unique_fields,
            update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched)

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync the table's contents to ``model_objs``; see :func:`sync`."""
        return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Postgres-native sync; see :func:`sync2`."""
        return sync2(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates)

    def bulk_update(self, model_objs, fields_to_update):
        """Update ``fields_to_update`` on already-saved ``model_objs`` in one query."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)

    def upsert(self, defaults=None, updates=None, **kwargs):
        """Update-or-insert a single object; see :func:`upsert`."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)

    def get_or_none(self, **query_params):
        """Return the object matching ``query_params`` or None when absent."""
        return get_or_none(self.get_queryset(), **query_params)

    def single(self):
        """Return the table's only object; raises when there are zero or many."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """
    A concrete manager class: Django's ``Manager`` combined with the
    manager-utils mixin methods (bulk_upsert, sync, upsert, get_or_none,
    single, ...). Attach it to a model as ``objects = ManagerUtilsManager()``.
    """
    pass
|
ambitioninc/django-manager-utils | manager_utils/manager_utils.py | _get_upserts | python | def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
updated, created = _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields)
return updated + created | Given a list of model objects that were updated and model objects that were created,
return the list of all model objects upserted. Doing this requires fetching all of
the models created with bulk create (since django can't return bulk_create pks) | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/manager_utils.py#L69-L76 | null | import itertools
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs
post_bulk_operation = Signal(providing_args=['model'])
def id_dict(queryset):
    """Build a mapping from primary key to model instance.

    :param queryset: Any iterable of model objects (queryset or manager).
    :rtype: dict
    :returns: ``{obj.pk: obj}`` for every object in *queryset*.
    """
    keyed = {}
    for model_obj in queryset:
        keyed[model_obj.pk] = model_obj
    return keyed
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
"""
Given a list of model objects that were updated and model objects that were created,
fetch the pks of the newly created models and return the two lists in a tuple
"""
# Keep track of the created models
created_models = []
# If we created new models query for them
if model_objs_created:
created_models.extend(
queryset.extra(
where=['({unique_fields_sql}) in %s'.format(
unique_fields_sql=', '.join(unique_fields)
)],
params=[
tuple([
tuple([
getattr(model_obj, field)
for field in unique_fields
])
for model_obj in model_objs_created
])
]
)
)
# Return the models
return model_objs_updated, created_models
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def _get_prepped_model_field(model_obj, field):
    """Return the db-prepared value of ``field`` on ``model_obj``.

    Resolves the field object through the model's meta options and runs the
    raw attribute value through ``get_db_prep_save`` for the active
    connection.
    """
    field_obj = model_obj._meta.get_field(field)
    raw_value = getattr(model_obj, field_obj.attname)
    return field_obj.get_db_prep_save(raw_value, connection)
def bulk_upsert(
    queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
    sync=False, native=False
):
    """
    Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
    with the objs provided using the field values in unique_fields.
    If an existing object is matched, it is updated with the values from the provided objects. Objects
    that don't match anything are bulk created.
    A user can provide a list update_fields so that any changed values on those fields will be updated.
    However, if update_fields is not provided, this function reduces down to performing a bulk_create
    on any non extant objects.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects whose fields correspond to the model in the manager.

    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an object in objs matches a model
        from the queryset. Must be non-empty.

    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.

    :type return_upserts_distinct: bool
    :param return_upserts_distinct: A flag specifying whether to return the upserted values as a list of distinct lists,
        one containing the updated models and the other containing the new models. If True, this performs an
        additional query to fetch any bulk created values. Not supported together with native=True.

    :type return_upserts: bool
    :param return_upserts: A flag specifying whether to return the upserted values. If True, this performs
        an additional query to fetch any bulk created values.

    :type sync: bool
    :param sync: A flag specifying whether a sync operation should be applied to the bulk_upsert. If this
        is True, all values in the queryset that were not updated will be deleted such that the
        entire list of model objects is synced to the queryset.

    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert).

    :raises ValueError: if unique_fields is empty.
    :raises NotImplementedError: if native=True is combined with return_upserts_distinct=True.

    :signals: Emits a post_bulk_operation when a bulk_update or a bulk_create occurs.

    Examples:
    .. code-block:: python

        # Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
        # a char_field, int_field, and float_field
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
        ], ['int_field'], ['char_field'])

        # All objects should have been created
        print(TestModel.objects.count())
        3

        # Now perform a bulk upsert on all the char_field values. Since the objects existed previously
        # (known by the int_field uniqueness constraint), the char fields should be updated
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='0', int_field=1),
            TestModel(float_field=2.0, char_field='0', int_field=2),
            TestModel(float_field=3.0, char_field='0', int_field=3),
        ], ['int_field'], ['char_field'])

        # No more new objects should have been created, and every char field should be 0
        print(TestModel.objects.count(), TestModel.objects.filter(char_field='0').count())
        3, 3

        # Do the exact same operation, but this time add an additional object that is not already
        # stored. It will be created.
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be one more object
        print(TestModel.objects.count())
        4

        # Note that one can also do the upsert on a queryset. Perform the same data upsert on a
        # filter for int_field=1. In this case, only one object has the ability to be updated.
        # All of the other objects will be created
        bulk_upsert(TestModel.objects.filter(int_field=1), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be three more objects
        print(TestModel.objects.count())
        7
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []
    if native:
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        # Let querybuilder issue a single INSERT ... ON CONFLICT statement.
        # return_models is forced on when syncing so we can tell which pks survived.
        return_value = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            # Delete every pre-existing row that the upsert did not touch
            orig_ids = frozenset(queryset.values_list('pk', flat=True))
            queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return return_value
    # Create a look up table for all of the objects in the queryset keyed on the unique_fields
    extant_model_objs = {
        tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj
        for extant_model_obj in queryset
    }
    # Find all of the objects to update and all of the objects to create
    model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, extant_model_objs)
    # Find all objects in the queryset that will not be updated. These will be deleted if the sync option is
    # True. Deletion happens before the creates below.
    if sync:
        model_objs_to_update_set = frozenset(model_objs_to_update)
        model_objs_to_delete = [
            model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set
        ]
        if model_objs_to_delete:
            queryset.filter(pk__in=model_objs_to_delete).delete()
    # Apply bulk updates and creates
    if update_fields:
        bulk_update(queryset, model_objs_to_update, update_fields)
    queryset.bulk_create(model_objs_to_create)
    # Optionally return the bulk upserted values (implicitly returns None when
    # neither return flag is set)
    if return_upserts_distinct:
        # return a list of lists, the first being the updated models, the second being the newly created objects
        return _get_upserts_distinct(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
    if return_upserts:
        return _get_upserts(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
def bulk_upsert2(
    queryset, model_objs, unique_fields, update_fields=None, returning=False,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
    with the objs provided using the field values in unique_fields.
    If an existing object is matched, it is updated with the values from the provided objects. Objects
    that don't match anything are bulk created.
    A user can provide a list update_fields so that any changed values on those fields will be updated.
    However, if update_fields is not provided, this function reduces down to performing a bulk_create
    on any non extant objects.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
        model_objs (List[Model]): A list of Django models to sync. All models in this list
            will be bulk upserted and any models not in the table (or queryset) will be deleted
            if sync=True.
        unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
            model must have a unique constraint on these fields
        update_fields (List[str], default=None): A list of fields to update whenever objects
            already exist. If an empty list is provided, it is equivalent to doing a bulk
            insert on the objects that don't exist. If ``None``, all fields will be updated.
        returning (bool|List[str]): If ``True``, returns all fields. If a list, only returns
            fields in the list. Return values are split in a tuple of created and updated models
        ignore_duplicate_updates (bool, default=True): Ignore updating a row in the upsert if all of the
            update fields are duplicates
        return_untouched (bool, default=False): Return values that were not touched by the upsert operation

    Returns:
        UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, and untouched,
        results can be obtained by accessing the ``created``, ``updated``, and ``untouched`` properties
        of the result.

    Examples:
    .. code-block:: python

        # Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
        # a char_field, int_field, and float_field
        bulk_upsert2(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
        ], ['int_field'], ['char_field'])

        # All objects should have been created
        print(TestModel.objects.count())
        3

        # Now perform a bulk upsert on all the char_field values. Since the objects existed previously
        # (known by the int_field uniqueness constraint), the char fields should be updated
        bulk_upsert2(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='0', int_field=1),
            TestModel(float_field=2.0, char_field='0', int_field=2),
            TestModel(float_field=3.0, char_field='0', int_field=3),
        ], ['int_field'], ['char_field'])

        # No more new objects should have been created, and every char field should be 0
        print(TestModel.objects.count(), TestModel.objects.filter(char_field='0').count())
        3, 3

        # Do the exact same operation, but this time add an additional object that is not already
        # stored. It will be created.
        bulk_upsert2(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be one more object
        print(TestModel.objects.count())
        4

        # Note that one can also do the upsert on a queryset. Perform the same data upsert on a
        # filter for int_field=1. In this case, only one object has the ability to be updated.
        # All of the other objects will be created
        bulk_upsert2(TestModel.objects.filter(int_field=1), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be three more objects
        print(TestModel.objects.count())
        7

        # Return creates and updates on the same set of models
        created, updated = bulk_upsert2(TestModel.objects.filter(int_field=1), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # All four objects should be updated
        print(len(updated))
        4
    """
    # Delegate the actual SQL generation to the upsert2 module, then notify
    # listeners that a bulk operation happened before handing back the results.
    results = upsert2.upsert(queryset, model_objs, unique_fields,
                             update_fields=update_fields, returning=returning,
                             ignore_duplicate_updates=ignore_duplicate_updates,
                             return_untouched=return_untouched)
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return results
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
    """
    Performs a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.

    This function calls bulk_upsert underneath the hood with sync=True.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: The models to sync

    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an
        object in objs matches a model from the queryset.

    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.

    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert) when performing
        bulk upsert. Forwarded to bulk_upsert through ``**kwargs``.
    """
    # sync=True makes bulk_upsert delete any queryset rows not present in model_objs
    return bulk_upsert(queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs)
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
    """
    Performs a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.

    Note: The definition of a sync requires that we return untouched rows from the upsert opertion. There is
    no way to turn off returning untouched rows in a sync.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
        model_objs (List[Model]): A list of Django models to sync. All models in this list
            will be bulk upserted and any models not in the table (or queryset) will be deleted
            if sync=True.
        unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
            model must have a unique constraint on these fields
        update_fields (List[str], default=None): A list of fields to update whenever objects
            already exist. If an empty list is provided, it is equivalent to doing a bulk
            insert on the objects that don't exist. If `None`, all fields will be updated.
        returning (bool|List[str]): If True, returns all fields. If a list, only returns
            fields in the list. Return values are split in a tuple of created, updated, and
            deleted models.
        ignore_duplicate_updates (bool, default=True): Ignore updating a row in the upsert if all
            of the update fields are duplicates

    Returns:
        UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, untouched,
        and deleted results can be obtained by accessing the ``created``, ``updated``, ``untouched``,
        and ``deleted`` properties of the result.
    """
    # sync=True makes the native upsert delete rows missing from model_objs,
    # and the bulk-operation signal is emitted once the statement has run.
    results = upsert2.upsert(queryset, model_objs, unique_fields,
                             update_fields=update_fields, returning=returning, sync=True,
                             ignore_duplicate_updates=ignore_duplicate_updates)
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return results
def get_or_none(queryset, **query_params):
    """
    Fetch a single object from ``queryset``, returning ``None`` when absent.

    :param query_params: The lookup keyword arguments, exactly as they would be
        passed to ``queryset.get``.
    :returns: The matching model object, or ``None`` if no row matches.

    Examples:

    .. code-block:: python

        model_obj = get_or_none(TestModel.objects, int_field=1)
        print(model_obj)
        None

        TestModel.objects.create(int_field=1)
        model_obj = get_or_none(TestModel.objects, int_field=1)
        print(model_obj.int_field)
        1
    """
    # EAFP: attempt the lookup and translate the model's DoesNotExist into None
    try:
        return queryset.get(**query_params)
    except queryset.model.DoesNotExist:
        return None
def single(queryset):
    """
    Return the sole object held by ``queryset``.

    :returns: The only model object in the queryset.
    :raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
        when the queryset is empty, or
        :class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
        when it contains more than one row.

    Examples:

    .. code-block:: python

        TestModel.objects.create(int_field=1)
        model_obj = single(TestModel.objects)
        print(model_obj.int_field)
        1
    """
    # An unfiltered get() already enforces the exactly-one invariant
    only_obj = queryset.get()
    return only_obj
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.

    :type fields_to_update: list of str
    :param fields_to_update: A list of fields to be updated. Only these fields will be updated.

    :signals: Emits a post_bulk_operation signal when completed.

    .. note:: The generated SQL uses ``UPDATE ... FROM (VALUES ...)`` with
        double-quoted identifiers and ``::type`` casts — this is
        postgres-specific syntax.

    Examples:

    .. code-block:: python

        # Create a couple test models
        model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
        model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')

        # Change their fields and do a bulk update
        model_obj1.int_field = 10
        model_obj1.float_field = 20.0
        model_obj2.int_field = 30
        model_obj2.float_field = 40.0
        bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])

        # Reload the models and view their changes
        model_obj1 = TestModel.objects.get(id=model_obj1.id)
        print(model_obj1.int_field, model_obj1.float_field)
        10, 20.0

        model_obj2 = TestModel.objects.get(id=model_obj2.id)
        print(model_obj2.int_field, model_obj2.float_field)
        30, 40.0
    """
    # Add the pk to the value fields so we can join
    value_fields = [manager.model._meta.pk.attname] + fields_to_update
    # Build the row values; every value is run through the field's
    # get_db_prep_save so it is safe to hand to the cursor as a parameter
    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]
    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return
    # Create a map of db types, one per column in value_fields
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]
    # Build the value fields sql (the column list of the VALUES alias)
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )
    # Build the set sql
    update_fields_sql = ', '.join([
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    ])
    # Build the values sql. Only the first row (row_number == 0) carries
    # explicit ::type casts, and only for the non-pk columns (i != 0) —
    # postgres infers the types of the remaining rows from the first.
    values_sql = ', '.join([
        '({0})'.format(
            ', '.join([
                '%s::{0}'.format(
                    db_types[i]
                ) if not row_number and i else '%s'
                for i, _ in enumerate(row)
            ])
        )
        for row_number, row in enumerate(row_values)
    ])
    # Start building the query: join the VALUES rows to the table on pk
    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )
    # Combine all the row values into one flat parameter list matching the %s
    # placeholders in order
    update_sql_params = list(itertools.chain(*row_values))
    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)
    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Performs an update on an object or an insert if the object does not exist.

    The caller's ``defaults`` dictionary is never mutated: it is copied before
    the ``updates`` values are merged over it.

    :type defaults: dict
    :param defaults: These values are set when the object is created, but are irrelevant
        when the object already exists. This field should only be used when values only need to
        be set during creation.

    :type updates: dict
    :param updates: These values are updated when the object is updated. They also override any
        values provided in the defaults when inserting the object.

    :param kwargs: These values provide the arguments used when checking for the existence of
        the object. They are used in a similar manner to Django's get_or_create function.

    :returns: A tuple of the upserted object and a Boolean that is True if it was created (False otherwise)

    Examples:

    .. code-block:: python

        # Upsert a test model with an int value of 1. Use default values that will be given to it when created
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 2.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0

        # Do an upsert on that same model with different default fields. Since it already exists, the defaults
        # are not used
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0

        # In order to update the float field in an existing object, use the updates dictionary
        model_obj, created = upsert(TestModel.objects, int_field=1, updates={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 3.0

        # You can use updates on a newly created object that will also be used as initial values.
        model_obj, created = upsert(TestModel.objects, int_field=2, updates={'float_field': 4.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        2, 4.0
    """
    # Copy defaults so that merging updates below never mutates the caller's
    # dictionary (the previous in-place .update() leaked changes back out).
    defaults = dict(defaults) if defaults else {}
    # Override any defaults with updates
    defaults.update(updates or {})
    # Do a get or create
    obj, created = manager.get_or_create(defaults=defaults, **kwargs)
    # Update any necessary fields, issuing a save only when at least one of the
    # updated values actually differs from what is stored on the object
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        obj.save(update_fields=updates)
    return obj, created
class ManagerUtilsQuerySet(QuerySet):
    """
    Defines the methods in the manager utils that can also be applied to querysets.

    Each method delegates to the module-level function of the same name, passing
    this queryset as the collection to operate on.
    """
    def id_dict(self):
        """Return a dict of this queryset's objects keyed on their primary key."""
        return id_dict(self)

    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Bulk update-or-insert ``model_objs`` into this queryset. See module-level ``bulk_upsert``."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts, native=native
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Postgres-native bulk upsert of ``model_objs``. See module-level ``bulk_upsert2``."""
        return bulk_upsert2(self, model_objs, unique_fields,
                            update_fields=update_fields, returning=returning,
                            ignore_duplicate_updates=ignore_duplicate_updates,
                            return_untouched=return_untouched)

    def bulk_create(self, *args, **kwargs):
        """
        Overrides Django's bulk_create function to emit a post_bulk_operation signal when bulk_create
        is finished.
        """
        ret_val = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync this queryset's contents to ``model_objs``. See module-level ``sync``."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Postgres-native sync of this queryset to ``model_objs``. See module-level ``sync2``."""
        return sync2(self, model_objs, unique_fields, update_fields=update_fields, returning=returning,
                     ignore_duplicate_updates=ignore_duplicate_updates)

    def get_or_none(self, **query_params):
        """Return the single object matching ``query_params`` or ``None`` if absent."""
        return get_or_none(self, **query_params)

    def single(self):
        """Return the only object in this queryset; raise if there is not exactly one."""
        return single(self)

    def update(self, **kwargs):
        """
        Overrides Django's update method to emit a post_bulk_operation signal when it completes.
        """
        ret_val = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
class ManagerUtilsMixin(object):
    """
    A mixin that can be used by django model managers. It provides additional functionality on top
    of the regular Django Manager class.

    Each method delegates to the module-level function of the same name, passing
    this manager's queryset as the collection to operate on.
    """
    def get_queryset(self):
        """Return a ManagerUtilsQuerySet so chained queryset calls keep the extra methods."""
        return ManagerUtilsQuerySet(self.model)

    def id_dict(self):
        """Return a dict of all objects keyed on their primary key."""
        return id_dict(self.get_queryset())

    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
            native=False):
        """Bulk update-or-insert ``model_objs``. See module-level ``bulk_upsert``."""
        return bulk_upsert(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts,
            return_upserts_distinct=return_upserts_distinct, native=native)

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Postgres-native bulk upsert. See module-level ``bulk_upsert2``."""
        return bulk_upsert2(
            self.get_queryset(), model_objs, unique_fields,
            update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched)

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync the table's contents to ``model_objs``. See module-level ``sync``."""
        return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Postgres-native sync. See module-level ``sync2``."""
        return sync2(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates)

    def bulk_update(self, model_objs, fields_to_update):
        """Bulk update already-saved ``model_objs``. See module-level ``bulk_update``."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)

    def upsert(self, defaults=None, updates=None, **kwargs):
        """Update-or-insert a single object. See module-level ``upsert``."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)

    def get_or_none(self, **query_params):
        """Return the single object matching ``query_params`` or ``None`` if absent."""
        return get_or_none(self.get_queryset(), **query_params)

    def single(self):
        """Return the only object in the table; raise if there is not exactly one."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """
    A class that can be used as a manager. It already inherits the Django Manager class and adds
    the mixin.
    """
    # Intentionally empty: all behavior comes from ManagerUtilsMixin and Manager.
    pass
|
ambitioninc/django-manager-utils | manager_utils/manager_utils.py | _get_model_objs_to_update_and_create | python | def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create | Used by bulk_upsert to gather lists of models that should be updated and created. | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/manager_utils.py#L79-L97 | null | import itertools
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs
post_bulk_operation = Signal(providing_args=['model'])
def id_dict(queryset):
    """
    Build a mapping from primary key to object for every object in ``queryset``.

    :rtype: dict
    :returns: A dictionary of objects from the queryset or manager that is keyed
        on the objects' primary keys.

    Examples:

    .. code-block:: python

        TestModel.objects.create(int_field=1)
        TestModel.objects.create(int_field=2)

        print(id_dict(TestModel.objects.all()))
    """
    result = {}
    for model_obj in queryset:
        result[model_obj.pk] = model_obj
    return result
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
"""
Given a list of model objects that were updated and model objects that were created,
fetch the pks of the newly created models and return the two lists in a tuple
"""
# Keep track of the created models
created_models = []
# If we created new models query for them
if model_objs_created:
created_models.extend(
queryset.extra(
where=['({unique_fields_sql}) in %s'.format(
unique_fields_sql=', '.join(unique_fields)
)],
params=[
tuple([
tuple([
getattr(model_obj, field)
for field in unique_fields
])
for model_obj in model_objs_created
])
]
)
)
# Return the models
return model_objs_updated, created_models
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Return one combined list of all models touched by a bulk upsert.

    Delegates to ``_get_upserts_distinct`` (which re-fetches the bulk-created
    rows from the database) and concatenates its two result lists.
    """
    updated_models, created_models = _get_upserts_distinct(
        queryset, model_objs_updated, model_objs_created, unique_fields)
    return [*updated_models, *created_models]
def _get_prepped_model_field(model_obj, field):
    """
    Return the value of ``field`` on ``model_obj``, prepared for the database.

    Resolves the field through the model's ``_meta`` and runs the raw attribute
    value through ``get_db_prep_save`` against the module-level connection.
    """
    model_field = model_obj._meta.get_field(field)
    raw_value = getattr(model_obj, model_field.attname)
    return model_field.get_db_prep_save(raw_value, connection)
def bulk_upsert(
    queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
    sync=False, native=False
):
    """
    Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
    with the objs provided using the field values in unique_fields.
    If an existing object is matched, it is updated with the values from the provided objects. Objects
    that don't match anything are bulk created.
    A user can provide a list update_fields so that any changed values on those fields will be updated.
    However, if update_fields is not provided, this function reduces down to performing a bulk_create
    on any non extant objects.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects whose fields correspond to the model in the manager.

    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an object in objs matches a model
        from the queryset. Must be non-empty.

    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.

    :type return_upserts_distinct: bool
    :param return_upserts_distinct: A flag specifying whether to return the upserted values as a list of distinct lists,
        one containing the updated models and the other containing the new models. If True, this performs an
        additional query to fetch any bulk created values. Not supported together with native=True.

    :type return_upserts: bool
    :param return_upserts: A flag specifying whether to return the upserted values. If True, this performs
        an additional query to fetch any bulk created values.

    :type sync: bool
    :param sync: A flag specifying whether a sync operation should be applied to the bulk_upsert. If this
        is True, all values in the queryset that were not updated will be deleted such that the
        entire list of model objects is synced to the queryset.

    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert).

    :raises ValueError: if unique_fields is empty.
    :raises NotImplementedError: if native=True is combined with return_upserts_distinct=True.

    :signals: Emits a post_bulk_operation when a bulk_update or a bulk_create occurs.

    Examples:
    .. code-block:: python

        # Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
        # a char_field, int_field, and float_field
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
        ], ['int_field'], ['char_field'])

        # All objects should have been created
        print(TestModel.objects.count())
        3

        # Now perform a bulk upsert on all the char_field values. Since the objects existed previously
        # (known by the int_field uniqueness constraint), the char fields should be updated
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='0', int_field=1),
            TestModel(float_field=2.0, char_field='0', int_field=2),
            TestModel(float_field=3.0, char_field='0', int_field=3),
        ], ['int_field'], ['char_field'])

        # No more new objects should have been created, and every char field should be 0
        print(TestModel.objects.count(), TestModel.objects.filter(char_field='0').count())
        3, 3

        # Do the exact same operation, but this time add an additional object that is not already
        # stored. It will be created.
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be one more object
        print(TestModel.objects.count())
        4

        # Note that one can also do the upsert on a queryset. Perform the same data upsert on a
        # filter for int_field=1. In this case, only one object has the ability to be updated.
        # All of the other objects will be created
        bulk_upsert(TestModel.objects.filter(int_field=1), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be three more objects
        print(TestModel.objects.count())
        7
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []
    if native:
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        # Let querybuilder issue a single INSERT ... ON CONFLICT statement.
        # return_models is forced on when syncing so we can tell which pks survived.
        return_value = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            # Delete every pre-existing row that the upsert did not touch
            orig_ids = frozenset(queryset.values_list('pk', flat=True))
            queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return return_value
    # Create a look up table for all of the objects in the queryset keyed on the unique_fields
    extant_model_objs = {
        tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj
        for extant_model_obj in queryset
    }
    # Find all of the objects to update and all of the objects to create
    model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, extant_model_objs)
    # Find all objects in the queryset that will not be updated. These will be deleted if the sync option is
    # True. Deletion happens before the creates below.
    if sync:
        model_objs_to_update_set = frozenset(model_objs_to_update)
        model_objs_to_delete = [
            model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set
        ]
        if model_objs_to_delete:
            queryset.filter(pk__in=model_objs_to_delete).delete()
    # Apply bulk updates and creates
    if update_fields:
        bulk_update(queryset, model_objs_to_update, update_fields)
    queryset.bulk_create(model_objs_to_create)
    # Optionally return the bulk upserted values (implicitly returns None when
    # neither return flag is set)
    if return_upserts_distinct:
        # return a list of lists, the first being the updated models, the second being the newly created objects
        return _get_upserts_distinct(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
    if return_upserts:
        return _get_upserts(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
def bulk_upsert2(
    queryset, model_objs, unique_fields, update_fields=None, returning=False,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Performs a bulk update or insert on a list of model objects using postgres'
    native upsert support.

    Each object in ``model_objs`` is matched against the rows selected by
    ``queryset`` on ``unique_fields``. Matched rows are updated (restricted to
    ``update_fields`` when given, every field when ``None``) and unmatched
    objects are bulk created. Passing ``update_fields=[]`` reduces the call to
    a bulk insert of the non-extant objects.

    Args:
        queryset (Model|QuerySet): A model or queryset defining the collection to upsert into.
        model_objs (List[Model]): The Django models to upsert.
        unique_fields (List[str]): Fields carrying a unique constraint that define row identity.
        update_fields (List[str], default=None): Fields to update on matched rows. ``None``
            updates all fields; ``[]`` only inserts missing rows.
        returning (bool|List[str]): If ``True``, return all fields; if a list, return only
            the listed fields.
        ignore_duplicate_updates (bool, default=True): Skip updating rows whose update
            fields are all unchanged.
        return_untouched (bool, default=False): Also return rows the upsert did not touch.

    Returns:
        UpsertResult: When ``returning`` is not ``False``. Created, updated, and untouched
        models are available via the ``created``, ``updated``, and ``untouched`` properties.
    """
    upsert_results = upsert2.upsert(
        queryset, model_objs, unique_fields,
        update_fields=update_fields, returning=returning,
        ignore_duplicate_updates=ignore_duplicate_updates,
        return_untouched=return_untouched,
    )
    # Announce the bulk table modification to any signal listeners.
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return upsert_results
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
    """
    Performs a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.

    This function calls bulk_upsert underneath the hood with sync=True.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: The models to sync

    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an
        object in objs matches a model from the queryset.

    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.

    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert) when performing
        bulk upsert.

    :returns: Whatever ``bulk_upsert`` returns for the forwarded arguments
        (``None`` unless a ``return_upserts*`` flag is passed via ``kwargs``).
    """
    # Delegate to bulk_upsert with sync=True so unmatched rows are deleted.
    return bulk_upsert(queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs)
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
    """
    Make the rows selected by ``queryset`` exactly match ``model_objs``.

    Rows matched on ``unique_fields`` are updated (limited to ``update_fields``
    when given, all fields when ``None``), unmatched input objects are
    inserted, and queryset rows matching nothing are deleted. Untouched rows
    are always returned by the underlying upsert — the sync needs them to
    decide what to delete, so there is no way to turn that off here.

    Args:
        queryset (Model|QuerySet): The collection to sync.
        model_objs (List[Model]): The desired final contents.
        unique_fields (List[str]): Fields with a unique constraint that define row identity.
        update_fields (List[str], default=None): Fields to update on matched rows;
            ``None`` updates every field, ``[]`` only inserts missing rows.
        returning (bool|List[str]): ``True`` for all fields, or a list of field names.
        ignore_duplicate_updates (bool): Skip updating rows whose update fields are unchanged.

    Returns:
        UpsertResult: Exposes ``created``, ``updated``, ``untouched``, and
        ``deleted`` result properties when ``returning`` is not ``False``.
    """
    sync_results = upsert2.upsert(
        queryset, model_objs, unique_fields,
        update_fields=update_fields, returning=returning, sync=True,
        ignore_duplicate_updates=ignore_duplicate_updates,
    )
    # Let signal listeners know the table was modified in bulk.
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return sync_results
def get_or_none(queryset, **query_params):
    """
    Fetch a single object matching ``query_params``, or ``None`` if absent.

    :param query_params: The lookup parameters forwarded to ``queryset.get``.
    :returns: The matching model object, or ``None`` when it does not exist.

    Examples:

    .. code-block:: python

        model_obj = get_or_none(TestModel.objects, int_field=1)
        print(model_obj)
        None

        TestModel.objects.create(int_field=1)
        model_obj = get_or_none(TestModel.objects, int_field=1)
        print(model_obj.int_field)
        1
    """
    # EAFP: attempt the lookup and translate the model's DoesNotExist to None.
    try:
        return queryset.get(**query_params)
    except queryset.model.DoesNotExist:
        return None
def single(queryset):
    """
    Return the one and only object in the queryset.

    Assumes the queryset (or table) holds exactly one row.

    :returns: The only model object in the queryset.
    :raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
        when the queryset is empty, or
        :class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
        when it holds more than one object.

    Examples:

    .. code-block:: python

        TestModel.objects.create(int_field=1)
        model_obj = single(TestModel.objects)
        print(model_obj.int_field)
        1
    """
    # ``get`` with no filters enforces the exactly-one-row contract for us.
    return queryset.get()
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.

    Builds a single ``UPDATE ... FROM (VALUES ...)`` statement and executes it
    directly with a cursor, so one round trip updates every row.
    NOTE(review): the VALUES/cast syntax used here is postgres-specific — confirm
    before using on another backend.

    :type manager: Manager or QuerySet
    :param manager: The manager (or queryset) whose model's table is updated.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.

    :type fields_to_update: list of str
    :param fields_to_update: A list of fields to be updated. Only these fields will be updated

    :signals: Emits a post_bulk_operation signal when completed.

    Examples:

    .. code-block:: python

        # Create a couple test models
        model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
        model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')

        # Change their fields and do a bulk update
        model_obj1.int_field = 10
        model_obj1.float_field = 20.0
        model_obj2.int_field = 30
        model_obj2.float_field = 40.0
        bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])

        # Reload the models and view their changes
        model_obj1 = TestModel.objects.get(id=model_obj1.id)
        print(model_obj1.int_field, model_obj1.float_field)
        10, 20.0

        model_obj2 = TestModel.objects.get(id=model_obj2.id)
        print(model_obj2.int_field, model_obj2.float_field)
        30, 40.0
    """
    # Add the pk to the value fields so we can join
    value_fields = [manager.model._meta.pk.attname] + fields_to_update

    # Build the row values, each converted to its db-prepared representation
    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]

    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return

    # Create a map of db types, used below to cast the VALUES placeholders
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]

    # Build the value fields sql (quoted column names for the VALUES alias)
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )

    # Build the set sql
    update_fields_sql = ', '.join([
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    ])

    # Build the values sql. Only the first row's non-pk columns carry an
    # explicit ``::type`` cast (``not row_number and i``); the database infers
    # the types of the remaining rows from that first row.
    values_sql = ', '.join([
        '({0})'.format(
            ', '.join([
                '%s::{0}'.format(
                    db_types[i]
                ) if not row_number and i else '%s'
                for i, _ in enumerate(row)
            ])
        )
        for row_number, row in enumerate(row_values)
    ])

    # Start building the query
    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )

    # Combine all the row values into one flat parameter list for execute()
    update_sql_params = list(itertools.chain(*row_values))

    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)

    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Performs an update on an object or an insert if the object does not exist.

    :type defaults: dict
    :param defaults: These values are set when the object is created, but are irrelevant
        when the object already exists. This field should only be used when values only need to
        be set during creation. The caller's dictionary is never mutated.

    :type updates: dict
    :param updates: These values are updated when the object is updated. They also override any
        values provided in the defaults when inserting the object.

    :param kwargs: These values provide the arguments used when checking for the existence of
        the object. They are used in a similar manner to Django's get_or_create function.

    :returns: A tuple of the upserted object and a Boolean that is True if it was created (False otherwise)

    Examples:

    .. code-block:: python

        # Upsert a test model with an int value of 1. Use default values that will be given to it when created
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 2.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0

        # Do an upsert on that same model with different default fields. Since it already exists, the defaults
        # are not used
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0

        # In order to update the float field in an existing object, use the updates dictionary
        model_obj, created = upsert(TestModel.objects, int_field=1, updates={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 3.0

        # You can use updates on a newly created object that will also be used as initial values.
        model_obj, created = upsert(TestModel.objects, int_field=2, updates={'float_field': 4.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        2, 4.0
    """
    # Copy before merging so that overriding defaults with updates does not
    # mutate the caller's dictionary (the previous implementation updated
    # ``defaults`` in place).
    defaults = dict(defaults) if defaults else {}
    # Override any defaults with updates
    defaults.update(updates or {})

    # Do a get or create
    obj, created = manager.get_or_create(defaults=defaults, **kwargs)

    # Update any necessary fields, but only when at least one value actually
    # differs so we avoid a redundant save
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        obj.save(update_fields=updates)

    return obj, created
class ManagerUtilsQuerySet(QuerySet):
    """
    Defines the methods in the manager utils that can also be applied to querysets.
    """
    def id_dict(self):
        """Return the queryset's objects as a dict keyed on primary key."""
        return id_dict(self)

    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Bulk update-or-insert ``model_objs`` against this queryset."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts, native=native
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Bulk upsert ``model_objs`` using postgres' native upsert support."""
        return bulk_upsert2(
            self, model_objs, unique_fields,
            update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched)

    def bulk_create(self, *args, **kwargs):
        """
        Overrides Django's bulk_create function to emit a post_bulk_operation signal when bulk_create
        is finished.
        """
        created_objs = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return created_objs

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Make this queryset's contents match ``model_objs``."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Make this queryset's contents match ``model_objs`` using the native upsert."""
        return sync2(self, model_objs, unique_fields, update_fields=update_fields, returning=returning,
                     ignore_duplicate_updates=ignore_duplicate_updates)

    def get_or_none(self, **query_params):
        """Return the matching object or ``None`` if it does not exist."""
        return get_or_none(self, **query_params)

    def single(self):
        """Return the single object in the queryset, raising if there is not exactly one."""
        return single(self)

    def update(self, **kwargs):
        """
        Overrides Django's update method to emit a post_bulk_operation signal when it completes.
        """
        updated_count = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return updated_count
class ManagerUtilsMixin(object):
    """
    A mixin that can be used by django model managers. It provides additional functionality on top
    of the regular Django Manager class.
    """
    def get_queryset(self):
        """
        Return a :class:`ManagerUtilsQuerySet` for this manager's model.

        Passes the manager's database alias (``self._db``) through so that
        ``.using(...)`` / router-selected databases are honored; the previous
        implementation dropped it, pinning every query to the default database.
        ``getattr`` keeps the mixin usable on hosts without a ``_db`` attribute.
        """
        return ManagerUtilsQuerySet(self.model, using=getattr(self, '_db', None))

    def id_dict(self):
        """Return all of the model's objects as a dict keyed on primary key."""
        return id_dict(self.get_queryset())

    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
            native=False):
        """Bulk update-or-insert ``model_objs`` into the model's table."""
        return bulk_upsert(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts,
            return_upserts_distinct=return_upserts_distinct, native=native)

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Bulk upsert ``model_objs`` using postgres' native upsert support."""
        return bulk_upsert2(
            self.get_queryset(), model_objs, unique_fields,
            update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched)

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Make the model's table contents match ``model_objs``."""
        return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Make the model's table contents match ``model_objs`` using the native upsert."""
        return sync2(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates)

    def bulk_update(self, model_objs, fields_to_update):
        """Bulk update ``fields_to_update`` on the already-saved ``model_objs``."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)

    def upsert(self, defaults=None, updates=None, **kwargs):
        """Update the single object matching ``kwargs``, or insert it if absent."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)

    def get_or_none(self, **query_params):
        """Return the matching object or ``None`` if it does not exist."""
        return get_or_none(self.get_queryset(), **query_params)

    def single(self):
        """Return the single object in the table, raising if there is not exactly one."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """
    A concrete manager class: Django's ``Manager`` with the manager-utils
    mixin already applied, ready to attach to a model.
    """
def _get_prepped_model_field(model_obj, field):
    """
    Gets the value of a field of a model obj that is prepared for the db.

    :param model_obj: The model instance to read from.
    :param field: The name of the field whose value should be prepared.
    :returns: The field's value converted by ``get_db_prep_save`` for the
        current connection.
    """
    # Resolve the field descriptor from the model's meta options
    # (this span was garbled with dataset metadata; reconstructed from the body).
    field = model_obj._meta.get_field(field)
    # Convert the python value into its database-ready representation
    value = field.get_db_prep_save(getattr(model_obj, field.attname), connection)
    return value


import itertools
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs; it sends a single
# ``model`` keyword argument identifying the affected model class.
# ``providing_args`` was dropped: it was purely documentational and the
# argument was removed from ``Signal`` in Django 4.0.
post_bulk_operation = Signal()
def id_dict(queryset):
    """
    Returns a dictionary of all the objects keyed on their ID.

    :rtype: dict
    :returns: A dictionary mapping each object's primary key to the object,
        for every object in the queryset or manager.

    Examples:

    .. code-block:: python

        TestModel.objects.create(int_field=1)
        TestModel.objects.create(int_field=2)
        print(id_dict(TestModel.objects.all()))
    """
    return {record.pk: record for record in queryset}
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Given a list of model objects that were updated and model objects that were created,
    fetch the pks of the newly created models and return the two lists in a tuple

    :param queryset: The queryset the upsert ran against; used to re-fetch the
        created rows (bulk_create does not populate their pks here).
    :param model_objs_updated: Models updated in place (already carry pks).
    :param model_objs_created: Models that were bulk created and must be re-queried.
    :param unique_fields: Field names that uniquely identify each created row.
    :returns: A ``(updated_models, created_models)`` tuple of lists.
    """
    # Keep track of the created models
    created_models = []

    # If we created new models query for them
    if model_objs_created:
        created_models.extend(
            queryset.extra(
                # NOTE(review): ``unique_fields`` are interpolated directly into
                # the WHERE clause as column names -- assumes field names match
                # their db columns; verify for fields with a custom ``db_column``.
                where=['({unique_fields_sql}) in %s'.format(
                    unique_fields_sql=', '.join(unique_fields)
                )],
                # One tuple-of-tuples parameter: the unique-field values of
                # every created object.
                params=[
                    tuple([
                        tuple([
                            getattr(model_obj, field)
                            for field in unique_fields
                        ])
                        for model_obj in model_objs_created
                    ])
                ]
            )
        )

    # Return the models
    return model_objs_updated, created_models
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Return every model touched by the upsert as one flat list.

    Newly created models are re-fetched from the database first (bulk_create
    does not populate their pks here), then appended after the updated models.
    """
    updated, created = _get_upserts_distinct(
        queryset, model_objs_updated, model_objs_created, unique_fields
    )
    return [*updated, *created]
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def bulk_upsert(
        queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
        sync=False, native=False
):
    """
    Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
    with the objs provided using the field values in unique_fields.
    If an existing object is matched, it is updated with the values from the provided objects. Objects
    that don't match anything are bulk created.
    A user can provide a list update_fields so that any changed values on those fields will be updated.
    However, if update_fields is not provided, this function reduces down to performing a bulk_create
    on any non extant objects.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects with fields corresponding to the model in the manager.

    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an object in objs matches a model
        from the queryset.

    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.

    :type return_upserts_distinct: bool
    :param return_upserts_distinct: A flag specifying whether to return the upserted values as a list of distinct lists,
        one containing the updated models and the other containing the new models. If True, this performs an
        additional query to fetch any bulk created values.

    :type return_upserts: bool
    :param return_upserts: A flag specifying whether to return the upserted values. If True, this performs
        an additional query to fetch any bulk created values.

    :type sync: bool
    :param sync: A flag specifying whether a sync operation should be applied to the bulk_upsert. If this
        is True, all values in the queryset that were not updated will be deleted such that the
        entire list of model objects is synced to the queryset.

    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert).

    :signals: Emits a post_bulk_operation when a bulk_update or a bulk_create occurs.

    Examples:

    .. code-block:: python

        # Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
        # a char_field, int_field, and float_field
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
        ], ['int_field'], ['char_field'])

        # All objects should have been created
        print(TestModel.objects.count())
        3

        # Now perform a bulk upsert on all the char_field values. Since the objects existed previously
        # (known by the int_field uniqueness constraint), the char fields should be updated
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='0', int_field=1),
            TestModel(float_field=2.0, char_field='0', int_field=2),
            TestModel(float_field=3.0, char_field='0', int_field=3),
        ], ['int_field'], ['char_field'])

        # No more new objects should have been created, and every char field should be 0
        print(TestModel.objects.count(), TestModel.objects.filter(char_field='-1').count())
        3, 3

        # Do the exact same operation, but this time add an additional object that is not already
        # stored. It will be created.
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be one more object
        print(TestModel.objects.count())
        4

        # Note that one can also do the upsert on a queryset. Perform the same data upsert on a
        # filter for int_field=1. In this case, only one object has the ability to be updated.
        # All of the other objects will be created
        bulk_upsert(TestModel.objects.filter(int_field=1), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be three more objects
        print(TestModel.objects.count())
        7
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []

    # Native path: delegate the whole upsert to a single postgres
    # INSERT ... ON CONFLICT statement built by querybuilder.
    if native:
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        return_value = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            # Delete any pre-existing rows the upsert did not touch
            orig_ids = frozenset(queryset.values_list('pk', flat=True))
            queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return return_value

    # Create a look up table for all of the objects in the queryset keyed on the unique_fields
    extant_model_objs = {
        tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj
        for extant_model_obj in queryset
    }

    # Find all of the objects to update and all of the objects to create
    model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, extant_model_objs)

    # Find all objects in the queryset that will not be updated. These will be deleted if the sync option is
    # True
    if sync:
        model_objs_to_update_set = frozenset(model_objs_to_update)
        model_objs_to_delete = [
            model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set
        ]
        if model_objs_to_delete:
            queryset.filter(pk__in=model_objs_to_delete).delete()

    # Apply bulk updates and creates
    if update_fields:
        bulk_update(queryset, model_objs_to_update, update_fields)
    queryset.bulk_create(model_objs_to_create)

    # Optionally return the bulk upserted values
    if return_upserts_distinct:
        # return a list of lists, the first being the updated models, the second being the newly created objects
        return _get_upserts_distinct(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
    if return_upserts:
        return _get_upserts(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
def bulk_upsert2(
    queryset, model_objs, unique_fields, update_fields=None, returning=False,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Performs a bulk update or insert on a list of model objects using postgres'
    native upsert support.

    Each object in ``model_objs`` is matched against the rows selected by
    ``queryset`` on ``unique_fields``. Matched rows are updated (restricted to
    ``update_fields`` when given, every field when ``None``) and unmatched
    objects are bulk created. Passing ``update_fields=[]`` reduces the call to
    a bulk insert of the non-extant objects.

    Args:
        queryset (Model|QuerySet): A model or queryset defining the collection to upsert into.
        model_objs (List[Model]): The Django models to upsert.
        unique_fields (List[str]): Fields carrying a unique constraint that define row identity.
        update_fields (List[str], default=None): Fields to update on matched rows. ``None``
            updates all fields; ``[]`` only inserts missing rows.
        returning (bool|List[str]): If ``True``, return all fields; if a list, return only
            the listed fields.
        ignore_duplicate_updates (bool, default=True): Skip updating rows whose update
            fields are all unchanged.
        return_untouched (bool, default=False): Also return rows the upsert did not touch.

    Returns:
        UpsertResult: When ``returning`` is not ``False``. Created, updated, and untouched
        models are available via the ``created``, ``updated``, and ``untouched`` properties.
    """
    upsert_results = upsert2.upsert(
        queryset, model_objs, unique_fields,
        update_fields=update_fields, returning=returning,
        ignore_duplicate_updates=ignore_duplicate_updates,
        return_untouched=return_untouched,
    )
    # Announce the bulk table modification to any signal listeners.
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return upsert_results
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
"""
Performs a sync operation on a queryset, making the contents of the
queryset match the contents of model_objs.
This function calls bulk_upsert underneath the hood with sync=True.
:type model_objs: list of :class:`Models<django:django.db.models.Model>`
:param model_objs: The models to sync
:type update_fields: list of str
:param unique_fields: A list of fields that are used to determine if an
object in objs matches a model from the queryset.
:type update_fields: list of str
:param update_fields: A list of fields used from the objects in objs as fields when updating existing
models. If None, this function will only perform a bulk create for model_objs that do not
currently exist in the database.
:type native: bool
:param native: A flag specifying whether to use postgres insert on conflict (upsert) when performing
bulk upsert.
"""
return bulk_upsert(queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs)
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
"""
Performs a sync operation on a queryset, making the contents of the
queryset match the contents of model_objs.
Note: The definition of a sync requires that we return untouched rows from the upsert opertion. There is
no way to turn off returning untouched rows in a sync.
Args:
queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
model_objs (List[Model]): A list of Django models to sync. All models in this list
will be bulk upserted and any models not in the table (or queryset) will be deleted
if sync=True.
unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
model must have a unique constraint on these fields
update_fields (List[str], default=None): A list of fields to update whenever objects
already exist. If an empty list is provided, it is equivalent to doing a bulk
insert on the objects that don't exist. If `None`, all fields will be updated.
returning (bool|List[str]): If True, returns all fields. If a list, only returns
fields in the list. Return values are split in a tuple of created, updated, and
deleted models.
ignore_duplicate_updates (bool, default=False): Ignore updating a row in the upsert if all
of the update fields are duplicates
Returns:
UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, untouched,
and deleted results can be obtained by accessing the ``created``, ``updated``, ``untouched``,
and ``deleted`` properties of the result.
"""
results = upsert2.upsert(queryset, model_objs, unique_fields,
update_fields=update_fields, returning=returning, sync=True,
ignore_duplicate_updates=ignore_duplicate_updates)
post_bulk_operation.send(sender=queryset.model, model=queryset.model)
return results
def get_or_none(queryset, **query_params):
"""
Get an object or return None if it doesn't exist.
:param query_params: The query parameters used in the lookup.
:returns: A model object if one exists with the query params, None otherwise.
Examples:
.. code-block:: python
model_obj = get_or_none(TestModel.objects, int_field=1)
print(model_obj)
None
TestModel.objects.create(int_field=1)
model_obj = get_or_none(TestModel.objects, int_field=1)
print(model_obj.int_field)
1
"""
try:
obj = queryset.get(**query_params)
except queryset.model.DoesNotExist:
obj = None
return obj
def single(queryset):
"""
Assumes that this model only has one element in the table and returns it.
If the table has more than one or no value, an exception is raised.
:returns: The only model object in the queryset.
:raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
error when the object does not exist or a
:class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
error when thereis more than one object.
Examples:
.. code-block:: python
TestModel.objects.create(int_field=1)
model_obj = single(TestModel.objects)
print(model_obj.int_field)
1
"""
return queryset.get()
def bulk_update(manager, model_objs, fields_to_update):
"""
Bulk updates a list of model objects that are already saved.
:type model_objs: list of :class:`Models<django:django.db.models.Model>`
:param model_objs: A list of model objects that have been updated.
fields_to_update: A list of fields to be updated. Only these fields will be updated
:signals: Emits a post_bulk_operation signal when completed.
Examples:
.. code-block:: python
# Create a couple test models
model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')
# Change their fields and do a bulk update
model_obj1.int_field = 10
model_obj1.float_field = 20.0
model_obj2.int_field = 30
model_obj2.float_field = 40.0
bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])
# Reload the models and view their changes
model_obj1 = TestModel.objects.get(id=model_obj1.id)
print(model_obj1.int_field, model_obj1.float_field)
10, 20.0
model_obj2 = TestModel.objects.get(id=model_obj2.id)
print(model_obj2.int_field, model_obj2.float_field)
10, 20.0
"""
# Add the pk to the value fields so we can join
value_fields = [manager.model._meta.pk.attname] + fields_to_update
# Build the row values
row_values = [
[_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
for model_obj in model_objs
]
# If we do not have any values or fields to update just return
if len(row_values) == 0 or len(fields_to_update) == 0:
return
# Create a map of db types
db_types = [
manager.model._meta.get_field(field).db_type(connection)
for field in value_fields
]
# Build the value fields sql
value_fields_sql = ', '.join(
'"{field}"'.format(field=manager.model._meta.get_field(field).column)
for field in value_fields
)
# Build the set sql
update_fields_sql = ', '.join([
'"{field}" = "new_values"."{field}"'.format(
field=manager.model._meta.get_field(field).column
)
for field in fields_to_update
])
# Build the values sql
values_sql = ', '.join([
'({0})'.format(
', '.join([
'%s::{0}'.format(
db_types[i]
) if not row_number and i else '%s'
for i, _ in enumerate(row)
])
)
for row_number, row in enumerate(row_values)
])
# Start building the query
update_sql = (
'UPDATE {table} '
'SET {update_fields_sql} '
'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
).format(
table=manager.model._meta.db_table,
pk_field=manager.model._meta.pk.column,
update_fields_sql=update_fields_sql,
values_sql=values_sql,
value_fields_sql=value_fields_sql
)
# Combine all the row values
update_sql_params = list(itertools.chain(*row_values))
# Run the update query
with connection.cursor() as cursor:
cursor.execute(update_sql, update_sql_params)
# call the bulk operation signal
post_bulk_operation.send(sender=manager.model, model=manager.model)
def upsert(manager, defaults=None, updates=None, **kwargs):
"""
Performs an update on an object or an insert if the object does not exist.
:type defaults: dict
:param defaults: These values are set when the object is created, but are irrelevant
when the object already exists. This field should only be used when values only need to
be set during creation.
:type updates: dict
:param updates: These values are updated when the object is updated. They also override any
values provided in the defaults when inserting the object.
:param kwargs: These values provide the arguments used when checking for the existence of
the object. They are used in a similar manner to Django's get_or_create function.
:returns: A tuple of the upserted object and a Boolean that is True if it was created (False otherwise)
Examples:
.. code-block:: python
# Upsert a test model with an int value of 1. Use default values that will be given to it when created
model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 2.0})
print(created)
True
print(model_obj.int_field, model_obj.float_field)
1, 2.0
# Do an upsert on that same model with different default fields. Since it already exists, the defaults
# are not used
model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 3.0})
print(created)
False
print(model_obj.int_field, model_obj.float_field)
1, 2.0
# In order to update the float field in an existing object, use the updates dictionary
model_obj, created = upsert(TestModel.objects, int_field=1, updates={'float_field': 3.0})
print(created)
False
print(model_obj.int_field, model_obj.float_field)
1, 3.0
# You can use updates on a newly created object that will also be used as initial values.
model_obj, created = upsert(TestModel.objects, int_field=2, updates={'float_field': 4.0})
print(created)
True
print(model_obj.int_field, model_obj.float_field)
2, 4.0
"""
defaults = defaults or {}
# Override any defaults with updates
defaults.update(updates or {})
# Do a get or create
obj, created = manager.get_or_create(defaults=defaults, **kwargs)
# Update any necessary fields
if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
for k, v in updates.items():
setattr(obj, k, v)
obj.save(update_fields=updates)
return obj, created
class ManagerUtilsQuerySet(QuerySet):
"""
Defines the methods in the manager utils that can also be applied to querysets.
"""
def id_dict(self):
return id_dict(self)
def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
return bulk_upsert(
self, model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts, native=native
)
def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
ignore_duplicate_updates=True, return_untouched=False):
return bulk_upsert2(self, model_objs, unique_fields,
update_fields=update_fields, returning=returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
def bulk_create(self, *args, **kwargs):
"""
Overrides Django's bulk_create function to emit a post_bulk_operation signal when bulk_create
is finished.
"""
ret_val = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
post_bulk_operation.send(sender=self.model, model=self.model)
return ret_val
def sync(self, model_objs, unique_fields, update_fields=None, native=False):
return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)
def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
return sync2(self, model_objs, unique_fields, update_fields=update_fields, returning=returning,
ignore_duplicate_updates=ignore_duplicate_updates)
def get_or_none(self, **query_params):
return get_or_none(self, **query_params)
def single(self):
return single(self)
def update(self, **kwargs):
"""
Overrides Django's update method to emit a post_bulk_operation signal when it completes.
"""
ret_val = super(ManagerUtilsQuerySet, self).update(**kwargs)
post_bulk_operation.send(sender=self.model, model=self.model)
return ret_val
class ManagerUtilsMixin(object):
"""
A mixin that can be used by django model managers. It provides additional functionality on top
of the regular Django Manager class.
"""
def get_queryset(self):
return ManagerUtilsQuerySet(self.model)
def id_dict(self):
return id_dict(self.get_queryset())
def bulk_upsert(
self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
native=False):
return bulk_upsert(
self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts,
return_upserts_distinct=return_upserts_distinct, native=native)
def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
ignore_duplicate_updates=True, return_untouched=False):
return bulk_upsert2(
self.get_queryset(), model_objs, unique_fields,
update_fields=update_fields, returning=returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
def sync(self, model_objs, unique_fields, update_fields=None, native=False):
return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)
def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
return sync2(
self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, returning=returning,
ignore_duplicate_updates=ignore_duplicate_updates)
def bulk_update(self, model_objs, fields_to_update):
return bulk_update(self.get_queryset(), model_objs, fields_to_update)
def upsert(self, defaults=None, updates=None, **kwargs):
return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)
def get_or_none(self, **query_params):
return get_or_none(self.get_queryset(), **query_params)
def single(self):
return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
"""
A class that can be used as a manager. It already inherits the Django Manager class and adds
the mixin.
"""
pass
|
ambitioninc/django-manager-utils | manager_utils/manager_utils.py | bulk_upsert | python | def bulk_upsert(
queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
sync=False, native=False
):
if not unique_fields:
raise ValueError('Must provide unique_fields argument')
update_fields = update_fields or []
if native:
if return_upserts_distinct:
raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
return_value = Query().from_table(table=queryset.model).upsert(
model_objs, unique_fields, update_fields, return_models=return_upserts or sync
) or []
if sync:
orig_ids = frozenset(queryset.values_list('pk', flat=True))
queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()
post_bulk_operation.send(sender=queryset.model, model=queryset.model)
return return_value
# Create a look up table for all of the objects in the queryset keyed on the unique_fields
extant_model_objs = {
tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj
for extant_model_obj in queryset
}
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(
model_objs, unique_fields, update_fields, extant_model_objs)
# Find all objects in the queryset that will not be updated. These will be deleted if the sync option is
# True
if sync:
model_objs_to_update_set = frozenset(model_objs_to_update)
model_objs_to_delete = [
model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set
]
if model_objs_to_delete:
queryset.filter(pk__in=model_objs_to_delete).delete()
# Apply bulk updates and creates
if update_fields:
bulk_update(queryset, model_objs_to_update, update_fields)
queryset.bulk_create(model_objs_to_create)
# Optionally return the bulk upserted values
if return_upserts_distinct:
# return a list of lists, the first being the updated models, the second being the newly created objects
return _get_upserts_distinct(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
if return_upserts:
return _get_upserts(queryset, model_objs_to_update, model_objs_to_create, unique_fields) | Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
with the objs provided using the field values in unique_fields.
If an existing object is matched, it is updated with the values from the provided objects. Objects
that don't match anything are bulk created.
A user can provide a list update_fields so that any changed values on those fields will be updated.
However, if update_fields is not provided, this function reduces down to performing a bulk_create
on any non extant objects.
:type model_objs: list of dict
:param model_objs: A list of dictionaries that have fields corresponding to the model in the manager.
:type unique_fields: list of str
:param unique_fields: A list of fields that are used to determine if an object in objs matches a model
from the queryset.
:type update_fields: list of str
:param update_fields: A list of fields used from the objects in objs as fields when updating existing
models. If None, this function will only perform a bulk create for model_objs that do not
currently exist in the database.
:type return_upserts_distinct: bool
:param return_upserts_distinct: A flag specifying whether to return the upserted values as a list of distinct lists,
one containing the updated models and the other containing the new models. If True, this performs an
additional query to fetch any bulk created values.
:type return_upserts: bool
:param return_upserts: A flag specifying whether to return the upserted values. If True, this performs
an additional query to fetch any bulk created values.
:type sync: bool
:param sync: A flag specifying whether a sync operation should be applied to the bulk_upsert. If this
is True, all values in the queryset that were not updated will be deleted such that the
entire list of model objects is synced to the queryset.
:type native: bool
:param native: A flag specifying whether to use postgres insert on conflict (upsert).
:signals: Emits a post_bulk_operation when a bulk_update or a bulk_create occurs.
Examples:
.. code-block:: python
# Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
# a char_field, int_field, and float_field
bulk_upsert(TestModel.objects.all(), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
], ['int_field'], ['char_field'])
# All objects should have been created
print(TestModel.objects.count())
3
# Now perform a bulk upsert on all the char_field values. Since the objects existed previously
# (known by the int_field uniqueness constraint), the char fields should be updated
bulk_upsert(TestModel.objects.all(), [
TestModel(float_field=1.0, char_field='0', int_field=1),
TestModel(float_field=2.0, char_field='0', int_field=2),
TestModel(float_field=3.0, char_field='0', int_field=3),
], ['int_field'], ['char_field'])
# No more new objects should have been created, and every char field should be 0
print(TestModel.objects.count(), TestModel.objects.filter(char_field='-1').count())
3, 3
# Do the exact same operation, but this time add an additional object that is not already
# stored. It will be created.
bulk_upsert(TestModel.objects.all(), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
TestModel(float_field=4.0, char_field='4', int_field=4),
], ['int_field'], ['char_field'])
# There should be one more object
print(TestModel.objects.count())
4
# Note that one can also do the upsert on a queryset. Perform the same data upsert on a
# filter for int_field=1. In this case, only one object has the ability to be updated.
# All of the other objects will be created
bulk_upsert(TestModel.objects.filter(int_field=1), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
TestModel(float_field=4.0, char_field='4', int_field=4),
], ['int_field'], ['char_field'])
# There should be three more objects
print(TestModel.objects.count())
7 | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/manager_utils.py#L115-L263 | [
"def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):\n \"\"\"\n Given a list of model objects that were updated and model objects that were created,\n fetch the pks of the newly created models and return the two lists in a tuple\n \"\"\"\n\n # Keep track of the created models\n created_models = []\n\n # If we created new models query for them\n if model_objs_created:\n created_models.extend(\n queryset.extra(\n where=['({unique_fields_sql}) in %s'.format(\n unique_fields_sql=', '.join(unique_fields)\n )],\n params=[\n tuple([\n tuple([\n getattr(model_obj, field)\n for field in unique_fields\n ])\n for model_obj in model_objs_created\n ])\n ]\n )\n )\n\n # Return the models\n return model_objs_updated, created_models\n",
"def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):\n \"\"\"\n Given a list of model objects that were updated and model objects that were created,\n return the list of all model objects upserted. Doing this requires fetching all of\n the models created with bulk create (since django can't return bulk_create pks)\n \"\"\"\n updated, created = _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields)\n return updated + created\n",
"def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):\n \"\"\"\n Used by bulk_upsert to gather lists of models that should be updated and created.\n \"\"\"\n\n # Find all of the objects to update and all of the objects to create\n model_objs_to_update, model_objs_to_create = list(), list()\n for model_obj in model_objs:\n extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)\n if extant_model_obj is None:\n # If the object needs to be created, make a new instance of it\n model_objs_to_create.append(model_obj)\n else:\n # If the object needs to be updated, update its fields\n for field in update_fields:\n setattr(extant_model_obj, field, getattr(model_obj, field))\n model_objs_to_update.append(extant_model_obj)\n\n return model_objs_to_update, model_objs_to_create\n"
] | import itertools
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs
post_bulk_operation = Signal(providing_args=['model'])
def id_dict(queryset):
"""
Returns a dictionary of all the objects keyed on their ID.
:rtype: dict
:returns: A dictionary of objects from the queryset or manager that is keyed
on the objects' IDs.
Examples:
.. code-block:: python
TestModel.objects.create(int_field=1)
TestModel.objects.create(int_field=2)
print(id_dict(TestModel.objects.all()))
"""
return {obj.pk: obj for obj in queryset}
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
"""
Given a list of model objects that were updated and model objects that were created,
fetch the pks of the newly created models and return the two lists in a tuple
"""
# Keep track of the created models
created_models = []
# If we created new models query for them
if model_objs_created:
created_models.extend(
queryset.extra(
where=['({unique_fields_sql}) in %s'.format(
unique_fields_sql=', '.join(unique_fields)
)],
params=[
tuple([
tuple([
getattr(model_obj, field)
for field in unique_fields
])
for model_obj in model_objs_created
])
]
)
)
# Return the models
return model_objs_updated, created_models
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
"""
Given a list of model objects that were updated and model objects that were created,
return the list of all model objects upserted. Doing this requires fetching all of
the models created with bulk create (since django can't return bulk_create pks)
"""
updated, created = _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields)
return updated + created
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def _get_prepped_model_field(model_obj, field):
"""
Gets the value of a field of a model obj that is prepared for the db.
"""
# Get the field
field = model_obj._meta.get_field(field)
# Get the value
value = field.get_db_prep_save(getattr(model_obj, field.attname), connection)
# Return the value
return value
def bulk_upsert2(
queryset, model_objs, unique_fields, update_fields=None, returning=False,
ignore_duplicate_updates=True, return_untouched=False
):
"""
Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
with the objs provided using the field values in unique_fields.
If an existing object is matched, it is updated with the values from the provided objects. Objects
that don't match anything are bulk created.
A user can provide a list update_fields so that any changed values on those fields will be updated.
However, if update_fields is not provided, this function reduces down to performing a bulk_create
on any non extant objects.
Args:
queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
model_objs (List[Model]): A list of Django models to sync. All models in this list
will be bulk upserted and any models not in the table (or queryset) will be deleted
if sync=True.
unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
model must have a unique constraint on these fields
update_fields (List[str], default=None): A list of fields to update whenever objects
already exist. If an empty list is provided, it is equivalent to doing a bulk
insert on the objects that don't exist. If ``None``, all fields will be updated.
returning (bool|List[str]): If ``True``, returns all fields. If a list, only returns
fields in the list. Return values are split in a tuple of created and updated models
ignore_duplicate_updates (bool, default=False): Ignore updating a row in the upsert if all of the update fields
are duplicates
return_untouched (bool, default=False): Return values that were not touched by the upsert operation
Returns:
UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, and untouched,
results can be obtained by accessing the ``created``, ``updated``, and ``untouched`` properties
of the result.
Examples:
.. code-block:: python
# Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
# a char_field, int_field, and float_field
bulk_upsert2(TestModel.objects.all(), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
], ['int_field'], ['char_field'])
# All objects should have been created
print(TestModel.objects.count())
3
# Now perform a bulk upsert on all the char_field values. Since the objects existed previously
# (known by the int_field uniqueness constraint), the char fields should be updated
bulk_upsert2(TestModel.objects.all(), [
TestModel(float_field=1.0, char_field='0', int_field=1),
TestModel(float_field=2.0, char_field='0', int_field=2),
TestModel(float_field=3.0, char_field='0', int_field=3),
], ['int_field'], ['char_field'])
# No more new objects should have been created, and every char field should be 0
print(TestModel.objects.count(), TestModel.objects.filter(char_field='-1').count())
3, 3
# Do the exact same operation, but this time add an additional object that is not already
# stored. It will be created.
bulk_upsert2(TestModel.objects.all(), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
TestModel(float_field=4.0, char_field='4', int_field=4),
], ['int_field'], ['char_field'])
# There should be one more object
print(TestModel.objects.count())
4
# Note that one can also do the upsert on a queryset. Perform the same data upsert on a
# filter for int_field=1. In this case, only one object has the ability to be updated.
# All of the other objects will be created
bulk_upsert2(TestModel.objects.filter(int_field=1), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
TestModel(float_field=4.0, char_field='4', int_field=4),
], ['int_field'], ['char_field'])
# There should be three more objects
print(TestModel.objects.count())
7
# Return creates and updates on the same set of models
created, updated = bulk_upsert2(TestModel.objects.filter(int_field=1), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
TestModel(float_field=4.0, char_field='4', int_field=4),
], ['int_field'], ['char_field'])
# All four objects should be updated
print(len(updated))
4
"""
results = upsert2.upsert(queryset, model_objs, unique_fields,
update_fields=update_fields, returning=returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
post_bulk_operation.send(sender=queryset.model, model=queryset.model)
return results
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
"""
Performs a sync operation on a queryset, making the contents of the
queryset match the contents of model_objs.
This function calls bulk_upsert underneath the hood with sync=True.
:type model_objs: list of :class:`Models<django:django.db.models.Model>`
:param model_objs: The models to sync
:type update_fields: list of str
:param unique_fields: A list of fields that are used to determine if an
object in objs matches a model from the queryset.
:type update_fields: list of str
:param update_fields: A list of fields used from the objects in objs as fields when updating existing
models. If None, this function will only perform a bulk create for model_objs that do not
currently exist in the database.
:type native: bool
:param native: A flag specifying whether to use postgres insert on conflict (upsert) when performing
bulk upsert.
"""
return bulk_upsert(queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs)
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
"""
Performs a sync operation on a queryset, making the contents of the
queryset match the contents of model_objs.
Note: The definition of a sync requires that we return untouched rows from the upsert opertion. There is
no way to turn off returning untouched rows in a sync.
Args:
queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
model_objs (List[Model]): A list of Django models to sync. All models in this list
will be bulk upserted and any models not in the table (or queryset) will be deleted
if sync=True.
unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
model must have a unique constraint on these fields
update_fields (List[str], default=None): A list of fields to update whenever objects
already exist. If an empty list is provided, it is equivalent to doing a bulk
insert on the objects that don't exist. If `None`, all fields will be updated.
returning (bool|List[str]): If True, returns all fields. If a list, only returns
fields in the list. Return values are split in a tuple of created, updated, and
deleted models.
ignore_duplicate_updates (bool, default=False): Ignore updating a row in the upsert if all
of the update fields are duplicates
Returns:
UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, untouched,
and deleted results can be obtained by accessing the ``created``, ``updated``, ``untouched``,
and ``deleted`` properties of the result.
"""
results = upsert2.upsert(queryset, model_objs, unique_fields,
update_fields=update_fields, returning=returning, sync=True,
ignore_duplicate_updates=ignore_duplicate_updates)
post_bulk_operation.send(sender=queryset.model, model=queryset.model)
return results
def get_or_none(queryset, **query_params):
    """
    Fetch the single object matching ``query_params`` or return ``None`` when no
    such object exists.

    :param query_params: The query parameters used in the lookup.
    :returns: A model object if one exists with the query params, None otherwise.

    Examples:
        .. code-block:: python

            model_obj = get_or_none(TestModel.objects, int_field=1)
            print(model_obj)
            None

            TestModel.objects.create(int_field=1)
            model_obj = get_or_none(TestModel.objects, int_field=1)
            print(model_obj.int_field)
            1
    """
    # EAFP: attempt the lookup and translate a missing row into None.
    try:
        return queryset.get(**query_params)
    except queryset.model.DoesNotExist:
        return None
def single(queryset):
    """
    Return the one and only object in the queryset's table.

    :returns: The only model object in the queryset.
    :raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
        error when the object does not exist or a
        :class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
        error when there is more than one object.

    Examples:
        .. code-block:: python

            TestModel.objects.create(int_field=1)
            model_obj = single(TestModel.objects)
            print(model_obj.int_field)
            1
    """
    # ``get()`` with no filters enforces exactly-one-row semantics for us.
    return queryset.get()
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.
    :type fields_to_update: list of str
    :param fields_to_update: A list of fields to be updated. Only these fields will be updated
    :signals: Emits a post_bulk_operation signal when completed.

    Examples:
        .. code-block:: python

            # Create a couple test models
            model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
            model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')

            # Change their fields and do a bulk update
            model_obj1.int_field = 10
            model_obj1.float_field = 20.0
            model_obj2.int_field = 30
            model_obj2.float_field = 40.0
            bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])

            # Reload the models and view their changes
            model_obj1 = TestModel.objects.get(id=model_obj1.id)
            print(model_obj1.int_field, model_obj1.float_field)
            10, 20.0

            model_obj2 = TestModel.objects.get(id=model_obj2.id)
            print(model_obj2.int_field, model_obj2.float_field)
            30, 40.0
    """
    # Add the pk to the value fields so we can join
    value_fields = [manager.model._meta.pk.attname] + fields_to_update

    # Build the row values (db-prepped: one row per model, one column per value field)
    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]

    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return

    # Create a map of db types, aligned index-for-index with value_fields
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]

    # Build the value fields sql (quoted column names of the VALUES alias)
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )

    # Build the set sql: each target column takes its value from the joined VALUES row
    update_fields_sql = ', '.join([
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    ])

    # Build the values sql. Explicit ``%s::type`` casts are emitted only for the
    # first row (row_number == 0) and never for the pk column (i == 0) —
    # presumably Postgres infers the column types of subsequent rows from the
    # first VALUES row; TODO confirm against the target backend.
    values_sql = ', '.join([
        '({0})'.format(
            ', '.join([
                '%s::{0}'.format(
                    db_types[i]
                ) if not row_number and i else '%s'
                for i, _ in enumerate(row)
            ])
        )
        for row_number, row in enumerate(row_values)
    ])

    # Start building the query: UPDATE ... FROM (VALUES ...) joined on the pk
    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )

    # Combine all the row values into one flat parameter list matching the %s order
    update_sql_params = list(itertools.chain(*row_values))

    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)

    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Performs an update on an object or an insert if the object does not exist.

    :type defaults: dict
    :param defaults: These values are set when the object is created, but are irrelevant
        when the object already exists. This field should only be used when values only need to
        be set during creation.
    :type updates: dict
    :param updates: These values are updated when the object is updated. They also override any
        values provided in the defaults when inserting the object.
    :param kwargs: These values provide the arguments used when checking for the existence of
        the object. They are used in a similar manner to Django's get_or_create function.
    :returns: A tuple of the upserted object and a Boolean that is True if it was created (False otherwise)

    Examples:
        .. code-block:: python

            # Upsert a test model with an int value of 1. Use default values that will be given to it when created
            model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 2.0})
            print(created)
            True
            print(model_obj.int_field, model_obj.float_field)
            1, 2.0

            # Do an upsert on that same model with different default fields. Since it already exists, the defaults
            # are not used
            model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 3.0})
            print(created)
            False
            print(model_obj.int_field, model_obj.float_field)
            1, 2.0

            # In order to update the float field in an existing object, use the updates dictionary
            model_obj, created = upsert(TestModel.objects, int_field=1, updates={'float_field': 3.0})
            print(created)
            False
            print(model_obj.int_field, model_obj.float_field)
            1, 3.0

            # You can use updates on a newly created object that will also be used as initial values.
            model_obj, created = upsert(TestModel.objects, int_field=2, updates={'float_field': 4.0})
            print(created)
            True
            print(model_obj.int_field, model_obj.float_field)
            2, 4.0
    """
    defaults = defaults or {}
    # Override any defaults with updates so updates double as creation values
    defaults.update(updates or {})

    # Do a get or create
    obj, created = manager.get_or_create(defaults=defaults, **kwargs)

    # Update any necessary fields — only hit the db again when the object already
    # existed and at least one update value actually differs
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        # Passing the dict as update_fields works because iterating a dict yields its keys
        obj.save(update_fields=updates)

    return obj, created
class ManagerUtilsQuerySet(QuerySet):
    """
    Defines the methods in the manager utils that can also be applied to querysets.

    Each method is a thin wrapper that forwards ``self`` (the queryset) to the
    corresponding module-level function.
    """
    def id_dict(self):
        """Return a dict of this queryset's objects keyed on primary key. See :func:`id_dict`."""
        return id_dict(self)

    # NOTE(review): unlike ManagerUtilsMixin.bulk_upsert, this wrapper does not
    # expose the return_upserts_distinct flag — confirm whether that is intentional.
    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Bulk update-or-insert ``model_objs`` into this queryset. See :func:`bulk_upsert`."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts, native=native
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Native-postgres bulk upsert. See :func:`bulk_upsert2`."""
        return bulk_upsert2(self, model_objs, unique_fields,
                            update_fields=update_fields, returning=returning,
                            ignore_duplicate_updates=ignore_duplicate_updates,
                            return_untouched=return_untouched)

    def bulk_create(self, *args, **kwargs):
        """
        Overrides Django's bulk_create function to emit a post_bulk_operation signal when bulk_create
        is finished.
        """
        ret_val = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Make this queryset's contents match ``model_objs``. See :func:`sync`."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Native-postgres sync. See :func:`sync2`."""
        return sync2(self, model_objs, unique_fields, update_fields=update_fields, returning=returning,
                     ignore_duplicate_updates=ignore_duplicate_updates)

    def get_or_none(self, **query_params):
        """Return the matching object or ``None``. See :func:`get_or_none`."""
        return get_or_none(self, **query_params)

    def single(self):
        """Return the queryset's only object. See :func:`single`."""
        return single(self)

    def update(self, **kwargs):
        """
        Overrides Django's update method to emit a post_bulk_operation signal when it completes.
        """
        ret_val = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
class ManagerUtilsMixin(object):
    """
    A mixin that can be used by django model managers. It provides additional functionality on top
    of the regular Django Manager class.

    Every method builds a fresh :class:`ManagerUtilsQuerySet` and forwards to the
    corresponding module-level function.
    """
    def get_queryset(self):
        # Hand back the extended queryset so chained calls keep the manager-utils methods.
        return ManagerUtilsQuerySet(self.model)

    def id_dict(self):
        """Return a dict of all objects keyed on primary key. See :func:`id_dict`."""
        return id_dict(self.get_queryset())

    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
            native=False):
        """Bulk update-or-insert ``model_objs``. See :func:`bulk_upsert`."""
        return bulk_upsert(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts,
            return_upserts_distinct=return_upserts_distinct, native=native)

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Native-postgres bulk upsert. See :func:`bulk_upsert2`."""
        return bulk_upsert2(
            self.get_queryset(), model_objs, unique_fields,
            update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched)

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Make the table's contents match ``model_objs``. See :func:`sync`."""
        return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Native-postgres sync. See :func:`sync2`."""
        return sync2(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates)

    def bulk_update(self, model_objs, fields_to_update):
        """Bulk update already-saved ``model_objs``. See :func:`bulk_update`."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)

    def upsert(self, defaults=None, updates=None, **kwargs):
        """Update-or-insert a single object. See :func:`upsert`."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)

    def get_or_none(self, **query_params):
        """Return the matching object or ``None``. See :func:`get_or_none`."""
        return get_or_none(self.get_queryset(), **query_params)

    def single(self):
        """Return the table's only object. See :func:`single`."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """
    A class that can be used as a manager. It already inherits the Django Manager class and adds
    the mixin.
    """
    # The mixin comes first in the MRO so its get_queryset override wins.
    pass
|
ambitioninc/django-manager-utils | manager_utils/manager_utils.py | bulk_upsert2 | python | def bulk_upsert2(
queryset, model_objs, unique_fields, update_fields=None, returning=False,
ignore_duplicate_updates=True, return_untouched=False
):
results = upsert2.upsert(queryset, model_objs, unique_fields,
update_fields=update_fields, returning=returning,
ignore_duplicate_updates=ignore_duplicate_updates,
return_untouched=return_untouched)
post_bulk_operation.send(sender=queryset.model, model=queryset.model)
return results | Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
with the objs provided using the field values in unique_fields.
If an existing object is matched, it is updated with the values from the provided objects. Objects
that don't match anything are bulk created.
A user can provide a list update_fields so that any changed values on those fields will be updated.
However, if update_fields is not provided, this function reduces down to performing a bulk_create
on any non extant objects.
Args:
queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
model_objs (List[Model]): A list of Django models to sync. All models in this list
will be bulk upserted and any models not in the table (or queryset) will be deleted
if sync=True.
unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
model must have a unique constraint on these fields
update_fields (List[str], default=None): A list of fields to update whenever objects
already exist. If an empty list is provided, it is equivalent to doing a bulk
insert on the objects that don't exist. If ``None``, all fields will be updated.
returning (bool|List[str]): If ``True``, returns all fields. If a list, only returns
fields in the list. Return values are split in a tuple of created and updated models
ignore_duplicate_updates (bool, default=True): Ignore updating a row in the upsert if all of the update fields
are duplicates
return_untouched (bool, default=False): Return values that were not touched by the upsert operation
Returns:
UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, and untouched,
results can be obtained by accessing the ``created``, ``updated``, and ``untouched`` properties
of the result.
Examples:
.. code-block:: python
# Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
# a char_field, int_field, and float_field
bulk_upsert2(TestModel.objects.all(), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
], ['int_field'], ['char_field'])
# All objects should have been created
print(TestModel.objects.count())
3
# Now perform a bulk upsert on all the char_field values. Since the objects existed previously
# (known by the int_field uniqueness constraint), the char fields should be updated
bulk_upsert2(TestModel.objects.all(), [
TestModel(float_field=1.0, char_field='0', int_field=1),
TestModel(float_field=2.0, char_field='0', int_field=2),
TestModel(float_field=3.0, char_field='0', int_field=3),
], ['int_field'], ['char_field'])
# No more new objects should have been created, and every char field should be 0
print(TestModel.objects.count(), TestModel.objects.filter(char_field='0').count())
3, 3
# Do the exact same operation, but this time add an additional object that is not already
# stored. It will be created.
bulk_upsert2(TestModel.objects.all(), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
TestModel(float_field=4.0, char_field='4', int_field=4),
], ['int_field'], ['char_field'])
# There should be one more object
print(TestModel.objects.count())
4
# Note that one can also do the upsert on a queryset. Perform the same data upsert on a
# filter for int_field=1. In this case, only one object has the ability to be updated.
# All of the other objects will be created
bulk_upsert2(TestModel.objects.filter(int_field=1), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
TestModel(float_field=4.0, char_field='4', int_field=4),
], ['int_field'], ['char_field'])
# There should be three more objects
print(TestModel.objects.count())
7
# Return creates and updates on the same set of models
created, updated = bulk_upsert2(TestModel.objects.filter(int_field=1), [
TestModel(float_field=1.0, char_field='1', int_field=1),
TestModel(float_field=2.0, char_field='2', int_field=2),
TestModel(float_field=3.0, char_field='3', int_field=3),
TestModel(float_field=4.0, char_field='4', int_field=4),
], ['int_field'], ['char_field'])
# All four objects should be updated
print(len(updated))
4 | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/manager_utils.py#L266-L372 | [
"def upsert(\n queryset, model_objs, unique_fields,\n update_fields=None, returning=False, sync=False,\n ignore_duplicate_updates=True,\n return_untouched=False\n):\n \"\"\"\n Perform a bulk upsert on a table, optionally syncing the results.\n\n Args:\n queryset (Model|QuerySet): A model or a queryset that defines the collection to sync\n model_objs (List[Model]): A list of Django models to sync. All models in this list\n will be bulk upserted and any models not in the table (or queryset) will be deleted\n if sync=True.\n unique_fields (List[str]): A list of fields that define the uniqueness of the model. The\n model must have a unique constraint on these fields\n update_fields (List[str], default=None): A list of fields to update whenever objects\n already exist. If an empty list is provided, it is equivalent to doing a bulk\n insert on the objects that don't exist. If `None`, all fields will be updated.\n returning (bool|List[str]): If True, returns all fields. If a list, only returns\n fields in the list\n sync (bool, default=False): Perform a sync operation on the queryset\n ignore_duplicate_updates (bool, default=False): Don't perform an update if the row is\n a duplicate.\n return_untouched (bool, default=False): Return untouched rows by the operation\n \"\"\"\n queryset = queryset if isinstance(queryset, models.QuerySet) else queryset.objects.all()\n model = queryset.model\n\n # Populate automatically generated fields in the rows like date times\n _fill_auto_fields(model, model_objs)\n\n # Sort the rows to reduce the chances of deadlock during concurrent upserts\n model_objs = _sort_by_unique_fields(model, model_objs, unique_fields)\n update_fields = _get_update_fields(model, unique_fields, update_fields)\n\n return _fetch(queryset, model_objs, unique_fields, update_fields, returning, sync,\n ignore_duplicate_updates=ignore_duplicate_updates,\n return_untouched=return_untouched)\n"
] | import itertools
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs
# NOTE(review): Signal(providing_args=...) is deprecated in Django 3.1 and removed
# in 4.0 — confirm the supported Django versions before upgrading.
post_bulk_operation = Signal(providing_args=['model'])
def id_dict(queryset):
    """
    Build a mapping of primary key to object for every object in the queryset.

    :rtype: dict
    :returns: A dictionary of objects from the queryset or manager that is keyed
        on the objects' IDs.

    Examples:
        .. code-block:: python

            TestModel.objects.create(int_field=1)
            TestModel.objects.create(int_field=2)

            print(id_dict(TestModel.objects.all()))
    """
    # One pass over the queryset; later duplicates of a pk would win (pks are unique).
    return {record.pk: record for record in queryset}
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Given a list of model objects that were updated and model objects that were created,
    fetch the pks of the newly created models and return the two lists in a tuple.

    The re-query is needed because bulk_create does not populate pks on the
    instances it saved; created rows are matched back by their unique-field tuples.
    """
    # Keep track of the created models
    created_models = []

    # If we created new models query for them
    if model_objs_created:
        created_models.extend(
            queryset.extra(
                # Raw WHERE clause of the form ``(f1, f2) in %s`` where the single
                # parameter is a tuple of per-object unique-field value tuples.
                where=['({unique_fields_sql}) in %s'.format(
                    unique_fields_sql=', '.join(unique_fields)
                )],
                params=[
                    tuple([
                        tuple([
                            getattr(model_obj, field)
                            for field in unique_fields
                        ])
                        for model_obj in model_objs_created
                    ])
                ]
            )
        )

    # Return the models
    return model_objs_updated, created_models
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Return every model object touched by an upsert as a single flat list.

    Fetches the freshly created models via :func:`_get_upserts_distinct`
    (bulk_create cannot report pks, so created rows must be re-queried) and
    concatenates them onto the updated models.
    """
    updated_objs, created_objs = _get_upserts_distinct(
        queryset, model_objs_updated, model_objs_created, unique_fields
    )
    combined = list(updated_objs)
    combined.extend(created_objs)
    return combined
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def _get_prepped_model_field(model_obj, field):
    """
    Gets the value of a field of a model obj that is prepared for the db.
    """
    # Get the field descriptor from the model's meta
    field = model_obj._meta.get_field(field)

    # Get the value, reading via ``attname`` (the raw column attribute, e.g. the
    # ``_id`` value for foreign keys) and converting it to its db representation
    value = field.get_db_prep_save(getattr(model_obj, field.attname), connection)

    # Return the value
    return value
def bulk_upsert(
    queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
    sync=False, native=False
):
    """
    Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
    with the objs provided using the field values in unique_fields.
    If an existing object is matched, it is updated with the values from the provided objects. Objects
    that don't match anything are bulk created.
    A user can provide a list update_fields so that any changed values on those fields will be updated.
    However, if update_fields is not provided, this function reduces down to performing a bulk_create
    on any non extant objects.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have fields corresponding to the model in the manager.
    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an object in objs matches a model
        from the queryset.
    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.
    :type return_upserts_distinct: bool
    :param return_upserts_distinct: A flag specifying whether to return the upserted values as a list of distinct lists,
        one containing the updated models and the other containing the new models. If True, this performs an
        additional query to fetch any bulk created values.
    :type return_upserts: bool
    :param return_upserts: A flag specifying whether to return the upserted values. If True, this performs
        an additional query to fetch any bulk created values.
    :type sync: bool
    :param sync: A flag specifying whether a sync operation should be applied to the bulk_upsert. If this
        is True, all values in the queryset that were not updated will be deleted such that the
        entire list of model objects is synced to the queryset.
    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert).
    :signals: Emits a post_bulk_operation when a bulk_update or a bulk_create occurs.

    Examples:
        .. code-block:: python

            # Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
            # a char_field, int_field, and float_field
            bulk_upsert(TestModel.objects.all(), [
                TestModel(float_field=1.0, char_field='1', int_field=1),
                TestModel(float_field=2.0, char_field='2', int_field=2),
                TestModel(float_field=3.0, char_field='3', int_field=3),
            ], ['int_field'], ['char_field'])

            # All objects should have been created
            print(TestModel.objects.count())
            3

            # Now perform a bulk upsert on all the char_field values. Since the objects existed previously
            # (known by the int_field uniqueness constraint), the char fields should be updated
            bulk_upsert(TestModel.objects.all(), [
                TestModel(float_field=1.0, char_field='0', int_field=1),
                TestModel(float_field=2.0, char_field='0', int_field=2),
                TestModel(float_field=3.0, char_field='0', int_field=3),
            ], ['int_field'], ['char_field'])

            # No more new objects should have been created, and every char field should be 0
            print(TestModel.objects.count(), TestModel.objects.filter(char_field='0').count())
            3, 3

            # Do the exact same operation, but this time add an additional object that is not already
            # stored. It will be created.
            bulk_upsert(TestModel.objects.all(), [
                TestModel(float_field=1.0, char_field='1', int_field=1),
                TestModel(float_field=2.0, char_field='2', int_field=2),
                TestModel(float_field=3.0, char_field='3', int_field=3),
                TestModel(float_field=4.0, char_field='4', int_field=4),
            ], ['int_field'], ['char_field'])

            # There should be one more object
            print(TestModel.objects.count())
            4

            # Note that one can also do the upsert on a queryset. Perform the same data upsert on a
            # filter for int_field=1. In this case, only one object has the ability to be updated.
            # All of the other objects will be created
            bulk_upsert(TestModel.objects.filter(int_field=1), [
                TestModel(float_field=1.0, char_field='1', int_field=1),
                TestModel(float_field=2.0, char_field='2', int_field=2),
                TestModel(float_field=3.0, char_field='3', int_field=3),
                TestModel(float_field=4.0, char_field='4', int_field=4),
            ], ['int_field'], ['char_field'])

            # There should be three more objects
            print(TestModel.objects.count())
            7
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []

    # Native path: let querybuilder emit a postgres INSERT ... ON CONFLICT statement
    if native:
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        # return_models is also needed when syncing so the surviving pks are known
        return_value = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            # Delete every pre-existing row that the upsert did not touch
            orig_ids = frozenset(queryset.values_list('pk', flat=True))
            queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return return_value

    # Create a look up table for all of the objects in the queryset keyed on the unique_fields
    extant_model_objs = {
        tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj
        for extant_model_obj in queryset
    }

    # Find all of the objects to update and all of the objects to create
    model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, extant_model_objs)

    # Find all objects in the queryset that will not be updated. These will be deleted if the sync option is
    # True
    if sync:
        model_objs_to_update_set = frozenset(model_objs_to_update)
        model_objs_to_delete = [
            model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set
        ]
        if model_objs_to_delete:
            queryset.filter(pk__in=model_objs_to_delete).delete()

    # Apply bulk updates and creates
    if update_fields:
        bulk_update(queryset, model_objs_to_update, update_fields)
    queryset.bulk_create(model_objs_to_create)

    # Optionally return the bulk upserted values
    if return_upserts_distinct:
        # return a list of lists, the first being the updated models, the second being the newly created objects
        return _get_upserts_distinct(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
    if return_upserts:
        return _get_upserts(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
    """
    Performs a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.

    This function calls bulk_upsert underneath the hood with sync=True.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: The models to sync
    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an
        object in objs matches a model from the queryset.
    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.
    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert) when performing
        bulk upsert.
    """
    # Delegate to bulk_upsert with sync=True so rows absent from model_objs are deleted.
    return bulk_upsert(queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs)
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
    """
    Performs a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.

    Note: The definition of a sync requires that we return untouched rows from the upsert operation. There is
    no way to turn off returning untouched rows in a sync.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
        model_objs (List[Model]): A list of Django models to sync. All models in this list
            will be bulk upserted and any models not in the table (or queryset) will be deleted
            if sync=True.
        unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
            model must have a unique constraint on these fields
        update_fields (List[str], default=None): A list of fields to update whenever objects
            already exist. If an empty list is provided, it is equivalent to doing a bulk
            insert on the objects that don't exist. If `None`, all fields will be updated.
        returning (bool|List[str]): If True, returns all fields. If a list, only returns
            fields in the list. Return values are split in a tuple of created, updated, and
            deleted models.
        ignore_duplicate_updates (bool, default=True): Ignore updating a row in the upsert if all
            of the update fields are duplicates

    Returns:
        UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, untouched,
        and deleted results can be obtained by accessing the ``created``, ``updated``, ``untouched``,
        and ``deleted`` properties of the result.
    """
    # Delegate to the native-postgres upsert with sync=True so rows absent from
    # model_objs are deleted from the collection.
    results = upsert2.upsert(queryset, model_objs, unique_fields,
                             update_fields=update_fields, returning=returning, sync=True,
                             ignore_duplicate_updates=ignore_duplicate_updates)
    # Notify listeners that a bulk operation touched this model's table.
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return results
def get_or_none(queryset, **query_params):
    """
    Get an object or return None if it doesn't exist.

    :param query_params: The query parameters used in the lookup.
    :returns: A model object if one exists with the query params, None otherwise.

    Examples:
        .. code-block:: python

            model_obj = get_or_none(TestModel.objects, int_field=1)
            print(model_obj)
            None

            TestModel.objects.create(int_field=1)
            model_obj = get_or_none(TestModel.objects, int_field=1)
            print(model_obj.int_field)
            1
    """
    # EAFP: try the lookup and map the model's DoesNotExist to None.
    try:
        obj = queryset.get(**query_params)
    except queryset.model.DoesNotExist:
        obj = None
    return obj
def single(queryset):
    """
    Assumes that this model only has one element in the table and returns it.
    If the table has more than one or no value, an exception is raised.

    :returns: The only model object in the queryset.
    :raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
        error when the object does not exist or a
        :class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
        error when there is more than one object.

    Examples:
        .. code-block:: python

            TestModel.objects.create(int_field=1)
            model_obj = single(TestModel.objects)
            print(model_obj.int_field)
            1
    """
    # get() with no filters enforces exactly-one-row semantics.
    return queryset.get()
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.

    Issues a single ``UPDATE ... FROM (VALUES ...)`` statement that joins the
    provided values onto the table by primary key (postgres-style VALUES list
    with ``::type`` casts — assumes a postgres backend; TODO confirm).

    :param manager: The manager (or queryset) of the model being updated.
    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.
    :type fields_to_update: list of str
    :param fields_to_update: A list of fields to be updated. Only these fields will be updated
    :signals: Emits a post_bulk_operation signal when completed.

    Examples:

    .. code-block:: python

        # Create a couple test models
        model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
        model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')

        # Change their fields and do a bulk update
        model_obj1.int_field = 10
        model_obj1.float_field = 20.0
        model_obj2.int_field = 30
        model_obj2.float_field = 40.0
        bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])

        # Reload the models and view their changes
        model_obj1 = TestModel.objects.get(id=model_obj1.id)
        print(model_obj1.int_field, model_obj1.float_field)
        10, 20.0
        model_obj2 = TestModel.objects.get(id=model_obj2.id)
        print(model_obj2.int_field, model_obj2.float_field)
        30, 40.0
    """
    # Add the pk to the value fields so we can join
    value_fields = [manager.model._meta.pk.attname] + fields_to_update
    # Build the row values (each value is prepped for the db adapter)
    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]
    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return
    # Create a map of db types, parallel to value_fields
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]
    # Build the value fields sql (quoted column names for the VALUES alias)
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )
    # Build the set sql
    update_fields_sql = ', '.join([
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    ])
    # Build the values sql. Explicit ::type casts are emitted only for the
    # first row (and never for the pk column at i == 0); the database infers
    # the remaining rows' types from the first — TODO confirm pk needs no cast.
    values_sql = ', '.join([
        '({0})'.format(
            ', '.join([
                '%s::{0}'.format(
                    db_types[i]
                ) if not row_number and i else '%s'
                for i, _ in enumerate(row)
            ])
        )
        for row_number, row in enumerate(row_values)
    ])
    # Start building the query
    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )
    # Combine all the row values into one flat parameter list
    update_sql_params = list(itertools.chain(*row_values))
    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)
    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Performs an update on an object or an insert if the object does not exist.

    :type defaults: dict
    :param defaults: These values are set when the object is created, but are irrelevant
        when the object already exists. This field should only be used when values only need to
        be set during creation. The passed-in dict is never mutated.
    :type updates: dict
    :param updates: These values are updated when the object is updated. They also override any
        values provided in the defaults when inserting the object.
    :param kwargs: These values provide the arguments used when checking for the existence of
        the object. They are used in a similar manner to Django's get_or_create function.
    :returns: A tuple of the upserted object and a Boolean that is True if it was created
        (False otherwise)

    Examples:

    .. code-block:: python

        # Upsert a test model with an int value of 1. Use default values that will be given
        # to it when created
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 2.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0

        # Do an upsert on that same model with different default fields. Since it already
        # exists, the defaults are not used
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0

        # In order to update the float field in an existing object, use the updates dictionary
        model_obj, created = upsert(TestModel.objects, int_field=1, updates={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 3.0

        # You can use updates on a newly created object that will also be used as initial values.
        model_obj, created = upsert(TestModel.objects, int_field=2, updates={'float_field': 4.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        2, 4.0
    """
    # Copy before merging so the caller's `defaults` dict is never mutated
    # (the original implementation updated it in place).
    creation_values = dict(defaults) if defaults else {}
    # Updates override defaults when the object has to be created.
    creation_values.update(updates or {})
    # Do a get or create
    obj, created = manager.get_or_create(defaults=creation_values, **kwargs)
    # Only write when the object pre-existed and at least one value actually differs.
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        # Persist only the touched columns.
        obj.save(update_fields=list(updates))
    return obj, created
class ManagerUtilsQuerySet(QuerySet):
    """
    QuerySet subclass that exposes the manager-utils helpers directly on
    queryset instances.
    """
    def id_dict(self):
        """Return a dictionary of every object in the queryset keyed on its pk."""
        return id_dict(self)

    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Bulk upsert ``model_objs`` against the rows selected by this queryset."""
        return bulk_upsert(
            self,
            model_objs,
            unique_fields,
            update_fields=update_fields,
            return_upserts=return_upserts,
            native=native,
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Bulk upsert using the native postgres implementation in ``upsert2``."""
        return bulk_upsert2(
            self,
            model_objs,
            unique_fields,
            update_fields=update_fields,
            returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched,
        )

    def bulk_create(self, *args, **kwargs):
        """
        Same as Django's ``bulk_create``, but emits a ``post_bulk_operation``
        signal once the insert has finished.
        """
        created = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return created

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Make this queryset's contents match ``model_objs`` exactly."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Sync using the native postgres implementation in ``upsert2``."""
        return sync2(
            self,
            model_objs,
            unique_fields,
            update_fields=update_fields,
            returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
        )

    def get_or_none(self, **query_params):
        """Return the matching object, or ``None`` when it does not exist."""
        return get_or_none(self, **query_params)

    def single(self):
        """Return the only object selected by this queryset."""
        return single(self)

    def update(self, **kwargs):
        """
        Same as Django's ``update``, but emits a ``post_bulk_operation`` signal
        once the update has finished.
        """
        updated_count = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return updated_count
class ManagerUtilsMixin(object):
    """
    Mixin for Django model managers that layers the manager-utils helpers on
    top of the regular ``Manager`` functionality.
    """
    def get_queryset(self):
        """Use the manager-utils queryset so the helpers are chainable."""
        return ManagerUtilsQuerySet(self.model)

    def id_dict(self):
        """Return a dictionary of every object in the table keyed on its pk."""
        return id_dict(self.get_queryset())

    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
            native=False):
        """Bulk upsert ``model_objs`` into the model's table."""
        return bulk_upsert(
            self.get_queryset(),
            model_objs,
            unique_fields,
            update_fields=update_fields,
            return_upserts=return_upserts,
            return_upserts_distinct=return_upserts_distinct,
            native=native,
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Bulk upsert using the native postgres implementation in ``upsert2``."""
        return bulk_upsert2(
            self.get_queryset(),
            model_objs,
            unique_fields,
            update_fields=update_fields,
            returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched,
        )

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Make the model's table match ``model_objs`` exactly."""
        return sync(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Sync using the native postgres implementation in ``upsert2``."""
        return sync2(
            self.get_queryset(),
            model_objs,
            unique_fields,
            update_fields=update_fields,
            returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
        )

    def bulk_update(self, model_objs, fields_to_update):
        """Bulk update already-saved ``model_objs`` in a single statement."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)

    def upsert(self, defaults=None, updates=None, **kwargs):
        """Update the matching object, creating it first if necessary."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)

    def get_or_none(self, **query_params):
        """Return the matching object, or ``None`` when it does not exist."""
        return get_or_none(self.get_queryset(), **query_params)

    def single(self):
        """Return the only object in the model's table."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """
    A ready-to-use manager: Django's ``Manager`` with the manager-utils mixin
    already applied.
    """
|
ambitioninc/django-manager-utils | manager_utils/manager_utils.py | sync | python | def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
return bulk_upsert(queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs) | Performs a sync operation on a queryset, making the contents of the
queryset match the contents of model_objs.
This function calls bulk_upsert underneath the hood with sync=True.
:type model_objs: list of :class:`Models<django:django.db.models.Model>`
:param model_objs: The models to sync
:type update_fields: list of str
:param unique_fields: A list of fields that are used to determine if an
object in objs matches a model from the queryset.
:type update_fields: list of str
:param update_fields: A list of fields used from the objects in objs as fields when updating existing
models. If None, this function will only perform a bulk create for model_objs that do not
currently exist in the database.
:type native: bool
:param native: A flag specifying whether to use postgres insert on conflict (upsert) when performing
bulk upsert. | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/manager_utils.py#L375-L398 | [
"def bulk_upsert(\n queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,\n sync=False, native=False\n):\n \"\"\"\n Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset\n with the objs provided using the field values in unique_fields.\n If an existing object is matched, it is updated with the values from the provided objects. Objects\n that don't match anything are bulk created.\n A user can provide a list update_fields so that any changed values on those fields will be updated.\n However, if update_fields is not provided, this function reduces down to performing a bulk_create\n on any non extant objects.\n\n :type model_objs: list of dict\n :param model_objs: A list of dictionaries that have fields corresponding to the model in the manager.\n\n :type unique_fields: list of str\n :param unique_fields: A list of fields that are used to determine if an object in objs matches a model\n from the queryset.\n\n :type update_fields: list of str\n :param update_fields: A list of fields used from the objects in objs as fields when updating existing\n models. If None, this function will only perform a bulk create for model_objs that do not\n currently exist in the database.\n\n :type return_upserts_distinct: bool\n :param return_upserts_distinct: A flag specifying whether to return the upserted values as a list of distinct lists,\n one containing the updated models and the other containing the new models. If True, this performs an\n additional query to fetch any bulk created values.\n\n :type return_upserts: bool\n :param return_upserts: A flag specifying whether to return the upserted values. If True, this performs\n an additional query to fetch any bulk created values.\n\n :type sync: bool\n :param sync: A flag specifying whether a sync operation should be applied to the bulk_upsert. 
If this\n is True, all values in the queryset that were not updated will be deleted such that the\n entire list of model objects is synced to the queryset.\n\n :type native: bool\n :param native: A flag specifying whether to use postgres insert on conflict (upsert).\n\n :signals: Emits a post_bulk_operation when a bulk_update or a bulk_create occurs.\n\n Examples:\n\n .. code-block:: python\n\n # Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes\n # a char_field, int_field, and float_field\n bulk_upsert(TestModel.objects.all(), [\n TestModel(float_field=1.0, char_field='1', int_field=1),\n TestModel(float_field=2.0, char_field='2', int_field=2),\n TestModel(float_field=3.0, char_field='3', int_field=3),\n ], ['int_field'], ['char_field'])\n\n # All objects should have been created\n print(TestModel.objects.count())\n 3\n\n # Now perform a bulk upsert on all the char_field values. Since the objects existed previously\n # (known by the int_field uniqueness constraint), the char fields should be updated\n bulk_upsert(TestModel.objects.all(), [\n TestModel(float_field=1.0, char_field='0', int_field=1),\n TestModel(float_field=2.0, char_field='0', int_field=2),\n TestModel(float_field=3.0, char_field='0', int_field=3),\n ], ['int_field'], ['char_field'])\n\n # No more new objects should have been created, and every char field should be 0\n print(TestModel.objects.count(), TestModel.objects.filter(char_field='-1').count())\n 3, 3\n\n # Do the exact same operation, but this time add an additional object that is not already\n # stored. 
It will be created.\n bulk_upsert(TestModel.objects.all(), [\n TestModel(float_field=1.0, char_field='1', int_field=1),\n TestModel(float_field=2.0, char_field='2', int_field=2),\n TestModel(float_field=3.0, char_field='3', int_field=3),\n TestModel(float_field=4.0, char_field='4', int_field=4),\n ], ['int_field'], ['char_field'])\n\n # There should be one more object\n print(TestModel.objects.count())\n 4\n\n # Note that one can also do the upsert on a queryset. Perform the same data upsert on a\n # filter for int_field=1. In this case, only one object has the ability to be updated.\n # All of the other objects will be created\n bulk_upsert(TestModel.objects.filter(int_field=1), [\n TestModel(float_field=1.0, char_field='1', int_field=1),\n TestModel(float_field=2.0, char_field='2', int_field=2),\n TestModel(float_field=3.0, char_field='3', int_field=3),\n TestModel(float_field=4.0, char_field='4', int_field=4),\n ], ['int_field'], ['char_field'])\n\n # There should be three more objects\n print(TestModel.objects.count())\n 7\n\n \"\"\"\n if not unique_fields:\n raise ValueError('Must provide unique_fields argument')\n update_fields = update_fields or []\n\n if native:\n if return_upserts_distinct:\n raise NotImplementedError('return upserts distinct not supported with native postgres upsert')\n return_value = Query().from_table(table=queryset.model).upsert(\n model_objs, unique_fields, update_fields, return_models=return_upserts or sync\n ) or []\n if sync:\n orig_ids = frozenset(queryset.values_list('pk', flat=True))\n queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()\n\n post_bulk_operation.send(sender=queryset.model, model=queryset.model)\n\n return return_value\n\n # Create a look up table for all of the objects in the queryset keyed on the unique_fields\n extant_model_objs = {\n tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj\n for extant_model_obj in queryset\n }\n\n # Find all of the 
objects to update and all of the objects to create\n model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(\n model_objs, unique_fields, update_fields, extant_model_objs)\n\n # Find all objects in the queryset that will not be updated. These will be deleted if the sync option is\n # True\n if sync:\n model_objs_to_update_set = frozenset(model_objs_to_update)\n model_objs_to_delete = [\n model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set\n ]\n if model_objs_to_delete:\n queryset.filter(pk__in=model_objs_to_delete).delete()\n\n # Apply bulk updates and creates\n if update_fields:\n bulk_update(queryset, model_objs_to_update, update_fields)\n queryset.bulk_create(model_objs_to_create)\n\n # Optionally return the bulk upserted values\n if return_upserts_distinct:\n # return a list of lists, the first being the updated models, the second being the newly created objects\n return _get_upserts_distinct(queryset, model_objs_to_update, model_objs_to_create, unique_fields)\n if return_upserts:\n return _get_upserts(queryset, model_objs_to_update, model_objs_to_create, unique_fields)\n"
] | import itertools
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs
post_bulk_operation = Signal(providing_args=['model'])
def id_dict(queryset):
    """
    Build a mapping of primary key -> object for everything in the queryset.

    :rtype: dict
    :returns: A dictionary of objects from the queryset or manager that is keyed
        on the objects' IDs.

    Examples:

    .. code-block:: python

        TestModel.objects.create(int_field=1)
        TestModel.objects.create(int_field=2)
        print(id_dict(TestModel.objects.all()))
    """
    objs_by_pk = {}
    for model_obj in queryset:
        objs_by_pk[model_obj.pk] = model_obj
    return objs_by_pk
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
"""
Given a list of model objects that were updated and model objects that were created,
fetch the pks of the newly created models and return the two lists in a tuple
"""
# Keep track of the created models
created_models = []
# If we created new models query for them
if model_objs_created:
created_models.extend(
queryset.extra(
where=['({unique_fields_sql}) in %s'.format(
unique_fields_sql=', '.join(unique_fields)
)],
params=[
tuple([
tuple([
getattr(model_obj, field)
for field in unique_fields
])
for model_obj in model_objs_created
])
]
)
)
# Return the models
return model_objs_updated, created_models
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Return the full list of upserted models: the updated ones followed by the
    created ones.

    Fetching the created models requires an extra query because Django's
    bulk_create cannot return the pks of the rows it inserted here.
    """
    updated_models, created_models = _get_upserts_distinct(
        queryset, model_objs_updated, model_objs_created, unique_fields
    )
    return updated_models + created_models
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def _get_prepped_model_field(model_obj, field):
    """
    Return the value of ``field`` on ``model_obj``, prepared for the database.

    The value is passed through the field's ``get_db_prep_save`` so it is in
    the exact form the database adapter expects.
    """
    model_field = model_obj._meta.get_field(field)
    raw_value = getattr(model_obj, model_field.attname)
    return model_field.get_db_prep_save(raw_value, connection)
def bulk_upsert(
    queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
    sync=False, native=False
):
    """
    Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
    with the objs provided using the field values in unique_fields.
    If an existing object is matched, it is updated with the values from the provided objects. Objects
    that don't match anything are bulk created.
    A user can provide a list update_fields so that any changed values on those fields will be updated.
    However, if update_fields is not provided, this function reduces down to performing a bulk_create
    on any non extant objects.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have fields corresponding to the model in the manager.

    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an object in objs matches a model
        from the queryset.

    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.

    :type return_upserts_distinct: bool
    :param return_upserts_distinct: A flag specifying whether to return the upserted values as a list of
        distinct lists, one containing the updated models and the other containing the new models. If True,
        this performs an additional query to fetch any bulk created values.

    :type return_upserts: bool
    :param return_upserts: A flag specifying whether to return the upserted values. If True, this performs
        an additional query to fetch any bulk created values.

    :type sync: bool
    :param sync: A flag specifying whether a sync operation should be applied to the bulk_upsert. If this
        is True, all values in the queryset that were not updated will be deleted such that the
        entire list of model objects is synced to the queryset.

    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert).

    :signals: Emits a post_bulk_operation when a bulk_update or a bulk_create occurs.

    Examples:

    .. code-block:: python

        # Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
        # a char_field, int_field, and float_field
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
        ], ['int_field'], ['char_field'])

        # All objects should have been created
        print(TestModel.objects.count())
        3

        # Now perform a bulk upsert on all the char_field values. Since the objects existed previously
        # (known by the int_field uniqueness constraint), the char fields should be updated
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='0', int_field=1),
            TestModel(float_field=2.0, char_field='0', int_field=2),
            TestModel(float_field=3.0, char_field='0', int_field=3),
        ], ['int_field'], ['char_field'])

        # No more new objects should have been created, and every char field should be 0
        print(TestModel.objects.count(), TestModel.objects.filter(char_field='0').count())
        3, 3

        # Do the exact same operation, but this time add an additional object that is not already
        # stored. It will be created.
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be one more object
        print(TestModel.objects.count())
        4

        # Note that one can also do the upsert on a queryset. Perform the same data upsert on a
        # filter for int_field=1. In this case, only one object has the ability to be updated.
        # All of the other objects will be created
        bulk_upsert(TestModel.objects.filter(int_field=1), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])

        # There should be three more objects
        print(TestModel.objects.count())
        7
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []
    if native:
        # Native path: a single INSERT ... ON CONFLICT statement built by querybuilder.
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        return_value = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            # Delete every row that existed before but was not touched by the upsert.
            orig_ids = frozenset(queryset.values_list('pk', flat=True))
            queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return return_value
    # Create a look up table for all of the objects in the queryset keyed on the unique_fields.
    # NOTE(review): this loads the whole queryset into memory.
    extant_model_objs = {
        tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj
        for extant_model_obj in queryset
    }
    # Find all of the objects to update and all of the objects to create
    model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, extant_model_objs)
    # Find all objects in the queryset that will not be updated. These will be deleted if the sync option is
    # True
    if sync:
        model_objs_to_update_set = frozenset(model_objs_to_update)
        model_objs_to_delete = [
            model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set
        ]
        if model_objs_to_delete:
            queryset.filter(pk__in=model_objs_to_delete).delete()
    # Apply bulk updates and creates
    if update_fields:
        bulk_update(queryset, model_objs_to_update, update_fields)
    queryset.bulk_create(model_objs_to_create)
    # Optionally return the bulk upserted values (implicitly returns None otherwise)
    if return_upserts_distinct:
        # return a list of lists, the first being the updated models, the second being the newly created objects
        return _get_upserts_distinct(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
    if return_upserts:
        return _get_upserts(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
def bulk_upsert2(
    queryset, model_objs, unique_fields, update_fields=None, returning=False,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Bulk upsert ``model_objs`` into the collection defined by ``queryset``
    using the native postgres implementation in ``upsert2``.

    Objects are matched to existing rows by ``unique_fields``; matches are
    updated with the provided values and everything else is inserted. When
    ``update_fields`` is an empty list this reduces to a bulk insert of the
    objects that do not yet exist.

    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
        model_objs (List[Model]): A list of Django models to upsert. All models in this list
            will be bulk upserted.
        unique_fields (List[str]): A list of fields that define the uniqueness of the model.
            The model must have a unique constraint on these fields
        update_fields (List[str], default=None): A list of fields to update whenever objects
            already exist. An empty list is equivalent to a bulk insert of the objects that
            don't exist. ``None`` means all fields are updated.
        returning (bool|List[str]): If ``True``, returns all fields. If a list, only returns
            fields in the list. Return values are split in a tuple of created and updated models
        ignore_duplicate_updates (bool, default=True): Skip updating a row when every update
            field is already identical
        return_untouched (bool, default=False): Also return rows the upsert did not touch

    Returns:
        UpsertResult: A list of results if ``returning`` is not ``False``. created, updated,
        and untouched results can be obtained by accessing the ``created``, ``updated``, and
        ``untouched`` properties of the result.

    Examples:

    .. code-block:: python

        # Create three objects, matching on int_field and updating char_field
        bulk_upsert2(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
        ], ['int_field'], ['char_field'])
        print(TestModel.objects.count())
        3

        # Upserting again with changed char fields updates the existing rows in place
        bulk_upsert2(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='0', int_field=1),
            TestModel(float_field=2.0, char_field='0', int_field=2),
            TestModel(float_field=3.0, char_field='0', int_field=3),
        ], ['int_field'], ['char_field'])
        print(TestModel.objects.count())
        3

        # returning=True splits the results into created and updated models
        created, updated = bulk_upsert2(TestModel.objects.all(), [
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'], returning=True)
    """
    upsert_results = upsert2.upsert(
        queryset,
        model_objs,
        unique_fields,
        update_fields=update_fields,
        returning=returning,
        ignore_duplicate_updates=ignore_duplicate_updates,
        return_untouched=return_untouched,
    )
    # Let listeners know a bulk operation just touched this model's table.
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return upsert_results
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
    """
    Make the contents of ``queryset`` match ``model_objs`` exactly.

    Rows in ``model_objs`` are bulk upserted, and rows in the queryset that are
    absent from ``model_objs`` are deleted. The underlying upsert always returns
    untouched rows during a sync; that cannot be turned off.

    Args:
        queryset (Model|QuerySet): A model or queryset defining the collection to sync.
        model_objs (List[Model]): The Django models the collection should contain.
        unique_fields (List[str]): Fields that define model uniqueness. The model
            must have a unique constraint on these fields.
        update_fields (List[str], default=None): Fields to update on rows that
            already exist. An empty list is equivalent to a bulk insert of the
            missing rows only; ``None`` updates all fields.
        returning (bool|List[str]): If ``True``, return all fields; if a list,
            return only the listed fields.
        ignore_duplicate_updates (bool, default=True): Skip updating rows whose
            update fields are all unchanged.

    Returns:
        UpsertResult: Created, updated, untouched, and deleted rows are available
        via the ``created``, ``updated``, ``untouched``, and ``deleted``
        properties of the result.
    """
    sync_results = upsert2.upsert(
        queryset,
        model_objs,
        unique_fields,
        update_fields=update_fields,
        returning=returning,
        sync=True,
        ignore_duplicate_updates=ignore_duplicate_updates,
    )
    # Notify listeners that a bulk operation touched this model's table
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return sync_results
def get_or_none(queryset, **query_params):
    """
    Look up a single object, returning ``None`` instead of raising when missing.

    :param query_params: The lookup parameters, as accepted by ``QuerySet.get``.
    :returns: The matching model instance, or ``None`` if no row matches.
    """
    try:
        return queryset.get(**query_params)
    except queryset.model.DoesNotExist:
        return None
def single(queryset):
    """
    Return the sole object in the queryset.

    Assumes the queryset contains exactly one element.

    :returns: The only model object in the queryset.
    :raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
        when the queryset is empty, or
        :class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
        when there is more than one object.
    """
    return queryset.get()
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.

    Issues a single ``UPDATE ... SET ... FROM (VALUES ...)`` statement that joins
    the provided rows to the table on the primary key (PostgreSQL-specific syntax).

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.
    :param fields_to_update: A list of fields to be updated. Only these fields will be updated.
    :signals: Emits a post_bulk_operation signal when completed.
    """
    # Add the pk to the value fields so we can join on it in the UPDATE statement
    value_fields = [manager.model._meta.pk.attname] + fields_to_update
    # Build the row values, converting each python value to its db representation
    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]
    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return
    # Map each value column to its db type so VALUES entries can be cast explicitly
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]
    # Quoted column list for the "new_values" alias of the VALUES clause
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )
    # Build the SET clause: each update column is taken from the joined VALUES row
    update_fields_sql = ', '.join([
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    ])
    # Build the VALUES clause. Explicit ::type casts are emitted only for the first
    # row and never for the pk column (index 0) -- presumably postgres infers the
    # remaining rows' types from the first row and the pk via the join; TODO confirm.
    values_sql = ', '.join([
        '({0})'.format(
            ', '.join([
                '%s::{0}'.format(
                    db_types[i]
                ) if not row_number and i else '%s'
                for i, _ in enumerate(row)
            ])
        )
        for row_number, row in enumerate(row_values)
    ])
    # Assemble the full UPDATE ... FROM (VALUES ...) statement
    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )
    # Flatten the row values into one parameter list matching the %s placeholders
    update_sql_params = list(itertools.chain(*row_values))
    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)
    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Update an object if it exists, otherwise insert it.

    :type defaults: dict
    :param defaults: Values set only when the object is created; ignored when the
        object already exists. The caller's dict is never mutated.
    :type updates: dict
    :param updates: Values applied when the object already exists. They also
        override any values in ``defaults`` when the object is being created.
    :param kwargs: Lookup arguments used to check for existence, in the same
        manner as Django's ``get_or_create``.
    :returns: A ``(object, created)`` tuple where ``created`` is True if the
        object was inserted and False otherwise.
    """
    # Copy so the caller's defaults dict is not mutated by the merge below
    # (the original code called defaults.update(...) on the caller's dict).
    creation_values = dict(defaults or {})
    # Updates take precedence over defaults on creation
    creation_values.update(updates or {})
    # Do a get or create
    obj, created = manager.get_or_create(defaults=creation_values, **kwargs)
    # Only hit the database again when an existing row actually changed
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        # update_fields accepts any iterable of field names; a dict iterates its keys
        obj.save(update_fields=updates)
    return obj, created
class ManagerUtilsQuerySet(QuerySet):
    """
    A ``QuerySet`` subclass exposing the manager-utils helper functions as
    chainable queryset methods.
    """

    def id_dict(self):
        """Return this queryset's objects keyed on their primary keys."""
        return id_dict(self)

    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Bulk update-or-insert ``model_objs`` against this queryset."""
        return bulk_upsert(
            self,
            model_objs,
            unique_fields,
            update_fields=update_fields,
            return_upserts=return_upserts,
            native=native,
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Bulk update-or-insert ``model_objs`` using the upsert2 implementation."""
        return bulk_upsert2(
            self,
            model_objs,
            unique_fields,
            update_fields=update_fields,
            returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched,
        )

    def bulk_create(self, *args, **kwargs):
        """
        Same as Django's ``bulk_create``, but emits a ``post_bulk_operation``
        signal once the insert finishes.
        """
        created = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return created

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync this queryset's contents to ``model_objs``."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Sync this queryset's contents to ``model_objs`` using the upsert2 implementation."""
        return sync2(
            self,
            model_objs,
            unique_fields,
            update_fields=update_fields,
            returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
        )

    def get_or_none(self, **query_params):
        """Return the matching object or ``None`` when it does not exist."""
        return get_or_none(self, **query_params)

    def single(self):
        """Return the queryset's only object, raising unless there is exactly one."""
        return single(self)

    def update(self, **kwargs):
        """
        Same as Django's ``update``, but emits a ``post_bulk_operation`` signal
        once the update finishes.
        """
        updated_count = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return updated_count
class ManagerUtilsMixin(object):
    """
    A mixin for Django model managers that layers the manager-utils helpers on
    top of the regular manager API.
    """

    def get_queryset(self):
        """Use :class:`ManagerUtilsQuerySet` so the helpers chain off querysets too."""
        return ManagerUtilsQuerySet(self.model)

    def id_dict(self):
        """Return all objects keyed on their primary keys."""
        return id_dict(self.get_queryset())

    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
            native=False):
        """Bulk update-or-insert ``model_objs``."""
        return bulk_upsert(
            self.get_queryset(),
            model_objs,
            unique_fields,
            update_fields=update_fields,
            return_upserts=return_upserts,
            return_upserts_distinct=return_upserts_distinct,
            native=native,
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Bulk update-or-insert ``model_objs`` using the upsert2 implementation."""
        return bulk_upsert2(
            self.get_queryset(),
            model_objs,
            unique_fields,
            update_fields=update_fields,
            returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched,
        )

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync the table's contents to ``model_objs``."""
        return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Sync the table's contents to ``model_objs`` using the upsert2 implementation."""
        return sync2(
            self.get_queryset(),
            model_objs,
            unique_fields,
            update_fields=update_fields,
            returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
        )

    def bulk_update(self, model_objs, fields_to_update):
        """Bulk update already-saved ``model_objs``, touching only ``fields_to_update``."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)

    def upsert(self, defaults=None, updates=None, **kwargs):
        """Update a single object if it exists, otherwise insert it."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)

    def get_or_none(self, **query_params):
        """Return the matching object or ``None`` when it does not exist."""
        return get_or_none(self.get_queryset(), **query_params)

    def single(self):
        """Return the table's only object, raising unless there is exactly one."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """
    A drop-in Django manager: combines the stock ``Manager`` with
    :class:`ManagerUtilsMixin`, giving models the manager-utils helpers
    (``bulk_upsert``, ``sync``, ``get_or_none``, ``single``, ...) with no
    extra code.
    """
    pass
|
ambitioninc/django-manager-utils | manager_utils/manager_utils.py | sync2 | python | def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
results = upsert2.upsert(queryset, model_objs, unique_fields,
update_fields=update_fields, returning=returning, sync=True,
ignore_duplicate_updates=ignore_duplicate_updates)
post_bulk_operation.send(sender=queryset.model, model=queryset.model)
return results | Performs a sync operation on a queryset, making the contents of the
queryset match the contents of model_objs.
Note: The definition of a sync requires that we return untouched rows from the upsert opertion. There is
no way to turn off returning untouched rows in a sync.
Args:
queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
model_objs (List[Model]): A list of Django models to sync. All models in this list
will be bulk upserted and any models not in the table (or queryset) will be deleted
if sync=True.
unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
model must have a unique constraint on these fields
update_fields (List[str], default=None): A list of fields to update whenever objects
already exist. If an empty list is provided, it is equivalent to doing a bulk
insert on the objects that don't exist. If `None`, all fields will be updated.
returning (bool|List[str]): If True, returns all fields. If a list, only returns
fields in the list. Return values are split in a tuple of created, updated, and
deleted models.
ignore_duplicate_updates (bool, default=False): Ignore updating a row in the upsert if all
of the update fields are duplicates
Returns:
UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, untouched,
and deleted results can be obtained by accessing the ``created``, ``updated``, ``untouched``,
and ``deleted`` properties of the result. | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/manager_utils.py#L401-L434 | [
"def upsert(\n queryset, model_objs, unique_fields,\n update_fields=None, returning=False, sync=False,\n ignore_duplicate_updates=True,\n return_untouched=False\n):\n \"\"\"\n Perform a bulk upsert on a table, optionally syncing the results.\n\n Args:\n queryset (Model|QuerySet): A model or a queryset that defines the collection to sync\n model_objs (List[Model]): A list of Django models to sync. All models in this list\n will be bulk upserted and any models not in the table (or queryset) will be deleted\n if sync=True.\n unique_fields (List[str]): A list of fields that define the uniqueness of the model. The\n model must have a unique constraint on these fields\n update_fields (List[str], default=None): A list of fields to update whenever objects\n already exist. If an empty list is provided, it is equivalent to doing a bulk\n insert on the objects that don't exist. If `None`, all fields will be updated.\n returning (bool|List[str]): If True, returns all fields. If a list, only returns\n fields in the list\n sync (bool, default=False): Perform a sync operation on the queryset\n ignore_duplicate_updates (bool, default=False): Don't perform an update if the row is\n a duplicate.\n return_untouched (bool, default=False): Return untouched rows by the operation\n \"\"\"\n queryset = queryset if isinstance(queryset, models.QuerySet) else queryset.objects.all()\n model = queryset.model\n\n # Populate automatically generated fields in the rows like date times\n _fill_auto_fields(model, model_objs)\n\n # Sort the rows to reduce the chances of deadlock during concurrent upserts\n model_objs = _sort_by_unique_fields(model, model_objs, unique_fields)\n update_fields = _get_update_fields(model, unique_fields, update_fields)\n\n return _fetch(queryset, model_objs, unique_fields, update_fields, returning, sync,\n ignore_duplicate_updates=ignore_duplicate_updates,\n return_untouched=return_untouched)\n"
] | import itertools
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs. Receivers are sent a
# ``model`` kwarg naming the model whose table was touched.
# Note: ``Signal(providing_args=...)`` was deprecated in Django 3.0 and removed
# in Django 4.0; the argument was purely documentational, so the kwarg list is
# recorded here instead of being passed to Signal().
post_bulk_operation = Signal()
def id_dict(queryset):
    """
    Build a dictionary mapping each object's primary key to the object.

    :rtype: dict
    :returns: The queryset's (or manager's) objects keyed on their ``pk`` values.
    """
    objects_by_pk = {}
    for model_obj in queryset:
        objects_by_pk[model_obj.pk] = model_obj
    return objects_by_pk
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Given a list of model objects that were updated and model objects that were created,
    fetch the created models from the database and return both lists as a tuple of
    (updated models, created models).

    The re-fetch is needed because Django's ``bulk_create`` cannot return primary
    keys (see :func:`_get_upserts`), so newly created rows are looked up again by
    matching their unique-field value tuples.
    """
    # Keep track of the created models
    created_models = []
    # If we created new models query for them
    if model_objs_created:
        created_models.extend(
            queryset.extra(
                # Raw "(field, ...) IN %s" clause. The interpolated unique_fields
                # are developer-supplied field names, not end-user input.
                where=['({unique_fields_sql}) in %s'.format(
                    unique_fields_sql=', '.join(unique_fields)
                )],
                # A single parameter: a tuple of unique-field value tuples,
                # one inner tuple per created model
                params=[
                    tuple([
                        tuple([
                            getattr(model_obj, field)
                            for field in unique_fields
                        ])
                        for model_obj in model_objs_created
                    ])
                ]
            )
        )
    # Return the updated models as-is along with the freshly fetched created models
    return model_objs_updated, created_models
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Return every model object touched by an upsert as a single list.

    Newly created models are re-fetched from the database (bulk_create cannot
    return primary keys) and concatenated after the updated models.
    """
    updated_models, created_models = _get_upserts_distinct(
        queryset, model_objs_updated, model_objs_created, unique_fields
    )
    return updated_models + created_models
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def _get_prepped_model_field(model_obj, field):
    """
    Return a field's value from ``model_obj``, prepared for saving to the database.
    """
    model_field = model_obj._meta.get_field(field)
    # Let the field convert the python value to its db representation
    return model_field.get_db_prep_save(getattr(model_obj, model_field.attname), connection)
def bulk_upsert(
    queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
    sync=False, native=False
):
    """
    Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
    with the objs provided using the field values in unique_fields.

    If an existing object is matched, it is updated with the values from the provided objects. Objects
    that don't match anything are bulk created. A user can provide a list update_fields so that any
    changed values on those fields will be updated. However, if update_fields is not provided, this
    function reduces down to performing a bulk_create on any non extant objects.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects with fields corresponding to the model in the manager.
        (Earlier docs said "list of dict"; the implementation reads values with ``getattr``, so model
        instances are expected.)
    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an object in objs matches a model
        from the queryset. Must be non-empty.
    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.
    :type return_upserts_distinct: bool
    :param return_upserts_distinct: If True, return a tuple of (updated models, created models). This
        performs an additional query to fetch any bulk created values. Not supported with native=True.
    :type return_upserts: bool
    :param return_upserts: If True, return a single list of all upserted models. This performs an
        additional query to fetch any bulk created values.
    :type sync: bool
    :param sync: If True, delete all values in the queryset that were not updated or created so that the
        entire list of model objects is synced to the queryset.
    :type native: bool
    :param native: If True, use postgres insert-on-conflict (upsert) via querybuilder instead of the
        select-then-update/create strategy.
    :signals: Emits a post_bulk_operation when a bulk_update or a bulk_create occurs.
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []

    if native:
        # Native path: a single round trip through postgres's ON CONFLICT upsert
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        # return_models is also needed when syncing so we know which pks survived
        return_value = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            # Delete every pre-existing row that the upsert did not touch
            orig_ids = frozenset(queryset.values_list('pk', flat=True))
            queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return return_value

    # Create a look up table for all of the objects in the queryset keyed on the unique_fields
    extant_model_objs = {
        tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj
        for extant_model_obj in queryset
    }

    # Find all of the objects to update and all of the objects to create
    model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, extant_model_objs)

    # Find all objects in the queryset that will not be updated. These are deleted
    # when the sync option is True
    if sync:
        model_objs_to_update_set = frozenset(model_objs_to_update)
        model_objs_to_delete = [
            model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set
        ]
        if model_objs_to_delete:
            queryset.filter(pk__in=model_objs_to_delete).delete()

    # Apply bulk updates and creates
    if update_fields:
        bulk_update(queryset, model_objs_to_update, update_fields)
    queryset.bulk_create(model_objs_to_create)

    # Optionally return the bulk upserted values
    if return_upserts_distinct:
        # Return a tuple of two lists: the updated models and the newly created models
        return _get_upserts_distinct(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
    if return_upserts:
        return _get_upserts(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
def bulk_upsert2(
    queryset, model_objs, unique_fields, update_fields=None, returning=False,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Bulk update-or-insert ``model_objs`` against ``queryset``.

    Objects in the queryset are matched to ``model_objs`` on ``unique_fields``;
    matches are updated with the values from the provided objects, and everything
    else is bulk created. When ``update_fields`` is an empty list, this reduces to
    a bulk insert of the objects that do not yet exist.

    Args:
        queryset (Model|QuerySet): A model or queryset defining the collection to upsert into.
        model_objs (List[Model]): The Django models to upsert.
        unique_fields (List[str]): Fields that define model uniqueness. The model
            must have a unique constraint on these fields.
        update_fields (List[str], default=None): Fields to update on rows that
            already exist. An empty list means only inserts are performed;
            ``None`` updates all fields.
        returning (bool|List[str]): If ``True``, return all fields; if a list,
            return only the listed fields, split into created and updated models.
        ignore_duplicate_updates (bool, default=True): Skip updating rows whose
            update fields are all unchanged.
        return_untouched (bool, default=False): Also return rows the operation
            did not touch.

    Returns:
        UpsertResult: Returned when ``returning`` is not ``False``. Created,
        updated, and untouched rows are available via the ``created``,
        ``updated``, and ``untouched`` properties of the result.
    """
    upsert_results = upsert2.upsert(
        queryset,
        model_objs,
        unique_fields,
        update_fields=update_fields,
        returning=returning,
        ignore_duplicate_updates=ignore_duplicate_updates,
        return_untouched=return_untouched,
    )
    # Notify listeners that a bulk operation touched this model's table
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return upsert_results
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
    """
    Make the contents of ``queryset`` match ``model_objs``.

    A thin wrapper around :func:`bulk_upsert` with ``sync=True``: the provided
    models are upserted, and any queryset rows absent from ``model_objs`` are
    deleted.

    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: The models to sync.
    :type unique_fields: list of str
    :param unique_fields: Fields used to match objects in ``model_objs`` to models
        in the queryset.
    :type update_fields: list of str
    :param update_fields: Fields copied from matching objects onto existing models.
        ``None`` restricts the operation to creating missing rows only.
    :param kwargs: Extra options forwarded to :func:`bulk_upsert`
        (e.g. ``native`` to use postgres insert-on-conflict).
    """
    return bulk_upsert(
        queryset, model_objs, unique_fields,
        update_fields=update_fields, sync=True, **kwargs
    )
def get_or_none(queryset, **query_params):
    """
    Fetch a single object, swallowing the not-found case.

    :param query_params: The lookup parameters, as accepted by ``QuerySet.get``.
    :returns: The matching model instance, or ``None`` if no row matches.
    """
    try:
        return queryset.get(**query_params)
    except queryset.model.DoesNotExist:
        return None
def single(queryset):
    """
    Assumes that this model only has one element in the table and returns it.
    If the table has more than one or no value, an exception is raised.
    :returns: The only model object in the queryset.
    :raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
        error when the object does not exist or a
        :class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
        error when there is more than one object.
    Examples:
    .. code-block:: python
        TestModel.objects.create(int_field=1)
        model_obj = single(TestModel.objects)
        print(model_obj.int_field)
        1
    """
    # ``get()`` with no filters raises unless exactly one row matches the queryset
    return queryset.get()
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.
    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.
    :type fields_to_update: list of str
    :param fields_to_update: A list of fields to be updated. Only these fields will be updated
    :signals: Emits a post_bulk_operation signal when completed.
    Examples:
    .. code-block:: python
        # Create a couple test models
        model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
        model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')
        # Change their fields and do a bulk update
        model_obj1.int_field = 10
        model_obj1.float_field = 20.0
        model_obj2.int_field = 30
        model_obj2.float_field = 40.0
        bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])
        # Reload the models and view their changes
        model_obj1 = TestModel.objects.get(id=model_obj1.id)
        print(model_obj1.int_field, model_obj1.float_field)
        10, 20.0
        model_obj2 = TestModel.objects.get(id=model_obj2.id)
        print(model_obj2.int_field, model_obj2.float_field)
        30, 40.0
    """
    # Add the pk to the value fields so we can join
    value_fields = [manager.model._meta.pk.attname] + fields_to_update
    # Build the row values
    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]
    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return
    # Create a map of db types
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]
    # Build the value fields sql
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )
    # Build the set sql
    update_fields_sql = ', '.join([
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    ])
    # Build the values sql.
    # NOTE(review): explicit ``%s::type`` casts are only emitted for the first
    # row (and never for the pk column, i == 0) — the database infers the
    # remaining rows' types from the first VALUES row.
    values_sql = ', '.join([
        '({0})'.format(
            ', '.join([
                '%s::{0}'.format(
                    db_types[i]
                ) if not row_number and i else '%s'
                for i, _ in enumerate(row)
            ])
        )
        for row_number, row in enumerate(row_values)
    ])
    # Start building the query: a single UPDATE joined to a VALUES table
    # (postgres-style ``UPDATE ... FROM (VALUES ...)``)
    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )
    # Combine all the row values; they are passed as parameters, never interpolated
    update_sql_params = list(itertools.chain(*row_values))
    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)
    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Performs an update on an object or an insert if the object does not exist.
    :type defaults: dict
    :param defaults: These values are set when the object is created, but are irrelevant
        when the object already exists. This field should only be used when values only need to
        be set during creation. The passed dictionary is never mutated.
    :type updates: dict
    :param updates: These values are updated when the object is updated. They also override any
        values provided in the defaults when inserting the object.
    :param kwargs: These values provide the arguments used when checking for the existence of
        the object. They are used in a similar manner to Django's get_or_create function.
    :returns: A tuple of the upserted object and a Boolean that is True if it was created (False otherwise)
    Examples:
    .. code-block:: python
        # Upsert a test model with an int value of 1. Use default values that will be given to it when created
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 2.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0
        # Do an upsert on that same model with different default fields. Since it already exists, the defaults
        # are not used
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 2.0
        # In order to update the float field in an existing object, use the updates dictionary
        model_obj, created = upsert(TestModel.objects, int_field=1, updates={'float_field': 3.0})
        print(created)
        False
        print(model_obj.int_field, model_obj.float_field)
        1, 3.0
        # You can use updates on a newly created object that will also be used as initial values.
        model_obj, created = upsert(TestModel.objects, int_field=2, updates={'float_field': 4.0})
        print(created)
        True
        print(model_obj.int_field, model_obj.float_field)
        2, 4.0
    """
    # Copy so the caller's ``defaults`` dict is never mutated by the update below
    # (previously the caller-supplied dict was modified in place).
    defaults = dict(defaults) if defaults else {}
    # Override any defaults with updates
    defaults.update(updates or {})
    # Do a get or create
    obj, created = manager.get_or_create(defaults=defaults, **kwargs)
    # Only save when an existing object actually differs from the requested
    # updates; this avoids issuing a redundant UPDATE query.
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        obj.save(update_fields=updates)
    return obj, created
class ManagerUtilsQuerySet(QuerySet):
    """
    Defines the methods in the manager utils that can also be applied to querysets.
    """
    def id_dict(self):
        """Return a dict of this queryset's objects keyed on their primary keys."""
        return id_dict(self)
    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Bulk update-or-insert ``model_objs``; see module-level ``bulk_upsert``."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts, native=native
        )
    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Postgres-native bulk upsert; see module-level ``bulk_upsert2``."""
        return bulk_upsert2(self, model_objs, unique_fields,
                            update_fields=update_fields, returning=returning,
                            ignore_duplicate_updates=ignore_duplicate_updates,
                            return_untouched=return_untouched)
    def bulk_create(self, *args, **kwargs):
        """
        Overrides Django's bulk_create function to emit a post_bulk_operation signal when bulk_create
        is finished.
        """
        ret_val = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync this queryset's contents to ``model_objs``; see module-level ``sync``."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)
    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Postgres-native sync; see module-level ``sync2``."""
        return sync2(self, model_objs, unique_fields, update_fields=update_fields, returning=returning,
                     ignore_duplicate_updates=ignore_duplicate_updates)
    def get_or_none(self, **query_params):
        """Return the single matching object or None; see module-level ``get_or_none``."""
        return get_or_none(self, **query_params)
    def single(self):
        """Return the queryset's only object; see module-level ``single``."""
        return single(self)
    def update(self, **kwargs):
        """
        Overrides Django's update method to emit a post_bulk_operation signal when it completes.
        """
        ret_val = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
class ManagerUtilsMixin(object):
    """
    A mixin that can be used by django model managers. It provides additional functionality on top
    of the regular Django Manager class.
    """
    def get_queryset(self):
        # NOTE(review): the db alias (``using=self._db``) is not forwarded to the
        # queryset here — confirm multi-database routing is not required.
        return ManagerUtilsQuerySet(self.model)
    def id_dict(self):
        """Return a dict of all objects keyed on their primary keys."""
        return id_dict(self.get_queryset())
    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
            native=False):
        """Bulk update-or-insert ``model_objs``; see module-level ``bulk_upsert``."""
        return bulk_upsert(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts,
            return_upserts_distinct=return_upserts_distinct, native=native)
    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Postgres-native bulk upsert; see module-level ``bulk_upsert2``."""
        return bulk_upsert2(
            self.get_queryset(), model_objs, unique_fields,
            update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched)
    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync the table's contents to ``model_objs``; see module-level ``sync``."""
        return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)
    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Postgres-native sync; see module-level ``sync2``."""
        return sync2(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates)
    def bulk_update(self, model_objs, fields_to_update):
        """Bulk update already-saved ``model_objs``; see module-level ``bulk_update``."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)
    def upsert(self, defaults=None, updates=None, **kwargs):
        """Single-object update-or-insert; see module-level ``upsert``."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)
    def get_or_none(self, **query_params):
        """Return the single matching object or None; see module-level ``get_or_none``."""
        return get_or_none(self.get_queryset(), **query_params)
    def single(self):
        """Return the table's only object; see module-level ``single``."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """
    A class that can be used as a manager. It already inherits the Django Manager class and adds
    the mixin.
    """
    # No body needed: all extra behavior comes from ManagerUtilsMixin.
    pass
|
ambitioninc/django-manager-utils | manager_utils/manager_utils.py | get_or_none | python | def get_or_none(queryset, **query_params):
try:
obj = queryset.get(**query_params)
except queryset.model.DoesNotExist:
obj = None
return obj | Get an object or return None if it doesn't exist.
:param query_params: The query parameters used in the lookup.
:returns: A model object if one exists with the query params, None otherwise.
Examples:
.. code-block:: python
model_obj = get_or_none(TestModel.objects, int_field=1)
print(model_obj)
None
TestModel.objects.create(int_field=1)
model_obj = get_or_none(TestModel.objects, int_field=1)
print(model_obj.int_field)
1 | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/manager_utils.py#L437-L463 | null | import itertools
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs
post_bulk_operation = Signal(providing_args=['model'])
def id_dict(queryset):
    """
    Return a dictionary of all the objects in ``queryset`` keyed on their
    primary keys.
    :rtype: dict
    Examples:
    .. code-block:: python
        TestModel.objects.create(int_field=1)
        TestModel.objects.create(int_field=2)
        print(id_dict(TestModel.objects.all()))
    """
    keyed = {}
    for model_obj in queryset:
        keyed[model_obj.pk] = model_obj
    return keyed
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Given a list of model objects that were updated and model objects that were created,
    fetch the pks of the newly created models and return the two lists in a tuple
    """
    # Keep track of the created models
    created_models = []
    # If we created new models query for them.
    # bulk_create cannot report pks here, so the created rows are re-fetched by
    # matching their unique-field tuples with a raw ``(f1, f2, ...) IN %s`` clause.
    # NOTE(review): the field names are interpolated directly into the SQL —
    # assumes they come from trusted code, never from user input.
    if model_objs_created:
        created_models.extend(
            queryset.extra(
                where=['({unique_fields_sql}) in %s'.format(
                    unique_fields_sql=', '.join(unique_fields)
                )],
                params=[
                    tuple([
                        tuple([
                            getattr(model_obj, field)
                            for field in unique_fields
                        ])
                        for model_obj in model_objs_created
                    ])
                ]
            )
        )
    # Return the models
    return model_objs_updated, created_models
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Return one flat list of every upserted model: the updated models followed
    by the created ones. The created models are re-fetched from the database
    via ``_get_upserts_distinct`` because bulk_create cannot report their pks.
    """
    updated_models, created_models = _get_upserts_distinct(
        queryset, model_objs_updated, model_objs_created, unique_fields)
    return [*updated_models, *created_models]
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def _get_prepped_model_field(model_obj, field):
    """
    Gets the value of a field of a model obj that is prepared for the db.
    :param model_obj: the model instance to read from
    :param field: the field *name*; the local is re-bound to the Field object below
    :returns: the value converted via ``get_db_prep_save`` for the default connection
    """
    # Get the field object from the model's metadata (name -> Field instance)
    field = model_obj._meta.get_field(field)
    # Get the value in its database representation
    value = field.get_db_prep_save(getattr(model_obj, field.attname), connection)
    # Return the value
    return value
def bulk_upsert(
        queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
        sync=False, native=False
):
    """
    Bulk update-or-insert ``model_objs`` into the table behind ``queryset``.

    Incoming objects are matched against the queryset's rows on
    ``unique_fields``; matched rows get ``update_fields`` copied onto them and
    unmatched objects are bulk created. With no ``update_fields`` this reduces
    to a bulk create of the missing rows only.

    :param queryset: queryset (or manager) defining the candidate rows
    :param model_objs: model instances to upsert
    :param unique_fields: field names identifying a row; must be non-empty
    :param update_fields: field names copied onto matched rows; falsy means
        only create missing rows
    :param return_upserts: if True, return all upserted models (one extra
        query re-fetches the bulk-created rows)
    :param return_upserts_distinct: if True, return an ``(updated, created)``
        tuple instead; not supported together with ``native``
    :param sync: if True, delete queryset rows that were not matched for
        update so the table ends up matching ``model_objs``
    :param native: if True, use the postgres insert-on-conflict path via
        querybuilder instead of the select/update/create strategy
    :raises ValueError: when ``unique_fields`` is empty
    :signals: emits ``post_bulk_operation`` when a bulk update or create runs
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []
    if native:
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        upserted = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            # Remove every row that existed before the upsert but was not touched by it
            previous_pks = frozenset(queryset.values_list('pk', flat=True))
            queryset.filter(pk__in=previous_pks - frozenset([m.pk for m in upserted])).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return upserted
    # Index the existing rows by their unique-field tuple for O(1) matching
    existing_by_key = {
        tuple(getattr(existing, field) for field in unique_fields): existing
        for existing in queryset
    }
    # Split the incoming objects into those to update and those to create
    to_update, to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, existing_by_key)
    # When syncing, delete every existing row that was not matched for update
    if sync:
        updated_set = frozenset(to_update)
        stale_pks = [
            existing.pk for existing in existing_by_key.values() if existing not in updated_set
        ]
        if stale_pks:
            queryset.filter(pk__in=stale_pks).delete()
    # Apply bulk updates and creates
    if update_fields:
        bulk_update(queryset, to_update, update_fields)
    queryset.bulk_create(to_create)
    # Optionally return the bulk upserted values
    if return_upserts_distinct:
        # (updated, created) as two separate lists
        return _get_upserts_distinct(queryset, to_update, to_create, unique_fields)
    if return_upserts:
        return _get_upserts(queryset, to_update, to_create, unique_fields)
def bulk_upsert2(
    queryset, model_objs, unique_fields, update_fields=None, returning=False,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Bulk insert-or-update ``model_objs`` against ``queryset`` using the
    postgres-native upsert implementation, matching rows on ``unique_fields``.

    Existing rows get ``update_fields`` copied onto them (all fields when
    ``None``; none when ``[]``); missing rows are inserted. When ``returning``
    is truthy, the result exposes ``created`` and ``updated`` collections
    (and ``untouched`` when ``return_untouched=True``).
    ``ignore_duplicate_updates`` skips rows whose update fields are unchanged.
    Emits ``post_bulk_operation`` when done.
    """
    upsert_results = upsert2.upsert(
        queryset, model_objs, unique_fields,
        update_fields=update_fields,
        returning=returning,
        ignore_duplicate_updates=ignore_duplicate_updates,
        return_untouched=return_untouched,
    )
    # Let listeners know a bulk operation touched this model's table
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return upsert_results
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
    """
    Performs a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.
    This function calls bulk_upsert underneath the hood with sync=True.
    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: The models to sync
    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an
        object in objs matches a model from the queryset.
    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.
    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert) when performing
        bulk upsert.
    :param kwargs: Any additional keyword arguments are forwarded verbatim to ``bulk_upsert``
        (e.g. ``native`` or ``return_upserts``).
    """
    # Delegate to bulk_upsert with sync=True so rows not matched for update are deleted
    return bulk_upsert(queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs)
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
    """
    Performs a sync operation on a queryset, making the contents of the
    queryset match the contents of model_objs.
    Note: The definition of a sync requires that we return untouched rows from the upsert operation. There is
    no way to turn off returning untouched rows in a sync.
    Args:
        queryset (Model|QuerySet): A model or a queryset that defines the collection to sync
        model_objs (List[Model]): A list of Django models to sync. All models in this list
            will be bulk upserted and any models not in the table (or queryset) will be deleted
            if sync=True.
        unique_fields (List[str]): A list of fields that define the uniqueness of the model. The
            model must have a unique constraint on these fields
        update_fields (List[str], default=None): A list of fields to update whenever objects
            already exist. If an empty list is provided, it is equivalent to doing a bulk
            insert on the objects that don't exist. If `None`, all fields will be updated.
        returning (bool|List[str]): If True, returns all fields. If a list, only returns
            fields in the list. Return values are split in a tuple of created, updated, and
            deleted models.
        ignore_duplicate_updates (bool, default=False): Ignore updating a row in the upsert if all
            of the update fields are duplicates
    Returns:
        UpsertResult: A list of results if ``returning`` is not ``False``. created, updated, untouched,
        and deleted results can be obtained by accessing the ``created``, ``updated``, ``untouched``,
        and ``deleted`` properties of the result.
    """
    # Delegate to the upsert2 implementation with sync=True, then emit the
    # bulk-operation signal for listeners.
    results = upsert2.upsert(queryset, model_objs, unique_fields,
                             update_fields=update_fields, returning=returning, sync=True,
                             ignore_duplicate_updates=ignore_duplicate_updates)
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return results
def single(queryset):
    """
    Assumes that this model only has one element in the table and returns it.
    If the table has more than one or no value, an exception is raised.
    :returns: The only model object in the queryset.
    :raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
        error when the object does not exist or a
        :class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
        error when there is more than one object.
    Examples:
    .. code-block:: python
        TestModel.objects.create(int_field=1)
        model_obj = single(TestModel.objects)
        print(model_obj.int_field)
        1
    """
    # ``get()`` with no filters raises unless exactly one row matches the queryset
    return queryset.get()
def bulk_update(manager, model_objs, fields_to_update):
    """
    Bulk updates a list of model objects that are already saved.
    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have been updated.
    :type fields_to_update: list of str
    :param fields_to_update: A list of fields to be updated. Only these fields will be updated
    :signals: Emits a post_bulk_operation signal when completed.
    Examples:
    .. code-block:: python
        # Create a couple test models
        model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
        model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')
        # Change their fields and do a bulk update
        model_obj1.int_field = 10
        model_obj1.float_field = 20.0
        model_obj2.int_field = 30
        model_obj2.float_field = 40.0
        bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])
        # Reload the models and view their changes
        model_obj1 = TestModel.objects.get(id=model_obj1.id)
        print(model_obj1.int_field, model_obj1.float_field)
        10, 20.0
        model_obj2 = TestModel.objects.get(id=model_obj2.id)
        print(model_obj2.int_field, model_obj2.float_field)
        30, 40.0
    """
    # Add the pk to the value fields so we can join
    value_fields = [manager.model._meta.pk.attname] + fields_to_update
    # Build the row values
    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]
    # If we do not have any values or fields to update just return
    if len(row_values) == 0 or len(fields_to_update) == 0:
        return
    # Create a map of db types
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]
    # Build the value fields sql
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )
    # Build the set sql
    update_fields_sql = ', '.join([
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    ])
    # Build the values sql.
    # NOTE(review): explicit ``%s::type`` casts are only emitted for the first
    # row (and never for the pk column, i == 0) — the database infers the
    # remaining rows' types from the first VALUES row.
    values_sql = ', '.join([
        '({0})'.format(
            ', '.join([
                '%s::{0}'.format(
                    db_types[i]
                ) if not row_number and i else '%s'
                for i, _ in enumerate(row)
            ])
        )
        for row_number, row in enumerate(row_values)
    ])
    # Start building the query: a single UPDATE joined to a VALUES table
    # (postgres-style ``UPDATE ... FROM (VALUES ...)``)
    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )
    # Combine all the row values; they are passed as parameters, never interpolated
    update_sql_params = list(itertools.chain(*row_values))
    # Run the update query
    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)
    # call the bulk operation signal
    post_bulk_operation.send(sender=manager.model, model=manager.model)
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Performs an update on an object or an insert if the object does not exist.
    :type defaults: dict
    :param defaults: These values are set when the object is created, but are irrelevant
        when the object already exists. This field should only be used when values only need to
        be set during creation. The caller's dict is never mutated.
    :type updates: dict
    :param updates: These values are updated when the object is updated. They also override any
        values provided in the defaults when inserting the object.
    :param kwargs: These values provide the arguments used when checking for the existence of
        the object. They are used in a similar manner to Django's get_or_create function.
    :returns: A tuple of the upserted object and a Boolean that is True if it was created
        (False otherwise)
    Examples:
    .. code-block:: python
        # Created; defaults apply on insert only
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 2.0})
        # Already exists; defaults ignored, updates written to the row
        model_obj, created = upsert(TestModel.objects, int_field=1, updates={'float_field': 3.0})
    """
    # Work on a copy so the caller's ``defaults`` dict is not mutated below
    defaults = dict(defaults) if defaults else {}
    # Updates take precedence over defaults when the object must be inserted
    defaults.update(updates or {})
    # Do a get or create
    obj, created = manager.get_or_create(defaults=defaults, **kwargs)
    # Only hit the database again when the existing row actually differs
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        # Pass an explicit list of field names so only those columns are written
        obj.save(update_fields=list(updates))
    return obj, created
class ManagerUtilsQuerySet(QuerySet):
    """
    Defines the methods in the manager utils that can also be applied to querysets.
    Each method delegates to the module-level function of the same name, bound to this
    queryset; bulk_create and update are overridden only to emit post_bulk_operation.
    """
    def id_dict(self):
        """Return a dict of this queryset's objects keyed on primary key."""
        return id_dict(self)
    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Bulk update-or-insert ``model_objs`` against this queryset (see module-level ``bulk_upsert``)."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts, native=native
        )
    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Postgres-native bulk upsert (see module-level ``bulk_upsert2``)."""
        return bulk_upsert2(self, model_objs, unique_fields,
                            update_fields=update_fields, returning=returning,
                            ignore_duplicate_updates=ignore_duplicate_updates,
                            return_untouched=return_untouched)
    def bulk_create(self, *args, **kwargs):
        """
        Overrides Django's bulk_create function to emit a post_bulk_operation signal when bulk_create
        is finished.
        """
        ret_val = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync this queryset's contents to exactly ``model_objs`` (see module-level ``sync``)."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)
    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Postgres-native sync (see module-level ``sync2``)."""
        return sync2(self, model_objs, unique_fields, update_fields=update_fields, returning=returning,
                     ignore_duplicate_updates=ignore_duplicate_updates)
    def get_or_none(self, **query_params):
        """Return the single matching object, or ``None`` when it does not exist."""
        return get_or_none(self, **query_params)
    def single(self):
        """Return the queryset's only object; raises if there are zero or multiple."""
        return single(self)
    def update(self, **kwargs):
        """
        Overrides Django's update method to emit a post_bulk_operation signal when it completes.
        """
        ret_val = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
class ManagerUtilsMixin(object):
    """
    A mixin that can be used by django model managers. It provides additional functionality on top
    of the regular Django Manager class. Every helper delegates to the module-level function of
    the same name, bound to this manager's queryset.
    """
    def get_queryset(self):
        """Return a ManagerUtilsQuerySet bound to this manager's model and database."""
        # Forward the manager's database alias (set by Manager.db_manager/.using) so
        # multi-database routing is honored, mirroring Django's Manager.get_queryset.
        # getattr keeps the mixin usable on classes that do not define ``_db``.
        return ManagerUtilsQuerySet(self.model, using=getattr(self, '_db', None))
    def id_dict(self):
        """Return a dict of all of the model's objects keyed on primary key."""
        return id_dict(self.get_queryset())
    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
            native=False):
        """Bulk update-or-insert ``model_objs`` (see module-level ``bulk_upsert``)."""
        return bulk_upsert(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts,
            return_upserts_distinct=return_upserts_distinct, native=native)
    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Postgres-native bulk upsert (see module-level ``bulk_upsert2``)."""
        return bulk_upsert2(
            self.get_queryset(), model_objs, unique_fields,
            update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched)
    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync the model's table to exactly ``model_objs`` (see module-level ``sync``)."""
        return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)
    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Postgres-native sync (see module-level ``sync2``)."""
        return sync2(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates)
    def bulk_update(self, model_objs, fields_to_update):
        """Bulk update already-saved ``model_objs`` on ``fields_to_update``."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)
    def upsert(self, defaults=None, updates=None, **kwargs):
        """Update-or-insert a single object (see module-level ``upsert``)."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)
    def get_or_none(self, **query_params):
        """Return the single matching object, or ``None`` when it does not exist."""
        return get_or_none(self.get_queryset(), **query_params)
    def single(self):
        """Return the table's only object; raises if there are zero or multiple."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """
    A class that can be used as a manager. It already inherits the Django Manager class and adds
    the mixin.
    """
    # Intentionally empty: all behavior comes from ManagerUtilsMixin and Django's Manager.
    pass
|
ambitioninc/django-manager-utils | manager_utils/manager_utils.py | bulk_update | python | def bulk_update(manager, model_objs, fields_to_update):
# Add the pk to the value fields so we can join
value_fields = [manager.model._meta.pk.attname] + fields_to_update
# Build the row values
row_values = [
[_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
for model_obj in model_objs
]
# If we do not have any values or fields to update just return
if len(row_values) == 0 or len(fields_to_update) == 0:
return
# Create a map of db types
db_types = [
manager.model._meta.get_field(field).db_type(connection)
for field in value_fields
]
# Build the value fields sql
value_fields_sql = ', '.join(
'"{field}"'.format(field=manager.model._meta.get_field(field).column)
for field in value_fields
)
# Build the set sql
update_fields_sql = ', '.join([
'"{field}" = "new_values"."{field}"'.format(
field=manager.model._meta.get_field(field).column
)
for field in fields_to_update
])
# Build the values sql
values_sql = ', '.join([
'({0})'.format(
', '.join([
'%s::{0}'.format(
db_types[i]
) if not row_number and i else '%s'
for i, _ in enumerate(row)
])
)
for row_number, row in enumerate(row_values)
])
# Start building the query
update_sql = (
'UPDATE {table} '
'SET {update_fields_sql} '
'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
).format(
table=manager.model._meta.db_table,
pk_field=manager.model._meta.pk.column,
update_fields_sql=update_fields_sql,
values_sql=values_sql,
value_fields_sql=value_fields_sql
)
# Combine all the row values
update_sql_params = list(itertools.chain(*row_values))
# Run the update query
with connection.cursor() as cursor:
cursor.execute(update_sql, update_sql_params)
# call the bulk operation signal
post_bulk_operation.send(sender=manager.model, model=manager.model) | Bulk updates a list of model objects that are already saved.
:type model_objs: list of :class:`Models<django:django.db.models.Model>`
:param model_objs: A list of model objects that have been updated.
:type fields_to_update: list of str
:param fields_to_update: A list of fields to be updated. Only these fields will be updated
:signals: Emits a post_bulk_operation signal when completed.
Examples:
.. code-block:: python
# Create a couple test models
model_obj1 = TestModel.objects.create(int_field=1, float_field=2.0, char_field='Hi')
model_obj2 = TestModel.objects.create(int_field=3, float_field=4.0, char_field='Hello')
# Change their fields and do a bulk update
model_obj1.int_field = 10
model_obj1.float_field = 20.0
model_obj2.int_field = 30
model_obj2.float_field = 40.0
bulk_update(TestModel.objects, [model_obj1, model_obj2], ['int_field', 'float_field'])
# Reload the models and view their changes
model_obj1 = TestModel.objects.get(id=model_obj1.id)
print(model_obj1.int_field, model_obj1.float_field)
10, 20.0
model_obj2 = TestModel.objects.get(id=model_obj2.id)
print(model_obj2.int_field, model_obj2.float_field)
30, 40.0
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs
# NOTE(review): Signal(providing_args=...) was deprecated in Django 3.0 and the
# argument was removed in Django 4.0 — confirm the supported Django range.
post_bulk_operation = Signal(providing_args=['model'])
def id_dict(queryset):
    """
    Returns a dictionary of all the objects keyed on their ID.
    :rtype: dict
    :returns: A dictionary of objects from the queryset or manager that is keyed
        on the objects' IDs.
    Examples:
    .. code-block:: python
        TestModel.objects.create(int_field=1)
        TestModel.objects.create(int_field=2)
        print(id_dict(TestModel.objects.all()))
    """
    keyed_by_pk = {}
    for model_obj in queryset:
        keyed_by_pk[model_obj.pk] = model_obj
    return keyed_by_pk
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Given a list of model objects that were updated and model objects that were created,
    fetch the pks of the newly created models and return the two lists in a tuple.
    The re-fetch is needed because Django's bulk_create does not return primary keys,
    so created instances must be looked up again by their unique-field value tuples.
    """
    # Keep track of the created models
    created_models = []
    # If we created new models query for them
    if model_objs_created:
        # Matches rows whose (unique_fields...) tuple is in the set of created tuples.
        # NOTE(review): ``unique_fields`` is interpolated into raw SQL via .extra() —
        # these must be trusted, code-supplied column names, never user input.
        created_models.extend(
            queryset.extra(
                where=['({unique_fields_sql}) in %s'.format(
                    unique_fields_sql=', '.join(unique_fields)
                )],
                params=[
                    tuple([
                        tuple([
                            getattr(model_obj, field)
                            for field in unique_fields
                        ])
                        for model_obj in model_objs_created
                    ])
                ]
            )
        )
    # Return the models: (updated as given, created as freshly fetched)
    return model_objs_updated, created_models
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
    """
    Return every model object touched by the upsert: the updated models followed by
    the re-fetched created models. The fetch is required because Django's
    bulk_create cannot return primary keys for the rows it inserts.
    """
    updated_models, created_models = _get_upserts_distinct(
        queryset, model_objs_updated, model_objs_created, unique_fields
    )
    return updated_models + created_models
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def _get_prepped_model_field(model_obj, field):
    """
    Gets the value of a field of a model obj that is prepared for the db
    (via the field's get_db_prep_save against the module-level connection).
    """
    model_field = model_obj._meta.get_field(field)
    raw_value = getattr(model_obj, model_field.attname)
    return model_field.get_db_prep_save(raw_value, connection)
def bulk_upsert(
    queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
    sync=False, native=False
):
    """
    Performs a bulk update or insert on a list of model objects. Matches all objects in the queryset
    with the objs provided using the field values in unique_fields.
    If an existing object is matched, it is updated with the values from the provided objects. Objects
    that don't match anything are bulk created.
    A user can provide a list update_fields so that any changed values on those fields will be updated.
    However, if update_fields is not provided, this function reduces down to performing a bulk_create
    on any non extant objects.
    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: A list of model objects that have fields corresponding to the model in the manager.
    :type unique_fields: list of str
    :param unique_fields: A list of fields that are used to determine if an object in objs matches a model
        from the queryset.
    :type update_fields: list of str
    :param update_fields: A list of fields used from the objects in objs as fields when updating existing
        models. If None, this function will only perform a bulk create for model_objs that do not
        currently exist in the database.
    :type return_upserts_distinct: bool
    :param return_upserts_distinct: A flag specifying whether to return the upserted values as a list of
        distinct lists, one containing the updated models and the other containing the new models. If True,
        this performs an additional query to fetch any bulk created values.
    :type return_upserts: bool
    :param return_upserts: A flag specifying whether to return the upserted values. If True, this performs
        an additional query to fetch any bulk created values.
    :type sync: bool
    :param sync: A flag specifying whether a sync operation should be applied to the bulk_upsert. If this
        is True, all values in the queryset that were not updated will be deleted such that the
        entire list of model objects is synced to the queryset.
    :type native: bool
    :param native: A flag specifying whether to use postgres insert on conflict (upsert).
    :signals: Emits a post_bulk_operation when a bulk_update or a bulk_create occurs.
    Examples:
    .. code-block:: python
        # Start off with no objects in the database. Call a bulk_upsert on the TestModel, which includes
        # a char_field, int_field, and float_field
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
        ], ['int_field'], ['char_field'])
        # All objects should have been created
        print(TestModel.objects.count())
        3
        # Now perform a bulk upsert on all the char_field values. Since the objects existed previously
        # (known by the int_field uniqueness constraint), the char fields should be updated
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='0', int_field=1),
            TestModel(float_field=2.0, char_field='0', int_field=2),
            TestModel(float_field=3.0, char_field='0', int_field=3),
        ], ['int_field'], ['char_field'])
        # No more new objects should have been created, and every char field should be 0
        print(TestModel.objects.count(), TestModel.objects.filter(char_field='0').count())
        3, 3
        # Do the exact same operation, but this time add an additional object that is not already
        # stored. It will be created.
        bulk_upsert(TestModel.objects.all(), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])
        # There should be one more object
        print(TestModel.objects.count())
        4
        # Note that one can also do the upsert on a queryset. Perform the same data upsert on a
        # filter for int_field=1. In this case, only one object has the ability to be updated.
        # All of the other objects will be created
        bulk_upsert(TestModel.objects.filter(int_field=1), [
            TestModel(float_field=1.0, char_field='1', int_field=1),
            TestModel(float_field=2.0, char_field='2', int_field=2),
            TestModel(float_field=3.0, char_field='3', int_field=3),
            TestModel(float_field=4.0, char_field='4', int_field=4),
        ], ['int_field'], ['char_field'])
        # There should be three more objects
        print(TestModel.objects.count())
        7
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []
    if native:
        # Native path: a single INSERT ... ON CONFLICT statement built by querybuilder
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        # ``or []`` normalizes the None returned when return_models is False
        return_value = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            # Sync: delete every pre-existing row the upsert did not touch
            orig_ids = frozenset(queryset.values_list('pk', flat=True))
            queryset.filter(pk__in=orig_ids - frozenset([m.pk for m in return_value])).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return return_value
    # Create a look up table for all of the objects in the queryset keyed on the unique_fields
    extant_model_objs = {
        tuple(getattr(extant_model_obj, field) for field in unique_fields): extant_model_obj
        for extant_model_obj in queryset
    }
    # Find all of the objects to update and all of the objects to create
    model_objs_to_update, model_objs_to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, extant_model_objs)
    # Find all objects in the queryset that will not be updated. These will be deleted if the sync option is
    # True
    if sync:
        model_objs_to_update_set = frozenset(model_objs_to_update)
        model_objs_to_delete = [
            model_obj.pk for model_obj in extant_model_objs.values() if model_obj not in model_objs_to_update_set
        ]
        if model_objs_to_delete:
            queryset.filter(pk__in=model_objs_to_delete).delete()
    # Apply bulk updates and creates
    if update_fields:
        bulk_update(queryset, model_objs_to_update, update_fields)
    queryset.bulk_create(model_objs_to_create)
    # Optionally return the bulk upserted values
    if return_upserts_distinct:
        # return a list of lists, the first being the updated models, the second being the newly created objects
        return _get_upserts_distinct(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
    if return_upserts:
        return _get_upserts(queryset, model_objs_to_update, model_objs_to_create, unique_fields)
def bulk_upsert2(
    queryset, model_objs, unique_fields, update_fields=None, returning=False,
    ignore_duplicate_updates=True, return_untouched=False
):
    """
    Native (Postgres ``INSERT ... ON CONFLICT``) bulk update-or-insert.
    Rows in ``model_objs`` are matched against the table on ``unique_fields``
    (which must carry a unique constraint); matches are updated on
    ``update_fields`` while everything else is inserted.
    Args:
        queryset (Model|QuerySet): The collection to upsert into.
        model_objs (List[Model]): The Django models to upsert.
        unique_fields (List[str]): The fields that define the uniqueness of the
            model; the model must have a unique constraint on them.
        update_fields (List[str], default=None): Fields to update on conflict.
            ``None`` updates all fields; an empty list means insert-only.
        returning (bool|List[str]): ``True`` to return all fields, or a list of
            field names to return. Results are split into created and updated.
        ignore_duplicate_updates (bool, default=True): Skip updating rows whose
            update fields are already identical.
        return_untouched (bool, default=False): Also return rows the upsert left
            alone.
    Returns:
        UpsertResult: When ``returning`` is not ``False``; exposes ``created``,
        ``updated`` and ``untouched`` properties.
    Emits the ``post_bulk_operation`` signal once the statement has run.
    """
    upsert_results = upsert2.upsert(
        queryset, model_objs, unique_fields,
        update_fields=update_fields, returning=returning,
        ignore_duplicate_updates=ignore_duplicate_updates,
        return_untouched=return_untouched
    )
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return upsert_results
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
    """
    Make the contents of ``queryset`` match ``model_objs`` exactly.
    Thin wrapper around :func:`bulk_upsert` with ``sync=True``: rows matched on
    ``unique_fields`` are updated (on ``update_fields``), missing rows are
    created, and any row in the queryset not present in ``model_objs`` is
    deleted.
    :type model_objs: list of :class:`Models<django:django.db.models.Model>`
    :param model_objs: The models to sync.
    :type unique_fields: list of str
    :param unique_fields: Fields used to match an object in ``model_objs`` to a
        model in the queryset.
    :type update_fields: list of str
    :param update_fields: Fields copied onto matched models. ``None`` means only
        bulk create the model_objs that do not yet exist.
    :param kwargs: Forwarded to ``bulk_upsert`` (e.g. ``native=True`` for the
        postgres insert-on-conflict path).
    """
    return bulk_upsert(
        queryset, model_objs, unique_fields,
        update_fields=update_fields, sync=True, **kwargs
    )
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
    """
    Native (Postgres) sync: make the contents of ``queryset`` match
    ``model_objs`` exactly via ``upsert2.upsert`` with ``sync=True``.
    Note: a sync requires untouched rows to be returned by the upsert
    operation, so there is no switch to turn that off here.
    Args:
        queryset (Model|QuerySet): The collection to sync.
        model_objs (List[Model]): The Django models to sync; rows not in this
            list are deleted from the table (or queryset).
        unique_fields (List[str]): The fields that define the uniqueness of the
            model; the model must have a unique constraint on them.
        update_fields (List[str], default=None): Fields to update on existing
            rows. ``None`` updates all fields; an empty list means insert-only.
        returning (bool|List[str]): ``True`` to return all fields, or a list of
            field names. Results split into created/updated/deleted.
        ignore_duplicate_updates (bool, default=True): Skip updating rows whose
            update fields are already identical.
    Returns:
        UpsertResult: When ``returning`` is not ``False``; exposes ``created``,
        ``updated``, ``untouched`` and ``deleted`` properties.
    Emits the ``post_bulk_operation`` signal once the statement has run.
    """
    sync_results = upsert2.upsert(
        queryset, model_objs, unique_fields,
        update_fields=update_fields, returning=returning, sync=True,
        ignore_duplicate_updates=ignore_duplicate_updates
    )
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return sync_results
def get_or_none(queryset, **query_params):
    """
    Get an object or return None if it doesn't exist.
    :param query_params: The query parameters used in the lookup.
    :returns: A model object if one exists with the query params, None otherwise.
    Examples:
    .. code-block:: python
        model_obj = get_or_none(TestModel.objects, int_field=1)
        print(model_obj)
        None
        TestModel.objects.create(int_field=1)
        model_obj = get_or_none(TestModel.objects, int_field=1)
        print(model_obj.int_field)
        1
    """
    try:
        return queryset.get(**query_params)
    except queryset.model.DoesNotExist:
        # Missing rows are an expected outcome, not an error
        return None
def single(queryset):
    """
    Assumes that this model only has one element in the table and returns it.
    If the table has more than one or no value, an exception is raised.
    :returns: The only model object in the queryset.
    :raises: :class:`DoesNotExist <django:django.core.exceptions.ObjectDoesNotExist>`
        when the object does not exist, or
        :class:`MultipleObjectsReturned <django:django.core.exceptions.MultipleObjectsReturned>`
        when there is more than one object.
    Examples:
    .. code-block:: python
        TestModel.objects.create(int_field=1)
        model_obj = single(TestModel.objects)
        print(model_obj.int_field)
        1
    """
    # An unfiltered .get() enforces "exactly one row" for us
    return queryset.get()
def upsert(manager, defaults=None, updates=None, **kwargs):
    """
    Performs an update on an object or an insert if the object does not exist.
    :type defaults: dict
    :param defaults: These values are set when the object is created, but are irrelevant
        when the object already exists. This field should only be used when values only need to
        be set during creation. The caller's dict is never mutated.
    :type updates: dict
    :param updates: These values are updated when the object is updated. They also override any
        values provided in the defaults when inserting the object.
    :param kwargs: These values provide the arguments used when checking for the existence of
        the object. They are used in a similar manner to Django's get_or_create function.
    :returns: A tuple of the upserted object and a Boolean that is True if it was created
        (False otherwise)
    Examples:
    .. code-block:: python
        # Created; defaults apply on insert only
        model_obj, created = upsert(TestModel.objects, int_field=1, defaults={'float_field': 2.0})
        # Already exists; defaults are ignored but updates are written to the row
        model_obj, created = upsert(TestModel.objects, int_field=1, updates={'float_field': 3.0})
    """
    # Work on a copy so the caller's ``defaults`` dict is not mutated below
    defaults = dict(defaults) if defaults else {}
    # Updates take precedence over defaults when the object must be inserted
    defaults.update(updates or {})
    # Do a get or create
    obj, created = manager.get_or_create(defaults=defaults, **kwargs)
    # Only hit the database again when the existing row actually differs
    if updates is not None and not created and any(getattr(obj, k) != updates[k] for k in updates):
        for k, v in updates.items():
            setattr(obj, k, v)
        # Pass an explicit list of field names so only those columns are written
        obj.save(update_fields=list(updates))
    return obj, created
class ManagerUtilsQuerySet(QuerySet):
    """
    Defines the methods in the manager utils that can also be applied to querysets.
    Each method delegates to the module-level function of the same name, bound to this
    queryset; bulk_create and update are overridden only to emit post_bulk_operation.
    """
    def id_dict(self):
        """Return a dict of this queryset's objects keyed on primary key."""
        return id_dict(self)
    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Bulk update-or-insert ``model_objs`` against this queryset (see module-level ``bulk_upsert``)."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts, native=native
        )
    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Postgres-native bulk upsert (see module-level ``bulk_upsert2``)."""
        return bulk_upsert2(self, model_objs, unique_fields,
                            update_fields=update_fields, returning=returning,
                            ignore_duplicate_updates=ignore_duplicate_updates,
                            return_untouched=return_untouched)
    def bulk_create(self, *args, **kwargs):
        """
        Overrides Django's bulk_create function to emit a post_bulk_operation signal when bulk_create
        is finished.
        """
        ret_val = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync this queryset's contents to exactly ``model_objs`` (see module-level ``sync``)."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)
    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Postgres-native sync (see module-level ``sync2``)."""
        return sync2(self, model_objs, unique_fields, update_fields=update_fields, returning=returning,
                     ignore_duplicate_updates=ignore_duplicate_updates)
    def get_or_none(self, **query_params):
        """Return the single matching object, or ``None`` when it does not exist."""
        return get_or_none(self, **query_params)
    def single(self):
        """Return the queryset's only object; raises if there are zero or multiple."""
        return single(self)
    def update(self, **kwargs):
        """
        Overrides Django's update method to emit a post_bulk_operation signal when it completes.
        """
        ret_val = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return ret_val
class ManagerUtilsMixin(object):
    """
    A mixin that can be used by django model managers. It provides additional functionality on top
    of the regular Django Manager class. Every helper delegates to the module-level function of
    the same name, bound to this manager's queryset.
    """
    def get_queryset(self):
        """Return a ManagerUtilsQuerySet bound to this manager's model and database."""
        # Forward the manager's database alias (set by Manager.db_manager/.using) so
        # multi-database routing is honored, mirroring Django's Manager.get_queryset.
        # getattr keeps the mixin usable on classes that do not define ``_db``.
        return ManagerUtilsQuerySet(self.model, using=getattr(self, '_db', None))
    def id_dict(self):
        """Return a dict of all of the model's objects keyed on primary key."""
        return id_dict(self.get_queryset())
    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
            native=False):
        """Bulk update-or-insert ``model_objs`` (see module-level ``bulk_upsert``)."""
        return bulk_upsert(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, return_upserts=return_upserts,
            return_upserts_distinct=return_upserts_distinct, native=native)
    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Postgres-native bulk upsert (see module-level ``bulk_upsert2``)."""
        return bulk_upsert2(
            self.get_queryset(), model_objs, unique_fields,
            update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched)
    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Sync the model's table to exactly ``model_objs`` (see module-level ``sync``)."""
        return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)
    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Postgres-native sync (see module-level ``sync2``)."""
        return sync2(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates)
    def bulk_update(self, model_objs, fields_to_update):
        """Bulk update already-saved ``model_objs`` on ``fields_to_update``."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)
    def upsert(self, defaults=None, updates=None, **kwargs):
        """Update-or-insert a single object (see module-level ``upsert``)."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)
    def get_or_none(self, **query_params):
        """Return the single matching object, or ``None`` when it does not exist."""
        return get_or_none(self.get_queryset(), **query_params)
    def single(self):
        """Return the table's only object; raises if there are zero or multiple."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """Concrete Django manager with all manager-utils helpers mixed in.

    Use directly as ``objects = ManagerUtilsManager()`` on a model.
    """

    pass
|
def upsert(manager, defaults=None, updates=None, **kwargs):
    """Update an object if it exists, otherwise insert it.

    :type defaults: dict
    :param defaults: values applied only when the object is created; irrelevant
        when the object already exists.
    :type updates: dict
    :param updates: values applied on creation (overriding any ``defaults``)
        and, for an existing object, written and saved whenever they differ
        from the object's current values.
    :param kwargs: lookup parameters identifying the object, used exactly as
        with Django's ``get_or_create``.
    :returns: a ``(object, created)`` tuple where ``created`` is True when the
        object was inserted.
    """
    defaults = defaults or {}
    # Updates double as creation values and take precedence over defaults
    defaults.update(updates or {})

    obj, created = manager.get_or_create(defaults=defaults, **kwargs)

    if not created and updates is not None:
        # Only hit the database again when at least one field actually differs
        if any(getattr(obj, field) != value for field, value in updates.items()):
            for field, value in updates.items():
                setattr(obj, field, value)
            obj.save(update_fields=updates)

    return obj, created
2, 4.0 | train | https://github.com/ambitioninc/django-manager-utils/blob/1f111cb4846ed6cd6b78eca320a9dcc27826bf97/manager_utils/manager_utils.py#L599-L664 | null | import itertools
from django.db import connection
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.dispatch import Signal
from querybuilder.query import Query
from . import upsert2
# A signal that is emitted when any bulk operation occurs
post_bulk_operation = Signal(providing_args=['model'])
def id_dict(queryset):
    """Return a dict mapping each object's primary key to the object.

    :param queryset: any iterable of model objects exposing ``pk``.
    :rtype: dict
    :returns: ``{obj.pk: obj}`` for every object in ``queryset``.
    """
    keyed = {}
    for record in queryset:
        keyed[record.pk] = record
    return keyed
def _get_upserts_distinct(queryset, model_objs_updated, model_objs_created, unique_fields):
"""
Given a list of model objects that were updated and model objects that were created,
fetch the pks of the newly created models and return the two lists in a tuple
"""
# Keep track of the created models
created_models = []
# If we created new models query for them
if model_objs_created:
created_models.extend(
queryset.extra(
where=['({unique_fields_sql}) in %s'.format(
unique_fields_sql=', '.join(unique_fields)
)],
params=[
tuple([
tuple([
getattr(model_obj, field)
for field in unique_fields
])
for model_obj in model_objs_created
])
]
)
)
# Return the models
return model_objs_updated, created_models
def _get_upserts(queryset, model_objs_updated, model_objs_created, unique_fields):
    """Return one list of every model touched by a completed bulk upsert.

    The created models are re-fetched from the database (bulk_create cannot
    return their pks) and appended after the updated ones.
    """
    updated, created = _get_upserts_distinct(
        queryset, model_objs_updated, model_objs_created, unique_fields)
    return updated + created
def _get_model_objs_to_update_and_create(model_objs, unique_fields, update_fields, extant_model_objs):
"""
Used by bulk_upsert to gather lists of models that should be updated and created.
"""
# Find all of the objects to update and all of the objects to create
model_objs_to_update, model_objs_to_create = list(), list()
for model_obj in model_objs:
extant_model_obj = extant_model_objs.get(tuple(getattr(model_obj, field) for field in unique_fields), None)
if extant_model_obj is None:
# If the object needs to be created, make a new instance of it
model_objs_to_create.append(model_obj)
else:
# If the object needs to be updated, update its fields
for field in update_fields:
setattr(extant_model_obj, field, getattr(model_obj, field))
model_objs_to_update.append(extant_model_obj)
return model_objs_to_update, model_objs_to_create
def _get_prepped_model_field(model_obj, field):
    """Return ``model_obj``'s value for ``field``, prepared for the database.

    Resolves the field through the model's ``_meta`` and runs the value
    through ``get_db_prep_save`` with the default connection so it can be
    embedded as a parameter in a raw query.
    """
    model_field = model_obj._meta.get_field(field)
    raw_value = getattr(model_obj, model_field.attname)
    return model_field.get_db_prep_save(raw_value, connection)
def bulk_upsert(
    queryset, model_objs, unique_fields, update_fields=None, return_upserts=False, return_upserts_distinct=False,
    sync=False, native=False
):
    """Bulk update-or-insert ``model_objs`` against ``queryset``.

    Rows already in the queryset (matched on ``unique_fields``) have
    ``update_fields`` copied onto them and are bulk updated; everything else
    is bulk created.

    :type model_objs: list of model instances
    :param model_objs: unsaved instances carrying the desired field values.
    :type unique_fields: list of str
    :param unique_fields: fields identifying a row; must be non-empty.
    :type update_fields: list of str
    :param update_fields: fields copied onto matched rows. ``None``/empty
        reduces the call to a bulk create of only the missing rows.
    :type return_upserts: bool
    :param return_upserts: when True, return every upserted model. Requires an
        extra query, because bulk-created rows must be re-fetched.
    :type return_upserts_distinct: bool
    :param return_upserts_distinct: when True, return ``(updated, created)``
        as two separate lists (not supported together with ``native``).
    :type sync: bool
    :param sync: when True, delete queryset rows that are absent from
        ``model_objs`` so the queryset ends up mirroring the input exactly.
    :type native: bool
    :param native: when True, issue a single postgres insert-on-conflict
        statement via querybuilder instead of select/update/create.
    :raises ValueError: if ``unique_fields`` is empty.
    :raises NotImplementedError: for ``native`` + ``return_upserts_distinct``.
    :signals: emits ``post_bulk_operation`` for the underlying bulk queries.
    """
    if not unique_fields:
        raise ValueError('Must provide unique_fields argument')
    update_fields = update_fields or []

    if native:
        if return_upserts_distinct:
            raise NotImplementedError('return upserts distinct not supported with native postgres upsert')
        upserted = Query().from_table(table=queryset.model).upsert(
            model_objs, unique_fields, update_fields, return_models=return_upserts or sync
        ) or []
        if sync:
            # Rows that existed beforehand but were not touched by the upsert go away
            preexisting_pks = frozenset(queryset.values_list('pk', flat=True))
            touched_pks = frozenset([model.pk for model in upserted])
            queryset.filter(pk__in=preexisting_pks - touched_pks).delete()
        post_bulk_operation.send(sender=queryset.model, model=queryset.model)
        return upserted

    # Index the existing rows by their unique-field tuple for O(1) matching
    existing_by_key = {
        tuple(getattr(existing, field) for field in unique_fields): existing
        for existing in queryset
    }

    to_update, to_create = _get_model_objs_to_update_and_create(
        model_objs, unique_fields, update_fields, existing_by_key)

    if sync:
        # Delete every pre-existing row that was not matched for update
        updated_set = frozenset(to_update)
        stale_pks = [
            existing.pk for existing in existing_by_key.values() if existing not in updated_set
        ]
        if stale_pks:
            queryset.filter(pk__in=stale_pks).delete()

    # Apply the bulk update (only when there are fields to write) and create
    if update_fields:
        bulk_update(queryset, to_update, update_fields)
    queryset.bulk_create(to_create)

    if return_upserts_distinct:
        # Two lists: updated models, then the re-fetched newly created models
        return _get_upserts_distinct(queryset, to_update, to_create, unique_fields)
    if return_upserts:
        return _get_upserts(queryset, to_update, to_create, unique_fields)
def bulk_upsert2(
    queryset, model_objs, unique_fields, update_fields=None, returning=False,
    ignore_duplicate_updates=True, return_untouched=False
):
    """Bulk update-or-insert using the native upsert in :mod:`upsert2`.

    Matches ``model_objs`` against ``queryset`` on ``unique_fields`` in a
    single statement: matched rows have ``update_fields`` applied (all fields
    when ``None``), unmatched ones are inserted.

    Args:
        queryset (Model|QuerySet): model or queryset defining the collection.
        model_objs (List[Model]): Django models to upsert.
        unique_fields (List[str]): fields the model is unique on; a unique
            constraint on them must exist.
        update_fields (List[str], default=None): fields to write on existing
            rows. Empty list means insert-only; ``None`` means all fields.
        returning (bool|List[str]): ``True`` returns all fields, a list
            returns just those fields; results are split into created and
            updated groups.
        ignore_duplicate_updates (bool): skip updates whose update fields
            would not change the row.
        return_untouched (bool): also return rows the statement left alone.

    Returns:
        UpsertResult: when ``returning`` is not ``False``; exposes
        ``created``, ``updated``, and ``untouched`` properties.
    """
    upsert_result = upsert2.upsert(
        queryset, model_objs, unique_fields,
        update_fields=update_fields, returning=returning,
        ignore_duplicate_updates=ignore_duplicate_updates,
        return_untouched=return_untouched)
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return upsert_result
def sync(queryset, model_objs, unique_fields, update_fields=None, **kwargs):
    """Make ``queryset`` contain exactly ``model_objs``.

    Thin wrapper over :func:`bulk_upsert` with ``sync=True``: matched rows
    are updated, missing rows created, and leftover rows deleted.

    :type model_objs: list of model instances
    :param model_objs: the models to sync the queryset to.
    :type unique_fields: list of str
    :param unique_fields: fields used to match objects against the queryset.
    :type update_fields: list of str
    :param update_fields: fields copied onto matched rows; ``None`` means
        only bulk create the missing rows.
    :param kwargs: forwarded to :func:`bulk_upsert` (e.g. ``native=True``).
    """
    return bulk_upsert(
        queryset, model_objs, unique_fields, update_fields=update_fields, sync=True, **kwargs)
def sync2(queryset, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
    """Make ``queryset`` contain exactly ``model_objs`` via the native upsert.

    Rows absent from ``model_objs`` are deleted. Untouched rows are always
    returned internally — the sync needs them to decide what to keep.

    Args:
        queryset (Model|QuerySet): model or queryset defining the collection.
        model_objs (List[Model]): Django models to sync to.
        unique_fields (List[str]): fields the model is unique on; a unique
            constraint on them must exist.
        update_fields (List[str], default=None): fields to write on existing
            rows. Empty list means insert-only; ``None`` means all fields.
        returning (bool|List[str]): ``True`` returns all fields, a list
            returns just those fields.
        ignore_duplicate_updates (bool): skip updates that would change
            nothing.

    Returns:
        UpsertResult: when ``returning`` is not ``False``; exposes
        ``created``, ``updated``, ``untouched``, and ``deleted`` properties.
    """
    sync_result = upsert2.upsert(
        queryset, model_objs, unique_fields,
        update_fields=update_fields, returning=returning, sync=True,
        ignore_duplicate_updates=ignore_duplicate_updates)
    post_bulk_operation.send(sender=queryset.model, model=queryset.model)
    return sync_result
def get_or_none(queryset, **query_params):
    """Return the object matching ``query_params``, or None if there is none.

    :param query_params: lookup arguments, used exactly as with
        ``queryset.get``.
    :returns: the matching model object, or None on ``DoesNotExist``.
    """
    try:
        return queryset.get(**query_params)
    except queryset.model.DoesNotExist:
        return None
def single(queryset):
    """Return the single object the queryset holds.

    Assumes the queryset (or table) contains exactly one object.

    :returns: the only model object in the queryset.
    :raises: ``DoesNotExist`` when empty and ``MultipleObjectsReturned`` when
        more than one object matches — both straight from ``queryset.get()``.
    """
    return queryset.get()
def bulk_update(manager, model_objs, fields_to_update):
    """Bulk-persist ``fields_to_update`` for already-saved ``model_objs``.

    Issues a single ``UPDATE ... FROM (VALUES ...)`` statement joined on the
    primary key instead of one query per object.

    :type model_objs: list of model instances
    :param model_objs: saved instances carrying the new field values.
    :type fields_to_update: list of str
    :param fields_to_update: fields to write; all other fields are ignored.
    :signals: emits ``post_bulk_operation`` after the update has run.
    """
    # The pk leads every VALUES row so the UPDATE can join on it
    value_fields = [manager.model._meta.pk.attname] + fields_to_update

    row_values = [
        [_get_prepped_model_field(model_obj, field_name) for field_name in value_fields]
        for model_obj in model_objs
    ]

    # Nothing to write — skip the query (and the signal) entirely
    if not row_values or not fields_to_update:
        return

    # Database column types, positionally aligned with value_fields
    db_types = [
        manager.model._meta.get_field(field).db_type(connection)
        for field in value_fields
    ]

    # Quoted column list for the VALUES alias
    value_fields_sql = ', '.join(
        '"{field}"'.format(field=manager.model._meta.get_field(field).column)
        for field in value_fields
    )

    # SET clause assigning each updated column from the joined VALUES row
    update_fields_sql = ', '.join(
        '"{field}" = "new_values"."{field}"'.format(
            field=manager.model._meta.get_field(field).column
        )
        for field in fields_to_update
    )

    # Placeholder rows: only the first row carries explicit ::type casts, and
    # the leading pk column (index 0) is never cast
    rendered_rows = []
    for row_number, row in enumerate(row_values):
        placeholders = []
        for column_index in range(len(row)):
            if row_number == 0 and column_index > 0:
                placeholders.append('%s::{0}'.format(db_types[column_index]))
            else:
                placeholders.append('%s')
        rendered_rows.append('({0})'.format(', '.join(placeholders)))
    values_sql = ', '.join(rendered_rows)

    update_sql = (
        'UPDATE {table} '
        'SET {update_fields_sql} '
        'FROM (VALUES {values_sql}) AS new_values ({value_fields_sql}) '
        'WHERE "{table}"."{pk_field}" = "new_values"."{pk_field}"'
    ).format(
        table=manager.model._meta.db_table,
        pk_field=manager.model._meta.pk.column,
        update_fields_sql=update_fields_sql,
        values_sql=values_sql,
        value_fields_sql=value_fields_sql
    )

    # Flatten the rows into one positional parameter list
    update_sql_params = list(itertools.chain.from_iterable(row_values))

    with connection.cursor() as cursor:
        cursor.execute(update_sql, update_sql_params)

    post_bulk_operation.send(sender=manager.model, model=manager.model)
class ManagerUtilsQuerySet(QuerySet):
    """QuerySet variant exposing the manager-utils helpers."""

    def id_dict(self):
        """Return this queryset's objects keyed on primary key."""
        return id_dict(self)

    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Forward to the module-level bulk_upsert on this queryset."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields,
            return_upserts=return_upserts, native=native
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Forward to the module-level bulk_upsert2 on this queryset."""
        return bulk_upsert2(
            self, model_objs, unique_fields, update_fields=update_fields,
            returning=returning, ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched
        )

    def bulk_create(self, *args, **kwargs):
        """Django's bulk_create, followed by a post_bulk_operation signal."""
        inserted = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return inserted

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Forward to the module-level sync on this queryset."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Forward to the module-level sync2 on this queryset."""
        return sync2(
            self, model_objs, unique_fields, update_fields=update_fields,
            returning=returning, ignore_duplicate_updates=ignore_duplicate_updates
        )

    def get_or_none(self, **query_params):
        """Return the single matching object, or None when absent."""
        return get_or_none(self, **query_params)

    def single(self):
        """Return the queryset's only object; raise if zero or many."""
        return single(self)

    def update(self, **kwargs):
        """Django's update, followed by a post_bulk_operation signal."""
        affected = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return affected
class ManagerUtilsMixin(object):
    """Mixin giving a Django model manager the manager-utils helpers.

    Each method builds a ManagerUtilsQuerySet for this manager's model and
    forwards to the matching module-level function.
    """

    def get_queryset(self):
        """Return a ManagerUtilsQuerySet for this manager's model."""
        return ManagerUtilsQuerySet(self.model)

    def id_dict(self):
        """Return all of the model's objects keyed on primary key."""
        return id_dict(self.get_queryset())

    def bulk_upsert(
            self, model_objs, unique_fields, update_fields=None, return_upserts=False,
            return_upserts_distinct=False, native=False):
        """Bulk update-or-insert model_objs against the whole table."""
        return bulk_upsert(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields,
            return_upserts=return_upserts, return_upserts_distinct=return_upserts_distinct,
            native=native)

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Bulk update-or-insert model_objs via the native upsert path."""
        return bulk_upsert2(
            self.get_queryset(), model_objs, unique_fields,
            update_fields=update_fields, returning=returning,
            ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched)

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Make the table's contents match model_objs exactly."""
        return sync(self.get_queryset(), model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Make the table's contents match model_objs via the native upsert."""
        return sync2(
            self.get_queryset(), model_objs, unique_fields, update_fields=update_fields,
            returning=returning, ignore_duplicate_updates=ignore_duplicate_updates)

    def bulk_update(self, model_objs, fields_to_update):
        """Persist fields_to_update for saved model_objs in a single query."""
        return bulk_update(self.get_queryset(), model_objs, fields_to_update)

    def upsert(self, defaults=None, updates=None, **kwargs):
        """Update a single object if it exists, otherwise insert it."""
        return upsert(self.get_queryset(), defaults=defaults, updates=updates, **kwargs)

    def get_or_none(self, **query_params):
        """Return the single matching object, or None when absent."""
        return get_or_none(self.get_queryset(), **query_params)

    def single(self):
        """Return the table's only object; raise if zero or many."""
        return single(self.get_queryset())
class ManagerUtilsManager(ManagerUtilsMixin, Manager):
    """Ready-to-use Django manager combining Manager with the utils mixin."""

    pass
|
class ManagerUtilsQuerySet(QuerySet):
    """QuerySet exposing the manager-utils helper functions as methods."""

    def id_dict(self):
        """Return this queryset's objects keyed on primary key."""
        return id_dict(self)

    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Forward to the module-level bulk_upsert on this queryset."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields,
            return_upserts=return_upserts, native=native
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Forward to the module-level bulk_upsert2 on this queryset."""
        return bulk_upsert2(
            self, model_objs, unique_fields, update_fields=update_fields,
            returning=returning, ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched
        )

    def bulk_create(self, *args, **kwargs):
        """Django's bulk_create, followed by a post_bulk_operation signal."""
        inserted = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return inserted

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Forward to the module-level sync on this queryset."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Forward to the module-level sync2 on this queryset."""
        return sync2(
            self, model_objs, unique_fields, update_fields=update_fields,
            returning=returning, ignore_duplicate_updates=ignore_duplicate_updates
        )

    def get_or_none(self, **query_params):
        """Return the single matching object, or None when absent."""
        return get_or_none(self, **query_params)

    def single(self):
        """Return the queryset's only object; raise if zero or many."""
        return single(self)

    def update(self, **kwargs):
        """Django's update, followed by a post_bulk_operation signal."""
        affected = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return affected
|
class ManagerUtilsQuerySet(QuerySet):
    """QuerySet exposing the manager-utils helper functions as methods."""

    def id_dict(self):
        """Return this queryset's objects keyed on primary key."""
        return id_dict(self)

    def bulk_upsert(self, model_objs, unique_fields, update_fields=None, return_upserts=False, native=False):
        """Forward to the module-level bulk_upsert on this queryset."""
        return bulk_upsert(
            self, model_objs, unique_fields, update_fields=update_fields,
            return_upserts=return_upserts, native=native
        )

    def bulk_upsert2(self, model_objs, unique_fields, update_fields=None, returning=False,
                     ignore_duplicate_updates=True, return_untouched=False):
        """Forward to the module-level bulk_upsert2 on this queryset."""
        return bulk_upsert2(
            self, model_objs, unique_fields, update_fields=update_fields,
            returning=returning, ignore_duplicate_updates=ignore_duplicate_updates,
            return_untouched=return_untouched
        )

    def bulk_create(self, *args, **kwargs):
        """Django's bulk_create, followed by a post_bulk_operation signal."""
        inserted = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return inserted

    def sync(self, model_objs, unique_fields, update_fields=None, native=False):
        """Forward to the module-level sync on this queryset."""
        return sync(self, model_objs, unique_fields, update_fields=update_fields, native=native)

    def sync2(self, model_objs, unique_fields, update_fields=None, returning=False, ignore_duplicate_updates=True):
        """Forward to the module-level sync2 on this queryset."""
        return sync2(
            self, model_objs, unique_fields, update_fields=update_fields,
            returning=returning, ignore_duplicate_updates=ignore_duplicate_updates
        )

    def get_or_none(self, **query_params):
        """Return the single matching object, or None when absent."""
        return get_or_none(self, **query_params)

    def single(self):
        """Return the queryset's only object; raise if zero or many."""
        return single(self)

    def update(self, **kwargs):
        """Django's update, followed by a post_bulk_operation signal."""
        affected = super(ManagerUtilsQuerySet, self).update(**kwargs)
        post_bulk_operation.send(sender=self.model, model=self.model)
        return affected
|
exxeleron/qPython | qpython/qreader.py | QReader.read | python | def read(self, source = None, **options):
'''
Reads and optionally parses a single message.
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: :class:`.QMessage` - read data (parsed or raw byte form) along
with meta information
'''
message = self.read_header(source)
message.data = self.read_data(message.size, message.is_compressed, **options)
return message | Reads and optionally parses a single message.
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: :class:`.QMessage` - read data (parsed or raw byte form) along
with meta information | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qreader.py#L119-L141 | [
"def read_header(self, source = None):\n '''\n Reads and parses message header.\n\n .. note:: :func:`.read_header` wraps data for further reading in internal\n buffer \n\n :Parameters:\n - `source` - optional data buffer to be read, if not specified data is \n read from the wrapped stream\n\n :returns: :class:`.QMessage` - read meta information\n '''\n if self._stream:\n header = self._read_bytes(8)\n self._buffer.wrap(header)\n else:\n self._buffer.wrap(source)\n\n self._buffer.endianness = '<' if self._buffer.get_byte() == 1 else '>'\n self._is_native = self._buffer.endianness == ('<' if sys.byteorder == 'little' else '>')\n message_type = self._buffer.get_byte()\n message_compressed = self._buffer.get_byte() == 1\n # skip 1 byte\n self._buffer.skip()\n\n message_size = self._buffer.get_int()\n return QMessage(None, message_type, message_size, message_compressed)\n",
"def read_data(self, message_size, is_compressed = False, **options):\n '''\n Reads and optionally parses data part of a message.\n\n .. note:: :func:`.read_header` is required to be called before executing\n the :func:`.read_data`\n\n :Parameters:\n - `message_size` (`integer`) - size of the message to be read\n - `is_compressed` (`boolean`) - indicates whether data is compressed\n :Options:\n - `raw` (`boolean`) - indicates whether read data should parsed or \n returned in raw byte form\n - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are\n backed by raw q representation (:class:`.QTemporalList`, \n :class:`.QTemporal`) instances, otherwise are represented as \n `numpy datetime64`/`timedelta64` arrays and atoms,\n **Default**: ``False``\n\n :returns: read data (parsed or raw byte form)\n '''\n self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))\n\n if is_compressed:\n if self._stream:\n self._buffer.wrap(self._read_bytes(4))\n uncompressed_size = -8 + self._buffer.get_int()\n compressed_data = self._read_bytes(message_size - 12) if self._stream else self._buffer.raw(message_size - 12)\n\n raw_data = numpy.frombuffer(compressed_data, dtype = numpy.uint8)\n if uncompressed_size <= 0:\n raise QReaderException('Error while data decompression.')\n\n raw_data = uncompress(raw_data, numpy.intc(uncompressed_size))\n raw_data = numpy.ndarray.tostring(raw_data)\n self._buffer.wrap(raw_data)\n elif self._stream:\n raw_data = self._read_bytes(message_size - 8)\n self._buffer.wrap(raw_data)\n if not self._stream and self._options.raw:\n raw_data = self._buffer.raw(message_size - 8)\n\n return raw_data if self._options.raw else self._read_object()\n"
] | class QReader(object):
'''
Provides deserialization from q IPC protocol.
:Parameters:
- `stream` (`file object` or `None`) - data input stream
- `encoding` (`string`) - encoding for characters parsing
:Attrbutes:
- `_reader_map` - stores mapping between q types and functions
responsible for parsing into Python objects
'''
_reader_map = {}
parse = Mapper(_reader_map)
def __init__(self, stream, encoding = 'latin-1'):
self._stream = stream
self._buffer = QReader.BytesBuffer()
self._encoding = encoding
def read(self, source = None, **options):
'''
Reads and optionally parses a single message.
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: :class:`.QMessage` - read data (parsed or raw byte form) along
with meta information
'''
message = self.read_header(source)
message.data = self.read_data(message.size, message.is_compressed, **options)
return message
def read_header(self, source = None):
'''
Reads and parses message header.
.. note:: :func:`.read_header` wraps data for further reading in internal
buffer
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:returns: :class:`.QMessage` - read meta information
'''
if self._stream:
header = self._read_bytes(8)
self._buffer.wrap(header)
else:
self._buffer.wrap(source)
self._buffer.endianness = '<' if self._buffer.get_byte() == 1 else '>'
self._is_native = self._buffer.endianness == ('<' if sys.byteorder == 'little' else '>')
message_type = self._buffer.get_byte()
message_compressed = self._buffer.get_byte() == 1
# skip 1 byte
self._buffer.skip()
message_size = self._buffer.get_int()
return QMessage(None, message_type, message_size, message_compressed)
def read_data(self, message_size, is_compressed = False, **options):
'''
Reads and optionally parses data part of a message.
.. note:: :func:`.read_header` is required to be called before executing
the :func:`.read_data`
:Parameters:
- `message_size` (`integer`) - size of the message to be read
- `is_compressed` (`boolean`) - indicates whether data is compressed
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: read data (parsed or raw byte form)
'''
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
if is_compressed:
if self._stream:
self._buffer.wrap(self._read_bytes(4))
uncompressed_size = -8 + self._buffer.get_int()
compressed_data = self._read_bytes(message_size - 12) if self._stream else self._buffer.raw(message_size - 12)
raw_data = numpy.frombuffer(compressed_data, dtype = numpy.uint8)
if uncompressed_size <= 0:
raise QReaderException('Error while data decompression.')
raw_data = uncompress(raw_data, numpy.intc(uncompressed_size))
raw_data = numpy.ndarray.tostring(raw_data)
self._buffer.wrap(raw_data)
elif self._stream:
raw_data = self._read_bytes(message_size - 8)
self._buffer.wrap(raw_data)
if not self._stream and self._options.raw:
raw_data = self._buffer.raw(message_size - 8)
return raw_data if self._options.raw else self._read_object()
def _read_object(self):
qtype = self._buffer.get_byte()
reader = self._get_reader(qtype)
if reader:
return reader(self, qtype)
elif qtype >= QBOOL_LIST and qtype <= QTIME_LIST:
return self._read_list(qtype)
elif qtype <= QBOOL and qtype >= QTIME:
return self._read_atom(qtype)
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
def _get_reader(self, qtype):
return self._reader_map.get(qtype, None)
@parse(QERROR)
def _read_error(self, qtype = QERROR):
raise QException(self._read_symbol())
@parse(QSTRING)
def _read_string(self, qtype = QSTRING):
self._buffer.skip() # ignore attributes
length = self._buffer.get_int()
return self._buffer.raw(length) if length > 0 else b''
@parse(QSYMBOL)
def _read_symbol(self, qtype = QSYMBOL):
return numpy.string_(self._buffer.get_symbol())
@parse(QCHAR)
def _read_char(self, qtype = QCHAR):
return chr(self._read_atom(QCHAR)).encode(self._encoding)
@parse(QGUID)
def _read_guid(self, qtype = QGUID):
return uuid.UUID(bytes = self._buffer.raw(16))
def _read_atom(self, qtype):
try:
fmt = STRUCT_MAP[qtype]
conversion = PY_TYPE[qtype]
return conversion(self._buffer.get(fmt))
except KeyError:
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
@parse(QTIMESPAN, QTIMESTAMP, QTIME, QSECOND, QMINUTE, QDATE, QMONTH, QDATETIME)
def _read_temporal(self, qtype):
try:
fmt = STRUCT_MAP[qtype]
conversion = PY_TYPE[qtype]
temporal = from_raw_qtemporal(conversion(self._buffer.get(fmt)), qtype = qtype)
return temporal if self._options.numpy_temporals else qtemporal(temporal, qtype = qtype)
except KeyError:
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
def _read_list(self, qtype):
self._buffer.skip() # ignore attributes
length = self._buffer.get_int()
conversion = PY_TYPE.get(-qtype, None)
if qtype == QSYMBOL_LIST:
symbols = self._buffer.get_symbols(length)
data = numpy.array(symbols, dtype = numpy.string_)
return qlist(data, qtype = qtype, adjust_dtype = False)
elif qtype == QGUID_LIST:
data = numpy.array([self._read_guid() for x in range(length)])
return qlist(data, qtype = qtype, adjust_dtype = False)
elif conversion:
raw = self._buffer.raw(length * ATOM_SIZE[qtype])
data = numpy.frombuffer(raw, dtype = conversion)
if not self._is_native:
data.byteswap(True)
if qtype >= QTIMESTAMP_LIST and qtype <= QTIME_LIST and self._options.numpy_temporals:
data = array_from_raw_qtemporal(data, qtype)
return qlist(data, qtype = qtype, adjust_dtype = False)
else:
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
@parse(QDICTIONARY)
def _read_dictionary(self, qtype = QDICTIONARY):
keys = self._read_object()
values = self._read_object()
if isinstance(keys, QTable):
return QKeyedTable(keys, values)
else:
return QDictionary(keys, values)
@parse(QTABLE)
def _read_table(self, qtype = QTABLE):
self._buffer.skip() # ignore attributes
self._buffer.skip() # ignore dict type stamp
columns = self._read_object()
data = self._read_object()
return qtable(columns, data, qtype = QTABLE)
@parse(QGENERAL_LIST)
def _read_general_list(self, qtype = QGENERAL_LIST):
self._buffer.skip() # ignore attributes
length = self._buffer.get_int()
return [self._read_object() for x in range(length)]
@parse(QNULL)
@parse(QUNARY_FUNC)
@parse(QBINARY_FUNC)
@parse(QTERNARY_FUNC)
def _read_function(self, qtype = QNULL):
code = self._buffer.get_byte()
return None if qtype == QNULL and code == 0 else QFunction(qtype)
@parse(QLAMBDA)
def _read_lambda(self, qtype = QLAMBDA):
self._buffer.get_symbol() # skip
expression = self._read_object()
return QLambda(expression.decode())
@parse(QCOMPOSITION_FUNC)
def _read_function_composition(self, qtype = QCOMPOSITION_FUNC):
self._read_projection(qtype) # skip
return QFunction(qtype)
@parse(QADVERB_FUNC_106)
@parse(QADVERB_FUNC_107)
@parse(QADVERB_FUNC_108)
@parse(QADVERB_FUNC_109)
@parse(QADVERB_FUNC_110)
@parse(QADVERB_FUNC_111)
def _read_adverb_function(self, qtype = QADVERB_FUNC_106):
self._read_object() # skip
return QFunction(qtype)
@parse(QPROJECTION)
def _read_projection(self, qtype = QPROJECTION):
length = self._buffer.get_int()
parameters = [ self._read_object() for x in range(length) ]
return QProjection(parameters)
def _read_bytes(self, length):
if not self._stream:
raise QReaderException('There is no input data. QReader requires either stream or data chunk')
if length == 0:
return b''
else:
data = self._stream.read(length)
if len(data) == 0:
raise QReaderException('Error while reading data')
return data
class BytesBuffer(object):
'''
Utility class for reading bytes from wrapped buffer.
'''
def __init__(self):
self._endianness = '@'
@property
def endianness(self):
'''
Gets the endianness.
'''
return self._endianness
@endianness.setter
def endianness(self, endianness):
'''
Sets the byte order (endianness) for reading from the buffer.
:Parameters:
- `endianness` (``<`` or ``>``) - byte order indicator
'''
self._endianness = endianness
def wrap(self, data):
'''
Wraps the data in the buffer.
:Parameters:
- `data` - data to be wrapped
'''
self._data = data
self._position = 0
self._size = len(data)
def skip(self, offset = 1):
'''
Skips reading of `offset` bytes.
:Parameters:
- `offset` (`integer`) - number of bytes to be skipped
'''
new_position = self._position + offset
if new_position > self._size:
raise QReaderException('Attempt to read data out of buffer bounds')
self._position = new_position
def raw(self, offset):
'''
Gets `offset` number of raw bytes.
:Parameters:
- `offset` (`integer`) - number of bytes to be retrieved
:returns: raw bytes
'''
new_position = self._position + offset
if new_position > self._size:
raise QReaderException('Attempt to read data out of buffer bounds')
raw = self._data[self._position : new_position]
self._position = new_position
return raw
def get(self, fmt, offset = None):
'''
Gets bytes from the buffer according to specified format or `offset`.
:Parameters:
- `fmt` (struct format) - conversion to be applied for reading
- `offset` (`integer`) - number of bytes to be retrieved
:returns: unpacked bytes
'''
fmt = self.endianness + fmt
offset = offset if offset else struct.calcsize(fmt)
return struct.unpack(fmt, self.raw(offset))[0]
def get_byte(self):
'''
Gets a single byte from the buffer.
:returns: single byte
'''
return self.get('b')
def get_int(self):
'''
Gets a single 32-bit integer from the buffer.
:returns: single integer
'''
return self.get('i')
def get_symbol(self):
'''
Gets a single, ``\\x00`` terminated string from the buffer.
:returns: ``\\x00`` terminated string
'''
new_position = self._data.find(b'\x00', self._position)
if new_position < 0:
raise QReaderException('Failed to read symbol from stream')
raw = self._data[self._position : new_position]
self._position = new_position + 1
return raw
def get_symbols(self, count):
'''
Gets ``count`` ``\\x00`` terminated strings from the buffer.
:Parameters:
- `count` (`integer`) - number of strings to be read
:returns: list of ``\\x00`` terminated string read from the buffer
'''
c = 0
new_position = self._position
if count == 0:
return []
while c < count:
new_position = self._data.find(b'\x00', new_position)
if new_position < 0:
raise QReaderException('Failed to read symbol from stream')
c += 1
new_position += 1
raw = self._data[self._position : new_position - 1]
self._position = new_position
return raw.split(b'\x00')
|
exxeleron/qPython | qpython/qreader.py | QReader.read_header | python | def read_header(self, source = None):
'''
Reads and parses message header.
.. note:: :func:`.read_header` wraps data for further reading in internal
buffer
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:returns: :class:`.QMessage` - read meta information
'''
if self._stream:
header = self._read_bytes(8)
self._buffer.wrap(header)
else:
self._buffer.wrap(source)
self._buffer.endianness = '<' if self._buffer.get_byte() == 1 else '>'
self._is_native = self._buffer.endianness == ('<' if sys.byteorder == 'little' else '>')
message_type = self._buffer.get_byte()
message_compressed = self._buffer.get_byte() == 1
# skip 1 byte
self._buffer.skip()
message_size = self._buffer.get_int()
return QMessage(None, message_type, message_size, message_compressed) | Reads and parses message header.
.. note:: :func:`.read_header` wraps data for further reading in internal
buffer
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:returns: :class:`.QMessage` - read meta information | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qreader.py#L144-L171 | [
"def _read_bytes(self, length):\n if not self._stream:\n raise QReaderException('There is no input data. QReader requires either stream or data chunk')\n\n if length == 0:\n return b''\n else:\n data = self._stream.read(length)\n\n if len(data) == 0:\n raise QReaderException('Error while reading data')\n return data\n"
] | class QReader(object):
'''
Provides deserialization from q IPC protocol.
:Parameters:
- `stream` (`file object` or `None`) - data input stream
- `encoding` (`string`) - encoding for characters parsing
:Attrbutes:
- `_reader_map` - stores mapping between q types and functions
responsible for parsing into Python objects
'''
_reader_map = {}
parse = Mapper(_reader_map)
def __init__(self, stream, encoding = 'latin-1'):
self._stream = stream
self._buffer = QReader.BytesBuffer()
self._encoding = encoding
def read(self, source = None, **options):
'''
Reads and optionally parses a single message.
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: :class:`.QMessage` - read data (parsed or raw byte form) along
with meta information
'''
message = self.read_header(source)
message.data = self.read_data(message.size, message.is_compressed, **options)
return message
def read_header(self, source = None):
'''
Reads and parses message header.
.. note:: :func:`.read_header` wraps data for further reading in internal
buffer
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:returns: :class:`.QMessage` - read meta information
'''
if self._stream:
header = self._read_bytes(8)
self._buffer.wrap(header)
else:
self._buffer.wrap(source)
self._buffer.endianness = '<' if self._buffer.get_byte() == 1 else '>'
self._is_native = self._buffer.endianness == ('<' if sys.byteorder == 'little' else '>')
message_type = self._buffer.get_byte()
message_compressed = self._buffer.get_byte() == 1
# skip 1 byte
self._buffer.skip()
message_size = self._buffer.get_int()
return QMessage(None, message_type, message_size, message_compressed)
def read_data(self, message_size, is_compressed = False, **options):
'''
Reads and optionally parses data part of a message.
.. note:: :func:`.read_header` is required to be called before executing
the :func:`.read_data`
:Parameters:
- `message_size` (`integer`) - size of the message to be read
- `is_compressed` (`boolean`) - indicates whether data is compressed
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: read data (parsed or raw byte form)
'''
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
if is_compressed:
if self._stream:
self._buffer.wrap(self._read_bytes(4))
uncompressed_size = -8 + self._buffer.get_int()
compressed_data = self._read_bytes(message_size - 12) if self._stream else self._buffer.raw(message_size - 12)
raw_data = numpy.frombuffer(compressed_data, dtype = numpy.uint8)
if uncompressed_size <= 0:
raise QReaderException('Error while data decompression.')
raw_data = uncompress(raw_data, numpy.intc(uncompressed_size))
raw_data = numpy.ndarray.tostring(raw_data)
self._buffer.wrap(raw_data)
elif self._stream:
raw_data = self._read_bytes(message_size - 8)
self._buffer.wrap(raw_data)
if not self._stream and self._options.raw:
raw_data = self._buffer.raw(message_size - 8)
return raw_data if self._options.raw else self._read_object()
def _read_object(self):
qtype = self._buffer.get_byte()
reader = self._get_reader(qtype)
if reader:
return reader(self, qtype)
elif qtype >= QBOOL_LIST and qtype <= QTIME_LIST:
return self._read_list(qtype)
elif qtype <= QBOOL and qtype >= QTIME:
return self._read_atom(qtype)
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
def _get_reader(self, qtype):
return self._reader_map.get(qtype, None)
@parse(QERROR)
def _read_error(self, qtype = QERROR):
raise QException(self._read_symbol())
@parse(QSTRING)
def _read_string(self, qtype = QSTRING):
self._buffer.skip() # ignore attributes
length = self._buffer.get_int()
return self._buffer.raw(length) if length > 0 else b''
@parse(QSYMBOL)
def _read_symbol(self, qtype = QSYMBOL):
return numpy.string_(self._buffer.get_symbol())
@parse(QCHAR)
def _read_char(self, qtype = QCHAR):
return chr(self._read_atom(QCHAR)).encode(self._encoding)
@parse(QGUID)
def _read_guid(self, qtype = QGUID):
return uuid.UUID(bytes = self._buffer.raw(16))
def _read_atom(self, qtype):
try:
fmt = STRUCT_MAP[qtype]
conversion = PY_TYPE[qtype]
return conversion(self._buffer.get(fmt))
except KeyError:
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
@parse(QTIMESPAN, QTIMESTAMP, QTIME, QSECOND, QMINUTE, QDATE, QMONTH, QDATETIME)
def _read_temporal(self, qtype):
try:
fmt = STRUCT_MAP[qtype]
conversion = PY_TYPE[qtype]
temporal = from_raw_qtemporal(conversion(self._buffer.get(fmt)), qtype = qtype)
return temporal if self._options.numpy_temporals else qtemporal(temporal, qtype = qtype)
except KeyError:
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
def _read_list(self, qtype):
self._buffer.skip() # ignore attributes
length = self._buffer.get_int()
conversion = PY_TYPE.get(-qtype, None)
if qtype == QSYMBOL_LIST:
symbols = self._buffer.get_symbols(length)
data = numpy.array(symbols, dtype = numpy.string_)
return qlist(data, qtype = qtype, adjust_dtype = False)
elif qtype == QGUID_LIST:
data = numpy.array([self._read_guid() for x in range(length)])
return qlist(data, qtype = qtype, adjust_dtype = False)
elif conversion:
raw = self._buffer.raw(length * ATOM_SIZE[qtype])
data = numpy.frombuffer(raw, dtype = conversion)
if not self._is_native:
data.byteswap(True)
if qtype >= QTIMESTAMP_LIST and qtype <= QTIME_LIST and self._options.numpy_temporals:
data = array_from_raw_qtemporal(data, qtype)
return qlist(data, qtype = qtype, adjust_dtype = False)
else:
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
@parse(QDICTIONARY)
def _read_dictionary(self, qtype = QDICTIONARY):
keys = self._read_object()
values = self._read_object()
if isinstance(keys, QTable):
return QKeyedTable(keys, values)
else:
return QDictionary(keys, values)
@parse(QTABLE)
def _read_table(self, qtype = QTABLE):
self._buffer.skip() # ignore attributes
self._buffer.skip() # ignore dict type stamp
columns = self._read_object()
data = self._read_object()
return qtable(columns, data, qtype = QTABLE)
@parse(QGENERAL_LIST)
def _read_general_list(self, qtype = QGENERAL_LIST):
self._buffer.skip() # ignore attributes
length = self._buffer.get_int()
return [self._read_object() for x in range(length)]
@parse(QNULL)
@parse(QUNARY_FUNC)
@parse(QBINARY_FUNC)
@parse(QTERNARY_FUNC)
def _read_function(self, qtype = QNULL):
code = self._buffer.get_byte()
return None if qtype == QNULL and code == 0 else QFunction(qtype)
@parse(QLAMBDA)
def _read_lambda(self, qtype = QLAMBDA):
self._buffer.get_symbol() # skip
expression = self._read_object()
return QLambda(expression.decode())
@parse(QCOMPOSITION_FUNC)
def _read_function_composition(self, qtype = QCOMPOSITION_FUNC):
self._read_projection(qtype) # skip
return QFunction(qtype)
@parse(QADVERB_FUNC_106)
@parse(QADVERB_FUNC_107)
@parse(QADVERB_FUNC_108)
@parse(QADVERB_FUNC_109)
@parse(QADVERB_FUNC_110)
@parse(QADVERB_FUNC_111)
def _read_adverb_function(self, qtype = QADVERB_FUNC_106):
self._read_object() # skip
return QFunction(qtype)
@parse(QPROJECTION)
def _read_projection(self, qtype = QPROJECTION):
length = self._buffer.get_int()
parameters = [ self._read_object() for x in range(length) ]
return QProjection(parameters)
def _read_bytes(self, length):
if not self._stream:
raise QReaderException('There is no input data. QReader requires either stream or data chunk')
if length == 0:
return b''
else:
data = self._stream.read(length)
if len(data) == 0:
raise QReaderException('Error while reading data')
return data
class BytesBuffer(object):
'''
Utility class for reading bytes from wrapped buffer.
'''
def __init__(self):
self._endianness = '@'
@property
def endianness(self):
'''
Gets the endianness.
'''
return self._endianness
@endianness.setter
def endianness(self, endianness):
'''
Sets the byte order (endianness) for reading from the buffer.
:Parameters:
- `endianness` (``<`` or ``>``) - byte order indicator
'''
self._endianness = endianness
def wrap(self, data):
'''
Wraps the data in the buffer.
:Parameters:
- `data` - data to be wrapped
'''
self._data = data
self._position = 0
self._size = len(data)
def skip(self, offset = 1):
'''
Skips reading of `offset` bytes.
:Parameters:
- `offset` (`integer`) - number of bytes to be skipped
'''
new_position = self._position + offset
if new_position > self._size:
raise QReaderException('Attempt to read data out of buffer bounds')
self._position = new_position
def raw(self, offset):
'''
Gets `offset` number of raw bytes.
:Parameters:
- `offset` (`integer`) - number of bytes to be retrieved
:returns: raw bytes
'''
new_position = self._position + offset
if new_position > self._size:
raise QReaderException('Attempt to read data out of buffer bounds')
raw = self._data[self._position : new_position]
self._position = new_position
return raw
def get(self, fmt, offset = None):
'''
Gets bytes from the buffer according to specified format or `offset`.
:Parameters:
- `fmt` (struct format) - conversion to be applied for reading
- `offset` (`integer`) - number of bytes to be retrieved
:returns: unpacked bytes
'''
fmt = self.endianness + fmt
offset = offset if offset else struct.calcsize(fmt)
return struct.unpack(fmt, self.raw(offset))[0]
def get_byte(self):
'''
Gets a single byte from the buffer.
:returns: single byte
'''
return self.get('b')
def get_int(self):
'''
Gets a single 32-bit integer from the buffer.
:returns: single integer
'''
return self.get('i')
def get_symbol(self):
'''
Gets a single, ``\\x00`` terminated string from the buffer.
:returns: ``\\x00`` terminated string
'''
new_position = self._data.find(b'\x00', self._position)
if new_position < 0:
raise QReaderException('Failed to read symbol from stream')
raw = self._data[self._position : new_position]
self._position = new_position + 1
return raw
def get_symbols(self, count):
'''
Gets ``count`` ``\\x00`` terminated strings from the buffer.
:Parameters:
- `count` (`integer`) - number of strings to be read
:returns: list of ``\\x00`` terminated string read from the buffer
'''
c = 0
new_position = self._position
if count == 0:
return []
while c < count:
new_position = self._data.find(b'\x00', new_position)
if new_position < 0:
raise QReaderException('Failed to read symbol from stream')
c += 1
new_position += 1
raw = self._data[self._position : new_position - 1]
self._position = new_position
return raw.split(b'\x00')
|
exxeleron/qPython | qpython/qreader.py | QReader.read_data | python | def read_data(self, message_size, is_compressed = False, **options):
'''
Reads and optionally parses data part of a message.
.. note:: :func:`.read_header` is required to be called before executing
the :func:`.read_data`
:Parameters:
- `message_size` (`integer`) - size of the message to be read
- `is_compressed` (`boolean`) - indicates whether data is compressed
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: read data (parsed or raw byte form)
'''
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
if is_compressed:
if self._stream:
self._buffer.wrap(self._read_bytes(4))
uncompressed_size = -8 + self._buffer.get_int()
compressed_data = self._read_bytes(message_size - 12) if self._stream else self._buffer.raw(message_size - 12)
raw_data = numpy.frombuffer(compressed_data, dtype = numpy.uint8)
if uncompressed_size <= 0:
raise QReaderException('Error while data decompression.')
raw_data = uncompress(raw_data, numpy.intc(uncompressed_size))
raw_data = numpy.ndarray.tostring(raw_data)
self._buffer.wrap(raw_data)
elif self._stream:
raw_data = self._read_bytes(message_size - 8)
self._buffer.wrap(raw_data)
if not self._stream and self._options.raw:
raw_data = self._buffer.raw(message_size - 8)
return raw_data if self._options.raw else self._read_object() | Reads and optionally parses data part of a message.
.. note:: :func:`.read_header` is required to be called before executing
the :func:`.read_data`
:Parameters:
- `message_size` (`integer`) - size of the message to be read
- `is_compressed` (`boolean`) - indicates whether data is compressed
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: read data (parsed or raw byte form) | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qreader.py#L174-L216 | [
"def uncompress(data, uncompressed_size):\n _0 = numpy.intc(0)\n _1 = numpy.intc(1)\n _2 = numpy.intc(2)\n _128 = numpy.intc(128)\n _255 = numpy.intc(255)\n\n n, r, s, p = _0, _0, _0, _0\n i, d = _1, _1\n f = _255 & data[_0]\n\n ptrs = numpy.zeros(256, dtype = numpy.intc)\n uncompressed = numpy.zeros(uncompressed_size, dtype = numpy.uint8)\n idx = numpy.arange(uncompressed_size, dtype = numpy.intc)\n\n while s < uncompressed_size:\n pp = p + _1\n\n if f & i:\n r = ptrs[data[d]]\n n = _2 + data[d + _1]\n uncompressed[idx[s:s + n]] = uncompressed[r:r + n]\n\n ptrs[(uncompressed[p]) ^ (uncompressed[pp])] = p\n if s == pp:\n ptrs[(uncompressed[pp]) ^ (uncompressed[pp + _1])] = pp\n\n d += _2\n r += _2\n s = s + n\n p = s\n\n else:\n uncompressed[s] = data[d]\n\n if pp == s:\n ptrs[(uncompressed[p]) ^ (uncompressed[pp])] = p\n p = pp\n\n s += _1\n d += _1\n\n if i == _128:\n if s < uncompressed_size:\n f = _255 & data[d]\n d += _1\n i = _1\n else:\n i += i\n\n return uncompressed\n",
"def union_dict(self, **kw):\n return dict(list(self.as_dict().items()) + list(kw.items()))\n",
"def _read_object(self):\n qtype = self._buffer.get_byte()\n\n reader = self._get_reader(qtype)\n\n if reader:\n return reader(self, qtype)\n elif qtype >= QBOOL_LIST and qtype <= QTIME_LIST:\n return self._read_list(qtype)\n elif qtype <= QBOOL and qtype >= QTIME:\n return self._read_atom(qtype)\n\n raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))\n",
"def _read_bytes(self, length):\n if not self._stream:\n raise QReaderException('There is no input data. QReader requires either stream or data chunk')\n\n if length == 0:\n return b''\n else:\n data = self._stream.read(length)\n\n if len(data) == 0:\n raise QReaderException('Error while reading data')\n return data\n"
] | class QReader(object):
'''
Provides deserialization from q IPC protocol.
:Parameters:
- `stream` (`file object` or `None`) - data input stream
- `encoding` (`string`) - encoding for characters parsing
:Attrbutes:
- `_reader_map` - stores mapping between q types and functions
responsible for parsing into Python objects
'''
_reader_map = {}
parse = Mapper(_reader_map)
def __init__(self, stream, encoding = 'latin-1'):
self._stream = stream
self._buffer = QReader.BytesBuffer()
self._encoding = encoding
def read(self, source = None, **options):
'''
Reads and optionally parses a single message.
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: :class:`.QMessage` - read data (parsed or raw byte form) along
with meta information
'''
message = self.read_header(source)
message.data = self.read_data(message.size, message.is_compressed, **options)
return message
def read_header(self, source = None):
'''
Reads and parses message header.
.. note:: :func:`.read_header` wraps data for further reading in internal
buffer
:Parameters:
- `source` - optional data buffer to be read, if not specified data is
read from the wrapped stream
:returns: :class:`.QMessage` - read meta information
'''
if self._stream:
header = self._read_bytes(8)
self._buffer.wrap(header)
else:
self._buffer.wrap(source)
self._buffer.endianness = '<' if self._buffer.get_byte() == 1 else '>'
self._is_native = self._buffer.endianness == ('<' if sys.byteorder == 'little' else '>')
message_type = self._buffer.get_byte()
message_compressed = self._buffer.get_byte() == 1
# skip 1 byte
self._buffer.skip()
message_size = self._buffer.get_int()
return QMessage(None, message_type, message_size, message_compressed)
def read_data(self, message_size, is_compressed = False, **options):
'''
Reads and optionally parses data part of a message.
.. note:: :func:`.read_header` is required to be called before executing
the :func:`.read_data`
:Parameters:
- `message_size` (`integer`) - size of the message to be read
- `is_compressed` (`boolean`) - indicates whether data is compressed
:Options:
- `raw` (`boolean`) - indicates whether read data should parsed or
returned in raw byte form
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: read data (parsed or raw byte form)
'''
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
if is_compressed:
if self._stream:
self._buffer.wrap(self._read_bytes(4))
uncompressed_size = -8 + self._buffer.get_int()
compressed_data = self._read_bytes(message_size - 12) if self._stream else self._buffer.raw(message_size - 12)
raw_data = numpy.frombuffer(compressed_data, dtype = numpy.uint8)
if uncompressed_size <= 0:
raise QReaderException('Error while data decompression.')
raw_data = uncompress(raw_data, numpy.intc(uncompressed_size))
raw_data = numpy.ndarray.tostring(raw_data)
self._buffer.wrap(raw_data)
elif self._stream:
raw_data = self._read_bytes(message_size - 8)
self._buffer.wrap(raw_data)
if not self._stream and self._options.raw:
raw_data = self._buffer.raw(message_size - 8)
return raw_data if self._options.raw else self._read_object()
def _read_object(self):
qtype = self._buffer.get_byte()
reader = self._get_reader(qtype)
if reader:
return reader(self, qtype)
elif qtype >= QBOOL_LIST and qtype <= QTIME_LIST:
return self._read_list(qtype)
elif qtype <= QBOOL and qtype >= QTIME:
return self._read_atom(qtype)
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
def _get_reader(self, qtype):
return self._reader_map.get(qtype, None)
@parse(QERROR)
def _read_error(self, qtype = QERROR):
raise QException(self._read_symbol())
@parse(QSTRING)
def _read_string(self, qtype = QSTRING):
self._buffer.skip() # ignore attributes
length = self._buffer.get_int()
return self._buffer.raw(length) if length > 0 else b''
@parse(QSYMBOL)
def _read_symbol(self, qtype = QSYMBOL):
return numpy.string_(self._buffer.get_symbol())
@parse(QCHAR)
def _read_char(self, qtype = QCHAR):
return chr(self._read_atom(QCHAR)).encode(self._encoding)
@parse(QGUID)
def _read_guid(self, qtype = QGUID):
return uuid.UUID(bytes = self._buffer.raw(16))
def _read_atom(self, qtype):
try:
fmt = STRUCT_MAP[qtype]
conversion = PY_TYPE[qtype]
return conversion(self._buffer.get(fmt))
except KeyError:
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
@parse(QTIMESPAN, QTIMESTAMP, QTIME, QSECOND, QMINUTE, QDATE, QMONTH, QDATETIME)
def _read_temporal(self, qtype):
try:
fmt = STRUCT_MAP[qtype]
conversion = PY_TYPE[qtype]
temporal = from_raw_qtemporal(conversion(self._buffer.get(fmt)), qtype = qtype)
return temporal if self._options.numpy_temporals else qtemporal(temporal, qtype = qtype)
except KeyError:
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
def _read_list(self, qtype):
self._buffer.skip() # ignore attributes
length = self._buffer.get_int()
conversion = PY_TYPE.get(-qtype, None)
if qtype == QSYMBOL_LIST:
symbols = self._buffer.get_symbols(length)
data = numpy.array(symbols, dtype = numpy.string_)
return qlist(data, qtype = qtype, adjust_dtype = False)
elif qtype == QGUID_LIST:
data = numpy.array([self._read_guid() for x in range(length)])
return qlist(data, qtype = qtype, adjust_dtype = False)
elif conversion:
raw = self._buffer.raw(length * ATOM_SIZE[qtype])
data = numpy.frombuffer(raw, dtype = conversion)
if not self._is_native:
data.byteswap(True)
if qtype >= QTIMESTAMP_LIST and qtype <= QTIME_LIST and self._options.numpy_temporals:
data = array_from_raw_qtemporal(data, qtype)
return qlist(data, qtype = qtype, adjust_dtype = False)
else:
raise QReaderException('Unable to deserialize q type: %s' % hex(qtype))
@parse(QDICTIONARY)
def _read_dictionary(self, qtype = QDICTIONARY):
keys = self._read_object()
values = self._read_object()
if isinstance(keys, QTable):
return QKeyedTable(keys, values)
else:
return QDictionary(keys, values)
@parse(QTABLE)
def _read_table(self, qtype = QTABLE):
self._buffer.skip() # ignore attributes
self._buffer.skip() # ignore dict type stamp
columns = self._read_object()
data = self._read_object()
return qtable(columns, data, qtype = QTABLE)
@parse(QGENERAL_LIST)
def _read_general_list(self, qtype = QGENERAL_LIST):
self._buffer.skip() # ignore attributes
length = self._buffer.get_int()
return [self._read_object() for x in range(length)]
@parse(QNULL)
@parse(QUNARY_FUNC)
@parse(QBINARY_FUNC)
@parse(QTERNARY_FUNC)
def _read_function(self, qtype = QNULL):
code = self._buffer.get_byte()
return None if qtype == QNULL and code == 0 else QFunction(qtype)
@parse(QLAMBDA)
def _read_lambda(self, qtype = QLAMBDA):
self._buffer.get_symbol() # skip
expression = self._read_object()
return QLambda(expression.decode())
@parse(QCOMPOSITION_FUNC)
def _read_function_composition(self, qtype = QCOMPOSITION_FUNC):
self._read_projection(qtype) # skip
return QFunction(qtype)
@parse(QADVERB_FUNC_106)
@parse(QADVERB_FUNC_107)
@parse(QADVERB_FUNC_108)
@parse(QADVERB_FUNC_109)
@parse(QADVERB_FUNC_110)
@parse(QADVERB_FUNC_111)
def _read_adverb_function(self, qtype = QADVERB_FUNC_106):
self._read_object() # skip
return QFunction(qtype)
@parse(QPROJECTION)
def _read_projection(self, qtype = QPROJECTION):
length = self._buffer.get_int()
parameters = [ self._read_object() for x in range(length) ]
return QProjection(parameters)
def _read_bytes(self, length):
if not self._stream:
raise QReaderException('There is no input data. QReader requires either stream or data chunk')
if length == 0:
return b''
else:
data = self._stream.read(length)
if len(data) == 0:
raise QReaderException('Error while reading data')
return data
class BytesBuffer(object):
'''
Utility class for reading bytes from wrapped buffer.
'''
def __init__(self):
self._endianness = '@'
@property
def endianness(self):
'''
Gets the endianness.
'''
return self._endianness
@endianness.setter
def endianness(self, endianness):
'''
Sets the byte order (endianness) for reading from the buffer.
:Parameters:
- `endianness` (``<`` or ``>``) - byte order indicator
'''
self._endianness = endianness
def wrap(self, data):
'''
Wraps the data in the buffer.
:Parameters:
- `data` - data to be wrapped
'''
self._data = data
self._position = 0
self._size = len(data)
def skip(self, offset = 1):
'''
Skips reading of `offset` bytes.
:Parameters:
- `offset` (`integer`) - number of bytes to be skipped
'''
new_position = self._position + offset
if new_position > self._size:
raise QReaderException('Attempt to read data out of buffer bounds')
self._position = new_position
def raw(self, offset):
'''
Gets `offset` number of raw bytes.
:Parameters:
- `offset` (`integer`) - number of bytes to be retrieved
:returns: raw bytes
'''
new_position = self._position + offset
if new_position > self._size:
raise QReaderException('Attempt to read data out of buffer bounds')
raw = self._data[self._position : new_position]
self._position = new_position
return raw
def get(self, fmt, offset = None):
'''
Gets bytes from the buffer according to specified format or `offset`.
:Parameters:
- `fmt` (struct format) - conversion to be applied for reading
- `offset` (`integer`) - number of bytes to be retrieved
:returns: unpacked bytes
'''
fmt = self.endianness + fmt
offset = offset if offset else struct.calcsize(fmt)
return struct.unpack(fmt, self.raw(offset))[0]
def get_byte(self):
'''
Gets a single byte from the buffer.
:returns: single byte
'''
return self.get('b')
def get_int(self):
'''
Gets a single 32-bit integer from the buffer.
:returns: single integer
'''
return self.get('i')
def get_symbol(self):
'''
Gets a single, ``\\x00`` terminated string from the buffer.
:returns: ``\\x00`` terminated string
'''
new_position = self._data.find(b'\x00', self._position)
if new_position < 0:
raise QReaderException('Failed to read symbol from stream')
raw = self._data[self._position : new_position]
self._position = new_position + 1
return raw
def get_symbols(self, count):
'''
Gets ``count`` ``\\x00`` terminated strings from the buffer.
:Parameters:
- `count` (`integer`) - number of strings to be read
:returns: list of ``\\x00`` terminated string read from the buffer
'''
c = 0
new_position = self._position
if count == 0:
return []
while c < count:
new_position = self._data.find(b'\x00', new_position)
if new_position < 0:
raise QReaderException('Failed to read symbol from stream')
c += 1
new_position += 1
raw = self._data[self._position : new_position - 1]
self._position = new_position
return raw.split(b'\x00')
|
exxeleron/qPython | qpython/qconnection.py | QConnection.open | python | def open(self):
'''Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
'''
if not self._connection:
if not self.host:
raise QConnectionException('Host cannot be None')
self._init_socket()
self._initialize()
self._writer = self._writer_class(self._connection, protocol_version = self._protocol_version, encoding = self._encoding)
self._reader = self._reader_class(self._connection_file, encoding = self._encoding) | Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException` | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L130-L147 | [
"def _init_socket(self):\n '''Initialises the socket used for communicating with a q service,'''\n try:\n self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._connection.connect((self.host, self.port))\n self._connection.settimeout(self.timeout)\n self._connection_file = self._connection.makefile('b')\n except:\n self._connection = None\n self._connection_file = None\n raise\n",
"def _initialize(self):\n '''Performs a IPC protocol handshake.'''\n credentials = (self.username if self.username else '') + ':' + (self.password if self.password else '')\n credentials = credentials.encode(self._encoding)\n self._connection.send(credentials + b'\\3\\0')\n response = self._connection.recv(1)\n\n if len(response) != 1:\n self.close()\n self._init_socket()\n\n self._connection.send(credentials + b'\\0')\n response = self._connection.recv(1)\n if len(response) != 1:\n self.close()\n raise QAuthenticationException('Connection denied.')\n\n self._protocol_version = min(struct.unpack('B', response)[0], 3)\n"
] | class QConnection(object):
'''Connector class for interfacing with the q service.
Provides methods for synchronous and asynchronous interaction.
The :class:`.QConnection` class provides a context manager API and can be
used with a ``with`` statement::
with qconnection.QConnection(host = 'localhost', port = 5000) as q:
print(q)
print(q('{`int$ til x}', 10))
:Parameters:
- `host` (`string`) - q service hostname
- `port` (`integer`) - q service port
- `username` (`string` or `None`) - username for q authentication/authorization
- `password` (`string` or `None`) - password for q authentication/authorization
- `timeout` (`nonnegative float` or `None`) - set a timeout on blocking socket operations
- `encoding` (`string`) - string encoding for data deserialization
- `reader_class` (subclass of `QReader`) - data deserializer
- `writer_class` (subclass of `QWriter`) - data serializer
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed
data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars, **Default**: ``False``
'''
def __init__(self, host, port, username = None, password = None, timeout = None, encoding = 'latin-1', reader_class = None, writer_class = None, **options):
self.host = host
self.port = port
self.username = username
self.password = password
self._connection = None
self._connection_file = None
self._protocol_version = None
self.timeout = timeout
self._encoding = encoding
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
try:
from qpython._pandas import PandasQReader, PandasQWriter
self._reader_class = PandasQReader
self._writer_class = PandasQWriter
except ImportError:
self._reader_class = QReader
self._writer_class = QWriter
if reader_class:
self._reader_class = reader_class
if writer_class:
self._writer_class = writer_class
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def protocol_version(self):
'''Retrieves established version of the IPC protocol.
:returns: `integer` -- version of the IPC protocol
'''
return self._protocol_version
def open(self):
'''Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
'''
if not self._connection:
if not self.host:
raise QConnectionException('Host cannot be None')
self._init_socket()
self._initialize()
self._writer = self._writer_class(self._connection, protocol_version = self._protocol_version, encoding = self._encoding)
self._reader = self._reader_class(self._connection_file, encoding = self._encoding)
def _init_socket(self):
'''Initialises the socket used for communicating with a q service,'''
try:
self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._connection.connect((self.host, self.port))
self._connection.settimeout(self.timeout)
self._connection_file = self._connection.makefile('b')
except:
self._connection = None
self._connection_file = None
raise
def close(self):
'''Closes connection with the q service.'''
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None
def is_connected(self):
'''Checks whether connection with a q service has been established.
Connection is considered inactive when:
- it has not been initialised,
- it has been closed.
:returns: `boolean` -- ``True`` if connection has been established,
``False`` otherwise
'''
return True if self._connection else False
def _initialize(self):
'''Performs a IPC protocol handshake.'''
credentials = (self.username if self.username else '') + ':' + (self.password if self.password else '')
credentials = credentials.encode(self._encoding)
self._connection.send(credentials + b'\3\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
self._init_socket()
self._connection.send(credentials + b'\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
raise QAuthenticationException('Connection denied.')
self._protocol_version = min(struct.unpack('B', response)[0], 3)
def __str__(self):
return '%s@:%s:%s' % (self.username, self.host, self.port) if self.username else ':%s:%s' % (self.host, self.port)
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))
def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected')
def sendAsync(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
self.query(MessageType.ASYNC, query, *parameters, **options)
def receive(self, data_only = True, **options):
'''Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException`
'''
result = self._reader.read(**self._options.union_dict(**options))
return result.data if data_only else result
def __call__(self, *parameters, **options):
return self.sendSync(parameters[0], *parameters[1:], **options)
|
exxeleron/qPython | qpython/qconnection.py | QConnection._init_socket | python | def _init_socket(self):
'''Initialises the socket used for communicating with a q service,'''
try:
self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._connection.connect((self.host, self.port))
self._connection.settimeout(self.timeout)
self._connection_file = self._connection.makefile('b')
except:
self._connection = None
self._connection_file = None
raise | Initialises the socket used for communicating with a q service, | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L150-L160 | null | class QConnection(object):
'''Connector class for interfacing with the q service.
Provides methods for synchronous and asynchronous interaction.
The :class:`.QConnection` class provides a context manager API and can be
used with a ``with`` statement::
with qconnection.QConnection(host = 'localhost', port = 5000) as q:
print(q)
print(q('{`int$ til x}', 10))
:Parameters:
- `host` (`string`) - q service hostname
- `port` (`integer`) - q service port
- `username` (`string` or `None`) - username for q authentication/authorization
- `password` (`string` or `None`) - password for q authentication/authorization
- `timeout` (`nonnegative float` or `None`) - set a timeout on blocking socket operations
- `encoding` (`string`) - string encoding for data deserialization
- `reader_class` (subclass of `QReader`) - data deserializer
- `writer_class` (subclass of `QWriter`) - data serializer
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed
data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars, **Default**: ``False``
'''
def __init__(self, host, port, username = None, password = None, timeout = None, encoding = 'latin-1', reader_class = None, writer_class = None, **options):
self.host = host
self.port = port
self.username = username
self.password = password
self._connection = None
self._connection_file = None
self._protocol_version = None
self.timeout = timeout
self._encoding = encoding
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
try:
from qpython._pandas import PandasQReader, PandasQWriter
self._reader_class = PandasQReader
self._writer_class = PandasQWriter
except ImportError:
self._reader_class = QReader
self._writer_class = QWriter
if reader_class:
self._reader_class = reader_class
if writer_class:
self._writer_class = writer_class
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def protocol_version(self):
'''Retrieves established version of the IPC protocol.
:returns: `integer` -- version of the IPC protocol
'''
return self._protocol_version
def open(self):
'''Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
'''
if not self._connection:
if not self.host:
raise QConnectionException('Host cannot be None')
self._init_socket()
self._initialize()
self._writer = self._writer_class(self._connection, protocol_version = self._protocol_version, encoding = self._encoding)
self._reader = self._reader_class(self._connection_file, encoding = self._encoding)
def close(self):
'''Closes connection with the q service.'''
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None
def is_connected(self):
'''Checks whether connection with a q service has been established.
Connection is considered inactive when:
- it has not been initialised,
- it has been closed.
:returns: `boolean` -- ``True`` if connection has been established,
``False`` otherwise
'''
return True if self._connection else False
def _initialize(self):
'''Performs a IPC protocol handshake.'''
credentials = (self.username if self.username else '') + ':' + (self.password if self.password else '')
credentials = credentials.encode(self._encoding)
self._connection.send(credentials + b'\3\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
self._init_socket()
self._connection.send(credentials + b'\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
raise QAuthenticationException('Connection denied.')
self._protocol_version = min(struct.unpack('B', response)[0], 3)
def __str__(self):
return '%s@:%s:%s' % (self.username, self.host, self.port) if self.username else ':%s:%s' % (self.host, self.port)
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))
def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected')
def sendAsync(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
self.query(MessageType.ASYNC, query, *parameters, **options)
def receive(self, data_only = True, **options):
'''Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException`
'''
result = self._reader.read(**self._options.union_dict(**options))
return result.data if data_only else result
def __call__(self, *parameters, **options):
return self.sendSync(parameters[0], *parameters[1:], **options)
|
exxeleron/qPython | qpython/qconnection.py | QConnection.close | python | def close(self):
'''Closes connection with the q service.'''
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None | Closes connection with the q service. | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L163-L169 | null | class QConnection(object):
'''Connector class for interfacing with the q service.
Provides methods for synchronous and asynchronous interaction.
The :class:`.QConnection` class provides a context manager API and can be
used with a ``with`` statement::
with qconnection.QConnection(host = 'localhost', port = 5000) as q:
print(q)
print(q('{`int$ til x}', 10))
:Parameters:
- `host` (`string`) - q service hostname
- `port` (`integer`) - q service port
- `username` (`string` or `None`) - username for q authentication/authorization
- `password` (`string` or `None`) - password for q authentication/authorization
- `timeout` (`nonnegative float` or `None`) - set a timeout on blocking socket operations
- `encoding` (`string`) - string encoding for data deserialization
- `reader_class` (subclass of `QReader`) - data deserializer
- `writer_class` (subclass of `QWriter`) - data serializer
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed
data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars, **Default**: ``False``
'''
def __init__(self, host, port, username = None, password = None, timeout = None, encoding = 'latin-1', reader_class = None, writer_class = None, **options):
self.host = host
self.port = port
self.username = username
self.password = password
self._connection = None
self._connection_file = None
self._protocol_version = None
self.timeout = timeout
self._encoding = encoding
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
try:
from qpython._pandas import PandasQReader, PandasQWriter
self._reader_class = PandasQReader
self._writer_class = PandasQWriter
except ImportError:
self._reader_class = QReader
self._writer_class = QWriter
if reader_class:
self._reader_class = reader_class
if writer_class:
self._writer_class = writer_class
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def protocol_version(self):
'''Retrieves established version of the IPC protocol.
:returns: `integer` -- version of the IPC protocol
'''
return self._protocol_version
def open(self):
'''Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
'''
if not self._connection:
if not self.host:
raise QConnectionException('Host cannot be None')
self._init_socket()
self._initialize()
self._writer = self._writer_class(self._connection, protocol_version = self._protocol_version, encoding = self._encoding)
self._reader = self._reader_class(self._connection_file, encoding = self._encoding)
def _init_socket(self):
'''Initialises the socket used for communicating with a q service,'''
try:
self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._connection.connect((self.host, self.port))
self._connection.settimeout(self.timeout)
self._connection_file = self._connection.makefile('b')
except:
self._connection = None
self._connection_file = None
raise
def is_connected(self):
'''Checks whether connection with a q service has been established.
Connection is considered inactive when:
- it has not been initialised,
- it has been closed.
:returns: `boolean` -- ``True`` if connection has been established,
``False`` otherwise
'''
return True if self._connection else False
def _initialize(self):
'''Performs a IPC protocol handshake.'''
credentials = (self.username if self.username else '') + ':' + (self.password if self.password else '')
credentials = credentials.encode(self._encoding)
self._connection.send(credentials + b'\3\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
self._init_socket()
self._connection.send(credentials + b'\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
raise QAuthenticationException('Connection denied.')
self._protocol_version = min(struct.unpack('B', response)[0], 3)
def __str__(self):
return '%s@:%s:%s' % (self.username, self.host, self.port) if self.username else ':%s:%s' % (self.host, self.port)
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))
def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected')
def sendAsync(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
self.query(MessageType.ASYNC, query, *parameters, **options)
def receive(self, data_only = True, **options):
'''Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException`
'''
result = self._reader.read(**self._options.union_dict(**options))
return result.data if data_only else result
def __call__(self, *parameters, **options):
return self.sendSync(parameters[0], *parameters[1:], **options)
|
exxeleron/qPython | qpython/qconnection.py | QConnection._initialize | python | def _initialize(self):
'''Performs a IPC protocol handshake.'''
credentials = (self.username if self.username else '') + ':' + (self.password if self.password else '')
credentials = credentials.encode(self._encoding)
self._connection.send(credentials + b'\3\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
self._init_socket()
self._connection.send(credentials + b'\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
raise QAuthenticationException('Connection denied.')
self._protocol_version = min(struct.unpack('B', response)[0], 3) | Performs a IPC protocol handshake. | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L185-L202 | [
"def _init_socket(self):\n '''Initialises the socket used for communicating with a q service,'''\n try:\n self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._connection.connect((self.host, self.port))\n self._connection.settimeout(self.timeout)\n self._connection_file = self._connection.makefile('b')\n except:\n self._connection = None\n self._connection_file = None\n raise\n",
"def close(self):\n '''Closes connection with the q service.'''\n if self._connection:\n self._connection_file.close()\n self._connection_file = None\n self._connection.close()\n self._connection = None\n"
] | class QConnection(object):
'''Connector class for interfacing with the q service.
Provides methods for synchronous and asynchronous interaction.
The :class:`.QConnection` class provides a context manager API and can be
used with a ``with`` statement::
with qconnection.QConnection(host = 'localhost', port = 5000) as q:
print(q)
print(q('{`int$ til x}', 10))
:Parameters:
- `host` (`string`) - q service hostname
- `port` (`integer`) - q service port
- `username` (`string` or `None`) - username for q authentication/authorization
- `password` (`string` or `None`) - password for q authentication/authorization
- `timeout` (`nonnegative float` or `None`) - set a timeout on blocking socket operations
- `encoding` (`string`) - string encoding for data deserialization
- `reader_class` (subclass of `QReader`) - data deserializer
- `writer_class` (subclass of `QWriter`) - data serializer
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed
data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars, **Default**: ``False``
'''
def __init__(self, host, port, username = None, password = None, timeout = None, encoding = 'latin-1', reader_class = None, writer_class = None, **options):
self.host = host
self.port = port
self.username = username
self.password = password
self._connection = None
self._connection_file = None
self._protocol_version = None
self.timeout = timeout
self._encoding = encoding
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
try:
from qpython._pandas import PandasQReader, PandasQWriter
self._reader_class = PandasQReader
self._writer_class = PandasQWriter
except ImportError:
self._reader_class = QReader
self._writer_class = QWriter
if reader_class:
self._reader_class = reader_class
if writer_class:
self._writer_class = writer_class
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def protocol_version(self):
'''Retrieves established version of the IPC protocol.
:returns: `integer` -- version of the IPC protocol
'''
return self._protocol_version
def open(self):
'''Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
'''
if not self._connection:
if not self.host:
raise QConnectionException('Host cannot be None')
self._init_socket()
self._initialize()
self._writer = self._writer_class(self._connection, protocol_version = self._protocol_version, encoding = self._encoding)
self._reader = self._reader_class(self._connection_file, encoding = self._encoding)
def _init_socket(self):
'''Initialises the socket used for communicating with a q service,'''
try:
self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._connection.connect((self.host, self.port))
self._connection.settimeout(self.timeout)
self._connection_file = self._connection.makefile('b')
except:
self._connection = None
self._connection_file = None
raise
def close(self):
'''Closes connection with the q service.'''
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None
def is_connected(self):
'''Checks whether connection with a q service has been established.
Connection is considered inactive when:
- it has not been initialised,
- it has been closed.
:returns: `boolean` -- ``True`` if connection has been established,
``False`` otherwise
'''
return True if self._connection else False
def __str__(self):
return '%s@:%s:%s' % (self.username, self.host, self.port) if self.username else ':%s:%s' % (self.host, self.port)
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))
def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected')
def sendAsync(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
self.query(MessageType.ASYNC, query, *parameters, **options)
def receive(self, data_only = True, **options):
'''Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException`
'''
result = self._reader.read(**self._options.union_dict(**options))
return result.data if data_only else result
def __call__(self, *parameters, **options):
return self.sendSync(parameters[0], *parameters[1:], **options)
|
exxeleron/qPython | qpython/qconnection.py | QConnection.query | python | def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options)) | Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException` | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L209-L245 | [
"def union_dict(self, **kw):\n return dict(list(self.as_dict().items()) + list(kw.items()))\n"
] | class QConnection(object):
'''Connector class for interfacing with the q service.
Provides methods for synchronous and asynchronous interaction.
The :class:`.QConnection` class provides a context manager API and can be
used with a ``with`` statement::
with qconnection.QConnection(host = 'localhost', port = 5000) as q:
print(q)
print(q('{`int$ til x}', 10))
:Parameters:
- `host` (`string`) - q service hostname
- `port` (`integer`) - q service port
- `username` (`string` or `None`) - username for q authentication/authorization
- `password` (`string` or `None`) - password for q authentication/authorization
- `timeout` (`nonnegative float` or `None`) - set a timeout on blocking socket operations
- `encoding` (`string`) - string encoding for data deserialization
- `reader_class` (subclass of `QReader`) - data deserializer
- `writer_class` (subclass of `QWriter`) - data serializer
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed
data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars, **Default**: ``False``
'''
def __init__(self, host, port, username = None, password = None, timeout = None, encoding = 'latin-1', reader_class = None, writer_class = None, **options):
self.host = host
self.port = port
self.username = username
self.password = password
self._connection = None
self._connection_file = None
self._protocol_version = None
self.timeout = timeout
self._encoding = encoding
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
try:
from qpython._pandas import PandasQReader, PandasQWriter
self._reader_class = PandasQReader
self._writer_class = PandasQWriter
except ImportError:
self._reader_class = QReader
self._writer_class = QWriter
if reader_class:
self._reader_class = reader_class
if writer_class:
self._writer_class = writer_class
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def protocol_version(self):
'''Retrieves established version of the IPC protocol.
:returns: `integer` -- version of the IPC protocol
'''
return self._protocol_version
def open(self):
'''Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
'''
if not self._connection:
if not self.host:
raise QConnectionException('Host cannot be None')
self._init_socket()
self._initialize()
self._writer = self._writer_class(self._connection, protocol_version = self._protocol_version, encoding = self._encoding)
self._reader = self._reader_class(self._connection_file, encoding = self._encoding)
def _init_socket(self):
'''Initialises the socket used for communicating with a q service,'''
try:
self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._connection.connect((self.host, self.port))
self._connection.settimeout(self.timeout)
self._connection_file = self._connection.makefile('b')
except:
self._connection = None
self._connection_file = None
raise
def close(self):
'''Closes connection with the q service.'''
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None
def is_connected(self):
'''Checks whether connection with a q service has been established.
Connection is considered inactive when:
- it has not been initialised,
- it has been closed.
:returns: `boolean` -- ``True`` if connection has been established,
``False`` otherwise
'''
return True if self._connection else False
def _initialize(self):
'''Performs a IPC protocol handshake.'''
credentials = (self.username if self.username else '') + ':' + (self.password if self.password else '')
credentials = credentials.encode(self._encoding)
self._connection.send(credentials + b'\3\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
self._init_socket()
self._connection.send(credentials + b'\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
raise QAuthenticationException('Connection denied.')
self._protocol_version = min(struct.unpack('B', response)[0], 3)
def __str__(self):
return '%s@:%s:%s' % (self.username, self.host, self.port) if self.username else ':%s:%s' % (self.host, self.port)
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))
def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected')
def sendAsync(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
self.query(MessageType.ASYNC, query, *parameters, **options)
def receive(self, data_only = True, **options):
'''Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException`
'''
result = self._reader.read(**self._options.union_dict(**options))
return result.data if data_only else result
def __call__(self, *parameters, **options):
return self.sendSync(parameters[0], *parameters[1:], **options)
|
exxeleron/qPython | qpython/qconnection.py | QConnection.sendSync | python | def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected') | Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException` | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L248-L309 | [
"def query(self, msg_type, query, *parameters, **options):\n '''Performs a query against a q service.\n\n In typical use case, `query` is the name of the function to call and \n `parameters` are its parameters. When `parameters` list is empty, the \n query can be an arbitrary q expression (e.g. ``0 +/ til 100``).\n\n Calls a anonymous function with a single parameter:\n\n >>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)\n\n Executes a q expression:\n\n >>> q.query(qconnection.MessageType.SYNC,'til 10')\n\n :Parameters:\n - `msg_type` (one of the constants defined in :class:`.MessageType`) - \n type of the query to be executed\n - `query` (`string`) - query to be executed\n - `parameters` (`list` or `None`) - parameters for the query\n :Options:\n - `single_char_strings` (`boolean`) - if ``True`` single char Python \n strings are encoded as q strings instead of chars, \n **Default**: ``False``\n\n :raises: :class:`.QConnectionException`, :class:`.QWriterException`\n '''\n if not self._connection:\n raise QConnectionException('Connection is not established.')\n\n if parameters and len(parameters) > 8:\n raise QWriterException('Too many parameters.')\n\n if not parameters or len(parameters) == 0:\n self._writer.write(query, msg_type, **self._options.union_dict(**options))\n else:\n self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))\n",
"def receive(self, data_only = True, **options):\n '''Reads and (optionally) parses the response from a q service.\n\n Retrieves query result along with meta-information:\n\n >>> q.query(qconnection.MessageType.SYNC,'{x}', 10)\n >>> print(q.receive(data_only = False, raw = False))\n QMessage: message type: 2, data size: 13, is_compressed: False, data: 10\n\n Retrieves parsed query result:\n\n >>> q.query(qconnection.MessageType.SYNC,'{x}', 10)\n >>> print(q.receive(data_only = True, raw = False))\n 10\n\n Retrieves not-parsed (raw) query result:\n\n >>> from binascii import hexlify\n >>> q.query(qconnection.MessageType.SYNC,'{x}', 10)\n >>> print(hexlify(q.receive(data_only = True, raw = True)))\n fa0a000000\n\n :Parameters:\n - `data_only` (`boolean`) - if ``True`` returns only data part of the \n message, otherwise returns data and message meta-information \n encapsulated in :class:`.QMessage` instance \n :Options:\n - `raw` (`boolean`) - if ``True`` returns raw data chunk instead of \n parsed data, **Default**: ``False``\n - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are\n backed by raw q representation (:class:`.QTemporalList`, \n :class:`.QTemporal`) instances, otherwise are represented as \n `numpy datetime64`/`timedelta64` arrays and atoms,\n **Default**: ``False``\n\n :returns: depending on parameter flags: :class:`.QMessage` instance, \n parsed message, raw data \n :raises: :class:`.QReaderException`\n '''\n result = self._reader.read(**self._options.union_dict(**options))\n return result.data if data_only else result\n"
] | class QConnection(object):
'''Connector class for interfacing with the q service.
Provides methods for synchronous and asynchronous interaction.
The :class:`.QConnection` class provides a context manager API and can be
used with a ``with`` statement::
with qconnection.QConnection(host = 'localhost', port = 5000) as q:
print(q)
print(q('{`int$ til x}', 10))
:Parameters:
- `host` (`string`) - q service hostname
- `port` (`integer`) - q service port
- `username` (`string` or `None`) - username for q authentication/authorization
- `password` (`string` or `None`) - password for q authentication/authorization
- `timeout` (`nonnegative float` or `None`) - set a timeout on blocking socket operations
- `encoding` (`string`) - string encoding for data deserialization
- `reader_class` (subclass of `QReader`) - data deserializer
- `writer_class` (subclass of `QWriter`) - data serializer
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed
data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars, **Default**: ``False``
'''
def __init__(self, host, port, username = None, password = None, timeout = None, encoding = 'latin-1', reader_class = None, writer_class = None, **options):
self.host = host
self.port = port
self.username = username
self.password = password
self._connection = None
self._connection_file = None
self._protocol_version = None
self.timeout = timeout
self._encoding = encoding
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
try:
from qpython._pandas import PandasQReader, PandasQWriter
self._reader_class = PandasQReader
self._writer_class = PandasQWriter
except ImportError:
self._reader_class = QReader
self._writer_class = QWriter
if reader_class:
self._reader_class = reader_class
if writer_class:
self._writer_class = writer_class
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def protocol_version(self):
'''Retrieves established version of the IPC protocol.
:returns: `integer` -- version of the IPC protocol
'''
return self._protocol_version
def open(self):
'''Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
'''
if not self._connection:
if not self.host:
raise QConnectionException('Host cannot be None')
self._init_socket()
self._initialize()
self._writer = self._writer_class(self._connection, protocol_version = self._protocol_version, encoding = self._encoding)
self._reader = self._reader_class(self._connection_file, encoding = self._encoding)
def _init_socket(self):
'''Initialises the socket used for communicating with a q service,'''
try:
self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._connection.connect((self.host, self.port))
self._connection.settimeout(self.timeout)
self._connection_file = self._connection.makefile('b')
except:
self._connection = None
self._connection_file = None
raise
def close(self):
'''Closes connection with the q service.'''
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None
def is_connected(self):
'''Checks whether connection with a q service has been established.
Connection is considered inactive when:
- it has not been initialised,
- it has been closed.
:returns: `boolean` -- ``True`` if connection has been established,
``False`` otherwise
'''
return True if self._connection else False
def _initialize(self):
'''Performs a IPC protocol handshake.'''
credentials = (self.username if self.username else '') + ':' + (self.password if self.password else '')
credentials = credentials.encode(self._encoding)
self._connection.send(credentials + b'\3\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
self._init_socket()
self._connection.send(credentials + b'\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
raise QAuthenticationException('Connection denied.')
self._protocol_version = min(struct.unpack('B', response)[0], 3)
def __str__(self):
return '%s@:%s:%s' % (self.username, self.host, self.port) if self.username else ':%s:%s' % (self.host, self.port)
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))
def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected')
def sendAsync(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
self.query(MessageType.ASYNC, query, *parameters, **options)
def receive(self, data_only = True, **options):
'''Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException`
'''
result = self._reader.read(**self._options.union_dict(**options))
return result.data if data_only else result
def __call__(self, *parameters, **options):
return self.sendSync(parameters[0], *parameters[1:], **options)
|
exxeleron/qPython | qpython/qconnection.py | QConnection.sendAsync | python | def sendAsync(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
self.query(MessageType.ASYNC, query, *parameters, **options) | Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException` | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L312-L338 | [
"def query(self, msg_type, query, *parameters, **options):\n '''Performs a query against a q service.\n\n In typical use case, `query` is the name of the function to call and \n `parameters` are its parameters. When `parameters` list is empty, the \n query can be an arbitrary q expression (e.g. ``0 +/ til 100``).\n\n Calls a anonymous function with a single parameter:\n\n >>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)\n\n Executes a q expression:\n\n >>> q.query(qconnection.MessageType.SYNC,'til 10')\n\n :Parameters:\n - `msg_type` (one of the constants defined in :class:`.MessageType`) - \n type of the query to be executed\n - `query` (`string`) - query to be executed\n - `parameters` (`list` or `None`) - parameters for the query\n :Options:\n - `single_char_strings` (`boolean`) - if ``True`` single char Python \n strings are encoded as q strings instead of chars, \n **Default**: ``False``\n\n :raises: :class:`.QConnectionException`, :class:`.QWriterException`\n '''\n if not self._connection:\n raise QConnectionException('Connection is not established.')\n\n if parameters and len(parameters) > 8:\n raise QWriterException('Too many parameters.')\n\n if not parameters or len(parameters) == 0:\n self._writer.write(query, msg_type, **self._options.union_dict(**options))\n else:\n self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))\n"
] | class QConnection(object):
'''Connector class for interfacing with the q service.
Provides methods for synchronous and asynchronous interaction.
The :class:`.QConnection` class provides a context manager API and can be
used with a ``with`` statement::
with qconnection.QConnection(host = 'localhost', port = 5000) as q:
print(q)
print(q('{`int$ til x}', 10))
:Parameters:
- `host` (`string`) - q service hostname
- `port` (`integer`) - q service port
- `username` (`string` or `None`) - username for q authentication/authorization
- `password` (`string` or `None`) - password for q authentication/authorization
- `timeout` (`nonnegative float` or `None`) - set a timeout on blocking socket operations
- `encoding` (`string`) - string encoding for data deserialization
- `reader_class` (subclass of `QReader`) - data deserializer
- `writer_class` (subclass of `QWriter`) - data serializer
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed
data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars, **Default**: ``False``
'''
def __init__(self, host, port, username = None, password = None, timeout = None, encoding = 'latin-1', reader_class = None, writer_class = None, **options):
self.host = host
self.port = port
self.username = username
self.password = password
self._connection = None
self._connection_file = None
self._protocol_version = None
self.timeout = timeout
self._encoding = encoding
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
try:
from qpython._pandas import PandasQReader, PandasQWriter
self._reader_class = PandasQReader
self._writer_class = PandasQWriter
except ImportError:
self._reader_class = QReader
self._writer_class = QWriter
if reader_class:
self._reader_class = reader_class
if writer_class:
self._writer_class = writer_class
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def protocol_version(self):
'''Retrieves established version of the IPC protocol.
:returns: `integer` -- version of the IPC protocol
'''
return self._protocol_version
def open(self):
'''Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
'''
if not self._connection:
if not self.host:
raise QConnectionException('Host cannot be None')
self._init_socket()
self._initialize()
self._writer = self._writer_class(self._connection, protocol_version = self._protocol_version, encoding = self._encoding)
self._reader = self._reader_class(self._connection_file, encoding = self._encoding)
def _init_socket(self):
'''Initialises the socket used for communicating with a q service,'''
try:
self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._connection.connect((self.host, self.port))
self._connection.settimeout(self.timeout)
self._connection_file = self._connection.makefile('b')
except:
self._connection = None
self._connection_file = None
raise
def close(self):
'''Closes connection with the q service.'''
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None
def is_connected(self):
'''Checks whether connection with a q service has been established.
Connection is considered inactive when:
- it has not been initialised,
- it has been closed.
:returns: `boolean` -- ``True`` if connection has been established,
``False`` otherwise
'''
return True if self._connection else False
def _initialize(self):
'''Performs a IPC protocol handshake.'''
credentials = (self.username if self.username else '') + ':' + (self.password if self.password else '')
credentials = credentials.encode(self._encoding)
self._connection.send(credentials + b'\3\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
self._init_socket()
self._connection.send(credentials + b'\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
raise QAuthenticationException('Connection denied.')
self._protocol_version = min(struct.unpack('B', response)[0], 3)
def __str__(self):
return '%s@:%s:%s' % (self.username, self.host, self.port) if self.username else ':%s:%s' % (self.host, self.port)
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))
def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected')
def sendAsync(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
self.query(MessageType.ASYNC, query, *parameters, **options)
def receive(self, data_only = True, **options):
'''Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException`
'''
result = self._reader.read(**self._options.union_dict(**options))
return result.data if data_only else result
def __call__(self, *parameters, **options):
return self.sendSync(parameters[0], *parameters[1:], **options)
|
exxeleron/qPython | qpython/qconnection.py | QConnection.receive | python | def receive(self, data_only = True, **options):
'''Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException`
'''
result = self._reader.read(**self._options.union_dict(**options))
return result.data if data_only else result | Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException` | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qconnection.py#L341-L381 | [
"def union_dict(self, **kw):\n return dict(list(self.as_dict().items()) + list(kw.items()))\n"
] | class QConnection(object):
'''Connector class for interfacing with the q service.
Provides methods for synchronous and asynchronous interaction.
The :class:`.QConnection` class provides a context manager API and can be
used with a ``with`` statement::
with qconnection.QConnection(host = 'localhost', port = 5000) as q:
print(q)
print(q('{`int$ til x}', 10))
:Parameters:
- `host` (`string`) - q service hostname
- `port` (`integer`) - q service port
- `username` (`string` or `None`) - username for q authentication/authorization
- `password` (`string` or `None`) - password for q authentication/authorization
- `timeout` (`nonnegative float` or `None`) - set a timeout on blocking socket operations
- `encoding` (`string`) - string encoding for data deserialization
- `reader_class` (subclass of `QReader`) - data deserializer
- `writer_class` (subclass of `QWriter`) - data serializer
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed
data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars, **Default**: ``False``
'''
def __init__(self, host, port, username = None, password = None, timeout = None, encoding = 'latin-1', reader_class = None, writer_class = None, **options):
self.host = host
self.port = port
self.username = username
self.password = password
self._connection = None
self._connection_file = None
self._protocol_version = None
self.timeout = timeout
self._encoding = encoding
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
try:
from qpython._pandas import PandasQReader, PandasQWriter
self._reader_class = PandasQReader
self._writer_class = PandasQWriter
except ImportError:
self._reader_class = QReader
self._writer_class = QWriter
if reader_class:
self._reader_class = reader_class
if writer_class:
self._writer_class = writer_class
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
@property
def protocol_version(self):
'''Retrieves established version of the IPC protocol.
:returns: `integer` -- version of the IPC protocol
'''
return self._protocol_version
def open(self):
'''Initialises connection to q service.
If the connection hasn't been initialised yet, invoking the
:func:`.open` creates a new socket and performs a handshake with a q
service.
:raises: :class:`.QConnectionException`, :class:`.QAuthenticationException`
'''
if not self._connection:
if not self.host:
raise QConnectionException('Host cannot be None')
self._init_socket()
self._initialize()
self._writer = self._writer_class(self._connection, protocol_version = self._protocol_version, encoding = self._encoding)
self._reader = self._reader_class(self._connection_file, encoding = self._encoding)
def _init_socket(self):
'''Initialises the socket used for communicating with a q service,'''
try:
self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._connection.connect((self.host, self.port))
self._connection.settimeout(self.timeout)
self._connection_file = self._connection.makefile('b')
except:
self._connection = None
self._connection_file = None
raise
def close(self):
'''Closes connection with the q service.'''
if self._connection:
self._connection_file.close()
self._connection_file = None
self._connection.close()
self._connection = None
def is_connected(self):
'''Checks whether connection with a q service has been established.
Connection is considered inactive when:
- it has not been initialised,
- it has been closed.
:returns: `boolean` -- ``True`` if connection has been established,
``False`` otherwise
'''
return True if self._connection else False
def _initialize(self):
'''Performs a IPC protocol handshake.'''
credentials = (self.username if self.username else '') + ':' + (self.password if self.password else '')
credentials = credentials.encode(self._encoding)
self._connection.send(credentials + b'\3\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
self._init_socket()
self._connection.send(credentials + b'\0')
response = self._connection.recv(1)
if len(response) != 1:
self.close()
raise QAuthenticationException('Connection denied.')
self._protocol_version = min(struct.unpack('B', response)[0], 3)
def __str__(self):
return '%s@:%s:%s' % (self.username, self.host, self.port) if self.username else ':%s:%s' % (self.host, self.port)
def query(self, msg_type, query, *parameters, **options):
'''Performs a query against a q service.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.query(qconnection.MessageType.SYNC,'{til x}', 10)
Executes a q expression:
>>> q.query(qconnection.MessageType.SYNC,'til 10')
:Parameters:
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the query to be executed
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
if not self._connection:
raise QConnectionException('Connection is not established.')
if parameters and len(parameters) > 8:
raise QWriterException('Too many parameters.')
if not parameters or len(parameters) == 0:
self._writer.write(query, msg_type, **self._options.union_dict(**options))
else:
self._writer.write([query] + list(parameters), msg_type, **self._options.union_dict(**options))
def sendSync(self, query, *parameters, **options):
'''Performs a synchronous query against a q service and returns parsed
data.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Executes a q expression:
>>> print(q.sendSync('til 10'))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with a single parameter:
>>> print(q.sendSync('{til x}', 10))
[0 1 2 3 4 5 6 7 8 9]
Executes an anonymous q function with two parameters:
>>> print(q.sendSync('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
>>> print(q.sendSync('{y + til x}', *[10, 1]))
[ 1 2 3 4 5 6 7 8 9 10]
The :func:`.sendSync` is called from the overloaded :func:`.__call__`
function. This allows :class:`.QConnection` instance to be called as
a function:
>>> print(q('{y + til x}', 10, 1))
[ 1 2 3 4 5 6 7 8 9 10]
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: query result parsed to Python data structures
:raises: :class:`.QConnectionException`, :class:`.QWriterException`,
:class:`.QReaderException`
'''
self.query(MessageType.SYNC, query, *parameters, **options)
response = self.receive(data_only = False, **options)
if response.type == MessageType.RESPONSE:
return response.data
else:
self._writer.write(QException('nyi: qPython expected response message'), MessageType.ASYNC if response.type == MessageType.ASYNC else MessageType.RESPONSE)
raise QReaderException('Received message of type: %s where response was expected')
def sendAsync(self, query, *parameters, **options):
'''Performs an asynchronous query and returns **without** retrieving of
the response.
In typical use case, `query` is the name of the function to call and
`parameters` are its parameters. When `parameters` list is empty, the
query can be an arbitrary q expression (e.g. ``0 +/ til 100``).
Calls a anonymous function with a single parameter:
>>> q.sendAsync('{til x}', 10)
Executes a q expression:
>>> q.sendAsync('til 10')
:Parameters:
- `query` (`string`) - query to be executed
- `parameters` (`list` or `None`) - parameters for the query
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:raises: :class:`.QConnectionException`, :class:`.QWriterException`
'''
self.query(MessageType.ASYNC, query, *parameters, **options)
def receive(self, data_only = True, **options):
'''Reads and (optionally) parses the response from a q service.
Retrieves query result along with meta-information:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = False, raw = False))
QMessage: message type: 2, data size: 13, is_compressed: False, data: 10
Retrieves parsed query result:
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(q.receive(data_only = True, raw = False))
10
Retrieves not-parsed (raw) query result:
>>> from binascii import hexlify
>>> q.query(qconnection.MessageType.SYNC,'{x}', 10)
>>> print(hexlify(q.receive(data_only = True, raw = True)))
fa0a000000
:Parameters:
- `data_only` (`boolean`) - if ``True`` returns only data part of the
message, otherwise returns data and message meta-information
encapsulated in :class:`.QMessage` instance
:Options:
- `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
parsed data, **Default**: ``False``
- `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
backed by raw q representation (:class:`.QTemporalList`,
:class:`.QTemporal`) instances, otherwise are represented as
`numpy datetime64`/`timedelta64` arrays and atoms,
**Default**: ``False``
:returns: depending on parameter flags: :class:`.QMessage` instance,
parsed message, raw data
:raises: :class:`.QReaderException`
'''
result = self._reader.read(**self._options.union_dict(**options))
return result.data if data_only else result
def __call__(self, *parameters, **options):
return self.sendSync(parameters[0], *parameters[1:], **options)
|
exxeleron/qPython | qpython/qtemporal.py | qtemporal | python | def qtemporal(dt, **meta):
'''Converts a `numpy.datetime64` or `numpy.timedelta64` to
:class:`.QTemporal` and enriches object instance with given meta data.
Examples:
>>> qtemporal(numpy.datetime64('2001-01-01', 'D'), qtype=QDATE)
2001-01-01 [metadata(qtype=-14)]
>>> qtemporal(numpy.timedelta64(43499123, 'ms'), qtype=QTIME)
43499123 milliseconds [metadata(qtype=-19)]
>>> qtemporal(qnull(QDATETIME), qtype=QDATETIME)
nan [metadata(qtype=-15)]
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
:Kwargs:
- `qtype` (`integer`) - qtype indicator
:returns: `QTemporal` - wrapped datetime
'''
result = QTemporal(dt)
result._meta_init(**meta)
return result | Converts a `numpy.datetime64` or `numpy.timedelta64` to
:class:`.QTemporal` and enriches object instance with given meta data.
Examples:
>>> qtemporal(numpy.datetime64('2001-01-01', 'D'), qtype=QDATE)
2001-01-01 [metadata(qtype=-14)]
>>> qtemporal(numpy.timedelta64(43499123, 'ms'), qtype=QTIME)
43499123 milliseconds [metadata(qtype=-19)]
>>> qtemporal(qnull(QDATETIME), qtype=QDATETIME)
nan [metadata(qtype=-15)]
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
:Kwargs:
- `qtype` (`integer`) - qtype indicator
:returns: `QTemporal` - wrapped datetime | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qtemporal.py#L82-L104 | [
"def _meta_init(self, **meta):\n self.meta = MetaData(**meta)\n"
] | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qpython import MetaData
from qpython.qtype import * # @UnusedWildImport
from numpy import longlong
_MILLIS_PER_DAY = 24 * 60 * 60 * 1000
_MILLIS_PER_DAY_FLOAT = float(_MILLIS_PER_DAY)
_QEPOCH_MS = long(10957 * _MILLIS_PER_DAY)
_EPOCH_QTIMESTAMP_NS = _QEPOCH_MS * 1000000
_EPOCH_QMONTH = numpy.datetime64('2000-01', 'M')
_EPOCH_QDATE = numpy.datetime64('2000-01-01', 'D')
_EPOCH_QDATETIME = numpy.datetime64(_QEPOCH_MS, 'ms')
_EPOCH_TIMESTAMP = numpy.datetime64(_EPOCH_QTIMESTAMP_NS, 'ns')
_QMONTH_NULL = qnull(QMONTH)
_QDATE_NULL = qnull(QDATE)
_QDATETIME_NULL = qnull(QDATETIME)
_QMINUTE_NULL = qnull(QMINUTE)
_QSECOND_NULL = qnull(QSECOND)
_QTIME_NULL = qnull(QTIME)
_QTIMESTAMP_NULL = qnull(QTIMESTAMP)
_QTIMESPAN_NULL = qnull(QTIMESPAN)
class QTemporal(object):
'''
Represents a q temporal value.
The :class:`.QTemporal` wraps `numpy.datetime64` or `numpy.timedelta64`
along with meta-information like qtype indicator.
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
'''
def __init__(self, dt):
self._datetime = dt
def _meta_init(self, **meta):
self.meta = MetaData(**meta)
@property
def raw(self):
'''Return wrapped datetime object.
:returns: `numpy.datetime64` or `numpy.timedelta64` - wrapped datetime
'''
return self._datetime
def __str__(self):
return '%s [%s]' % (self._datetime, self.meta)
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.meta.qtype == other.meta.qtype
and self._datetime == other._datetime)
def __ne__(self, other):
return not self.__eq__(other)
def qtemporal(dt, **meta):
'''Converts a `numpy.datetime64` or `numpy.timedelta64` to
:class:`.QTemporal` and enriches object instance with given meta data.
Examples:
>>> qtemporal(numpy.datetime64('2001-01-01', 'D'), qtype=QDATE)
2001-01-01 [metadata(qtype=-14)]
>>> qtemporal(numpy.timedelta64(43499123, 'ms'), qtype=QTIME)
43499123 milliseconds [metadata(qtype=-19)]
>>> qtemporal(qnull(QDATETIME), qtype=QDATETIME)
nan [metadata(qtype=-15)]
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
:Kwargs:
- `qtype` (`integer`) - qtype indicator
:returns: `QTemporal` - wrapped datetime
'''
result = QTemporal(dt)
result._meta_init(**meta)
return result
def from_raw_qtemporal(raw, qtype):
'''
Converts raw numeric value to `numpy.datetime64` or `numpy.timedelta64`
instance.
Actual conversion applied to raw numeric value depends on `qtype` parameter.
:Parameters:
- `raw` (`integer`, `float`) - raw representation to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.datetime64` or `numpy.timedelta64` - converted datetime
'''
return _FROM_Q[qtype](raw)
def to_raw_qtemporal(dt, qtype):
'''
Converts datetime/timedelta instance to raw numeric value.
Actual conversion applied to datetime/timedelta instance depends on `qtype`
parameter.
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime/timedelta
object to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `integer`, `float` - raw numeric value
'''
return _TO_Q[qtype](dt)
def array_from_raw_qtemporal(raw, qtype):
'''
Converts `numpy.array` containing raw q representation to ``datetime64``/``timedelta64``
array.
Examples:
>>> raw = numpy.array([366, 121, qnull(QDATE)])
>>> print(array_from_raw_qtemporal(raw, qtype = QDATE))
['2001-01-01' '2000-05-01' 'NaT']
:Parameters:
- `raw` (`numpy.array`) - numpy raw array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with ``datetime64``/``timedelta64``
:raises: `ValueError`
'''
if not isinstance(raw, numpy.ndarray):
raise ValueError('raw parameter is expected to be of type: numpy.ndarray. Was: %s' % type(raw))
qtype = -abs(qtype)
conversion = _FROM_RAW_LIST[qtype]
mask = raw == qnull(qtype)
dtype = PY_TYPE[qtype]
array = raw.astype(dtype) if dtype != raw.dtype else raw
array = conversion(array) if conversion else array
null = _NUMPY_NULL[qtype]
array = numpy.where(mask, null, array)
return array
def array_to_raw_qtemporal(array, qtype):
'''
Converts `numpy.array` containing ``datetime64``/``timedelta64`` to raw
q representation.
Examples:
>>> na_dt = numpy.arange('1999-01-01', '2005-12-31', dtype='datetime64[D]')
>>> print(array_to_raw_qtemporal(na_dt, qtype = QDATE_LIST))
[-365 -364 -363 ..., 2188 2189 2190]
>>> array_to_raw_qtemporal(numpy.arange(-20, 30, dtype='int32'), qtype = QDATE_LIST)
Traceback (most recent call last):
...
ValueError: array.dtype is expected to be of type: datetime64 or timedelta64. Was: int32
:Parameters:
- `array` (`numpy.array`) - numpy datetime/timedelta array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with raw values
:raises: `ValueError`
'''
if not isinstance(array, numpy.ndarray):
raise ValueError('array parameter is expected to be of type: numpy.ndarray. Was: %s' % type(array))
if not array.dtype.type in (numpy.datetime64, numpy.timedelta64):
raise ValueError('array.dtype is expected to be of type: datetime64 or timedelta64. Was: %s' % array.dtype)
qtype = -abs(qtype)
conversion = _TO_RAW_LIST[qtype]
raw = array.view(numpy.int64).view(numpy.ndarray)
mask = raw == numpy.int64(-2 ** 63)
raw = conversion(raw) if conversion else raw
null = qnull(qtype)
raw = numpy.where(mask, null, raw)
return raw
def _from_qmonth(raw):
if raw == _QMONTH_NULL:
return _NUMPY_NULL[QMONTH]
else:
return _EPOCH_QMONTH + numpy.timedelta64(int(raw), 'M')
def _to_qmonth(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QMONTH).astype(int) if not dt == _NUMPY_NULL[QMONTH] else _QMONTH_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qdate(raw):
if raw == _QDATE_NULL:
return _NUMPY_NULL[QDATE]
else:
return _EPOCH_QDATE + numpy.timedelta64(int(raw), 'D')
def _to_qdate(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QDATE).astype(int) if not dt == _NUMPY_NULL[QDATE] else _QDATE_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qdatetime(raw):
if numpy.isnan(raw) or raw == _QDATETIME_NULL:
return _NUMPY_NULL[QDATETIME]
else:
return _EPOCH_QDATETIME + numpy.timedelta64(long(_MILLIS_PER_DAY * raw), 'ms')
def _to_qdatetime(dt):
t_dt = type(dt)
if t_dt == numpy.float64:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QDATETIME).astype(float) / _MILLIS_PER_DAY if not dt == _NUMPY_NULL[QDATETIME] else _QDATETIME_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qminute(raw):
if raw == _QMINUTE_NULL:
return _NUMPY_NULL[QMINUTE]
else:
return numpy.timedelta64(int(raw), 'm')
def _to_qminute(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not dt == _NUMPY_NULL[QMINUTE] else _QMINUTE_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qsecond(raw):
if raw == _QSECOND_NULL:
return _NUMPY_NULL[QSECOND]
else:
return numpy.timedelta64(int(raw), 's')
def _to_qsecond(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not dt == _NUMPY_NULL[QSECOND] else _QSECOND_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtime(raw):
if raw == _QTIME_NULL:
return _NUMPY_NULL[QTIME]
else:
return numpy.timedelta64(int(raw), 'ms')
def _to_qtime(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not dt == _NUMPY_NULL[QTIME] else _QTIME_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtimestamp(raw):
if raw == _QTIMESTAMP_NULL:
return _NUMPY_NULL[QTIMESTAMP]
else:
return _EPOCH_TIMESTAMP + numpy.timedelta64(long(raw), 'ns')
def _to_qtimestamp(dt):
t_dt = type(dt)
if t_dt == numpy.int64:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_TIMESTAMP).astype(longlong) if not dt == _NUMPY_NULL[QTIMESTAMP] else _QTIMESTAMP_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtimespan(raw):
if raw == _QTIMESPAN_NULL:
return _NUMPY_NULL[QTIMESPAN]
else:
return numpy.timedelta64(long(raw), 'ns')
def _to_qtimespan(dt):
t_dt = type(dt)
if t_dt == numpy.int64:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(longlong) if not dt == _NUMPY_NULL[QTIMESPAN] else _QTIMESTAMP_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
_FROM_Q = {
QMONTH: _from_qmonth,
QDATE: _from_qdate,
QDATETIME: _from_qdatetime,
QMINUTE: _from_qminute,
QSECOND: _from_qsecond,
QTIME: _from_qtime,
QTIMESTAMP: _from_qtimestamp,
QTIMESPAN: _from_qtimespan,
}
_TO_Q = {
QMONTH: _to_qmonth,
QDATE: _to_qdate,
QDATETIME: _to_qdatetime,
QMINUTE: _to_qminute,
QSECOND: _to_qsecond,
QTIME: _to_qtime,
QTIMESTAMP: _to_qtimestamp,
QTIMESPAN: _to_qtimespan,
}
_TO_RAW_LIST = {
QMONTH: lambda a: (a - 360).astype(numpy.int32),
QDATE: lambda a: (a - 10957).astype(numpy.int32),
QDATETIME: lambda a: ((a - _QEPOCH_MS) / _MILLIS_PER_DAY_FLOAT).astype(numpy.float64),
QMINUTE: lambda a: a.astype(numpy.int32),
QSECOND: lambda a: a.astype(numpy.int32),
QTIME: lambda a: a.astype(numpy.int32),
QTIMESTAMP: lambda a: a - _EPOCH_QTIMESTAMP_NS,
QTIMESPAN: None,
}
_FROM_RAW_LIST = {
QMONTH: lambda a: numpy.array((a + 360), dtype = 'datetime64[M]'),
QDATE: lambda a: numpy.array((a + 10957), dtype = 'datetime64[D]'),
QDATETIME: lambda a: numpy.array((a * _MILLIS_PER_DAY + _QEPOCH_MS), dtype = 'datetime64[ms]'),
QMINUTE: lambda a: numpy.array(a, dtype = 'timedelta64[m]'),
QSECOND: lambda a: numpy.array(a, dtype = 'timedelta64[s]'),
QTIME: lambda a: numpy.array(a, dtype = 'timedelta64[ms]'),
QTIMESTAMP: lambda a: numpy.array((a + _EPOCH_QTIMESTAMP_NS), dtype = 'datetime64[ns]'),
QTIMESPAN: lambda a: numpy.array(a, dtype = 'timedelta64[ns]'),
}
_NUMPY_NULL = {
QMONTH: numpy.datetime64('NaT', 'M'),
QDATE: numpy.datetime64('NaT', 'D'),
QDATETIME: numpy.datetime64('NaT', 'ms'),
QMINUTE: numpy.timedelta64('NaT', 'm'),
QSECOND: numpy.timedelta64('NaT', 's'),
QTIME: numpy.timedelta64('NaT', 'ms'),
QTIMESTAMP: numpy.datetime64('NaT', 'ns'),
QTIMESPAN: numpy.timedelta64('NaT', 'ns'),
}
|
exxeleron/qPython | qpython/qtemporal.py | array_from_raw_qtemporal | python | def array_from_raw_qtemporal(raw, qtype):
'''
Converts `numpy.array` containing raw q representation to ``datetime64``/``timedelta64``
array.
Examples:
>>> raw = numpy.array([366, 121, qnull(QDATE)])
>>> print(array_from_raw_qtemporal(raw, qtype = QDATE))
['2001-01-01' '2000-05-01' 'NaT']
:Parameters:
- `raw` (`numpy.array`) - numpy raw array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with ``datetime64``/``timedelta64``
:raises: `ValueError`
'''
if not isinstance(raw, numpy.ndarray):
raise ValueError('raw parameter is expected to be of type: numpy.ndarray. Was: %s' % type(raw))
qtype = -abs(qtype)
conversion = _FROM_RAW_LIST[qtype]
mask = raw == qnull(qtype)
dtype = PY_TYPE[qtype]
array = raw.astype(dtype) if dtype != raw.dtype else raw
array = conversion(array) if conversion else array
null = _NUMPY_NULL[qtype]
array = numpy.where(mask, null, array)
return array | Converts `numpy.array` containing raw q representation to ``datetime64``/``timedelta64``
array.
Examples:
>>> raw = numpy.array([366, 121, qnull(QDATE)])
>>> print(array_from_raw_qtemporal(raw, qtype = QDATE))
['2001-01-01' '2000-05-01' 'NaT']
:Parameters:
- `raw` (`numpy.array`) - numpy raw array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with ``datetime64``/``timedelta64``
:raises: `ValueError` | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qtemporal.py#L143-L176 | [
"def qnull(qtype):\n '''Retrieve null value for requested q type.\n\n :Parameters:\n - `qtype` (`integer`) - qtype indicator\n\n :returns: null value for specified q type\n '''\n return QNULLMAP[qtype][1]\n"
] | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qpython import MetaData
from qpython.qtype import * # @UnusedWildImport
from numpy import longlong
# The q epoch is 2000.01.01, which lies 10957 days after the Unix epoch.
_MILLIS_PER_DAY = 24 * 60 * 60 * 1000
_MILLIS_PER_DAY_FLOAT = float(_MILLIS_PER_DAY)
# milliseconds between the Unix epoch and the q epoch
_QEPOCH_MS = long(10957 * _MILLIS_PER_DAY)
# nanoseconds between the Unix epoch and the q epoch
_EPOCH_QTIMESTAMP_NS = _QEPOCH_MS * 1000000

# q epoch expressed in the numpy resolution used by each temporal qtype
_EPOCH_QMONTH = numpy.datetime64('2000-01', 'M')
_EPOCH_QDATE = numpy.datetime64('2000-01-01', 'D')
_EPOCH_QDATETIME = numpy.datetime64(_QEPOCH_MS, 'ms')
_EPOCH_TIMESTAMP = numpy.datetime64(_EPOCH_QTIMESTAMP_NS, 'ns')

# cached q null markers, one per temporal qtype (see qpython.qtype.qnull)
_QMONTH_NULL = qnull(QMONTH)
_QDATE_NULL = qnull(QDATE)
_QDATETIME_NULL = qnull(QDATETIME)
_QMINUTE_NULL = qnull(QMINUTE)
_QSECOND_NULL = qnull(QSECOND)
_QTIME_NULL = qnull(QTIME)
_QTIMESTAMP_NULL = qnull(QTIMESTAMP)
_QTIMESPAN_NULL = qnull(QTIMESPAN)
class QTemporal(object):
    '''
    Represents a q temporal value.

    A :class:`.QTemporal` wraps a single `numpy.datetime64` or
    `numpy.timedelta64` value and carries meta-information (in particular the
    qtype indicator) alongside it.

    :Parameters:
     - `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
    '''

    def __init__(self, dt):
        self._datetime = dt

    def _meta_init(self, **meta):
        # attach the qtype/meta information supplied by the qtemporal() factory
        self.meta = MetaData(**meta)

    @property
    def raw(self):
        '''Return wrapped datetime object.

        :returns: `numpy.datetime64` or `numpy.timedelta64` - wrapped datetime
        '''
        return self._datetime

    def __str__(self):
        return '{0} [{1}]'.format(self._datetime, self.meta)

    def __eq__(self, other):
        # only instances with matching qtype and wrapped value compare equal
        if not isinstance(other, self.__class__):
            return False
        if self.meta.qtype != other.meta.qtype:
            return False
        return self._datetime == other._datetime

    def __ne__(self, other):
        return not self.__eq__(other)
def qtemporal(dt, **meta):
    '''Wraps a `numpy.datetime64` or `numpy.timedelta64` in a
    :class:`.QTemporal` instance enriched with the given meta data.

    Examples:

    >>> qtemporal(numpy.datetime64('2001-01-01', 'D'), qtype=QDATE)
    2001-01-01 [metadata(qtype=-14)]
    >>> qtemporal(numpy.timedelta64(43499123, 'ms'), qtype=QTIME)
    43499123 milliseconds [metadata(qtype=-19)]
    >>> qtemporal(qnull(QDATETIME), qtype=QDATETIME)
    nan [metadata(qtype=-15)]

    :Parameters:
     - `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped

    :Kwargs:
     - `qtype` (`integer`) - qtype indicator

    :returns: `QTemporal` - wrapped datetime
    '''
    wrapped = QTemporal(dt)
    wrapped._meta_init(**meta)
    return wrapped
def from_raw_qtemporal(raw, qtype):
    '''
    Converts a raw numeric value to a `numpy.datetime64` or
    `numpy.timedelta64` instance.

    The applied conversion is selected via the `qtype` parameter.

    :Parameters:
     - `raw` (`integer`, `float`) - raw representation to be converted
     - `qtype` (`integer`) - qtype indicator

    :returns: `numpy.datetime64` or `numpy.timedelta64` - converted datetime
    '''
    converter = _FROM_Q[qtype]
    return converter(raw)
def to_raw_qtemporal(dt, qtype):
    '''
    Converts a datetime/timedelta instance to its raw numeric value.

    The applied conversion is selected via the `qtype` parameter.

    :Parameters:
     - `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime/timedelta
       object to be converted
     - `qtype` (`integer`) - qtype indicator

    :returns: `integer`, `float` - raw numeric value
    '''
    converter = _TO_Q[qtype]
    return converter(dt)
def array_from_raw_qtemporal(raw, qtype):
    '''
    Converts `numpy.array` containing raw q representation to
    ``datetime64``/``timedelta64`` array.

    Examples:

    >>> raw = numpy.array([366, 121, qnull(QDATE)])
    >>> print(array_from_raw_qtemporal(raw, qtype = QDATE))
    ['2001-01-01' '2000-05-01' 'NaT']

    :Parameters:
     - `raw` (`numpy.array`) - numpy raw array to be converted
     - `qtype` (`integer`) - qtype indicator

    :returns: `numpy.array` - numpy array with ``datetime64``/``timedelta64``

    :raises: `ValueError`
    '''
    if not isinstance(raw, numpy.ndarray):
        raise ValueError('raw parameter is expected to be of type: numpy.ndarray. Was: %s' % type(raw))

    # normalize to the scalar (negative) qtype so both atom and list
    # indicators are accepted
    qtype = -abs(qtype)
    conversion = _FROM_RAW_LIST[qtype]
    # record which entries hold the q null value BEFORE conversion, so they
    # can be replaced with NaT afterwards
    mask = raw == qnull(qtype)
    dtype = PY_TYPE[qtype]
    array = raw.astype(dtype) if dtype != raw.dtype else raw
    # conversion is None for qtypes whose raw values need no epoch shift
    array = conversion(array) if conversion else array
    null = _NUMPY_NULL[qtype]
    array = numpy.where(mask, null, array)
    return array
def array_to_raw_qtemporal(array, qtype):
    '''
    Converts a `numpy.array` of ``datetime64``/``timedelta64`` values to its
    raw q representation.

    Examples:

    >>> na_dt = numpy.arange('1999-01-01', '2005-12-31', dtype='datetime64[D]')
    >>> print(array_to_raw_qtemporal(na_dt, qtype = QDATE_LIST))
    [-365 -364 -363 ..., 2188 2189 2190]

    >>> array_to_raw_qtemporal(numpy.arange(-20, 30, dtype='int32'), qtype = QDATE_LIST)
    Traceback (most recent call last):
     ...
    ValueError: array.dtype is expected to be of type: datetime64 or timedelta64. Was: int32

    :Parameters:
     - `array` (`numpy.array`) - numpy datetime/timedelta array to be converted
     - `qtype` (`integer`) - qtype indicator

    :returns: `numpy.array` - numpy array with raw values

    :raises: `ValueError`
    '''
    if not isinstance(array, numpy.ndarray):
        raise ValueError('array parameter is expected to be of type: numpy.ndarray. Was: %s' % type(array))
    if not array.dtype.type in (numpy.datetime64, numpy.timedelta64):
        raise ValueError('array.dtype is expected to be of type: datetime64 or timedelta64. Was: %s' % array.dtype)

    # normalize to the scalar (negative) qtype so both atom and list
    # indicators are accepted
    qtype = -abs(qtype)
    to_raw = _TO_RAW_LIST[qtype]

    # reinterpret the temporal array as its underlying int64 storage
    raw = array.view(numpy.int64).view(numpy.ndarray)
    # NaT is stored as the minimal int64 value; remember those positions
    nat_mask = raw == numpy.int64(-2 ** 63)

    # to_raw is None for qtypes whose int64 view already is the raw form
    if to_raw:
        raw = to_raw(raw)

    # restore q nulls where the input held NaT
    return numpy.where(nat_mask, qnull(qtype), raw)
def _from_qmonth(raw):
    # Raw q month (months since 2000.01) -> numpy.datetime64[M], or NaT on null.
    if raw == _QMONTH_NULL:
        return _NUMPY_NULL[QMONTH]
    else:
        return _EPOCH_QMONTH + numpy.timedelta64(int(raw), 'M')


def _to_qmonth(dt):
    # numpy.datetime64[M] -> raw q month count; int32 inputs pass through
    # unchanged (already raw).
    t_dt = type(dt)
    if t_dt == numpy.int32:
        return dt
    elif t_dt == numpy.datetime64:
        return (dt - _EPOCH_QMONTH).astype(int) if not dt == _NUMPY_NULL[QMONTH] else _QMONTH_NULL
    else:
        raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))


def _from_qdate(raw):
    # Raw q date (days since 2000.01.01) -> numpy.datetime64[D], or NaT on null.
    if raw == _QDATE_NULL:
        return _NUMPY_NULL[QDATE]
    else:
        return _EPOCH_QDATE + numpy.timedelta64(int(raw), 'D')


def _to_qdate(dt):
    # numpy.datetime64[D] -> raw q day count; int32 inputs pass through.
    t_dt = type(dt)
    if t_dt == numpy.int32:
        return dt
    elif t_dt == numpy.datetime64:
        return (dt - _EPOCH_QDATE).astype(int) if not dt == _NUMPY_NULL[QDATE] else _QDATE_NULL
    else:
        raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))


def _from_qdatetime(raw):
    # Raw q datetime (fractional days since 2000.01.01) -> numpy.datetime64[ms];
    # both NaN and the q null marker map to NaT.
    if numpy.isnan(raw) or raw == _QDATETIME_NULL:
        return _NUMPY_NULL[QDATETIME]
    else:
        return _EPOCH_QDATETIME + numpy.timedelta64(long(_MILLIS_PER_DAY * raw), 'ms')


def _to_qdatetime(dt):
    # numpy.datetime64[ms] -> raw q datetime (fractional days); float64 inputs
    # pass through unchanged (already raw).
    t_dt = type(dt)
    if t_dt == numpy.float64:
        return dt
    elif t_dt == numpy.datetime64:
        return (dt - _EPOCH_QDATETIME).astype(float) / _MILLIS_PER_DAY if not dt == _NUMPY_NULL[QDATETIME] else _QDATETIME_NULL
    else:
        raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qminute(raw):
    # Raw q minute count -> numpy.timedelta64[m], or NaT on null.
    if raw == _QMINUTE_NULL:
        return _NUMPY_NULL[QMINUTE]
    else:
        return numpy.timedelta64(int(raw), 'm')


def _to_qminute(dt):
    # numpy.timedelta64[m] -> raw q minute count; int32 inputs pass through
    # unchanged (already raw).
    t_dt = type(dt)
    if t_dt == numpy.int32:
        return dt
    elif t_dt == numpy.timedelta64:
        return dt.astype(int) if not dt == _NUMPY_NULL[QMINUTE] else _QMINUTE_NULL
    else:
        raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))


def _from_qsecond(raw):
    # Raw q second count -> numpy.timedelta64[s], or NaT on null.
    if raw == _QSECOND_NULL:
        return _NUMPY_NULL[QSECOND]
    else:
        return numpy.timedelta64(int(raw), 's')


def _to_qsecond(dt):
    # numpy.timedelta64[s] -> raw q second count; int32 inputs pass through.
    t_dt = type(dt)
    if t_dt == numpy.int32:
        return dt
    elif t_dt == numpy.timedelta64:
        return dt.astype(int) if not dt == _NUMPY_NULL[QSECOND] else _QSECOND_NULL
    else:
        raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))


def _from_qtime(raw):
    # Raw q time (millisecond count) -> numpy.timedelta64[ms], or NaT on null.
    if raw == _QTIME_NULL:
        return _NUMPY_NULL[QTIME]
    else:
        return numpy.timedelta64(int(raw), 'ms')


def _to_qtime(dt):
    # numpy.timedelta64[ms] -> raw q millisecond count; int32 inputs pass through.
    t_dt = type(dt)
    if t_dt == numpy.int32:
        return dt
    elif t_dt == numpy.timedelta64:
        return dt.astype(int) if not dt == _NUMPY_NULL[QTIME] else _QTIME_NULL
    else:
        raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtimestamp(raw):
    # Raw q timestamp (nanoseconds since 2000.01.01) -> numpy.datetime64[ns],
    # or NaT on null.
    if raw == _QTIMESTAMP_NULL:
        return _NUMPY_NULL[QTIMESTAMP]
    else:
        return _EPOCH_TIMESTAMP + numpy.timedelta64(long(raw), 'ns')


def _to_qtimestamp(dt):
    # numpy.datetime64[ns] -> raw q nanosecond count; int64 inputs pass
    # through unchanged (already raw).
    t_dt = type(dt)
    if t_dt == numpy.int64:
        return dt
    elif t_dt == numpy.datetime64:
        return (dt - _EPOCH_TIMESTAMP).astype(longlong) if not dt == _NUMPY_NULL[QTIMESTAMP] else _QTIMESTAMP_NULL
    else:
        raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtimespan(raw):
    # Raw q timespan (nanosecond count) -> numpy.timedelta64[ns], or NaT on null.
    if raw == _QTIMESPAN_NULL:
        return _NUMPY_NULL[QTIMESPAN]
    else:
        return numpy.timedelta64(long(raw), 'ns')


def _to_qtimespan(dt):
    # numpy.timedelta64[ns] -> raw q nanosecond count; int64 inputs pass
    # through unchanged (already raw).
    t_dt = type(dt)
    if t_dt == numpy.int64:
        return dt
    elif t_dt == numpy.timedelta64:
        # Fix: the null fallback previously returned _QTIMESTAMP_NULL, an
        # apparent copy-paste from _to_qtimestamp; use the timespan null to
        # stay consistent with _from_qtimespan.
        return dt.astype(longlong) if not dt == _NUMPY_NULL[QTIMESPAN] else _QTIMESPAN_NULL
    else:
        raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
# qtype -> scalar converter: raw numeric value to numpy temporal
_FROM_Q = {
    QMONTH: _from_qmonth,
    QDATE: _from_qdate,
    QDATETIME: _from_qdatetime,
    QMINUTE: _from_qminute,
    QSECOND: _from_qsecond,
    QTIME: _from_qtime,
    QTIMESTAMP: _from_qtimestamp,
    QTIMESPAN: _from_qtimespan,
}

# qtype -> scalar converter: numpy temporal to raw numeric value
_TO_Q = {
    QMONTH: _to_qmonth,
    QDATE: _to_qdate,
    QDATETIME: _to_qdatetime,
    QMINUTE: _to_qminute,
    QSECOND: _to_qsecond,
    QTIME: _to_qtime,
    QTIMESTAMP: _to_qtimestamp,
    QTIMESPAN: _to_qtimespan,
}

# qtype -> vectorized converter: int64 view of a temporal array to raw q
# values (None means the int64 view already is the raw representation)
_TO_RAW_LIST = {
    QMONTH: lambda a: (a - 360).astype(numpy.int32),
    QDATE: lambda a: (a - 10957).astype(numpy.int32),
    QDATETIME: lambda a: ((a - _QEPOCH_MS) / _MILLIS_PER_DAY_FLOAT).astype(numpy.float64),
    QMINUTE: lambda a: a.astype(numpy.int32),
    QSECOND: lambda a: a.astype(numpy.int32),
    QTIME: lambda a: a.astype(numpy.int32),
    QTIMESTAMP: lambda a: a - _EPOCH_QTIMESTAMP_NS,
    QTIMESPAN: None,
}

# qtype -> vectorized converter: raw q values to numpy temporal array
_FROM_RAW_LIST = {
    QMONTH: lambda a: numpy.array((a + 360), dtype = 'datetime64[M]'),
    QDATE: lambda a: numpy.array((a + 10957), dtype = 'datetime64[D]'),
    QDATETIME: lambda a: numpy.array((a * _MILLIS_PER_DAY + _QEPOCH_MS), dtype = 'datetime64[ms]'),
    QMINUTE: lambda a: numpy.array(a, dtype = 'timedelta64[m]'),
    QSECOND: lambda a: numpy.array(a, dtype = 'timedelta64[s]'),
    QTIME: lambda a: numpy.array(a, dtype = 'timedelta64[ms]'),
    QTIMESTAMP: lambda a: numpy.array((a + _EPOCH_QTIMESTAMP_NS), dtype = 'datetime64[ns]'),
    QTIMESPAN: lambda a: numpy.array(a, dtype = 'timedelta64[ns]'),
}

# qtype -> numpy NaT null of the matching resolution
_NUMPY_NULL = {
    QMONTH: numpy.datetime64('NaT', 'M'),
    QDATE: numpy.datetime64('NaT', 'D'),
    QDATETIME: numpy.datetime64('NaT', 'ms'),
    QMINUTE: numpy.timedelta64('NaT', 'm'),
    QSECOND: numpy.timedelta64('NaT', 's'),
    QTIME: numpy.timedelta64('NaT', 'ms'),
    QTIMESTAMP: numpy.datetime64('NaT', 'ns'),
    QTIMESPAN: numpy.timedelta64('NaT', 'ns'),
}
|
exxeleron/qPython | qpython/qtemporal.py | array_to_raw_qtemporal | python | def array_to_raw_qtemporal(array, qtype):
'''
Converts `numpy.array` containing ``datetime64``/``timedelta64`` to raw
q representation.
Examples:
>>> na_dt = numpy.arange('1999-01-01', '2005-12-31', dtype='datetime64[D]')
>>> print(array_to_raw_qtemporal(na_dt, qtype = QDATE_LIST))
[-365 -364 -363 ..., 2188 2189 2190]
>>> array_to_raw_qtemporal(numpy.arange(-20, 30, dtype='int32'), qtype = QDATE_LIST)
Traceback (most recent call last):
...
ValueError: array.dtype is expected to be of type: datetime64 or timedelta64. Was: int32
:Parameters:
- `array` (`numpy.array`) - numpy datetime/timedelta array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with raw values
:raises: `ValueError`
'''
if not isinstance(array, numpy.ndarray):
raise ValueError('array parameter is expected to be of type: numpy.ndarray. Was: %s' % type(array))
if not array.dtype.type in (numpy.datetime64, numpy.timedelta64):
raise ValueError('array.dtype is expected to be of type: datetime64 or timedelta64. Was: %s' % array.dtype)
qtype = -abs(qtype)
conversion = _TO_RAW_LIST[qtype]
raw = array.view(numpy.int64).view(numpy.ndarray)
mask = raw == numpy.int64(-2 ** 63)
raw = conversion(raw) if conversion else raw
null = qnull(qtype)
raw = numpy.where(mask, null, raw)
return raw | Converts `numpy.array` containing ``datetime64``/``timedelta64`` to raw
q representation.
Examples:
>>> na_dt = numpy.arange('1999-01-01', '2005-12-31', dtype='datetime64[D]')
>>> print(array_to_raw_qtemporal(na_dt, qtype = QDATE_LIST))
[-365 -364 -363 ..., 2188 2189 2190]
>>> array_to_raw_qtemporal(numpy.arange(-20, 30, dtype='int32'), qtype = QDATE_LIST)
Traceback (most recent call last):
...
ValueError: array.dtype is expected to be of type: datetime64 or timedelta64. Was: int32
:Parameters:
- `array` (`numpy.array`) - numpy datetime/timedelta array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with raw values
:raises: `ValueError` | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qtemporal.py#L180-L217 | [
"def qnull(qtype):\n '''Retrieve null value for requested q type.\n\n :Parameters:\n - `qtype` (`integer`) - qtype indicator\n\n :returns: null value for specified q type\n '''\n return QNULLMAP[qtype][1]\n"
] | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qpython import MetaData
from qpython.qtype import * # @UnusedWildImport
from numpy import longlong
_MILLIS_PER_DAY = 24 * 60 * 60 * 1000
_MILLIS_PER_DAY_FLOAT = float(_MILLIS_PER_DAY)
_QEPOCH_MS = long(10957 * _MILLIS_PER_DAY)
_EPOCH_QTIMESTAMP_NS = _QEPOCH_MS * 1000000
_EPOCH_QMONTH = numpy.datetime64('2000-01', 'M')
_EPOCH_QDATE = numpy.datetime64('2000-01-01', 'D')
_EPOCH_QDATETIME = numpy.datetime64(_QEPOCH_MS, 'ms')
_EPOCH_TIMESTAMP = numpy.datetime64(_EPOCH_QTIMESTAMP_NS, 'ns')
_QMONTH_NULL = qnull(QMONTH)
_QDATE_NULL = qnull(QDATE)
_QDATETIME_NULL = qnull(QDATETIME)
_QMINUTE_NULL = qnull(QMINUTE)
_QSECOND_NULL = qnull(QSECOND)
_QTIME_NULL = qnull(QTIME)
_QTIMESTAMP_NULL = qnull(QTIMESTAMP)
_QTIMESPAN_NULL = qnull(QTIMESPAN)
class QTemporal(object):
'''
Represents a q temporal value.
The :class:`.QTemporal` wraps `numpy.datetime64` or `numpy.timedelta64`
along with meta-information like qtype indicator.
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
'''
def __init__(self, dt):
self._datetime = dt
def _meta_init(self, **meta):
self.meta = MetaData(**meta)
@property
def raw(self):
'''Return wrapped datetime object.
:returns: `numpy.datetime64` or `numpy.timedelta64` - wrapped datetime
'''
return self._datetime
def __str__(self):
return '%s [%s]' % (self._datetime, self.meta)
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.meta.qtype == other.meta.qtype
and self._datetime == other._datetime)
def __ne__(self, other):
return not self.__eq__(other)
def qtemporal(dt, **meta):
'''Converts a `numpy.datetime64` or `numpy.timedelta64` to
:class:`.QTemporal` and enriches object instance with given meta data.
Examples:
>>> qtemporal(numpy.datetime64('2001-01-01', 'D'), qtype=QDATE)
2001-01-01 [metadata(qtype=-14)]
>>> qtemporal(numpy.timedelta64(43499123, 'ms'), qtype=QTIME)
43499123 milliseconds [metadata(qtype=-19)]
>>> qtemporal(qnull(QDATETIME), qtype=QDATETIME)
nan [metadata(qtype=-15)]
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime to be wrapped
:Kwargs:
- `qtype` (`integer`) - qtype indicator
:returns: `QTemporal` - wrapped datetime
'''
result = QTemporal(dt)
result._meta_init(**meta)
return result
def from_raw_qtemporal(raw, qtype):
'''
Converts raw numeric value to `numpy.datetime64` or `numpy.timedelta64`
instance.
Actual conversion applied to raw numeric value depends on `qtype` parameter.
:Parameters:
- `raw` (`integer`, `float`) - raw representation to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.datetime64` or `numpy.timedelta64` - converted datetime
'''
return _FROM_Q[qtype](raw)
def to_raw_qtemporal(dt, qtype):
'''
Converts datetime/timedelta instance to raw numeric value.
Actual conversion applied to datetime/timedelta instance depends on `qtype`
parameter.
:Parameters:
- `dt` (`numpy.datetime64` or `numpy.timedelta64`) - datetime/timedelta
object to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `integer`, `float` - raw numeric value
'''
return _TO_Q[qtype](dt)
def array_from_raw_qtemporal(raw, qtype):
'''
Converts `numpy.array` containing raw q representation to ``datetime64``/``timedelta64``
array.
Examples:
>>> raw = numpy.array([366, 121, qnull(QDATE)])
>>> print(array_from_raw_qtemporal(raw, qtype = QDATE))
['2001-01-01' '2000-05-01' 'NaT']
:Parameters:
- `raw` (`numpy.array`) - numpy raw array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with ``datetime64``/``timedelta64``
:raises: `ValueError`
'''
if not isinstance(raw, numpy.ndarray):
raise ValueError('raw parameter is expected to be of type: numpy.ndarray. Was: %s' % type(raw))
qtype = -abs(qtype)
conversion = _FROM_RAW_LIST[qtype]
mask = raw == qnull(qtype)
dtype = PY_TYPE[qtype]
array = raw.astype(dtype) if dtype != raw.dtype else raw
array = conversion(array) if conversion else array
null = _NUMPY_NULL[qtype]
array = numpy.where(mask, null, array)
return array
def array_to_raw_qtemporal(array, qtype):
'''
Converts `numpy.array` containing ``datetime64``/``timedelta64`` to raw
q representation.
Examples:
>>> na_dt = numpy.arange('1999-01-01', '2005-12-31', dtype='datetime64[D]')
>>> print(array_to_raw_qtemporal(na_dt, qtype = QDATE_LIST))
[-365 -364 -363 ..., 2188 2189 2190]
>>> array_to_raw_qtemporal(numpy.arange(-20, 30, dtype='int32'), qtype = QDATE_LIST)
Traceback (most recent call last):
...
ValueError: array.dtype is expected to be of type: datetime64 or timedelta64. Was: int32
:Parameters:
- `array` (`numpy.array`) - numpy datetime/timedelta array to be converted
- `qtype` (`integer`) - qtype indicator
:returns: `numpy.array` - numpy array with raw values
:raises: `ValueError`
'''
if not isinstance(array, numpy.ndarray):
raise ValueError('array parameter is expected to be of type: numpy.ndarray. Was: %s' % type(array))
if not array.dtype.type in (numpy.datetime64, numpy.timedelta64):
raise ValueError('array.dtype is expected to be of type: datetime64 or timedelta64. Was: %s' % array.dtype)
qtype = -abs(qtype)
conversion = _TO_RAW_LIST[qtype]
raw = array.view(numpy.int64).view(numpy.ndarray)
mask = raw == numpy.int64(-2 ** 63)
raw = conversion(raw) if conversion else raw
null = qnull(qtype)
raw = numpy.where(mask, null, raw)
return raw
def _from_qmonth(raw):
if raw == _QMONTH_NULL:
return _NUMPY_NULL[QMONTH]
else:
return _EPOCH_QMONTH + numpy.timedelta64(int(raw), 'M')
def _to_qmonth(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QMONTH).astype(int) if not dt == _NUMPY_NULL[QMONTH] else _QMONTH_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qdate(raw):
if raw == _QDATE_NULL:
return _NUMPY_NULL[QDATE]
else:
return _EPOCH_QDATE + numpy.timedelta64(int(raw), 'D')
def _to_qdate(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QDATE).astype(int) if not dt == _NUMPY_NULL[QDATE] else _QDATE_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qdatetime(raw):
if numpy.isnan(raw) or raw == _QDATETIME_NULL:
return _NUMPY_NULL[QDATETIME]
else:
return _EPOCH_QDATETIME + numpy.timedelta64(long(_MILLIS_PER_DAY * raw), 'ms')
def _to_qdatetime(dt):
t_dt = type(dt)
if t_dt == numpy.float64:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_QDATETIME).astype(float) / _MILLIS_PER_DAY if not dt == _NUMPY_NULL[QDATETIME] else _QDATETIME_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qminute(raw):
if raw == _QMINUTE_NULL:
return _NUMPY_NULL[QMINUTE]
else:
return numpy.timedelta64(int(raw), 'm')
def _to_qminute(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not dt == _NUMPY_NULL[QMINUTE] else _QMINUTE_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qsecond(raw):
if raw == _QSECOND_NULL:
return _NUMPY_NULL[QSECOND]
else:
return numpy.timedelta64(int(raw), 's')
def _to_qsecond(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not dt == _NUMPY_NULL[QSECOND] else _QSECOND_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtime(raw):
if raw == _QTIME_NULL:
return _NUMPY_NULL[QTIME]
else:
return numpy.timedelta64(int(raw), 'ms')
def _to_qtime(dt):
t_dt = type(dt)
if t_dt == numpy.int32:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(int) if not dt == _NUMPY_NULL[QTIME] else _QTIME_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtimestamp(raw):
if raw == _QTIMESTAMP_NULL:
return _NUMPY_NULL[QTIMESTAMP]
else:
return _EPOCH_TIMESTAMP + numpy.timedelta64(long(raw), 'ns')
def _to_qtimestamp(dt):
t_dt = type(dt)
if t_dt == numpy.int64:
return dt
elif t_dt == numpy.datetime64:
return (dt - _EPOCH_TIMESTAMP).astype(longlong) if not dt == _NUMPY_NULL[QTIMESTAMP] else _QTIMESTAMP_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
def _from_qtimespan(raw):
if raw == _QTIMESPAN_NULL:
return _NUMPY_NULL[QTIMESPAN]
else:
return numpy.timedelta64(long(raw), 'ns')
def _to_qtimespan(dt):
t_dt = type(dt)
if t_dt == numpy.int64:
return dt
elif t_dt == numpy.timedelta64:
return dt.astype(longlong) if not dt == _NUMPY_NULL[QTIMESPAN] else _QTIMESTAMP_NULL
else:
raise ValueError('Cannot convert %s of type %s to q value.' % (dt, type(dt)))
_FROM_Q = {
QMONTH: _from_qmonth,
QDATE: _from_qdate,
QDATETIME: _from_qdatetime,
QMINUTE: _from_qminute,
QSECOND: _from_qsecond,
QTIME: _from_qtime,
QTIMESTAMP: _from_qtimestamp,
QTIMESPAN: _from_qtimespan,
}
_TO_Q = {
QMONTH: _to_qmonth,
QDATE: _to_qdate,
QDATETIME: _to_qdatetime,
QMINUTE: _to_qminute,
QSECOND: _to_qsecond,
QTIME: _to_qtime,
QTIMESTAMP: _to_qtimestamp,
QTIMESPAN: _to_qtimespan,
}
_TO_RAW_LIST = {
QMONTH: lambda a: (a - 360).astype(numpy.int32),
QDATE: lambda a: (a - 10957).astype(numpy.int32),
QDATETIME: lambda a: ((a - _QEPOCH_MS) / _MILLIS_PER_DAY_FLOAT).astype(numpy.float64),
QMINUTE: lambda a: a.astype(numpy.int32),
QSECOND: lambda a: a.astype(numpy.int32),
QTIME: lambda a: a.astype(numpy.int32),
QTIMESTAMP: lambda a: a - _EPOCH_QTIMESTAMP_NS,
QTIMESPAN: None,
}
_FROM_RAW_LIST = {
QMONTH: lambda a: numpy.array((a + 360), dtype = 'datetime64[M]'),
QDATE: lambda a: numpy.array((a + 10957), dtype = 'datetime64[D]'),
QDATETIME: lambda a: numpy.array((a * _MILLIS_PER_DAY + _QEPOCH_MS), dtype = 'datetime64[ms]'),
QMINUTE: lambda a: numpy.array(a, dtype = 'timedelta64[m]'),
QSECOND: lambda a: numpy.array(a, dtype = 'timedelta64[s]'),
QTIME: lambda a: numpy.array(a, dtype = 'timedelta64[ms]'),
QTIMESTAMP: lambda a: numpy.array((a + _EPOCH_QTIMESTAMP_NS), dtype = 'datetime64[ns]'),
QTIMESPAN: lambda a: numpy.array(a, dtype = 'timedelta64[ns]'),
}
_NUMPY_NULL = {
QMONTH: numpy.datetime64('NaT', 'M'),
QDATE: numpy.datetime64('NaT', 'D'),
QDATETIME: numpy.datetime64('NaT', 'ms'),
QMINUTE: numpy.timedelta64('NaT', 'm'),
QSECOND: numpy.timedelta64('NaT', 's'),
QTIME: numpy.timedelta64('NaT', 'ms'),
QTIMESTAMP: numpy.datetime64('NaT', 'ns'),
QTIMESPAN: numpy.timedelta64('NaT', 'ns'),
}
|
exxeleron/qPython | qpython/qwriter.py | QWriter.write | python | def write(self, data, msg_type, **options):
'''Serializes and pushes single data object to a wrapped stream.
:Parameters:
- `data` - data to be serialized
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the message
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: if wraped stream is ``None`` serialized data,
otherwise ``None``
'''
self._buffer = BytesIO()
self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))
# header and placeholder for message size
self._buffer.write(('%s%s\0\0\0\0\0\0' % (ENDIANESS, chr(msg_type))).encode(self._encoding))
self._write(data)
# update message size
data_size = self._buffer.tell()
self._buffer.seek(4)
self._buffer.write(struct.pack('i', data_size))
# write data to socket
if self._stream:
self._stream.sendall(self._buffer.getvalue())
else:
return self._buffer.getvalue() | Serializes and pushes single data object to a wrapped stream.
:Parameters:
- `data` - data to be serialized
- `msg_type` (one of the constants defined in :class:`.MessageType`) -
type of the message
:Options:
- `single_char_strings` (`boolean`) - if ``True`` single char Python
strings are encoded as q strings instead of chars,
**Default**: ``False``
:returns: if wraped stream is ``None`` serialized data,
otherwise ``None`` | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qwriter.py#L64-L97 | [
"def union_dict(self, **kw):\n return dict(list(self.as_dict().items()) + list(kw.items()))\n",
"def _write(self, data):\n if data is None:\n self._write_null()\n else:\n if isinstance(data, Exception) or (type(data) == type and issubclass(data, Exception)):\n data_type = Exception\n else:\n data_type = type(data)\n\n writer = self._get_writer(data_type)\n\n if writer:\n writer(self, data)\n else:\n qtype = Q_TYPE.get(type(data), None)\n\n if qtype:\n self._write_atom(data, qtype)\n else:\n raise QWriterException('Unable to serialize type: %s' % data.__class__ if isinstance(data, object) else type(data))\n"
class QWriter(object):
    '''
    Provides serialization to q IPC protocol.

    :Parameters:
     - `stream` (`socket` or `None`) - stream for data serialization
     - `protocol_version` (`integer`) - version IPC protocol
     - `encoding` (`string`) - encoding for characters serialization

    :Attributes:
     - `_writer_map` - stores mapping between Python types and functions
       responsible for serializing into IPC representation
    '''

    _writer_map = {}
    # decorator that registers a method in _writer_map for the given types
    serialize = Mapper(_writer_map)

    def __init__(self, stream, protocol_version, encoding = 'latin-1'):
        self._stream = stream
        self._protocol_version = protocol_version
        self._encoding = encoding

    def write(self, data, msg_type, **options):
        '''Serializes and pushes single data object to a wrapped stream.

        :Parameters:
         - `data` - data to be serialized
         - `msg_type` (one of the constants defined in :class:`.MessageType`) -
           type of the message

        :Options:
         - `single_char_strings` (`boolean`) - if ``True`` single char Python
           strings are encoded as q strings instead of chars,
           **Default**: ``False``

        :returns: if wrapped stream is ``None`` serialized data,
                  otherwise ``None``
        '''
        self._buffer = BytesIO()
        self._options = MetaData(**CONVERSION_OPTIONS.union_dict(**options))

        # header and placeholder for message size
        self._buffer.write(('%s%s\0\0\0\0\0\0' % (ENDIANESS, chr(msg_type))).encode(self._encoding))

        self._write(data)

        # update message size (backpatch the 4-byte length at offset 4)
        data_size = self._buffer.tell()
        self._buffer.seek(4)
        self._buffer.write(struct.pack('i', data_size))

        # write data to socket
        if self._stream:
            self._stream.sendall(self._buffer.getvalue())
        else:
            return self._buffer.getvalue()

    def _write(self, data):
        # Central dispatch: route data to the registered writer for its type,
        # falling back to atom serialization via the Q_TYPE map.
        if data is None:
            self._write_null()
        else:
            # exception instances AND exception classes are both serialized
            # as q errors
            if isinstance(data, Exception) or (type(data) == type and issubclass(data, Exception)):
                data_type = Exception
            else:
                data_type = type(data)

            writer = self._get_writer(data_type)

            if writer:
                writer(self, data)
            else:
                qtype = Q_TYPE.get(type(data), None)

                if qtype:
                    self._write_atom(data, qtype)
                else:
                    raise QWriterException('Unable to serialize type: %s' % data.__class__ if isinstance(data, object) else type(data))

    def _get_writer(self, data_type):
        # look up the registered serializer for a Python type (None if absent)
        return self._writer_map.get(data_type, None)

    def _write_null(self):
        # q null atom: type byte plus one padding byte
        self._buffer.write(struct.pack('=bx', QNULL))

    @serialize(Exception)
    def _write_error(self, data):
        # q error: QERROR marker followed by a null-terminated symbol with the
        # exception message (or class name when no message is present)
        self._buffer.write(struct.pack('b', QERROR))
        if isinstance(data, Exception):
            msg = data.__class__.__name__
            if data.args:
                msg = data.args[0]
        else:
            msg = data.__name__

        self._buffer.write(msg.encode(self._encoding))
        self._buffer.write(b'\0')

    def _write_atom(self, data, qtype):
        # serialize a single atom: type byte, then the value packed with the
        # struct format registered for this qtype
        try:
            self._buffer.write(struct.pack('b', qtype))
            fmt = STRUCT_MAP[qtype]
            self._buffer.write(struct.pack(fmt, data))
        except KeyError:
            raise QWriterException('Unable to serialize type: %s' % data.__class__ if isinstance(data, object) else type(data))

    @serialize(tuple, list)
    def _write_generic_list(self, data):
        # general (mixed-type) list: header with length, then each element
        # serialized recursively
        self._buffer.write(struct.pack('=bxi', QGENERAL_LIST, len(data)))
        for element in data:
            self._write(element)

    @serialize(str, bytes)
    def _write_string(self, data):
        # single-character strings become q char atoms unless the
        # single_char_strings option forces q string encoding
        if not self._options.single_char_strings and len(data) == 1:
            self._write_atom(ord(data), QCHAR)
        else:
            self._buffer.write(struct.pack('=bxi', QSTRING, len(data)))
            if isinstance(data, str):
                self._buffer.write(data.encode(self._encoding))
            else:
                self._buffer.write(data)

    @serialize(numpy.string_)
    def _write_symbol(self, data):
        # q symbol: type byte plus null-terminated byte sequence
        self._buffer.write(struct.pack('=b', QSYMBOL))
        if data:
            self._buffer.write(data)
        self._buffer.write(b'\0')

    @serialize(uuid.UUID)
    def _write_guid(self, data):
        # GUIDs require IPC protocol version 3 (kdb+ v3.0) or later
        if self._protocol_version < 3:
            raise QWriterException('kdb+ protocol version violation: Guid not supported pre kdb+ v3.0')

        self._buffer.write(struct.pack('=b', QGUID))
        self._buffer.write(data.bytes)

    @serialize(QTemporal)
    def _write_temporal(self, data):
        # serialize a wrapped temporal atom using its qtype metadata
        try:
            if self._protocol_version < 1 and (data.meta.qtype == QTIMESPAN or data.meta.qtype == QTIMESTAMP):
                raise QWriterException('kdb+ protocol version violation: data type %s not supported pre kdb+ v2.6' % hex(data.meta.qtype))

            self._buffer.write(struct.pack('=b', data.meta.qtype))
            fmt = STRUCT_MAP[data.meta.qtype]
            self._buffer.write(struct.pack(fmt, to_raw_qtemporal(data.raw, data.meta.qtype)))
        except KeyError:
            raise QWriterException('Unable to serialize type: %s' % type(data))

    @serialize(numpy.datetime64, numpy.timedelta64)
    def _write_numpy_temporal(self, data):
        # serialize a bare numpy temporal atom, deriving the qtype from dtype
        try:
            qtype = TEMPORAL_PY_TYPE[str(data.dtype)]

            if self._protocol_version < 1 and (qtype == QTIMESPAN or qtype == QTIMESTAMP):
                raise QWriterException('kdb+ protocol version violation: data type %s not supported pre kdb+ v2.6' % hex(qtype))

            self._buffer.write(struct.pack('=b', qtype))
            fmt = STRUCT_MAP[qtype]
            self._buffer.write(struct.pack(fmt, to_raw_qtemporal(data, qtype)))
        except KeyError:
            raise QWriterException('Unable to serialize type: %s' % data.dtype)

    @serialize(QLambda)
    def _write_lambda(self, data):
        # q lambda: type byte, empty (null) namespace, then expression string
        self._buffer.write(struct.pack('=b', QLAMBDA))
        self._buffer.write(b'\0')
        self._write_string(data.expression)

    @serialize(QProjection)
    def _write_projection(self, data):
        # q projection: parameter count followed by each parameter
        self._buffer.write(struct.pack('=bi', QPROJECTION, len(data.parameters)))
        for parameter in data.parameters:
            self._write(parameter)

    @serialize(QDictionary, QKeyedTable)
    def _write_dictionary(self, data):
        # q dictionary / keyed table: keys object followed by values object
        self._buffer.write(struct.pack('=b', QDICTIONARY))
        self._write(data.keys)
        self._write(data.values)

    @serialize(QTable)
    def _write_table(self, data):
        # q table: flip marker, column names as a symbol list, then one list
        # per column (using the column qtype from the table metadata)
        self._buffer.write(struct.pack('=bxb', QTABLE, QDICTIONARY))
        self._write(qlist(numpy.array(data.dtype.names), qtype = QSYMBOL_LIST))
        self._buffer.write(struct.pack('=bxi', QGENERAL_LIST, len(data.dtype)))
        for column in data.dtype.names:
            self._write_list(data[column], data.meta[column])

    @serialize(numpy.ndarray, QList, QTemporalList)
    def _write_list(self, data, qtype = None):
        # serialize a typed list; qtype may be supplied (e.g. by _write_table)
        # or is inferred from the array itself
        if qtype is not None:
            qtype = -abs(qtype)

        if qtype is None:
            qtype = get_list_qtype(data)

        if self._protocol_version < 1 and (abs(qtype) == QTIMESPAN_LIST or abs(qtype) == QTIMESTAMP_LIST):
            raise QWriterException('kdb+ protocol version violation: data type %s not supported pre kdb+ v2.6' % hex(data.meta.qtype))

        if qtype == QGENERAL_LIST:
            self._write_generic_list(data)
        elif qtype == QCHAR:
            self._write_string(data.tostring())
        else:
            self._buffer.write(struct.pack('=bxi', -qtype, len(data)))
            if data.dtype.type in (numpy.datetime64, numpy.timedelta64):
                # convert numpy temporal to raw q temporal
                data = array_to_raw_qtemporal(data, qtype = qtype)

            if qtype == QSYMBOL:
                # symbols are written as null-terminated byte sequences
                for symbol in data:
                    if symbol:
                        self._buffer.write(symbol)
                    self._buffer.write(b'\0')
            elif qtype == QGUID:
                if self._protocol_version < 3:
                    raise QWriterException('kdb+ protocol version violation: Guid not supported pre kdb+ v3.0')

                for guid in data:
                    self._buffer.write(guid.bytes)
            else:
                self._buffer.write(data.tostring())
|
exxeleron/qPython | qpython/qcollection.py | get_list_qtype | python | def get_list_qtype(array):
'''Finds out a corresponding qtype for a specified `QList`/`numpy.ndarray`
instance.
:Parameters:
- `array` (`QList` or `numpy.ndarray`) - array to be checked
:returns: `integer` - qtype matching the specified array object
'''
if not isinstance(array, numpy.ndarray):
raise ValueError('array parameter is expected to be of type: numpy.ndarray, got: %s' % type(array))
if isinstance(array, QList):
return -abs(array.meta.qtype)
qtype = None
if str(array.dtype) in ('|S1', '<U1', '>U1', '|U1') :
qtype = QCHAR
if qtype is None:
qtype = Q_TYPE.get(array.dtype.type, None)
if qtype is None and array.dtype.type in (numpy.datetime64, numpy.timedelta64):
qtype = TEMPORAL_PY_TYPE.get(str(array.dtype), None)
if qtype is None:
# determinate type based on first element of the numpy array
qtype = Q_TYPE.get(type(array[0]), QGENERAL_LIST)
return qtype | Finds out a corresponding qtype for a specified `QList`/`numpy.ndarray`
instance.
:Parameters:
- `array` (`QList` or `numpy.ndarray`) - array to be checked
:returns: `integer` - qtype matching the specified array object | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qcollection.py#L71-L101 | null | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qpython.qtype import * # @UnusedWildImport
from qpython import MetaData
from qpython.qtemporal import qtemporal, from_raw_qtemporal, to_raw_qtemporal
class QList(numpy.ndarray):
    '''A numpy-backed array object representing a q vector.'''

    def _meta_init(self, **meta):
        '''Attaches qtype meta-information to this vector.'''
        self.meta = MetaData(**meta)

    def __array_finalize__(self, obj):
        # numpy invokes this for views and slices; carry the meta-data over.
        self.meta = MetaData() if obj is None else getattr(obj, 'meta', MetaData())

    def __eq__(self, other):
        # element-wise comparison collapsed to a single boolean
        return numpy.array_equal(self, other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # dtype + qtype + raw byte content identify the vector
        return hash((self.dtype, self.meta.qtype, self.tostring()))
class QTemporalList(QList):
    '''An array object representing a q vector of datetime objects.

    Elements are stored in their raw numeric q representation and wrapped
    into temporal objects on access.
    '''

    def __getitem__(self, idx):
        # wrap the raw value into a QTemporal using the atom (negative) qtype
        return qtemporal(from_raw_qtemporal(numpy.ndarray.__getitem__(self, idx), -abs(self.meta.qtype)),
                         qtype = -abs(self.meta.qtype))

    def __setitem__(self, idx, value):
        # Bug fix: pass the atom (negative) qtype, mirroring __getitem__.
        # The previous '- -abs(...)' double negation produced a positive
        # (vector) qtype, inconsistent with the conversion used on read.
        numpy.ndarray.__setitem__(self, idx, to_raw_qtemporal(value, -abs(self.meta.qtype)))

    def raw(self, idx):
        '''Gets the raw representation of the datetime object at the specified
        index.

        >>> t = qlist(numpy.array([366, 121, qnull(QDATE)]), qtype=QDATE_LIST)
        >>> print(t[0])
        2001-01-01 [metadata(qtype=-14)]
        >>> print(t.raw(0))
        366

        :Parameters:
         - `idx` (`integer`) - array index of the datetime object to be retrieved

        :returns: raw representation of the datetime object
        '''
        return numpy.ndarray.__getitem__(self, idx)
def get_list_qtype(array):
    '''Finds out a corresponding qtype for a specified `QList`/`numpy.ndarray`
    instance.

    :Parameters:
     - `array` (`QList` or `numpy.ndarray`) - array to be checked

    :returns: `integer` - qtype matching the specified array object
    '''
    if not isinstance(array, numpy.ndarray):
        raise ValueError('array parameter is expected to be of type: numpy.ndarray, got: %s' % type(array))

    # QList instances already carry their qtype in the attached meta-data.
    if isinstance(array, QList):
        return -abs(array.meta.qtype)

    # single-character dtypes map to the q char type
    if str(array.dtype) in ('|S1', '<U1', '>U1', '|U1'):
        return QCHAR

    # direct mapping based on the numpy scalar type
    qtype = Q_TYPE.get(array.dtype.type, None)
    if qtype is not None:
        return qtype

    # temporal numpy arrays are resolved via their dtype string
    if array.dtype.type in (numpy.datetime64, numpy.timedelta64):
        qtype = TEMPORAL_PY_TYPE.get(str(array.dtype), None)
        if qtype is not None:
            return qtype

    # determine the type based on the first element of the numpy array
    return Q_TYPE.get(type(array[0]), QGENERAL_LIST)
def qlist(array, adjust_dtype = True, **meta):
    '''Converts an input array to a q vector and enriches the instance with
    meta data.

    Returns a :class:`.QList` for non-datetime vectors and a
    :class:`.QTemporalList` for raw temporal vectors.

    If `adjust_dtype` is `True` and the q type given via the `qtype` keyword
    doesn't match the array's dtype, the underlying numpy array is converted
    to the matching data type.

    >>> v = qlist(numpy.array([0x01, 0x02, 0xff], dtype=numpy.byte))
    >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
    <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1  2 -1]

    :Parameters:
     - `array` (`tuple`, `list`, `numpy.array`) - input array to be converted
     - `adjust_dtype` (`boolean`) - whether the vector's data type should be
       adjusted if it doesn't match the default representation.
       **Default**: ``True``

    .. note:: numpy `datetime64` and `timedelta64` arrays are not converted
              to raw temporal vectors if `adjust_dtype` is ``True``

    :Kwargs:
     - `qtype` (`integer` or `None`) - qtype indicator

    :returns: `QList` or `QTemporalList` - array representation of the list

    :raises: `ValueError`
    '''
    if type(array) in (list, tuple):
        if meta and meta.get('qtype') == QGENERAL_LIST:
            # force object dtype and a flat shape for generic lists
            converted = numpy.ndarray(shape = len(array), dtype = numpy.dtype('O'))
            for idx, item in enumerate(array):
                converted[idx] = item
            array = converted
        else:
            array = numpy.array(array)

    if not isinstance(array, numpy.ndarray):
        raise ValueError('array parameter is expected to be of type: numpy.ndarray, list or tuple. Was: %s' % type(array))

    is_numpy_temporal = array.dtype.type in (numpy.datetime64, numpy.timedelta64)

    qtype = None
    if meta and 'qtype' in meta:
        qtype = -abs(meta['qtype'])
        expected_dtype = PY_TYPE[qtype]
        if adjust_dtype and expected_dtype != array.dtype and not is_numpy_temporal:
            array = array.astype(dtype = expected_dtype)

    if qtype is None:
        qtype = get_list_qtype(array)
    meta['qtype'] = qtype

    # raw temporal vectors get the QTemporalList wrapper, everything else a QList
    is_raw_temporal = qtype in (QMONTH, QDATE, QDATETIME, QMINUTE, QSECOND, QTIME, QTIMESTAMP, QTIMESPAN) \
                      and not is_numpy_temporal
    vector = array.view(QTemporalList) if is_raw_temporal else array.view(QList)
    vector._meta_init(**meta)
    return vector
class QDictionary(object):
    '''Represents a q dictionary.

    Maps each element of `keys` to the element of `values` at the same
    position, e.g. q: ``1 2!`abc`cdefgh``.

    :Parameters:
     - `keys` (`QList`, `tuple` or `list`) - dictionary keys
     - `values` (`QList`, `QTable`, `tuple` or `list`) - dictionary values
    '''

    def __init__(self, keys, values):
        if not isinstance(keys, (QList, tuple, list, numpy.ndarray)):
            raise ValueError('%s expects keys to be of type: QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(keys)))
        if not isinstance(values, (QTable, QList, tuple, list, numpy.ndarray)):
            raise ValueError('%s expects values to be of type: QTable, QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(values)))
        if len(keys) != len(values):
            raise ValueError('Number of keys: %d doesn`t match number of values: %d' % (len(keys), len(values)))
        self.keys = keys
        self.values = values

    def __str__(self, *args, **kwargs):
        return '%s!%s' % (self.keys, self.values)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        # positional comparison of both keys and values
        for idx, key in enumerate(self.keys):
            if key != other.keys[idx] or self.values[idx] != other.values[idx]:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def _find_key_(self, key):
        # linear scan; keys may be any iterable supporting equality
        for idx, candidate in enumerate(self.keys):
            if key == candidate:
                return idx
        raise KeyError('QDictionary doesn`t contain key: %s' % key)

    def __getitem__(self, key):
        return self.values[self._find_key_(key)]

    def __setitem__(self, key, value):
        self.values[self._find_key_(key)] = value

    def __len__(self):
        return len(self.keys)

    def __iter__(self):
        return iter(self.keys)

    def items(self):
        '''Return a copy of the dictionary's list of ``(key, value)`` pairs.'''
        return [(self.keys[idx], self.values[idx]) for idx in range(len(self.keys))]

    def iteritems(self):
        '''Return an iterator over the dictionary's ``(key, value)`` pairs.'''
        for idx in range(len(self.keys)):
            yield (self.keys[idx], self.values[idx])

    def iterkeys(self):
        '''Return an iterator over the dictionary's keys.'''
        return iter(self.keys)

    def itervalues(self):
        '''Return an iterator over the dictionary's values.'''
        return iter(self.values)
class QTable(numpy.recarray):
    '''Represents a q table.

    Internal table data is stored as a `numpy.array` separately for each
    column, mimicking the internal representation of tables in q.
    '''

    def _meta_init(self, **meta):
        # attach per-column qtype meta-data
        self.meta = MetaData(**meta)

    def __array_finalize__(self, obj):
        # keep meta-data alive across numpy view/slice operations
        self.meta = MetaData() if obj is None else getattr(obj, 'meta', MetaData())

    def __eq__(self, other):
        return numpy.array_equal(self, other)

    def __ne__(self, other):
        return not self.__eq__(other)
def qtable(columns, data, **meta):
    '''Creates a QTable out of given column names and data, and initialises
    the meta data.

    :class:`.QTable` is backed by `numpy.core.records.recarray`. Each column
    is converted to a :class:`.QList` via the :func:`.qlist` function; a
    qtype indicator supplied as a keyword argument forces an explicit
    conversion for that column.

    >>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
    >>> t = qtable(['name', 'iq'],
    ...            [['Dent', 'Beeblebrox', 'Prefect'],
    ...             [98, 42, 126]],
    ...            name = QSYMBOL, iq = QLONG)

    :Parameters:
     - `columns` (list of `strings`) - table column names
     - `data` (list of lists) - list of columns containing table data

    :Kwargs:
     - `meta` (`integer`) - qtype for particular column

    :returns: `QTable` - representation of q table

    :raises: `ValueError`
    '''
    if len(columns) != len(data):
        raise ValueError('Number of columns doesn`t match the data layout. %s vs %s' % (len(columns), len(data)))

    meta = {} if not meta else meta
    if not 'qtype' in meta:
        meta['qtype'] = QTABLE

    dtypes = []
    for i, raw_name in enumerate(columns):
        column_name = raw_name if isinstance(raw_name, str) else raw_name.decode("utf-8")
        column = data[i]

        # character lists represented as str/bytes become char arrays
        if isinstance(column, str):
            column = numpy.array(list(column), dtype = numpy.string_)
        if isinstance(column, bytes):
            column = numpy.array(list(column.decode()), dtype = numpy.string_)

        if column_name in meta:
            # explicit per-column qtype takes precedence
            column = qlist(column, qtype = meta[column_name])
        elif not isinstance(column, QList):
            column = qlist(column, qtype = QGENERAL_LIST) if type(column) in (list, tuple) else qlist(column)

        # keep the caller-visible data list in sync with the conversions above
        data[i] = column
        meta[column_name] = column.meta.qtype
        dtypes.append((column_name, column.dtype))

    table = numpy.core.records.fromarrays(data, dtype = dtypes)
    table = table.view(QTable)
    table._meta_init(**meta)
    return table
class QKeyedTable(object):
    '''Represents a q keyed table.

    A :class:`.QKeyedTable` is built from two :class:`.QTable` instances,
    one holding the key columns and the other the value columns.

    :Parameters:
     - `keys` (`QTable`) - table keys
     - `values` (`QTable`) - table values

    :raises: `ValueError`
    '''

    def __init__(self, keys, values):
        if not isinstance(keys, QTable):
            raise ValueError('Keys array is required to be of type: QTable')
        if not isinstance(values, QTable):
            raise ValueError('Values array is required to be of type: QTable')
        if len(keys) != len(values):
            raise ValueError('Keys and value arrays cannot have different length')
        self.keys = keys
        self.values = values

    def __str__(self, *args, **kwargs):
        return '%s!%s' % (self.keys, self.values)

    def __eq__(self, other):
        return isinstance(other, QKeyedTable) \
               and numpy.array_equal(self.keys, other.keys) \
               and numpy.array_equal(self.values, other.values)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self.keys)

    def __iter__(self):
        return iter(self.keys)

    def items(self):
        '''Return a copy of the keyed table's list of ``(key, value)`` pairs.'''
        return [(self.keys[idx], self.values[idx]) for idx in range(len(self.keys))]

    def iteritems(self):
        '''Return an iterator over the keyed table's ``(key, value)`` pairs.'''
        for idx in range(len(self.keys)):
            yield (self.keys[idx], self.values[idx])

    def iterkeys(self):
        '''Return an iterator over the keyed table's keys.'''
        return iter(self.keys)

    def itervalues(self):
        '''Return an iterator over the keyed table's values.'''
        return iter(self.values)
|
exxeleron/qPython | qpython/qcollection.py | qlist | python | def qlist(array, adjust_dtype = True, **meta):
'''Converts an input array to q vector and enriches object instance with
meta data.
Returns a :class:`.QList` instance for non-datetime vectors. For datetime
vectors :class:`.QTemporalList` is returned instead.
If parameter `adjust_dtype` is `True` and q type retrieved via
:func:`.get_list_qtype` doesn't match one provided as a `qtype` parameter
guessed q type, underlying numpy.array is converted to correct data type.
`qPython` internally represents ``(0x01;0x02;0xff)`` q list as:
``<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]``.
This object can be created by calling the :func:`.qlist` with following
arguments:
- `byte numpy.array`:
>>> v = qlist(numpy.array([0x01, 0x02, 0xff], dtype=numpy.byte))
>>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]
- `int32 numpy.array` with explicit conversion to `QBYTE_LIST`:
>>> v = qlist(numpy.array([1, 2, -1]), qtype = QBYTE_LIST)
>>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]
- plain Python `integer` list with explicit conversion to `QBYTE_LIST`:
>>> v = qlist([1, 2, -1], qtype = QBYTE_LIST)
>>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]
- numpy datetime64 array with implicit conversion to `QDATE_LIST`:
>>> v = qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'))
>>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
<class 'qpython.qcollection.QList'> dtype: datetime64[D] qtype: -14: ['2001-01-01' '2000-05-01' 'NaT']
- numpy datetime64 array with explicit conversion to `QDATE_LIST`:
>>> v = qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'), qtype = QDATE_LIST)
>>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
<class 'qpython.qcollection.QList'> dtype: datetime64[D] qtype: -14: ['2001-01-01' '2000-05-01' 'NaT']
:Parameters:
- `array` (`tuple`, `list`, `numpy.array`) - input array to be converted
- `adjust_dtype` (`boolean`) - determine whether data type of vector should
be adjusted if it doesn't match default representation. **Default**: ``True``
.. note:: numpy `datetime64` and `timedelta64` arrays are not converted
to raw temporal vectors if `adjust_dtype` is ``True``
:Kwargs:
- `qtype` (`integer` or `None`) - qtype indicator
:returns: `QList` or `QTemporalList` - array representation of the list
:raises: `ValueError`
'''
if type(array) in (list, tuple):
if meta and 'qtype' in meta and meta['qtype'] == QGENERAL_LIST:
# force shape and dtype for generic lists
tarray = numpy.ndarray(shape = len(array), dtype = numpy.dtype('O'))
for i in range(len(array)):
tarray[i] = array[i]
array = tarray
else:
array = numpy.array(array)
if not isinstance(array, numpy.ndarray):
raise ValueError('array parameter is expected to be of type: numpy.ndarray, list or tuple. Was: %s' % type(array))
qtype = None
is_numpy_temporal = array.dtype.type in (numpy.datetime64, numpy.timedelta64)
if meta and 'qtype' in meta:
qtype = -abs(meta['qtype'])
dtype = PY_TYPE[qtype]
if adjust_dtype and dtype != array.dtype and not is_numpy_temporal:
array = array.astype(dtype = dtype)
qtype = get_list_qtype(array) if qtype is None else qtype
meta['qtype'] = qtype
is_raw_temporal = meta['qtype'] in [QMONTH, QDATE, QDATETIME, QMINUTE, QSECOND, QTIME, QTIMESTAMP, QTIMESPAN] \
and not is_numpy_temporal
vector = array.view(QList) if not is_raw_temporal else array.view(QTemporalList)
vector._meta_init(**meta)
return vector | Converts an input array to q vector and enriches object instance with
meta data.
Returns a :class:`.QList` instance for non-datetime vectors. For datetime
vectors :class:`.QTemporalList` is returned instead.
If parameter `adjust_dtype` is `True` and q type retrieved via
:func:`.get_list_qtype` doesn't match one provided as a `qtype` parameter
guessed q type, underlying numpy.array is converted to correct data type.
`qPython` internally represents ``(0x01;0x02;0xff)`` q list as:
``<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]``.
This object can be created by calling the :func:`.qlist` with following
arguments:
- `byte numpy.array`:
>>> v = qlist(numpy.array([0x01, 0x02, 0xff], dtype=numpy.byte))
>>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]
- `int32 numpy.array` with explicit conversion to `QBYTE_LIST`:
>>> v = qlist(numpy.array([1, 2, -1]), qtype = QBYTE_LIST)
>>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]
- plain Python `integer` list with explicit conversion to `QBYTE_LIST`:
>>> v = qlist([1, 2, -1], qtype = QBYTE_LIST)
>>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]
- numpy datetime64 array with implicit conversion to `QDATE_LIST`:
>>> v = qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'))
>>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
<class 'qpython.qcollection.QList'> dtype: datetime64[D] qtype: -14: ['2001-01-01' '2000-05-01' 'NaT']
- numpy datetime64 array with explicit conversion to `QDATE_LIST`:
>>> v = qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'), qtype = QDATE_LIST)
>>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
<class 'qpython.qcollection.QList'> dtype: datetime64[D] qtype: -14: ['2001-01-01' '2000-05-01' 'NaT']
:Parameters:
- `array` (`tuple`, `list`, `numpy.array`) - input array to be converted
- `adjust_dtype` (`boolean`) - determine whether data type of vector should
be adjusted if it doesn't match default representation. **Default**: ``True``
.. note:: numpy `datetime64` and `timedelta64` arrays are not converted
to raw temporal vectors if `adjust_dtype` is ``True``
:Kwargs:
- `qtype` (`integer` or `None`) - qtype indicator
:returns: `QList` or `QTemporalList` - array representation of the list
:raises: `ValueError` | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qcollection.py#L105-L196 | [
"def get_list_qtype(array):\n '''Finds out a corresponding qtype for a specified `QList`/`numpy.ndarray` \n instance.\n\n :Parameters:\n - `array` (`QList` or `numpy.ndarray`) - array to be checked\n\n :returns: `integer` - qtype matching the specified array object\n '''\n if not isinstance(array, numpy.ndarray):\n raise ValueError('array parameter is expected to be of type: numpy.ndarray, got: %s' % type(array))\n\n if isinstance(array, QList):\n return -abs(array.meta.qtype)\n\n qtype = None\n\n if str(array.dtype) in ('|S1', '<U1', '>U1', '|U1') :\n qtype = QCHAR\n\n if qtype is None:\n qtype = Q_TYPE.get(array.dtype.type, None)\n\n if qtype is None and array.dtype.type in (numpy.datetime64, numpy.timedelta64):\n qtype = TEMPORAL_PY_TYPE.get(str(array.dtype), None)\n\n if qtype is None:\n # determinate type based on first element of the numpy array\n qtype = Q_TYPE.get(type(array[0]), QGENERAL_LIST)\n\n return qtype\n"
] | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qpython.qtype import * # @UnusedWildImport
from qpython import MetaData
from qpython.qtemporal import qtemporal, from_raw_qtemporal, to_raw_qtemporal
class QList(numpy.ndarray):
    '''An array object that represents a q vector.'''

    def _meta_init(self, **meta):
        '''Initialises the qtype meta-information carried by this vector.'''
        self.meta = MetaData(**meta)

    def __eq__(self, other):
        return numpy.array_equal(self, other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # combine dtype, qtype and the raw byte content
        return hash((self.dtype, self.meta.qtype, self.tostring()))

    def __array_finalize__(self, obj):
        # invoked by numpy for views/slices; propagate the meta-data
        self.meta = MetaData() if obj is None else getattr(obj, 'meta', MetaData())
class QTemporalList(QList):
    '''An array object representing a q vector of datetime objects.

    Values are kept in raw numeric q form and converted on element access.
    '''

    def __getitem__(self, idx):
        # convert the raw value on read, using the atom (negative) qtype
        return qtemporal(from_raw_qtemporal(numpy.ndarray.__getitem__(self, idx), -abs(self.meta.qtype)),
                         qtype = -abs(self.meta.qtype))

    def __setitem__(self, idx, value):
        # Bug fix: use the atom (negative) qtype, consistent with __getitem__;
        # the original '- -abs(...)' double negation yielded a positive qtype.
        numpy.ndarray.__setitem__(self, idx, to_raw_qtemporal(value, -abs(self.meta.qtype)))

    def raw(self, idx):
        '''Gets the raw representation of the datetime object at the specified
        index.

        >>> t = qlist(numpy.array([366, 121, qnull(QDATE)]), qtype=QDATE_LIST)
        >>> print(t[0])
        2001-01-01 [metadata(qtype=-14)]
        >>> print(t.raw(0))
        366

        :Parameters:
         - `idx` (`integer`) - array index of the datetime object to be retrieved

        :returns: raw representation of the datetime object
        '''
        return numpy.ndarray.__getitem__(self, idx)
def get_list_qtype(array):
    '''Finds out the corresponding qtype for a given `QList`/`numpy.ndarray`
    instance.

    :Parameters:
     - `array` (`QList` or `numpy.ndarray`) - array to be checked

    :returns: `integer` - qtype matching the specified array object
    '''
    if not isinstance(array, numpy.ndarray):
        raise ValueError('array parameter is expected to be of type: numpy.ndarray, got: %s' % type(array))

    if isinstance(array, QList):
        # the qtype is already recorded in the meta-data
        return -abs(array.meta.qtype)

    dtype_name = str(array.dtype)
    if dtype_name in ('|S1', '<U1', '>U1', '|U1'):
        return QCHAR

    resolved = Q_TYPE.get(array.dtype.type, None)

    if resolved is None and array.dtype.type in (numpy.datetime64, numpy.timedelta64):
        resolved = TEMPORAL_PY_TYPE.get(dtype_name, None)

    if resolved is None:
        # fall back to the Python type of the first element
        resolved = Q_TYPE.get(type(array[0]), QGENERAL_LIST)

    return resolved
def qlist(array, adjust_dtype = True, **meta):
    '''Converts an input array to a q vector and attaches meta data.

    Non-datetime vectors are returned as :class:`.QList`; raw temporal
    vectors as :class:`.QTemporalList`.

    If `adjust_dtype` is `True` and the qtype passed via the `qtype` keyword
    doesn't match the array's dtype, the underlying numpy array is cast to
    the expected data type.

    >>> v = qlist([1, 2, -1], qtype = QBYTE_LIST)
    >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
    <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1  2 -1]

    :Parameters:
     - `array` (`tuple`, `list`, `numpy.array`) - input array to be converted
     - `adjust_dtype` (`boolean`) - whether the dtype should be adjusted when
       it doesn't match the default representation. **Default**: ``True``

    .. note:: numpy `datetime64` and `timedelta64` arrays are not converted
              to raw temporal vectors if `adjust_dtype` is ``True``

    :Kwargs:
     - `qtype` (`integer` or `None`) - qtype indicator

    :returns: `QList` or `QTemporalList` - array representation of the list

    :raises: `ValueError`
    '''
    if type(array) in (list, tuple):
        if meta and 'qtype' in meta and meta['qtype'] == QGENERAL_LIST:
            # generic lists must keep the object dtype and a flat shape
            generic = numpy.ndarray(shape = len(array), dtype = numpy.dtype('O'))
            for pos in range(len(array)):
                generic[pos] = array[pos]
            array = generic
        else:
            array = numpy.array(array)

    if not isinstance(array, numpy.ndarray):
        raise ValueError('array parameter is expected to be of type: numpy.ndarray, list or tuple. Was: %s' % type(array))

    temporal_dtype = array.dtype.type in (numpy.datetime64, numpy.timedelta64)

    qtype = None
    if meta and 'qtype' in meta:
        qtype = -abs(meta['qtype'])
        target_dtype = PY_TYPE[qtype]
        if adjust_dtype and target_dtype != array.dtype and not temporal_dtype:
            array = array.astype(dtype = target_dtype)

    qtype = get_list_qtype(array) if qtype is None else qtype
    meta['qtype'] = qtype

    # choose the wrapper: raw temporal vectors need element-wise conversion
    raw_temporal = not temporal_dtype and qtype in (QMONTH, QDATE, QDATETIME, QMINUTE, QSECOND, QTIME, QTIMESTAMP, QTIMESPAN)
    vector = array.view(QTemporalList if raw_temporal else QList)
    vector._meta_init(**meta)
    return vector
class QDictionary(object):
    '''Represents a q dictionary.

    Dictionary examples:

    >>> # q: 1 2!`abc`cdefgh
    >>> print(QDictionary(qlist(numpy.array([1, 2], dtype=numpy.int64), qtype=QLONG_LIST),
    ...                   qlist(numpy.array(['abc', 'cdefgh']), qtype = QSYMBOL_LIST)))
    [1 2]!['abc' 'cdefgh']

    >>> # q: (1;2h;3.234;"4")!(`one;2 3;"456";(7;8 9))
    >>> print(QDictionary([numpy.int64(1), numpy.int16(2), numpy.float64(3.234), '4'],
    ...                   [numpy.string_('one'), qlist(numpy.array([2, 3]), qtype=QLONG_LIST), '456', [numpy.int64(7), qlist(numpy.array([8, 9]), qtype=QLONG_LIST)]]))
    [1, 2, 3.234, '4']!['one', QList([2, 3], dtype=int64), '456', [7, QList([8, 9], dtype=int64)]]

    :Parameters:
     - `keys` (`QList`, `tuple` or `list`) - dictionary keys
     - `values` (`QList`, `QTable`, `tuple` or `list`) - dictionary values

    :raises: `ValueError` - when types are unsupported or lengths differ
    '''
    def __init__(self, keys, values):
        if not isinstance(keys, (QList, tuple, list, numpy.ndarray)):
            raise ValueError('%s expects keys to be of type: QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(keys)))
        if not isinstance(values, (QTable, QList, tuple, list, numpy.ndarray)):
            raise ValueError('%s expects values to be of type: QTable, QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(values)))
        if len(keys) != len(values):
            raise ValueError('Number of keys: %d doesn`t match number of values: %d' % (len(keys), len(values)))
        self.keys = keys
        self.values = values

    def __str__(self, *args, **kwargs):
        return '%s!%s' % (self.keys, self.values)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        # Length guard: without it the element-wise loop below raises
        # IndexError (instead of reporting inequality) when `other` holds
        # fewer keys than `self`.
        if len(self.keys) != len(other.keys):
            return False
        for idx, key in enumerate(self.keys):
            if key != other.keys[idx] or self.values[idx] != other.values[idx]:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def _find_key_(self, key):
        # Linear scan: keys may be a numpy array or QList, so no `index`
        # method can be assumed.
        for idx, k in enumerate(self.keys):
            if key == k:
                return idx
        raise KeyError('QDictionary doesn`t contain key: %s' % key)

    def __getitem__(self, key):
        return self.values[self._find_key_(key)]

    def __setitem__(self, key, value):
        self.values[self._find_key_(key)] = value

    def __len__(self):
        return len(self.keys)

    def __iter__(self):
        return iter(self.keys)

    def items(self):
        '''Return a copy of the dictionary's list of ``(key, value)`` pairs.'''
        return [(self.keys[x], self.values[x]) for x in range(len(self.keys))]

    def iteritems(self):
        '''Return an iterator over the dictionary's ``(key, value)`` pairs.'''
        for x in range(len(self.keys)):
            yield (self.keys[x], self.values[x])

    def iterkeys(self):
        '''Return an iterator over the dictionary's keys.'''
        return iter(self.keys)

    def itervalues(self):
        '''Return an iterator over the dictionary's values.'''
        return iter(self.values)
class QTable(numpy.recarray):
    '''Represents a q table.

    Internal table data is stored as a `numpy.array` separately for each
    column. This mimics the internal representation of tables in q.
    '''

    def _meta_init(self, **meta):
        '''Initialises the meta-information describing the table columns.'''
        self.meta = MetaData(**meta)

    def __eq__(self, other):
        return numpy.array_equal(self, other)

    def __ne__(self, other):
        return not numpy.array_equal(self, other)

    def __array_finalize__(self, obj):
        # numpy view/slice machinery: propagate the meta attribute from the
        # source object when present, otherwise start with empty meta data.
        if obj is None:
            self.meta = MetaData()
        else:
            self.meta = getattr(obj, 'meta', MetaData())
def qtable(columns, data, **meta):
    '''Creates a QTable out of given column names and data, and initialises the
    meta data.

    :class:`.QTable` is represented internally by `numpy.core.records.recarray`.
    Data for each column is converted to :class:`.QList` via :func:`.qlist`
    function. If qtype indicator is defined for a column, this information
    is used for explicit array conversion.

    Table examples:

    >>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
    >>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
    ...            [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect'])),
    ...             qlist(numpy.array([98, 42, 126], dtype=numpy.int64))])
    >>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
    <class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]

    >>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
    >>> t = qtable(['name', 'iq'],
    ...            [['Dent', 'Beeblebrox', 'Prefect'],
    ...             [98, 42, 126]],
    ...            name = QSYMBOL, iq = QLONG)
    >>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
    <class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]

    >>> # q: flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; "Zaphod Beeblebrox"; "Ford Prefect"))
    >>> t = qtable(('name', 'iq', 'fullname'),
    ...            [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
    ...             qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
    ...             qlist(numpy.array(["Arthur Dent", "Zaphod Beeblebrox", "Ford Prefect"]), qtype = QSTRING_LIST)])
    <class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8'), ('fullname', 'O')] meta: metadata(iq=-7, fullname=0, qtype=98, name=-11): [('Dent', 98L, 'Arthur Dent') ('Beeblebrox', 42L, 'Zaphod Beeblebrox') ('Prefect', 126L, 'Ford Prefect')]

    :Parameters:
     - `columns` (list of `strings`) - table column names
     - `data` (list of lists) - list of columns containing table data

    :Kwargs:
     - `meta` (`integer`) - qtype for particular column

    :returns: `QTable` - representation of q table

    :raises: `ValueError`
    '''
    if len(columns) != len(data):
        raise ValueError('Number of columns doesn`t match the data layout. %s vs %s' % (len(columns), len(data)))

    meta = {} if not meta else meta
    if 'qtype' not in meta:
        meta['qtype'] = QTABLE

    # Work on a shallow copy so the per-column conversions below do not
    # mutate the caller's list in place.
    data = list(data)

    dtypes = []
    for i in range(len(columns)):
        # column names may arrive as bytes (e.g. parsed symbols)
        column_name = columns[i] if isinstance(columns[i], str) else columns[i].decode("utf-8")
        if isinstance(data[i], str):
            # convert character list (represented as string) to numpy representation
            data[i] = numpy.array(list(data[i]), dtype = numpy.string_)
        if isinstance(data[i], bytes):
            data[i] = numpy.array(list(data[i].decode()), dtype = numpy.string_)

        if column_name in meta:
            # an explicit qtype was requested for this column
            data[i] = qlist(data[i], qtype = meta[column_name])
        elif not isinstance(data[i], QList):
            if type(data[i]) in (list, tuple):
                data[i] = qlist(data[i], qtype = QGENERAL_LIST)
            else:
                data[i] = qlist(data[i])

        # record the effective qtype of the converted column in the meta data
        meta[column_name] = data[i].meta.qtype
        dtypes.append((column_name, data[i].dtype))

    table = numpy.core.records.fromarrays(data, dtype = dtypes)
    table = table.view(QTable)
    table._meta_init(**meta)
    return table
class QKeyedTable(object):
    '''Represents a q keyed table.

    A :class:`.QKeyedTable` is built from two :class:`.QTable` instances,
    one holding the keys and the other holding the values.

    Keyed tables example:

    >>> # q: ([eid:1001 1002 1003] pos:`d1`d2`d3;dates:(2001.01.01;2000.05.01;0Nd))
    >>> t = QKeyedTable(qtable(['eid'],
    ...                        [qlist(numpy.array([1001, 1002, 1003]), qtype = QLONG_LIST)]),
    ...                 qtable(['pos', 'dates'],
    ...                        [qlist(numpy.array(['d1', 'd2', 'd3']), qtype = QSYMBOL_LIST),
    ...                         qlist(numpy.array([366, 121, qnull(QDATE)]), qtype = QDATE_LIST)]))
    >>> print('%s: %s' % (type(t), t))
    <class 'qpython.qcollection.QKeyedTable'>: [(1001L,) (1002L,) (1003L,)]![('d1', 366) ('d2', 121) ('d3', -2147483648)]

    :Parameters:
     - `keys` (`QTable`) - table keys
     - `values` (`QTable`) - table values

    :raises: `ValueError`
    '''
    def __init__(self, keys, values):
        if not isinstance(keys, QTable):
            raise ValueError('Keys array is required to be of type: QTable')
        if not isinstance(values, QTable):
            raise ValueError('Values array is required to be of type: QTable')
        if len(keys) != len(values):
            raise ValueError('Keys and value arrays cannot have different length')
        self.keys = keys
        self.values = values

    def __str__(self, *args, **kwargs):
        return '%s!%s' % (self.keys, self.values)

    def __eq__(self, other):
        if not isinstance(other, QKeyedTable):
            return False
        return numpy.array_equal(self.keys, other.keys) and numpy.array_equal(self.values, other.values)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self.keys)

    def __iter__(self):
        return iter(self.keys)

    def items(self):
        '''Return a copy of the keyed table's list of ``(key, value)`` pairs.'''
        return list(zip(self.keys, self.values))

    def iteritems(self):
        '''Return an iterator over the keyed table's ``(key, value)`` pairs.'''
        for pair in zip(self.keys, self.values):
            yield pair

    def iterkeys(self):
        '''Return an iterator over the keyed table's keys.'''
        return iter(self.keys)

    def itervalues(self):
        '''Return an iterator over the keyed table's values.'''
        return iter(self.values)
|
exxeleron/qPython | qpython/qcollection.py | qtable | python | def qtable(columns, data, **meta):
'''Creates a QTable out of given column names and data, and initialises the
meta data.
:class:`.QTable` is represented internally by `numpy.core.records.recarray`.
Data for each column is converted to :class:`.QList` via :func:`.qlist`
function. If qtype indicator is defined for a column, this information
is used for explicit array conversion.
Table examples:
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect'])),
... qlist(numpy.array([98, 42, 126], dtype=numpy.int64))])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(['Dent', 'Beeblebrox', 'Prefect'], qtype = QSYMBOL_LIST),
... qlist([98, 42, 126], qtype = QLONG_LIST)])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(['name', 'iq'],
... [['Dent', 'Beeblebrox', 'Prefect'],
... [98, 42, 126]],
... name = QSYMBOL, iq = QLONG)
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; "Zaphod Beeblebrox"; "Ford Prefect"))
>>> t = qtable(('name', 'iq', 'fullname'),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
... qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
... qlist(numpy.array(["Arthur Dent", "Zaphod Beeblebrox", "Ford Prefect"]), qtype = QSTRING_LIST)])
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8'), ('fullname', 'O')] meta: metadata(iq=-7, fullname=0, qtype=98, name=-11): [('Dent', 98L, 'Arthur Dent') ('Beeblebrox', 42L, 'Zaphod Beeblebrox') ('Prefect', 126L, 'Ford Prefect')]
:Parameters:
- `columns` (list of `strings`) - table column names
- `data` (list of lists) - list of columns containing table data
:Kwargs:
- `meta` (`integer`) - qtype for particular column
:returns: `QTable` - representation of q table
:raises: `ValueError`
'''
if len(columns) != len(data):
raise ValueError('Number of columns doesn`t match the data layout. %s vs %s' % (len(columns), len(data)))
meta = {} if not meta else meta
if not 'qtype' in meta:
meta['qtype'] = QTABLE
dtypes = []
for i in range(len(columns)):
column_name = columns[i] if isinstance(columns[i], str) else columns[i].decode("utf-8")
if isinstance(data[i], str):
# convert character list (represented as string) to numpy representation
data[i] = numpy.array(list(data[i]), dtype = numpy.string_)
if isinstance(data[i], bytes):
data[i] = numpy.array(list(data[i].decode()), dtype = numpy.string_)
if column_name in meta:
data[i] = qlist(data[i], qtype = meta[column_name])
elif not isinstance(data[i], QList):
if type(data[i]) in (list, tuple):
data[i] = qlist(data[i], qtype = QGENERAL_LIST)
else:
data[i] = qlist(data[i])
meta[column_name] = data[i].meta.qtype
dtypes.append((column_name, data[i].dtype))
table = numpy.core.records.fromarrays(data, dtype = dtypes)
table = table.view(QTable)
table._meta_init(**meta)
return table | Creates a QTable out of given column names and data, and initialises the
meta data.
:class:`.QTable` is represented internally by `numpy.core.records.recarray`.
Data for each column is converted to :class:`.QList` via :func:`.qlist`
function. If qtype indicator is defined for a column, this information
is used for explicit array conversion.
Table examples:
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect'])),
... qlist(numpy.array([98, 42, 126], dtype=numpy.int64))])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
... [qlist(['Dent', 'Beeblebrox', 'Prefect'], qtype = QSYMBOL_LIST),
... qlist([98, 42, 126], qtype = QLONG_LIST)])
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
>>> t = qtable(['name', 'iq'],
... [['Dent', 'Beeblebrox', 'Prefect'],
... [98, 42, 126]],
... name = QSYMBOL, iq = QLONG)
>>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]
>>> # q: flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; "Zaphod Beeblebrox"; "Ford Prefect"))
>>> t = qtable(('name', 'iq', 'fullname'),
... [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
... qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
... qlist(numpy.array(["Arthur Dent", "Zaphod Beeblebrox", "Ford Prefect"]), qtype = QSTRING_LIST)])
<class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8'), ('fullname', 'O')] meta: metadata(iq=-7, fullname=0, qtype=98, name=-11): [('Dent', 98L, 'Arthur Dent') ('Beeblebrox', 42L, 'Zaphod Beeblebrox') ('Prefect', 126L, 'Ford Prefect')]
:Parameters:
- `columns` (list of `strings`) - table column names
- `data` (list of lists) - list of columns containing table data
:Kwargs:
- `meta` (`integer`) - qtype for particular column
:returns: `QTable` - representation of q table
:raises: `ValueError` | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qcollection.py#L308-L393 | [
"def qlist(array, adjust_dtype = True, **meta):\n '''Converts an input array to q vector and enriches object instance with \n meta data.\n\n Returns a :class:`.QList` instance for non-datetime vectors. For datetime \n vectors :class:`.QTemporalList` is returned instead.\n\n If parameter `adjust_dtype` is `True` and q type retrieved via \n :func:`.get_list_qtype` doesn't match one provided as a `qtype` parameter \n guessed q type, underlying numpy.array is converted to correct data type.\n\n `qPython` internally represents ``(0x01;0x02;0xff)`` q list as:\n ``<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]``.\n This object can be created by calling the :func:`.qlist` with following \n arguments:\n\n - `byte numpy.array`:\n\n >>> v = qlist(numpy.array([0x01, 0x02, 0xff], dtype=numpy.byte))\n >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))\n <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]\n\n - `int32 numpy.array` with explicit conversion to `QBYTE_LIST`: \n\n >>> v = qlist(numpy.array([1, 2, -1]), qtype = QBYTE_LIST)\n >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))\n <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]\n\n - plain Python `integer` list with explicit conversion to `QBYTE_LIST`: \n\n >>> v = qlist([1, 2, -1], qtype = QBYTE_LIST)\n >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))\n <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]\n\n - numpy datetime64 array with implicit conversion to `QDATE_LIST`: \n\n >>> v = qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'))\n >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))\n <class 'qpython.qcollection.QList'> dtype: datetime64[D] qtype: -14: ['2001-01-01' '2000-05-01' 'NaT']\n\n - numpy datetime64 array with explicit conversion to 
`QDATE_LIST`: \n\n >>> v = qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'), qtype = QDATE_LIST)\n >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))\n <class 'qpython.qcollection.QList'> dtype: datetime64[D] qtype: -14: ['2001-01-01' '2000-05-01' 'NaT']\n\n\n :Parameters:\n - `array` (`tuple`, `list`, `numpy.array`) - input array to be converted\n - `adjust_dtype` (`boolean`) - determine whether data type of vector should\n be adjusted if it doesn't match default representation. **Default**: ``True``\n\n .. note:: numpy `datetime64` and `timedelta64` arrays are not converted\n to raw temporal vectors if `adjust_dtype` is ``True``\n\n :Kwargs:\n - `qtype` (`integer` or `None`) - qtype indicator\n\n :returns: `QList` or `QTemporalList` - array representation of the list\n\n :raises: `ValueError` \n '''\n if type(array) in (list, tuple):\n if meta and 'qtype' in meta and meta['qtype'] == QGENERAL_LIST:\n # force shape and dtype for generic lists\n tarray = numpy.ndarray(shape = len(array), dtype = numpy.dtype('O'))\n for i in range(len(array)):\n tarray[i] = array[i]\n array = tarray\n else:\n array = numpy.array(array)\n\n if not isinstance(array, numpy.ndarray):\n raise ValueError('array parameter is expected to be of type: numpy.ndarray, list or tuple. 
Was: %s' % type(array))\n\n qtype = None\n is_numpy_temporal = array.dtype.type in (numpy.datetime64, numpy.timedelta64)\n\n if meta and 'qtype' in meta:\n qtype = -abs(meta['qtype'])\n dtype = PY_TYPE[qtype]\n if adjust_dtype and dtype != array.dtype and not is_numpy_temporal:\n array = array.astype(dtype = dtype)\n\n qtype = get_list_qtype(array) if qtype is None else qtype\n meta['qtype'] = qtype\n\n is_raw_temporal = meta['qtype'] in [QMONTH, QDATE, QDATETIME, QMINUTE, QSECOND, QTIME, QTIMESTAMP, QTIMESPAN] \\\n and not is_numpy_temporal\n vector = array.view(QList) if not is_raw_temporal else array.view(QTemporalList)\n vector._meta_init(**meta)\n return vector\n"
] | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from qpython.qtype import * # @UnusedWildImport
from qpython import MetaData
from qpython.qtemporal import qtemporal, from_raw_qtemporal, to_raw_qtemporal
class QList(numpy.ndarray):
    '''An array object represents a q vector.'''

    def _meta_init(self, **meta):
        '''Initialises the meta-information.'''
        self.meta = MetaData(**meta)

    def __eq__(self, other):
        return numpy.array_equal(self, other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # tobytes() is the non-deprecated spelling of tostring(): both
        # return the identical raw buffer, so hash values are unchanged,
        # but tostring() was removed in numpy 2.0.
        return hash((self.dtype, self.meta.qtype, self.tobytes()))

    def __array_finalize__(self, obj):
        # numpy view/slice machinery: inherit meta from the source object,
        # or fall back to empty meta data for freshly-created arrays.
        self.meta = MetaData() if obj is None else getattr(obj, 'meta', MetaData())
class QTemporalList(QList):
    '''An array object represents a q vector of datetime objects.'''

    def __getitem__(self, idx):
        return qtemporal(from_raw_qtemporal(numpy.ndarray.__getitem__(self, idx), -abs(self.meta.qtype)), qtype = -abs(self.meta.qtype))

    def __setitem__(self, idx, value):
        # The raw conversion expects a negative (atom) qtype, matching
        # __getitem__ above; the previous `- -abs(...)` double negation
        # produced a positive qtype and broke the conversion.
        numpy.ndarray.__setitem__(self, idx, to_raw_qtemporal(value, -abs(self.meta.qtype)))

    def raw(self, idx):
        '''Gets the raw representation of the datetime object at the specified
        index.

        >>> t = qlist(numpy.array([366, 121, qnull(QDATE)]), qtype=QDATE_LIST)
        >>> print(t[0])
        2001-01-01 [metadata(qtype=-14)]
        >>> print(t.raw(0))
        366

        :Parameters:
         - `idx` (`integer`) - array index of the datetime object to be retrieved

        :returns: raw representation of the datetime object
        '''
        return numpy.ndarray.__getitem__(self, idx)
def get_list_qtype(array):
    '''Finds out a corresponding qtype for a specified `QList`/`numpy.ndarray`
    instance.

    :Parameters:
     - `array` (`QList` or `numpy.ndarray`) - array to be checked

    :returns: `integer` - qtype matching the specified array object

    :raises: `ValueError` - when `array` is not a `numpy.ndarray`
    '''
    if not isinstance(array, numpy.ndarray):
        raise ValueError('array parameter is expected to be of type: numpy.ndarray, got: %s' % type(array))

    if isinstance(array, QList):
        # the vector already carries its qtype in its meta data
        return -abs(array.meta.qtype)

    qtype = None

    # single-character dtype is treated as a q char vector
    if str(array.dtype) in ('|S1', '<U1', '>U1', '|U1'):
        qtype = QCHAR

    if qtype is None:
        qtype = Q_TYPE.get(array.dtype.type, None)

    if qtype is None and array.dtype.type in (numpy.datetime64, numpy.timedelta64):
        qtype = TEMPORAL_PY_TYPE.get(str(array.dtype), None)

    if qtype is None:
        # determinate type based on first element of the numpy array; an
        # empty array has no element to inspect, so fall back to the
        # generic list qtype instead of raising IndexError
        qtype = QGENERAL_LIST if len(array) == 0 else Q_TYPE.get(type(array[0]), QGENERAL_LIST)

    return qtype
def qlist(array, adjust_dtype = True, **meta):
    '''Converts an input array to q vector and enriches object instance with
    meta data.
    Returns a :class:`.QList` instance for non-datetime vectors. For datetime
    vectors :class:`.QTemporalList` is returned instead.
    If parameter `adjust_dtype` is `True` and q type retrieved via
    :func:`.get_list_qtype` doesn't match one provided as a `qtype` parameter
    guessed q type, underlying numpy.array is converted to correct data type.
    `qPython` internally represents ``(0x01;0x02;0xff)`` q list as:
    ``<class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]``.
    This object can be created by calling the :func:`.qlist` with following
    arguments:
    - `byte numpy.array`:
    >>> v = qlist(numpy.array([0x01, 0x02, 0xff], dtype=numpy.byte))
    >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
    <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]
    - `int32 numpy.array` with explicit conversion to `QBYTE_LIST`:
    >>> v = qlist(numpy.array([1, 2, -1]), qtype = QBYTE_LIST)
    >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
    <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]
    - plain Python `integer` list with explicit conversion to `QBYTE_LIST`:
    >>> v = qlist([1, 2, -1], qtype = QBYTE_LIST)
    >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
    <class 'qpython.qcollection.QList'> dtype: int8 qtype: -4: [ 1 2 -1]
    - numpy datetime64 array with implicit conversion to `QDATE_LIST`:
    >>> v = qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'))
    >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
    <class 'qpython.qcollection.QList'> dtype: datetime64[D] qtype: -14: ['2001-01-01' '2000-05-01' 'NaT']
    - numpy datetime64 array with explicit conversion to `QDATE_LIST`:
    >>> v = qlist(numpy.array([numpy.datetime64('2001-01-01'), numpy.datetime64('2000-05-01'), numpy.datetime64('NaT')], dtype='datetime64[D]'), qtype = QDATE_LIST)
    >>> print('%s dtype: %s qtype: %d: %s' % (type(v), v.dtype, v.meta.qtype, v))
    <class 'qpython.qcollection.QList'> dtype: datetime64[D] qtype: -14: ['2001-01-01' '2000-05-01' 'NaT']
    :Parameters:
    - `array` (`tuple`, `list`, `numpy.array`) - input array to be converted
    - `adjust_dtype` (`boolean`) - determine whether data type of vector should
    be adjusted if it doesn't match default representation. **Default**: ``True``
    .. note:: numpy `datetime64` and `timedelta64` arrays are not converted
    to raw temporal vectors if `adjust_dtype` is ``True``
    :Kwargs:
    - `qtype` (`integer` or `None`) - qtype indicator
    :returns: `QList` or `QTemporalList` - array representation of the list
    :raises: `ValueError`
    '''
    # plain Python sequences are first converted to a numpy array
    if type(array) in (list, tuple):
        if meta and 'qtype' in meta and meta['qtype'] == QGENERAL_LIST:
            # force shape and dtype for generic lists
            tarray = numpy.ndarray(shape = len(array), dtype = numpy.dtype('O'))
            for i in range(len(array)):
                tarray[i] = array[i]
            array = tarray
        else:
            array = numpy.array(array)
    if not isinstance(array, numpy.ndarray):
        raise ValueError('array parameter is expected to be of type: numpy.ndarray, list or tuple. Was: %s' % type(array))
    qtype = None
    # numpy temporal arrays keep their native dtype and are never coerced below
    is_numpy_temporal = array.dtype.type in (numpy.datetime64, numpy.timedelta64)
    if meta and 'qtype' in meta:
        # atom qtypes are negative; normalise whichever sign the caller passed
        qtype = -abs(meta['qtype'])
        dtype = PY_TYPE[qtype]
        if adjust_dtype and dtype != array.dtype and not is_numpy_temporal:
            array = array.astype(dtype = dtype)
    # infer the qtype from the array contents when the caller gave none
    qtype = get_list_qtype(array) if qtype is None else qtype
    meta['qtype'] = qtype
    # raw temporal vectors (plain numeric representation of q temporals) are
    # wrapped in QTemporalList so element access converts values on the fly
    is_raw_temporal = meta['qtype'] in [QMONTH, QDATE, QDATETIME, QMINUTE, QSECOND, QTIME, QTIMESTAMP, QTIMESPAN] \
        and not is_numpy_temporal
    vector = array.view(QList) if not is_raw_temporal else array.view(QTemporalList)
    vector._meta_init(**meta)
    return vector
class QDictionary(object):
    '''Represents a q dictionary.

    Dictionary examples:

    >>> # q: 1 2!`abc`cdefgh
    >>> print(QDictionary(qlist(numpy.array([1, 2], dtype=numpy.int64), qtype=QLONG_LIST),
    ...                   qlist(numpy.array(['abc', 'cdefgh']), qtype = QSYMBOL_LIST)))
    [1 2]!['abc' 'cdefgh']

    >>> # q: (1;2h;3.234;"4")!(`one;2 3;"456";(7;8 9))
    >>> print(QDictionary([numpy.int64(1), numpy.int16(2), numpy.float64(3.234), '4'],
    ...                   [numpy.string_('one'), qlist(numpy.array([2, 3]), qtype=QLONG_LIST), '456', [numpy.int64(7), qlist(numpy.array([8, 9]), qtype=QLONG_LIST)]]))
    [1, 2, 3.234, '4']!['one', QList([2, 3], dtype=int64), '456', [7, QList([8, 9], dtype=int64)]]

    :Parameters:
     - `keys` (`QList`, `tuple` or `list`) - dictionary keys
     - `values` (`QList`, `QTable`, `tuple` or `list`) - dictionary values

    :raises: `ValueError` - when types are unsupported or lengths differ
    '''
    def __init__(self, keys, values):
        if not isinstance(keys, (QList, tuple, list, numpy.ndarray)):
            raise ValueError('%s expects keys to be of type: QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(keys)))
        if not isinstance(values, (QTable, QList, tuple, list, numpy.ndarray)):
            raise ValueError('%s expects values to be of type: QTable, QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(values)))
        if len(keys) != len(values):
            raise ValueError('Number of keys: %d doesn`t match number of values: %d' % (len(keys), len(values)))
        self.keys = keys
        self.values = values

    def __str__(self, *args, **kwargs):
        return '%s!%s' % (self.keys, self.values)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        # Length guard: without it the element-wise loop below raises
        # IndexError (instead of reporting inequality) when `other` holds
        # fewer keys than `self`.
        if len(self.keys) != len(other.keys):
            return False
        for idx, key in enumerate(self.keys):
            if key != other.keys[idx] or self.values[idx] != other.values[idx]:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def _find_key_(self, key):
        # Linear scan: keys may be a numpy array or QList, so no `index`
        # method can be assumed.
        for idx, k in enumerate(self.keys):
            if key == k:
                return idx
        raise KeyError('QDictionary doesn`t contain key: %s' % key)

    def __getitem__(self, key):
        return self.values[self._find_key_(key)]

    def __setitem__(self, key, value):
        self.values[self._find_key_(key)] = value

    def __len__(self):
        return len(self.keys)

    def __iter__(self):
        return iter(self.keys)

    def items(self):
        '''Return a copy of the dictionary's list of ``(key, value)`` pairs.'''
        return [(self.keys[x], self.values[x]) for x in range(len(self.keys))]

    def iteritems(self):
        '''Return an iterator over the dictionary's ``(key, value)`` pairs.'''
        for x in range(len(self.keys)):
            yield (self.keys[x], self.values[x])

    def iterkeys(self):
        '''Return an iterator over the dictionary's keys.'''
        return iter(self.keys)

    def itervalues(self):
        '''Return an iterator over the dictionary's values.'''
        return iter(self.values)
class QTable(numpy.recarray):
    '''Represents a q table.

    Internal table data is stored as a `numpy.array` separately for each
    column. This mimics the internal representation of tables in q.
    '''

    def _meta_init(self, **meta):
        '''Initialises the meta-information describing the table columns.'''
        self.meta = MetaData(**meta)

    def __eq__(self, other):
        return numpy.array_equal(self, other)

    def __ne__(self, other):
        return not numpy.array_equal(self, other)

    def __array_finalize__(self, obj):
        # numpy view/slice machinery: propagate the meta attribute from the
        # source object when present, otherwise start with empty meta data.
        if obj is None:
            self.meta = MetaData()
        else:
            self.meta = getattr(obj, 'meta', MetaData())
def qtable(columns, data, **meta):
    '''Creates a QTable out of given column names and data, and initialises the
    meta data.

    :class:`.QTable` is represented internally by `numpy.core.records.recarray`.
    Data for each column is converted to :class:`.QList` via :func:`.qlist`
    function. If qtype indicator is defined for a column, this information
    is used for explicit array conversion.

    Table examples:

    >>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
    >>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
    ...            [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect'])),
    ...             qlist(numpy.array([98, 42, 126], dtype=numpy.int64))])
    >>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
    <class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]

    >>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
    >>> t = qtable(qlist(numpy.array(['name', 'iq']), qtype = QSYMBOL_LIST),
    ...            [qlist(['Dent', 'Beeblebrox', 'Prefect'], qtype = QSYMBOL_LIST),
    ...             qlist([98, 42, 126], qtype = QLONG_LIST)])
    >>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
    <class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]

    >>> # q: flip `name`iq!(`Dent`Beeblebrox`Prefect;98 42 126)
    >>> t = qtable(['name', 'iq'],
    ...            [['Dent', 'Beeblebrox', 'Prefect'],
    ...             [98, 42, 126]],
    ...            name = QSYMBOL, iq = QLONG)
    >>> print('%s dtype: %s meta: %s: %s' % (type(t), t.dtype, t.meta, t))
    <class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8')] meta: metadata(iq=-7, qtype=98, name=-11): [('Dent', 98L) ('Beeblebrox', 42L) ('Prefect', 126L)]

    >>> # q: flip `name`iq`fullname!(`Dent`Beeblebrox`Prefect;98 42 126;("Arthur Dent"; "Zaphod Beeblebrox"; "Ford Prefect"))
    >>> t = qtable(('name', 'iq', 'fullname'),
    ...            [qlist(numpy.array(['Dent', 'Beeblebrox', 'Prefect']), qtype = QSYMBOL_LIST),
    ...             qlist(numpy.array([98, 42, 126]), qtype = QLONG_LIST),
    ...             qlist(numpy.array(["Arthur Dent", "Zaphod Beeblebrox", "Ford Prefect"]), qtype = QSTRING_LIST)])
    <class 'qpython.qcollection.QTable'> dtype: [('name', 'S10'), ('iq', '<i8'), ('fullname', 'O')] meta: metadata(iq=-7, fullname=0, qtype=98, name=-11): [('Dent', 98L, 'Arthur Dent') ('Beeblebrox', 42L, 'Zaphod Beeblebrox') ('Prefect', 126L, 'Ford Prefect')]

    :Parameters:
     - `columns` (list of `strings`) - table column names
     - `data` (list of lists) - list of columns containing table data

    :Kwargs:
     - `meta` (`integer`) - qtype for particular column

    :returns: `QTable` - representation of q table

    :raises: `ValueError`
    '''
    # Each column name must have exactly one matching data column.
    if len(columns) != len(data):
        raise ValueError('Number of columns doesn`t match the data layout. %s vs %s' % (len(columns), len(data)))

    # Normalise the kwargs dict and mark the object itself as a q table.
    meta = {} if not meta else meta
    if not 'qtype' in meta:
        meta['qtype'] = QTABLE

    dtypes = []
    for i in range(len(columns)):
        # dtype field names must be str; decode byte column names.
        column_name = columns[i] if isinstance(columns[i], str) else columns[i].decode("utf-8")
        if isinstance(data[i], str):
            # convert character list (represented as string) to numpy representation
            data[i] = numpy.array(list(data[i]), dtype = numpy.string_)
        if isinstance(data[i], bytes):
            data[i] = numpy.array(list(data[i].decode()), dtype = numpy.string_)
        if column_name in meta:
            # Explicit qtype supplied for this column: force the conversion.
            data[i] = qlist(data[i], qtype = meta[column_name])
        elif not isinstance(data[i], QList):
            if type(data[i]) in (list, tuple):
                data[i] = qlist(data[i], qtype = QGENERAL_LIST)
            else:
                # Let qlist infer the qtype from the array's dtype.
                data[i] = qlist(data[i])
        # Record the (possibly inferred) qtype of the column; note this
        # mutates the caller-visible meta dict and data list in place.
        meta[column_name] = data[i].meta.qtype
        dtypes.append((column_name, data[i].dtype))

    # Assemble the record array, re-type it as QTable, attach meta data.
    table = numpy.core.records.fromarrays(data, dtype = dtypes)
    table = table.view(QTable)
    table._meta_init(**meta)
    return table
class QKeyedTable(object):
    '''Represents a q keyed table.

    A :class:`.QKeyedTable` is built from two :class:`.QTable` instances of
    equal length: one holding the key columns and one holding the value
    columns.

    :Parameters:
     - `keys` (`QTable`) - table keys
     - `values` (`QTable`) - table values

    :raises: `ValueError`
    '''

    def __init__(self, keys, values):
        if not isinstance(keys, QTable):
            raise ValueError('Keys array is required to be of type: QTable')
        if not isinstance(values, QTable):
            raise ValueError('Values array is required to be of type: QTable')
        if len(keys) != len(values):
            raise ValueError('Keys and value arrays cannot have different length')
        self.keys = keys
        self.values = values

    def __str__(self, *args, **kwargs):
        return '%s!%s' % (self.keys, self.values)

    def __eq__(self, other):
        if not isinstance(other, QKeyedTable):
            return False
        same_keys = numpy.array_equal(self.keys, other.keys)
        same_values = numpy.array_equal(self.values, other.values)
        return same_keys and same_values

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self.keys)

    def __iter__(self):
        # Iteration walks the key rows, like a plain dictionary.
        return iter(self.keys)

    def items(self):
        '''Return a copy of the keyed table's list of ``(key, value)`` pairs.'''
        return list(zip(self.keys, self.values))

    def iteritems(self):
        '''Return an iterator over the keyed table's ``(key, value)`` pairs.'''
        for key_row, value_row in zip(self.keys, self.values):
            yield (key_row, value_row)

    def iterkeys(self):
        '''Return an iterator over the keyed table's keys.'''
        return iter(self.keys)

    def itervalues(self):
        '''Return an iterator over the keyed table's values.'''
        return iter(self.values)
|
exxeleron/qPython | qpython/qcollection.py | QDictionary.items | python | def items(self):
'''Return a copy of the dictionary's list of ``(key, value)`` pairs.'''
return [(self.keys[x], self.values[x]) for x in range(len(self.keys))] | Return a copy of the dictionary's list of ``(key, value)`` pairs. | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qcollection.py#L269-L271 | null | class QDictionary(object):
'''Represents a q dictionary.
Dictionary examples:
>>> # q: 1 2!`abc`cdefgh
>>> print(QDictionary(qlist(numpy.array([1, 2], dtype=numpy.int64), qtype=QLONG_LIST),
... qlist(numpy.array(['abc', 'cdefgh']), qtype = QSYMBOL_LIST)))
[1 2]!['abc' 'cdefgh']
>>> # q: (1;2h;3.234;"4")!(`one;2 3;"456";(7;8 9))
>>> print(QDictionary([numpy.int64(1), numpy.int16(2), numpy.float64(3.234), '4'],
... [numpy.string_('one'), qlist(numpy.array([2, 3]), qtype=QLONG_LIST), '456', [numpy.int64(7), qlist(numpy.array([8, 9]), qtype=QLONG_LIST)]]))
[1, 2, 3.234, '4']!['one', QList([2, 3], dtype=int64), '456', [7, QList([8, 9], dtype=int64)]]
:Parameters:
- `keys` (`QList`, `tuple` or `list`) - dictionary keys
- `values` (`QList`, `QTable`, `tuple` or `list`) - dictionary values
'''
def __init__(self, keys, values):
if not isinstance(keys, (QList, tuple, list, numpy.ndarray)):
raise ValueError('%s expects keys to be of type: QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(keys)))
if not isinstance(values, (QTable, QList, tuple, list, numpy.ndarray)):
raise ValueError('%s expects values to be of type: QTable, QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(values)))
if len(keys) != len(values):
raise ValueError('Number of keys: %d doesn`t match number of values: %d' % (len(keys), len(values)))
self.keys = keys
self.values = values
def __str__(self, *args, **kwargs):
return '%s!%s' % (self.keys, self.values)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
idx = 0
for key in self.keys:
if key != other.keys[idx] or self.values[idx] != other.values[idx]:
return False
idx += 1
return True
def __ne__(self, other):
return not self.__eq__(other)
def _find_key_(self, key):
idx = 0
for k in self.keys:
if key == k:
return idx
idx += 1
raise KeyError('QDictionary doesn`t contain key: %s' % key)
def __getitem__(self, key):
return self.values[self._find_key_(key)]
def __setitem__(self, key, value):
self.values[self._find_key_(key)] = value
def __len__(self):
return len(self.keys)
def __iter__(self):
return iter(self.keys)
def iteritems(self):
'''Return an iterator over the dictionary's ``(key, value)`` pairs.'''
for x in range(len(self.keys)):
yield (self.keys[x], self.values[x])
def iterkeys(self):
'''Return an iterator over the dictionary's keys.'''
return iter(self.keys)
def itervalues(self):
'''Return an iterator over the dictionary's values.'''
return iter(self.values)
|
exxeleron/qPython | qpython/qcollection.py | QDictionary.iteritems | python | def iteritems(self):
'''Return an iterator over the dictionary's ``(key, value)`` pairs.'''
for x in range(len(self.keys)):
yield (self.keys[x], self.values[x]) | Return an iterator over the dictionary's ``(key, value)`` pairs. | train | https://github.com/exxeleron/qPython/blob/7e64a28b1e8814a8d6b9217ce79bb8de546e62f3/qpython/qcollection.py#L273-L276 | null | class QDictionary(object):
'''Represents a q dictionary.
Dictionary examples:
>>> # q: 1 2!`abc`cdefgh
>>> print(QDictionary(qlist(numpy.array([1, 2], dtype=numpy.int64), qtype=QLONG_LIST),
... qlist(numpy.array(['abc', 'cdefgh']), qtype = QSYMBOL_LIST)))
[1 2]!['abc' 'cdefgh']
>>> # q: (1;2h;3.234;"4")!(`one;2 3;"456";(7;8 9))
>>> print(QDictionary([numpy.int64(1), numpy.int16(2), numpy.float64(3.234), '4'],
... [numpy.string_('one'), qlist(numpy.array([2, 3]), qtype=QLONG_LIST), '456', [numpy.int64(7), qlist(numpy.array([8, 9]), qtype=QLONG_LIST)]]))
[1, 2, 3.234, '4']!['one', QList([2, 3], dtype=int64), '456', [7, QList([8, 9], dtype=int64)]]
:Parameters:
- `keys` (`QList`, `tuple` or `list`) - dictionary keys
- `values` (`QList`, `QTable`, `tuple` or `list`) - dictionary values
'''
def __init__(self, keys, values):
if not isinstance(keys, (QList, tuple, list, numpy.ndarray)):
raise ValueError('%s expects keys to be of type: QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(keys)))
if not isinstance(values, (QTable, QList, tuple, list, numpy.ndarray)):
raise ValueError('%s expects values to be of type: QTable, QList, tuple or list. Actual type: %s' % (self.__class__.__name__, type(values)))
if len(keys) != len(values):
raise ValueError('Number of keys: %d doesn`t match number of values: %d' % (len(keys), len(values)))
self.keys = keys
self.values = values
def __str__(self, *args, **kwargs):
return '%s!%s' % (self.keys, self.values)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
idx = 0
for key in self.keys:
if key != other.keys[idx] or self.values[idx] != other.values[idx]:
return False
idx += 1
return True
def __ne__(self, other):
return not self.__eq__(other)
def _find_key_(self, key):
idx = 0
for k in self.keys:
if key == k:
return idx
idx += 1
raise KeyError('QDictionary doesn`t contain key: %s' % key)
def __getitem__(self, key):
return self.values[self._find_key_(key)]
def __setitem__(self, key, value):
self.values[self._find_key_(key)] = value
def __len__(self):
return len(self.keys)
def __iter__(self):
return iter(self.keys)
def items(self):
'''Return a copy of the dictionary's list of ``(key, value)`` pairs.'''
return [(self.keys[x], self.values[x]) for x in range(len(self.keys))]
def iterkeys(self):
'''Return an iterator over the dictionary's keys.'''
return iter(self.keys)
def itervalues(self):
'''Return an iterator over the dictionary's values.'''
return iter(self.values)
|
masfaraud/BMSpy | bms/core.py | Load | python | def Load(file):
with open(file, 'rb') as file:
model = dill.load(file)
return model | Loads a model from specified file | train | https://github.com/masfaraud/BMSpy/blob/5ac6b9539c1141dd955560afb532e6b915b77bdc/bms/core.py#L555-L559 | null | # -*- coding: utf-8 -*-
"""
Core of BMS. All content of this file is imported by bms, and is therefore in bms
This file defines the base of BMS.
"""
import numpy as np
#import numpy.random
import matplotlib.pyplot as plt
#import math
import networkx as nx
import dill
from scipy.optimize import fsolve, root, minimize
#import cma
class Variable:
    """ Defines a variable

    :param names: Defines full name and short name.
                  If names is a string the two names will be identical,
                  otherwise names should be a tuple of strings
                  (full_name, short_name)
    :param initial_values: list whose first element seeds the value
                           history (defaults to [0])
    :param hidden: inner variable to hide in plots if true
    """

    def __init__(self, names='variable', initial_values=None, hidden=False):
        if isinstance(names, str):
            self.name = names
            self.short_name = names
        else:
            try:
                self.short_name = names[1]
                self.name = names[0]
            except (TypeError, IndexError, KeyError) as err:
                # Narrowed from a bare except; still raises TypeError so
                # existing callers keep working, but now with a message.
                raise TypeError(
                    'names must be a string or a (full_name, short_name) pair'
                ) from err
        # Avoid the mutable-default-argument pitfall: build a fresh list per
        # instance when the caller did not supply one.
        self.initial_values = [0] if initial_values is None else initial_values
        self._values = np.array([])
        self.max_order = 0
        self.hidden = hidden

    def _InitValues(self, ns, ts, max_order):
        """Allocate the value history for a simulation of ``ns`` steps.

        The array holds ``max_order + 1`` extra leading slots for past
        values needed by blocks with history/derivative terms.
        """
        self.max_order = max_order
        self._values = self.initial_values[0]*np.ones(ns+max_order+1)
        self._ForwardValues()

    def _ForwardValues(self):
        # Hook for subclasses that must propagate derivative conditions.
        pass

    def _get_values(self):
        # Public view of the history: skip the max_order padding slots.
        return self._values[self.max_order:]

    values = property(_get_values)
class Signal(Variable):
    """ Abstract class of signal.

    Subclasses must provide ``self.function(t)`` returning the signal value
    at time ``t``; it is sampled over the whole simulation horizon by
    :meth:`_InitValues`.

    :param names: full name, or a (full_name, short_name) tuple
    """

    def __init__(self, names):
        if isinstance(names, str):
            self.name = names
            self.short_name = names
        else:
            try:
                self.short_name = names[1]
                self.name = names[0]
            except (TypeError, IndexError, KeyError) as err:
                # Narrowed from a bare except; still raises TypeError for
                # backward compatibility, but now with a message.
                raise TypeError(
                    'names must be a string or a (full_name, short_name) pair'
                ) from err
        self._values = np.array([])
        self.max_order = 0
        # Signals are never hidden from plots.
        self.hidden = False

    def _InitValues(self, ns, ts, max_order):
        """Sample ``self.function`` on the ns+1 time steps of size ts."""
        self.max_order = max_order
        self._values = np.zeros(ns+max_order+1)
        for i in range(ns+1):
            self._values[i+max_order] = self.function(i*ts)
        self._ForwardValues()
        self.initial_values = [self._values[0]]

    def _ForwardValues(self):
        """
        Implementation for problems with derivative conditions on variables
        """
        pass
class Block:
    """ Abstract class of block: this class should not be instantiated directly.

    :param inputs: input variables of the block
    :param outputs: output variables of the block
    :param max_input_order: number of past input steps the block needs
    :param max_output_order: number of past output steps the block needs

    Subclasses must implement ``Evaluate(it, ts)`` returning one value per
    output variable for time step ``it``.
    """

    def __init__(self, inputs, outputs, max_input_order, max_output_order):
        self.inputs = []
        self.outputs = []
        self.n_inputs = len(inputs)
        self.n_outputs = len(outputs)
        for variable in inputs:
            self._AddInput(variable)
        for variable in outputs:
            self._AddOutput(variable)
        self.max_input_order = max_input_order
        self.max_output_order = max_output_order
        # Overall history depth required when allocating variable storage.
        self.max_order = max(self.max_input_order, self.max_output_order)

    def _AddInput(self, variable):
        """
        Add one more variable as an input of the block

        :param variable: variable (or signal as it is also a variable)
        :raises TypeError: if ``variable`` is not a :class:`Variable`
        """
        if isinstance(variable, Variable):
            self.inputs.append(variable)
        else:
            # Raise with a message instead of print-then-raise: the old
            # print accessed variable.name and could itself fail with
            # AttributeError on arbitrary objects.
            raise TypeError('Block input must be a Variable, got %r' % (variable,))

    def _AddOutput(self, variable):
        """
        Add one more variable as an output of the block

        :param variable: variable (or signal as it is also a variable)
        :raises TypeError: if ``variable`` is not a :class:`Variable`
        """
        if isinstance(variable, Variable):
            self.outputs.append(variable)
        else:
            raise TypeError('Block output must be a Variable, got %r' % (variable,))

    def InputValues(self, it, nsteps=None):
        """
        Returns the input values at a given iteration for solving the block outputs

        :param it: absolute index into the (history-padded) value arrays
        :param nsteps: number of past steps to return; defaults to
                       ``max_input_order``
        :returns: array of shape (n_inputs, nsteps), newest value last
        """
        if nsteps is None:
            nsteps = self.max_input_order
        I = np.zeros((self.n_inputs, nsteps))
        for iv, variable in enumerate(self.inputs):
            # Window includes the current step ``it`` itself.
            I[iv, :] = variable._values[it-nsteps+1:it+1]
        return I

    def OutputValues(self, it, nsteps=None):
        """
        Returns past output values as an (n_outputs, nsteps) array.

        Unlike :meth:`InputValues`, the window stops just before ``it``.
        """
        if nsteps is None:
            nsteps = self.max_output_order
        O = np.zeros((self.n_outputs, nsteps))
        for iv, variable in enumerate(self.outputs):
            O[iv, :] = variable._values[it-nsteps:it]
        return O

    def Solve(self, it, ts):
        """Evaluate the block at step ``it`` and store each output value."""
        step_outputs = self.Evaluate(it, ts)
        for i, step_output in enumerate(step_outputs):
            self.outputs[i]._values[it] = step_output
class ModelError(Exception):
    """Raised when the model cannot be solved (over- or under-constrained)."""
    def __init__(self, message):
        # message: human-readable description of the model inconsistency
        self.message = message
    def __str__(self):
        return 'Model Error: '+self.message
class DynamicSystem:
"""
Defines a dynamic system that can simulate itself
:param te: time of simulation's end
:param ns: number of steps
:param blocks: (optional) list of blocks defining the model
"""
def __init__(self, te, ns, blocks=[]):
self.te = te
self.ns = ns
self.ts = self.te/float(self.ns) # time step
self.t = np.linspace(0, self.te, num=ns+1) # Time vector
self.blocks = []
self.variables = []
self.signals = []
self.max_order = 0
for block in blocks:
self.AddBlock(block)
self._utd_graph = False # True if graph is up-to-date
def AddBlock(self, block):
"""
Add the given block to the model and also its input/output variables
"""
if isinstance(block, Block):
self.blocks.append(block)
self.max_order = max(self.max_order, block.max_input_order-1)
self.max_order = max(self.max_order, block.max_output_order)
for variable in block.inputs+block.outputs:
self._AddVariable(variable)
else:
print(block)
raise TypeError
self._utd_graph = False
def _AddVariable(self, variable):
"""
Add a variable to the model. Should not be used by end-user
"""
if isinstance(variable, Signal):
if not variable in self.signals:
self.signals.append(variable)
elif isinstance(variable, Variable):
if not variable in self.variables:
self.variables.append(variable)
else:
raise TypeError
self._utd_graph = False
def _get_Graph(self):
if not self._utd_graph:
# Generate graph
self._graph = nx.DiGraph()
for variable in self.variables:
self._graph.add_node(variable, bipartite=0)
for block in self.blocks:
self._graph.add_node(block, bipartite=1)
for variable in block.inputs:
self._graph.add_edge(variable, block)
for variable in block.outputs:
self._graph.add_edge(block, variable)
self._utd_graph = True
return self._graph
graph = property(_get_Graph)
def _ResolutionOrder(self, variables_to_solve):
"""
return a list of lists of tuples (block,output,ndof) to be solved
"""
# Gp=nx.DiGraph()
#
# for i in range(nvar):
# Gp.add_node('v'+str(i),bipartite=0)
#
# for i in range(neq):
# Gp.add_node('e'+str(i),bipartite=1)
# for j in range(nvar):
# if Mo[i,j]==1:
# Gp.add_edge('e'+str(i),'v'+str(j))
Gp = nx.DiGraph()
for variable in self.variables:
Gp.add_node(variable, bipartite=0)
for block in self.blocks:
for iov, output_variable in enumerate(block.outputs):
Gp.add_node((block, iov), bipartite=1)
Gp.add_edge((block, iov), output_variable)
Gp.add_edge(output_variable, (block, iov))
for input_variable in block.inputs:
if not isinstance(input_variable, Signal):
Gp.add_edge(input_variable, (block, iov))
# for n1,n2 in M.items():
# Gp.add_edge(n1,n2)
sinks = []
sources = []
for node in Gp.nodes():
if Gp.out_degree(node) == 0:
sinks.append(node)
elif Gp.in_degree(node) == 0:
sources.append(node)
G2 = sources[:]
for node in sources:
for node2 in nx.descendants(Gp, node):
if node2 not in G2:
G2.append(node2)
if G2 != []:
print(G2)
raise ModelError('Overconstrained variables')
G3 = sinks[:]
for node in sinks:
for node2 in nx.ancestors(Gp, node):
if node2 not in G3:
G3.append(node2)
if G3 != []:
raise ModelError('Underconstrained variables')
# vars_resolvables=[]
# for var in vars_resoudre:
# if not 'v'+str(var) in G2+G3:
# vars_resolvables.append(var)
# G1=Gp.copy()
# G1.remove_nodes_from(G2+G3)
#
# M1=nx.bipartite.maximum_matching(G1)
# G1p=nx.DiGraph()
#
# G1p.add_nodes_from(G1.nodes())
# for e in G1.edges():
# # equation vers variable
# if e[0][0]=='v':
# G1p.add_edge(e[0],e[1])
# else:
# G1p.add_edge(e[1],e[0])
# # print(len(M))
# for n1,n2 in M1.items():
# # print(n1,n2)
# if n1[0]=='e':
# G1p.add_edge(n1,n2)
# else:
# G1p.add_edge(n2,n1)
scc = list(nx.strongly_connected_components(Gp))
# pos=nx.spring_layout(G1p)
# plt.figure()
# nx.draw(G1p,pos)
# nx.draw_networkx_labels(G1p,pos)
# print(scc)
if scc != []:
C = nx.condensation(Gp, scc)
isc_vars = []
for isc, sc in enumerate(scc):
for var in variables_to_solve:
if var in sc:
isc_vars.append(isc)
break
ancestors_vars = isc_vars[:]
for isc_var in isc_vars:
for ancetre in nx.ancestors(C, isc_var):
if ancetre not in ancestors_vars:
ancestors_vars.append(ancetre)
order_sc = [sc for sc in nx.topological_sort(
C) if sc in ancestors_vars]
order_ev = []
for isc in order_sc:
# liste d'équations et de variables triées pour être séparées
evs = list(scc[isc])
# print(evs)
# levs=int(len(evs)/2)
eqs = []
var = []
for element in evs:
if type(element) == tuple:
eqs.append(element)
else:
var.append(element)
order_ev.append((len(eqs), eqs, var))
return order_ev
raise ModelError
def Simulate(self, variables_to_solve=None):
if variables_to_solve == None:
variables_to_solve = [
variable for variable in self.variables if not variable.hidden]
order = self._ResolutionOrder(variables_to_solve)
# Initialisation of variables values
for variable in self.variables+self.signals:
variable._InitValues(self.ns, self.ts, self.max_order)
# ==============================================================================
# Enhancement to do: defining functions out of loop (copy args)s
# ==============================================================================
# print(order)
residue = []
for it, t in enumerate(self.t[1:]):
for neqs, equations, variables in order:
if neqs == 1:
equations[0][0].Solve(it+self.max_order+1, self.ts)
else:
# x0=np.zeros(neqs)
x0 = [equations[i][0].outputs[equations[i][1]]._values[it +
self.max_order] for i in range(len(equations))]
# print('===========')
def r(x, equations=equations[:]):
# Writing variables values proposed by optimizer
for i, xi in enumerate(x):
equations[i][0].outputs[equations[i][1]
]._values[it+self.max_order+1] = xi
# Computing regrets
r = []
# s=0
for ieq, (block, neq) in enumerate(equations):
# print(block,it)
# print(block.Evaluate(it+self.max_order+1,self.ts).shape)
# print(block.Evaluate(it+self.max_order+1,self.ts),block)
r.append(x[ieq]-block.Evaluate(it +
self.max_order+1, self.ts)[neq])
# print(block)
# print('xproposed:',x[ieq])
# print('block eval',block.Evaluate(it+self.max_order+1,self.ts)[neq])
# print('value', x[ieq]-block.Evaluate(it+self.max_order+1,self.ts)[neq])
# s+=abs(x[ieq]-block.Evaluate(it+self.max_order+1,self.ts)[neq])
# print(x[ieq],block.Evaluate(it+self.max_order+1,self.ts)[neq])
return r
def f(x, equations=equations[:]):
# Writing variables values proposed by optimizer
for i, xi in enumerate(x):
equations[i][0].outputs[equations[i][1]
]._values[it+self.max_order+1] = xi
# Computing regrets
# r=[]
s = 0
for ieq, (block, neq) in enumerate(equations):
# print(block,it)
# print(block.Evaluate(it+self.max_order+1,self.ts).shape)
# print(block.Evaluate(it+self.max_order+1,self.ts),block)
# r.append(x[ieq]-block.Evaluate(it+self.max_order+1,self.ts)[neq])
# print(block)
s += abs(x[ieq]-block.Evaluate(it +
self.max_order+1, self.ts)[neq])
# print(x[ieq],block.Evaluate(it+self.max_order+1,self.ts)[neq])
# return r
# print(s)
return s
x, d, i, m = fsolve(r, x0, full_output=True)
# res=root(f,x0,method='anderson')
# x=res.x
# res=minimize(f,x0,method='powell')
# if res.fun>1e-3:
# x0=[equations[i][0].outputs[equations[i][1]]._values[it+self.max_order] for i in range(len(equations))]
# x0+=np.random.random(len(equations))
# print('restart')
# res=minimize(f,x0,method='powell')
#
# residue.append(f(res.x))
# print(r(x),i)
# print(f(res.x),res.fun)
# f(x)
# print(r)
# if i!=1:
# print(equations)
# print(i,r(x))
# options={'tolfun':1e-3,'verbose':-9,'ftarget':1e-3}
# res=cma.fmin(f,x0,1,options=options)
# print(f(res[0]),r(res[0]))
# print(equations)
#
# print(m)
# if res.fun>1e-3:
# print('fail',res.fun)
# options={'tolfun':1e-3,'verbose':-9}
# res=cma.fmin(f,x0,1,options=options)
# else:
# print('ok')
# return residue
def VariablesValues(self, variables, t):
"""
Returns the value of given variables at time t.
Linear interpolation is performed between two time steps.
:param variables: one variable or a list of variables
:param t: time of evaluation
"""
# TODO: put interpolation in variables
if (t < self.te) | (t > 0):
i = t//self.ts # time step
ti = self.ts*i
if type(variables) == list:
values = []
for variable in variables:
# interpolation
values.append(
variables.values[i]*((ti-t)/self.ts+1)+variables.values[i+1]*(t-ti)/self.ts)
return values
else:
# interpolation
return variables.values[i]*((ti-t)/self.ts+1)+variables.values[i+1]*(t-ti)/self.ts
else:
raise ValueError
def PlotVariables(self, subplots_variables=None):
if subplots_variables == None:
subplots_variables = [self.signals+self.variables]
subplots_variables = [
[variable for variable in self.signals+self.variables if not variable.hidden]]
# plt.figure()
fig, axs = plt.subplots(len(subplots_variables), sharex=True)
if len(subplots_variables) == 1:
axs = [axs]
for isub, subplot in enumerate(subplots_variables):
legend = []
for variable in subplot:
axs[isub].plot(self.t, variable.values)
legend.append(variable.name)
axs[isub].legend(legend, loc='best')
axs[isub].margins(0.08)
axs[isub].grid()
plt.xlabel('Time')
plt.show()
def DrawModel(self):
from .interface import ModelDrawer
ModelDrawer(self)
def Save(self, name_file):
"""
name_file: name of the file without extension.
The extension .bms is added by function
"""
with open(name_file+'.bms', 'wb') as file:
model = dill.dump(self, file)
def __getstate__(self):
dic = self.__dict__.copy()
return dic
def __setstate__(self, dic):
self.__dict__ = dic
class PhysicalNode:
    """
    Abstract class representing a node of a physical network.

    :param cl_solves_potential: if True, the node's conservative-law
        equation is linked to (solves for) its potential variable
    :param cl_solves_fluxes: if True, the node's conservative-law equation
        relates the fluxes brought to the node by connected blocks
    :param node_name: name of the node
    :param potential_variable_name: name prefix for the node's potential
        variable
    :param flux_variable_name: name prefix used by blocks for flux
        variables attached to this node
    """
    def __init__(self, cl_solves_potential, cl_solves_fluxes, node_name, potential_variable_name, flux_variable_name):
        self.cl_solves_potential = cl_solves_potential
        self.cl_solves_fluxes = cl_solves_fluxes
        self.name = node_name
        self.potential_variable_name = potential_variable_name
        self.flux_variable_name = flux_variable_name
        # One potential variable per node, e.g. "<potential_name> <node_name>".
        self.variable = Variable(potential_variable_name+' '+node_name)
class PhysicalBlock:
    """
    Abstract class to inherit when coding a physical block.

    :param physical_nodes: list of :class:`PhysicalNode` the block connects to
    :param nodes_with_fluxes: indices (into ``physical_nodes``) of the nodes
        the block exchanges a flux with
    :param occurence_matrix: equation/variable occurrence matrix — one row per
        block equation; even columns map to node potential variables, odd
        columns to the block's flux variables (see how it is consumed in
        ``PhysicalSystem.GenerateDynamicSystem``)
    :param commands: command variables of the block
    :param name: name of the block
    """
    def __init__(self, physical_nodes, nodes_with_fluxes, occurence_matrix, commands, name):
        self.physical_nodes = physical_nodes
        self.name = name
        self.nodes_with_fluxes = nodes_with_fluxes
        self.occurence_matrix = occurence_matrix
        self.commands = commands
        # One flux variable per connected node: "<flux> from <node> to <block>".
        self.variables = [Variable(physical_nodes[inode].flux_variable_name+' from ' +
                                   physical_nodes[inode].name+' to '+self.name) for inode in nodes_with_fluxes]
class PhysicalSystem:
"""
Defines a physical system
"""
def __init__(self, te, ns, physical_blocks, command_blocks):
self.te = te
self.ns = ns
self.physical_blocks = []
self.physical_nodes = []
self.variables = []
self.command_blocks = []
for block in physical_blocks:
self.AddPhysicalBlock(block)
for block in command_blocks:
self.AddCommandBlock(block)
self._utd_ds = False
def AddPhysicalBlock(self, block):
if isinstance(block, PhysicalBlock):
self.physical_blocks.append(block)
for node in block.physical_nodes:
self._AddPhysicalNode(node)
else:
raise TypeError
self._utd_ds = False
def _AddPhysicalNode(self, node):
if isinstance(node, PhysicalNode):
if not node in self.physical_nodes:
self.physical_nodes.append(node)
self._utd_ds = False
def AddCommandBlock(self, block):
if isinstance(block, Block):
self.command_blocks.append(block)
for variable in block.inputs+block.outputs:
self._AddVariable(variable)
else:
raise TypeError
self._utd_ds = False
def _AddVariable(self, variable):
if isinstance(variable, Variable):
if not variable in self.variables:
self.variables.append(variable)
self._utd_ds = False
def GenerateDynamicSystem(self):
# from bms.blocks.continuous import WeightedSum
G = nx.Graph()
# variables={}
# Adding node variables
for node in self.physical_nodes:
G.add_node(node.variable, bipartite=0)
for block in self.physical_blocks:
# print(block.variables)
for variable in block.variables:
# add variable realted to connection
G.add_node(node.variable, bipartite=0)
ne, nv = block.occurence_matrix.shape
# print(block,block.occurence_matrix)
# Add equations of blocs
for ie in range(ne):
G.add_node((block, ie), bipartite=1)
for iv in range(nv):
# print(iv)
if block.occurence_matrix[ie, iv] == 1:
if iv % 2 == 0:
G.add_edge(
(block, ie), block.physical_nodes[iv//2].variable)
else:
G.add_edge((block, ie), block.variables[iv//2])
# Adding equation of physical nodes: conservative law of node from its occurence matrix
# Restricted to nodes to which are brought fluxes
for node in self.physical_nodes:
# linking conservative equation of flux to potential if
if node.cl_solves_potential:
G.add_edge(node, node.variable)
if node.cl_solves_fluxes:
# Linking fluxes of node
G.add_node(node, bipartite=1)
for block in self.physical_blocks:
for inb in block.nodes_with_fluxes:
# print(block,block.physical_nodes[inb].name)
node_block = block.physical_nodes[inb]
if node == node_block:
G.add_edge(
node, block.variables[block.nodes_with_fluxes.index(inb)])
# print(node_block,block.variables[inb].name)
# # Draw graph for debug
# pos=nx.spring_layout(G)
# nx.draw(G,pos)
# names={}
# for node in G.nodes():
# if type(node)==tuple:
# names[node]=(node[0].name,node[1])
# else:
# names[node]=node.name
# nx.draw_networkx_labels(G,pos,names)
# plt.show()
G2 = nx.DiGraph()
G2.add_nodes_from(G)
eq_out_var = {}
for e in nx.bipartite.maximum_matching(G).items():
# print(e[0].__class__.__name__)
# eq -> variable
if e[0].__class__.__name__ == 'Variable':
G2.add_edge(e[1], e[0])
eq_out_var[e[1]] = e[0]
else:
G2.add_edge(e[0], e[1])
eq_out_var[e[0]] = e[1]
for e in G.edges():
if e[0].__class__.__name__ == 'Variable':
G2.add_edge(e[0], e[1])
else:
G2.add_edge(e[1], e[0])
# print('@@@@@@@@@@@@@@@@@@@@@@@@')
sinks = []
sources = []
for node in G2.nodes():
if G2.out_degree(node) == 0:
sinks.append(node)
elif G2.in_degree(node) == 0:
sources.append(node)
# print(sinks,sources)
if sinks != []:
print(sinks)
raise ModelError
if sources != []:
print(sources)
raise ModelError
# Model is solvable: it must say to equations of blocks which is their
# output variable
# print(eq_out_var)
model_blocks = []
for block_node, variable in eq_out_var.items():
# print(block_node,variable)
if type(block_node) == tuple:
# Blocks writes an equation
model_blocks.extend(
block_node[0].PartialDynamicSystem(block_node[1], variable))
else:
# Sum of incomming variables at nodes
# searching attached nodes
variables = []
for block in self.physical_blocks:
try:
ibn = block.physical_nodes.index(block_node)
if ibn in block.nodes_with_fluxes:
variable2 = block.variables[ibn]
if variable2 != variable:
variables.append(variable2)
except ValueError:
pass
# model_blocks.append(WeightedSum(variables,variable,[-1]*len(variables)))
model_blocks.extend(
block_node.ConservativeLaw(variables, variable))
# model_blocks.append(Gain(v1,variable,-1))
model_blocks.extend(self.command_blocks)
return DynamicSystem(self.te, self.ns, model_blocks)
def _get_ds(self):
if not self._utd_ds:
self._dynamic_system = self.GenerateDynamicSystem()
self._utd_ds = True
return self._dynamic_system
dynamic_system = property(_get_ds)
def Simulate(self):
self.dynamic_system.Simulate()
|
masfaraud/BMSpy | bms/core.py | Block._AddInput | python | def _AddInput(self, variable):
if isinstance(variable, Variable):
self.inputs.append(variable)
else:
print('Error: ', variable.name, variable,
' given is not a variable')
raise TypeError | Add one more variable as an input of the block
:param variable: variable (or signal as it is also a variable) | train | https://github.com/masfaraud/BMSpy/blob/5ac6b9539c1141dd955560afb532e6b915b77bdc/bms/core.py#L118-L129 | null | class Block:
""" Abstract class of block: this class should not be instanciate directly
"""
def __init__(self, inputs, outputs, max_input_order, max_output_order):
self.inputs = []
self.outputs = []
self.n_inputs = len(inputs)
self.n_outputs = len(outputs)
for variable in inputs:
self._AddInput(variable)
for variable in outputs:
self._AddOutput(variable)
# self.input_orders=
# self.output_orders=output_orders
self.max_input_order = max_input_order
self.max_output_order = max_output_order
self.max_order = max(self.max_input_order, self.max_output_order)
def _AddOutput(self, variable):
"""
Add one more variable as an output of the block
:param variable: variable (or signal as it is also a variable)
"""
if isinstance(variable, Variable):
self.outputs.append(variable)
else:
print(variable)
raise TypeError
def InputValues(self, it, nsteps=None):
"""
Returns the input values at a given iteration for solving the block outputs
"""
if nsteps == None:
nsteps = self.max_input_order
# print(self,it)
# Provides values in inputs values for computing at iteration it
I = np.zeros((self.n_inputs, nsteps))
for iv, variable in enumerate(self.inputs):
# print(it-self.max_input_order+1,it+1)
# print(variable._values[it-self.max_input_order+1:it+1])
I[iv, :] = variable._values[it-nsteps+1:it+1]
return I
def OutputValues(self, it, nsteps=None):
# Provides values in inputs values for computing at iteration it
if nsteps == None:
nsteps = self.max_output_order
O = np.zeros((self.n_outputs, nsteps))
for iv, variable in enumerate(self.outputs):
O[iv, :] = variable._values[it-nsteps:it]
return O
def Solve(self, it, ts):
step_outputs = self.Evaluate(it, ts)
for i, step_output in enumerate(step_outputs):
self.outputs[i]._values[it] = step_output
|
masfaraud/BMSpy | bms/core.py | Block._AddOutput | python | def _AddOutput(self, variable):
if isinstance(variable, Variable):
self.outputs.append(variable)
else:
print(variable)
raise TypeError | Add one more variable as an output of the block
:param variable: variable (or signal as it is also a variable) | train | https://github.com/masfaraud/BMSpy/blob/5ac6b9539c1141dd955560afb532e6b915b77bdc/bms/core.py#L131-L141 | null | class Block:
""" Abstract class of block: this class should not be instanciate directly
"""
def __init__(self, inputs, outputs, max_input_order, max_output_order):
self.inputs = []
self.outputs = []
self.n_inputs = len(inputs)
self.n_outputs = len(outputs)
for variable in inputs:
self._AddInput(variable)
for variable in outputs:
self._AddOutput(variable)
# self.input_orders=
# self.output_orders=output_orders
self.max_input_order = max_input_order
self.max_output_order = max_output_order
self.max_order = max(self.max_input_order, self.max_output_order)
def _AddInput(self, variable):
"""
Add one more variable as an input of the block
:param variable: variable (or signal as it is also a variable)
"""
if isinstance(variable, Variable):
self.inputs.append(variable)
else:
print('Error: ', variable.name, variable,
' given is not a variable')
raise TypeError
def InputValues(self, it, nsteps=None):
"""
Returns the input values at a given iteration for solving the block outputs
"""
if nsteps == None:
nsteps = self.max_input_order
# print(self,it)
# Provides values in inputs values for computing at iteration it
I = np.zeros((self.n_inputs, nsteps))
for iv, variable in enumerate(self.inputs):
# print(it-self.max_input_order+1,it+1)
# print(variable._values[it-self.max_input_order+1:it+1])
I[iv, :] = variable._values[it-nsteps+1:it+1]
return I
def OutputValues(self, it, nsteps=None):
# Provides values in inputs values for computing at iteration it
if nsteps == None:
nsteps = self.max_output_order
O = np.zeros((self.n_outputs, nsteps))
for iv, variable in enumerate(self.outputs):
O[iv, :] = variable._values[it-nsteps:it]
return O
def Solve(self, it, ts):
step_outputs = self.Evaluate(it, ts)
for i, step_output in enumerate(step_outputs):
self.outputs[i]._values[it] = step_output
|
masfaraud/BMSpy | bms/core.py | Block.InputValues | python | def InputValues(self, it, nsteps=None):
if nsteps == None:
nsteps = self.max_input_order
# print(self,it)
# Provides values in inputs values for computing at iteration it
I = np.zeros((self.n_inputs, nsteps))
for iv, variable in enumerate(self.inputs):
# print(it-self.max_input_order+1,it+1)
# print(variable._values[it-self.max_input_order+1:it+1])
I[iv, :] = variable._values[it-nsteps+1:it+1]
return I | Returns the input values at a given iteration for solving the block outputs | train | https://github.com/masfaraud/BMSpy/blob/5ac6b9539c1141dd955560afb532e6b915b77bdc/bms/core.py#L143-L156 | null | class Block:
""" Abstract class of block: this class should not be instanciate directly
"""
def __init__(self, inputs, outputs, max_input_order, max_output_order):
self.inputs = []
self.outputs = []
self.n_inputs = len(inputs)
self.n_outputs = len(outputs)
for variable in inputs:
self._AddInput(variable)
for variable in outputs:
self._AddOutput(variable)
# self.input_orders=
# self.output_orders=output_orders
self.max_input_order = max_input_order
self.max_output_order = max_output_order
self.max_order = max(self.max_input_order, self.max_output_order)
def _AddInput(self, variable):
"""
Add one more variable as an input of the block
:param variable: variable (or signal as it is also a variable)
"""
if isinstance(variable, Variable):
self.inputs.append(variable)
else:
print('Error: ', variable.name, variable,
' given is not a variable')
raise TypeError
def _AddOutput(self, variable):
"""
Add one more variable as an output of the block
:param variable: variable (or signal as it is also a variable)
"""
if isinstance(variable, Variable):
self.outputs.append(variable)
else:
print(variable)
raise TypeError
def OutputValues(self, it, nsteps=None):
# Provides values in inputs values for computing at iteration it
if nsteps == None:
nsteps = self.max_output_order
O = np.zeros((self.n_outputs, nsteps))
for iv, variable in enumerate(self.outputs):
O[iv, :] = variable._values[it-nsteps:it]
return O
def Solve(self, it, ts):
step_outputs = self.Evaluate(it, ts)
for i, step_output in enumerate(step_outputs):
self.outputs[i]._values[it] = step_output
|
masfaraud/BMSpy | bms/core.py | DynamicSystem.AddBlock | python | def AddBlock(self, block):
if isinstance(block, Block):
self.blocks.append(block)
self.max_order = max(self.max_order, block.max_input_order-1)
self.max_order = max(self.max_order, block.max_output_order)
for variable in block.inputs+block.outputs:
self._AddVariable(variable)
else:
print(block)
raise TypeError
self._utd_graph = False | Add the given block to the model and also its input/output variables | train | https://github.com/masfaraud/BMSpy/blob/5ac6b9539c1141dd955560afb532e6b915b77bdc/bms/core.py#L207-L220 | [
"def _AddVariable(self, variable):\n \"\"\"\n Add a variable to the model. Should not be used by end-user\n \"\"\"\n if isinstance(variable, Signal):\n if not variable in self.signals:\n self.signals.append(variable)\n elif isinstance(variable, Variable):\n if not variable in self.variables:\n self.variables.append(variable)\n else:\n raise TypeError\n self._utd_graph = False\n"
] | class DynamicSystem:
"""
Defines a dynamic system that can simulate itself
:param te: time of simulation's end
:param ns: number of steps
:param blocks: (optional) list of blocks defining the model
"""
def __init__(self, te, ns, blocks=[]):
self.te = te
self.ns = ns
self.ts = self.te/float(self.ns) # time step
self.t = np.linspace(0, self.te, num=ns+1) # Time vector
self.blocks = []
self.variables = []
self.signals = []
self.max_order = 0
for block in blocks:
self.AddBlock(block)
self._utd_graph = False # True if graph is up-to-date
def _AddVariable(self, variable):
"""
Add a variable to the model. Should not be used by end-user
"""
if isinstance(variable, Signal):
if not variable in self.signals:
self.signals.append(variable)
elif isinstance(variable, Variable):
if not variable in self.variables:
self.variables.append(variable)
else:
raise TypeError
self._utd_graph = False
def _get_Graph(self):
if not self._utd_graph:
# Generate graph
self._graph = nx.DiGraph()
for variable in self.variables:
self._graph.add_node(variable, bipartite=0)
for block in self.blocks:
self._graph.add_node(block, bipartite=1)
for variable in block.inputs:
self._graph.add_edge(variable, block)
for variable in block.outputs:
self._graph.add_edge(block, variable)
self._utd_graph = True
return self._graph
graph = property(_get_Graph)
def _ResolutionOrder(self, variables_to_solve):
"""
return a list of lists of tuples (block,output,ndof) to be solved
"""
# Gp=nx.DiGraph()
#
# for i in range(nvar):
# Gp.add_node('v'+str(i),bipartite=0)
#
# for i in range(neq):
# Gp.add_node('e'+str(i),bipartite=1)
# for j in range(nvar):
# if Mo[i,j]==1:
# Gp.add_edge('e'+str(i),'v'+str(j))
Gp = nx.DiGraph()
for variable in self.variables:
Gp.add_node(variable, bipartite=0)
for block in self.blocks:
for iov, output_variable in enumerate(block.outputs):
Gp.add_node((block, iov), bipartite=1)
Gp.add_edge((block, iov), output_variable)
Gp.add_edge(output_variable, (block, iov))
for input_variable in block.inputs:
if not isinstance(input_variable, Signal):
Gp.add_edge(input_variable, (block, iov))
# for n1,n2 in M.items():
# Gp.add_edge(n1,n2)
sinks = []
sources = []
for node in Gp.nodes():
if Gp.out_degree(node) == 0:
sinks.append(node)
elif Gp.in_degree(node) == 0:
sources.append(node)
G2 = sources[:]
for node in sources:
for node2 in nx.descendants(Gp, node):
if node2 not in G2:
G2.append(node2)
if G2 != []:
print(G2)
raise ModelError('Overconstrained variables')
G3 = sinks[:]
for node in sinks:
for node2 in nx.ancestors(Gp, node):
if node2 not in G3:
G3.append(node2)
if G3 != []:
raise ModelError('Underconstrained variables')
# vars_resolvables=[]
# for var in vars_resoudre:
# if not 'v'+str(var) in G2+G3:
# vars_resolvables.append(var)
# G1=Gp.copy()
# G1.remove_nodes_from(G2+G3)
#
# M1=nx.bipartite.maximum_matching(G1)
# G1p=nx.DiGraph()
#
# G1p.add_nodes_from(G1.nodes())
# for e in G1.edges():
# # equation vers variable
# if e[0][0]=='v':
# G1p.add_edge(e[0],e[1])
# else:
# G1p.add_edge(e[1],e[0])
# # print(len(M))
# for n1,n2 in M1.items():
# # print(n1,n2)
# if n1[0]=='e':
# G1p.add_edge(n1,n2)
# else:
# G1p.add_edge(n2,n1)
scc = list(nx.strongly_connected_components(Gp))
# pos=nx.spring_layout(G1p)
# plt.figure()
# nx.draw(G1p,pos)
# nx.draw_networkx_labels(G1p,pos)
# print(scc)
if scc != []:
C = nx.condensation(Gp, scc)
isc_vars = []
for isc, sc in enumerate(scc):
for var in variables_to_solve:
if var in sc:
isc_vars.append(isc)
break
ancestors_vars = isc_vars[:]
for isc_var in isc_vars:
for ancetre in nx.ancestors(C, isc_var):
if ancetre not in ancestors_vars:
ancestors_vars.append(ancetre)
order_sc = [sc for sc in nx.topological_sort(
C) if sc in ancestors_vars]
order_ev = []
for isc in order_sc:
# liste d'équations et de variables triées pour être séparées
evs = list(scc[isc])
# print(evs)
# levs=int(len(evs)/2)
eqs = []
var = []
for element in evs:
if type(element) == tuple:
eqs.append(element)
else:
var.append(element)
order_ev.append((len(eqs), eqs, var))
return order_ev
raise ModelError
def Simulate(self, variables_to_solve=None):
if variables_to_solve == None:
variables_to_solve = [
variable for variable in self.variables if not variable.hidden]
order = self._ResolutionOrder(variables_to_solve)
# Initialisation of variables values
for variable in self.variables+self.signals:
variable._InitValues(self.ns, self.ts, self.max_order)
# ==============================================================================
# Enhancement to do: defining functions out of loop (copy args)s
# ==============================================================================
# print(order)
residue = []
for it, t in enumerate(self.t[1:]):
for neqs, equations, variables in order:
if neqs == 1:
equations[0][0].Solve(it+self.max_order+1, self.ts)
else:
# x0=np.zeros(neqs)
x0 = [equations[i][0].outputs[equations[i][1]]._values[it +
self.max_order] for i in range(len(equations))]
# print('===========')
def r(x, equations=equations[:]):
# Writing variables values proposed by optimizer
for i, xi in enumerate(x):
equations[i][0].outputs[equations[i][1]
]._values[it+self.max_order+1] = xi
# Computing regrets
r = []
# s=0
for ieq, (block, neq) in enumerate(equations):
# print(block,it)
# print(block.Evaluate(it+self.max_order+1,self.ts).shape)
# print(block.Evaluate(it+self.max_order+1,self.ts),block)
r.append(x[ieq]-block.Evaluate(it +
self.max_order+1, self.ts)[neq])
# print(block)
# print('xproposed:',x[ieq])
# print('block eval',block.Evaluate(it+self.max_order+1,self.ts)[neq])
# print('value', x[ieq]-block.Evaluate(it+self.max_order+1,self.ts)[neq])
# s+=abs(x[ieq]-block.Evaluate(it+self.max_order+1,self.ts)[neq])
# print(x[ieq],block.Evaluate(it+self.max_order+1,self.ts)[neq])
return r
def f(x, equations=equations[:]):
# Writing variables values proposed by optimizer
for i, xi in enumerate(x):
equations[i][0].outputs[equations[i][1]
]._values[it+self.max_order+1] = xi
# Computing regrets
# r=[]
s = 0
for ieq, (block, neq) in enumerate(equations):
# print(block,it)
# print(block.Evaluate(it+self.max_order+1,self.ts).shape)
# print(block.Evaluate(it+self.max_order+1,self.ts),block)
# r.append(x[ieq]-block.Evaluate(it+self.max_order+1,self.ts)[neq])
# print(block)
s += abs(x[ieq]-block.Evaluate(it +
self.max_order+1, self.ts)[neq])
# print(x[ieq],block.Evaluate(it+self.max_order+1,self.ts)[neq])
# return r
# print(s)
return s
x, d, i, m = fsolve(r, x0, full_output=True)
# res=root(f,x0,method='anderson')
# x=res.x
# res=minimize(f,x0,method='powell')
# if res.fun>1e-3:
# x0=[equations[i][0].outputs[equations[i][1]]._values[it+self.max_order] for i in range(len(equations))]
# x0+=np.random.random(len(equations))
# print('restart')
# res=minimize(f,x0,method='powell')
#
# residue.append(f(res.x))
# print(r(x),i)
# print(f(res.x),res.fun)
# f(x)
# print(r)
# if i!=1:
# print(equations)
# print(i,r(x))
# options={'tolfun':1e-3,'verbose':-9,'ftarget':1e-3}
# res=cma.fmin(f,x0,1,options=options)
# print(f(res[0]),r(res[0]))
# print(equations)
#
# print(m)
# if res.fun>1e-3:
# print('fail',res.fun)
# options={'tolfun':1e-3,'verbose':-9}
# res=cma.fmin(f,x0,1,options=options)
# else:
# print('ok')
# return residue
def VariablesValues(self, variables, t):
"""
Returns the value of given variables at time t.
Linear interpolation is performed between two time steps.
:param variables: one variable or a list of variables
:param t: time of evaluation
"""
# TODO: put interpolation in variables
if (t < self.te) | (t > 0):
i = t//self.ts # time step
ti = self.ts*i
if type(variables) == list:
values = []
for variable in variables:
# interpolation
values.append(
variables.values[i]*((ti-t)/self.ts+1)+variables.values[i+1]*(t-ti)/self.ts)
return values
else:
# interpolation
return variables.values[i]*((ti-t)/self.ts+1)+variables.values[i+1]*(t-ti)/self.ts
else:
raise ValueError
def PlotVariables(self, subplots_variables=None):
if subplots_variables == None:
subplots_variables = [self.signals+self.variables]
subplots_variables = [
[variable for variable in self.signals+self.variables if not variable.hidden]]
# plt.figure()
fig, axs = plt.subplots(len(subplots_variables), sharex=True)
if len(subplots_variables) == 1:
axs = [axs]
for isub, subplot in enumerate(subplots_variables):
legend = []
for variable in subplot:
axs[isub].plot(self.t, variable.values)
legend.append(variable.name)
axs[isub].legend(legend, loc='best')
axs[isub].margins(0.08)
axs[isub].grid()
plt.xlabel('Time')
plt.show()
def DrawModel(self):
from .interface import ModelDrawer
ModelDrawer(self)
def Save(self, name_file):
"""
name_file: name of the file without extension.
The extension .bms is added by function
"""
with open(name_file+'.bms', 'wb') as file:
model = dill.dump(self, file)
def __getstate__(self):
dic = self.__dict__.copy()
return dic
def __setstate__(self, dic):
self.__dict__ = dic
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.