repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
Dispersive-Hydrodynamics-Lab/PACE | PACE/PACE.py | ML.plot_fitspace | python | def plot_fitspace(self, name: str, X: np.ndarray, y: np.ndarray, clf: object) -> None:
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
h = 0.01 # Mesh step size
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xlabel(r'$\log_{10}$ Noise')
plt.ylabel(r'$\log_{10}$ Curvature')
plt.savefig(name) | Plot 2dplane of fitspace | train | https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L469-L494 | null | class ML(object):
def __init__(self, args: argparse.Namespace, algo: str='nn'):
"""
Machine Learning to determine usability of data....
"""
self.algo = self.get_algo(args, algo)
def get_algo(self, args: argparse.Namespace, algo: str) -> object:
""" Returns machine learning algorithm based on arguments """
if algo == 'nn':
return NearestNeighbor(args.nnk)
def train(self) -> None:
""" Trains specified algorithm """
traindata = self.get_data()
self.algo.train(traindata)
def get_data(self) -> np.ndarray:
"""
Gets data for training
We use the domain column to determine what fields have been filled out
If the domain is zero (i.e. not in error) than we should probably ignore it anyway
"""
traindata = data.get_traindata()
return traindata
|
Dispersive-Hydrodynamics-Lab/PACE | PACE/PACE.py | NearestNeighbor.train | python | def train(self, traindata: np.ndarray) -> None:
self.clf.fit(traindata[:, 1:5], traindata[:, 5]) | Trains on dataset | train | https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L508-L510 | null | class NearestNeighbor(object):
def __init__(self, k: int):
"""
An example machine learning model. EVERY MODEL NEEDS TO PROVIDE:
1. Train
2. Predict
"""
self.clf = neighbors.KNeighborsClassifier(k, weights='distance',
p=2, algorithm='auto',
n_jobs=8)
def predict(self, predictdata: np.ndarray) -> np.ndarray:
""" predict given points """
return self.clf.predict(predictdata)
|
Dispersive-Hydrodynamics-Lab/PACE | PACE/PACE.py | NearestNeighbor.predict | python | def predict(self, predictdata: np.ndarray) -> np.ndarray:
return self.clf.predict(predictdata) | predict given points | train | https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L512-L514 | null | class NearestNeighbor(object):
def __init__(self, k: int):
"""
An example machine learning model. EVERY MODEL NEEDS TO PROVIDE:
1. Train
2. Predict
"""
self.clf = neighbors.KNeighborsClassifier(k, weights='distance',
p=2, algorithm='auto',
n_jobs=8)
def train(self, traindata: np.ndarray) -> None:
""" Trains on dataset """
self.clf.fit(traindata[:, 1:5], traindata[:, 5])
|
stain/forgetSQL | lib/forgetSQL.py | prepareClasses | python | def prepareClasses(locals):
for (name, forgetter) in locals.items():
if not (type(forgetter) is types.TypeType and
issubclass(forgetter, Forgetter)):
# Only care about Forgetter objects
continue
# Resolve classes
for (key, userclass) in forgetter._userClasses.items():
if type(userclass) is types.StringType:
# resolve from locals
resolved = locals[userclass]
forgetter._userClasses[key] = resolved
forgetter._tables = {}
# Update all fields with proper names
for (field, sqlfield) in forgetter._sqlFields.items():
forgetter._sqlFields[field] = forgetter._checkTable(sqlfield)
newLinks = []
for linkpair in forgetter._sqlLinks:
(link1, link2) = linkpair
link1=forgetter._checkTable(link1)
link2=forgetter._checkTable(link2)
newLinks.append((link1, link2))
forgetter._sqlLinks = newLinks
forgetter._prepared = True | Fix _userClasses and some stuff in classes.
Traverses locals, which is a locals() dictionary from
the namespace where Forgetter subclasses have been
defined, and resolves names in _userClasses to real
class-references.
Normally you would call forgettSQL.prepareClasses(locals())
after defining all classes in your local module.
prepareClasses will only touch objects in the name space
that is a subclassed of Forgetter. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L959-L998 | null | #!/usr/bin/env python
# *-* encoding: utf8
#
# Copyright (c) 2002-2015 Stian Soiland
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Stian Soiland-Reyes <stian@soiland-reyes.com>
# URL: https://github.com/stain/forgetSQL
# License: LGPL 2.1 or later
#
"""forgetSQL is a Python module for accessing SQL databases by creating
classes that maps SQL tables to objects, normally one class pr. SQL
table. The idea is to forget everything about SQL and just worrying
about normal classes and objects. """
__version__ = "0.6.0-SNAPSHOT"
import exceptions
import time
import re
import types
import sys
import weakref
import pprint
try:
from mx import DateTime
except:
DateTime = None
try:
True,False
except NameError:
(True,False) = (1==1, 0==1)
class NotFound(exceptions.Exception):
pass
class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
try:
import database
return database.cursor()
except:
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
if not hasattr(cls, '_cache'):
cls._cache = {}
try: # to implement 'goto' in Python.. UGH
if not cls._cache.has_key(args):
# unknown
raise "NotFound"
(ref, updated) = cls._cache[args]
realObject = ref()
if realObject is None:
# No more real references to it, dead object
raise "NotFound"
age = time.time() - updated
if age > cls._timeout:
# Too old!
raise "NotFound"
updated = time.time()
except "NotFound":
# We'll need to create it
realObject = object.__new__(cls, *args)
ref = weakref.ref(realObject)
updated = time.time()
# store a weak reference
cls._cache[args] = (ref, updated)
return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.
A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length), may be initalized by using several parameters
to this constructor. Note that the object will not be loaded
before you call load().
"""
self._values = {}
self.reset()
if not id:
self._resetID()
else:
self._setID(id)
def _setID(self, id):
"""Set the ID, ie. the values for primary keys.
id can be either a list, following the
_sqlPrimary, or some other type, that will be set
as the singleton ID (requires 1-length sqlPrimary).
"""
if type(id) in (types.ListType, types.TupleType):
try:
for key in self._sqlPrimary:
value = id[0]
self.__dict__[key] = value
id = id[1:] # rest, go revursive
except IndexError:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
elif len(self._sqlPrimary) <= 1:
# It's a simple value
key = self._sqlPrimary[0]
self.__dict__[key] = id
else:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
self._new = False
def _getID(self):
"""Get the ID values as a tuple annotated by sqlPrimary"""
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
def __getattr__(self, key):
"""Get an attribute, normally a SQL field value.
Will be called when an unknown key is to be
retrieved, ie. most likely one of our database
fields.
"""
if self._sqlFields.has_key(key):
if not self._updated:
self.load()
return self._values[key]
else:
raise AttributeError, key
def __setattr__(self, key, value):
"""Set an attribute, normally a SQL field value.
Will be called whenever something needs to be set, so
we store the value as a SQL-thingie unless the key
is not listed in sqlFields.
"""
if key not in self._sqlPrimary and self._sqlFields.has_key(key):
if not self._updated:
self.load()
self._values[key] = value
self._changed = time.time()
else:
# It's a normal thingie
self.__dict__[key] = value
def __del__(self):
"""Save the object on deletion.
Be aware of this. If you want to undo some change, use reset()
first.
Be aware of Python 2.2's garbage collector, that
might run in the background. This means that
unless you call save() changes might not
be done immediately in the database.
Not calling save() also means that you cannot catch
errors caused by wrong insertion/update (ie. wrong
datatype for a field)
"""
if not self._autosave:
return
try:
self.save()
except Exception, e:
pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded).
"""
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1
def _loadDB(self):
"""Connect to the database to load myself"""
if not self._validID():
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
self._updated = time.time()
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
useObject=None, orderBy=None):
"""Retrieve every object as an iterator.
Possibly limitted by the where list of clauses that will be
AND-ed.
Since an iterator is returned, only ``buffer`` rows are loaded
from the database at once. This is useful if you need
to process all objects.
If useObject is given, this object is returned each time, but
with new data. This can be used to avoid creating many new
objects when only one object is needed each time.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
fetchedAt = time.time()
curs.execute(sql)
# We might start eating memory at this point
def getNext(rows=[]):
forgetter = cls
if not rows:
rows += curs.fetchmany(buffer)
if not rows:
curs.close()
return None
row = rows[0]
del rows[0]
try:
idPositions = [fields.index(key) for key in cls._sqlPrimary]
except ValueError:
raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
ids = [row[pos] for pos in idPositions]
if useObject:
result = useObject
result.reset()
result._setID(ids)
else:
result = forgetter(*ids)
result._loadFromRow(row, fields, curs)
result._updated = fetchedAt
return result
return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
class MysqlForgetter(Forgetter):
    """MySQL-compatible Forgetter.

    MySQL has no sequences, so instead of pre-fetching an ID with
    nextval() this subclass INSERTs first and then reads back the
    auto-generated ID from the cursor.
    """
    def _saveDB(self):
        """Overloaded - we don't have nextval() in mysql"""
        # We're a "fresh" copy now
        self._updated = time.time()
        if self._new:
            operation = 'INSERT'
        else:
            operation = 'UPDATE'
        (sql, fields) = self._prepareSQL(operation)
        values = []
        for field in fields:
            value = getattr(self, field)
            if isinstance(value, Forgetter):
                # It's another object, we store only the ID
                if value._new:
                    # It's a new object too, it must be saved!
                    value.save()
                try:
                    (value,) = value._getID()
                except:
                    # NOTE(review): string exception -- invalid in
                    # Python >= 2.6.
                    raise "Can't reference multiple-primary-key: %s" % value
            values.append(value)
        cursor = self.cursor()
        cursor.execute(sql, values)
        # cursor.commit()
        if not self._validID():
            if not len(self._getID()) == 1:
                raise "Can't retrieve auto-inserted ID for multiple-primary-key"
            # Here's the mysql magic to get the new ID.
            # NOTE(review): insert_id() on the *cursor* is old MySQLdb
            # API; newer versions expose it on the connection -- verify.
            self._setID(cursor.insert_id())
        cursor.close()
        self._new = False
def generateFromTables(tables, cursor, getLinks=1, code=0):
    """Generates python code (or class objects if code is false)
    based on SQL queries on the table names given in the list
    tables.

    code -- if given -- should be a dictionary containing these
    keys to be inserted into generated code:
        'database': database name
        'module': database module name
        'connect': string to be inserted into module.connect()
    """
    curs = cursor()
    forgetters = {}
    # Shared base so every generated class gets the cursor and the
    # autosave policy.  NOTE(review): refers to forgetSQL.Forgetter,
    # so this assumes the forgetSQL module is importable by name.
    class _Wrapper(forgetSQL.Forgetter):
        _autosave = False
        pass
    _Wrapper.cursor = cursor
    for table in tables:
        # capitalize the table name to make it look like a class
        name = table.capitalize()
        # Define the class by instantiating the meta class to
        # the given name (requires Forgetter to be new style)
        forgetter = _Wrapper.__class__(name, (_Wrapper,), {})
        # Register it
        forgetters[name] = forgetter
        forgetter._sqlTable = table
        forgetter._sqlLinks = {}
        forgetter._sqlFields = {}
        forgetter._shortView = ()
        forgetter._descriptions = {}
        forgetter._userClasses = {}
        # Get columns by selecting one row and reading the cursor
        # description.
        curs.execute("SELECT * FROM %s LIMIT 1" % table)
        columns = [column[0] for column in curs.description]
        # convert to dictionary and register in forgetter
        for column in columns:
            forgetter._sqlFields[column] = column
    if getLinks:
        # Try to find links between tables (!)
        # Note the big O factor with this ...
        for (tableName, forgetter) in forgetters.items():
            for (key, column) in forgetter._sqlFields.items():
                # A column refering to another table would most likely
                # be called otherColumnID or just otherColumn. We'll
                # lowercase below when performing the test.
                possTable = re.sub(r'_?id$', '', column)
                # all tables (ie. one of the forgetters) are candidates
                # NOTE(review): foundLink is set but never used.
                foundLink = False
                for candidate in forgetters.keys():
                    if candidate.lower() == possTable.lower():
                        if possTable.lower() == tableName.lower():
                            # It's our own primary key!
                            forgetter._sqlPrimary = (column,)
                            break
                        # Woooh! First - let's replace 'blapp_id' with 'blapp'
                        # as the attribute name to indicate that it would
                        # contain the Blapp instance, not just
                        # some ID.
                        del forgetter._sqlFields[key]
                        forgetter._sqlFields[possTable] = column
                        # And.. we'll need to know which class we refer to
                        forgetter._userClasses[possTable] = candidate
                        break # we've found our candidate
    if code:
        if code['module'] == "MySQLdb":
            code['class'] = 'forgetSQL.MysqlForgetter'
        else:
            code['class'] = 'forgetSQL.Forgetter'
        code['date'] = time.strftime('%Y-%m-%d')
        # Emit a module header template; %(...)s keys come from `code`.
        print '''
"""Database wrappers %(database)s
Autogenerated by forgetsql-generate %(date)s.
"""
import forgetSQL
#import %(module)s
class _Wrapper(%(class)s):
    """Just a simple wrapper class so that you may
    easily change stuff for all forgetters. Typically
    this involves subclassing MysqlForgetter instead."""
    # Only save changes on .save()
    _autosave = False
    # Example database connection (might lack password)
    #_dbModule = %(module)s
    #_dbConnection = %(module)s.connect(%(connect)s)
    #def cursor(cls):
    #    return cls._dbConnection.cursor()
    #cursor = classmethod(cursor)
''' % code
        items = forgetters.items()
        items.sort()
        for (name, forgetter) in items:
            print "class %s(_Wrapper):" % name
            for (key, value) in forgetter.__dict__.items():
                if key.find('__') == 0:
                    continue
                nice = pprint.pformat(value)
                # Get some indentation
                nice = nice.replace('\n', '\n ' + ' '*len(key))
                print ' %s = ' % key, nice
            print ""
        print '''
# Prepare them all. We need to send in our local
# namespace.
forgetSQL.prepareClasses(locals())
'''
    else:
        prepareClasses(forgetters)
    return forgetters
|
stain/forgetSQL | lib/forgetSQL.py | generateFromTables | python | def generateFromTables(tables, cursor, getLinks=1, code=0):
curs = cursor()
forgetters = {}
class _Wrapper(forgetSQL.Forgetter):
_autosave = False
pass
_Wrapper.cursor = cursor
for table in tables:
# capitalize the table name to make it look like a class
name = table.capitalize()
# Define the class by instanciating the meta class to
# the given name (requires Forgetter to be new style)
forgetter = _Wrapper.__class__(name, (_Wrapper,), {})
# Register it
forgetters[name] = forgetter
forgetter._sqlTable = table
forgetter._sqlLinks = {}
forgetter._sqlFields = {}
forgetter._shortView = ()
forgetter._descriptions = {}
forgetter._userClasses = {}
# Get columns
curs.execute("SELECT * FROM %s LIMIT 1" % table)
columns = [column[0] for column in curs.description]
# convert to dictionary and register in forgetter
for column in columns:
forgetter._sqlFields[column] = column
if getLinks:
# Try to find links between tables (!)
# Note the big O factor with this ...
for (tableName, forgetter) in forgetters.items():
for (key, column) in forgetter._sqlFields.items():
# A column refering to another table would most likely
# be called otherColumnID or just otherColumn. We'll
# lowercase below when performing the test.
possTable = re.sub(r'_?id$', '', column)
# all tables (ie. one of the forgetters) are candidates
foundLink = False
for candidate in forgetters.keys():
if candidate.lower() == possTable.lower():
if possTable.lower() == tableName.lower():
# It's our own primary key!
forgetter._sqlPrimary = (column,)
break
# Woooh! First - let's replace 'blapp_id' with 'blapp'
# as the attribute name to indicate that it would
# contain the Blapp instance, not just
# some ID.
del forgetter._sqlFields[key]
forgetter._sqlFields[possTable] = column
# And.. we'll need to know which class we refer to
forgetter._userClasses[possTable] = candidate
break # we've found our candidate
if code:
if code['module'] == "MySQLdb":
code['class'] = 'forgetSQL.MysqlForgetter'
else:
code['class'] = 'forgetSQL.Forgetter'
code['date'] = time.strftime('%Y-%m-%d')
print '''
"""Database wrappers %(database)s
Autogenerated by forgetsql-generate %(date)s.
"""
import forgetSQL
#import %(module)s
class _Wrapper(%(class)s):
"""Just a simple wrapper class so that you may
easily change stuff for all forgetters. Typically
this involves subclassing MysqlForgetter instead."""
# Only save changes on .save()
_autosave = False
# Example database connection (might lack password)
#_dbModule = %(module)s
#_dbConnection = %(module)s.connect(%(connect)s)
#def cursor(cls):
# return cls._dbConnection.cursor()
#cursor = classmethod(cursor)
''' % code
items = forgetters.items()
items.sort()
for (name, forgetter) in items:
print "class %s(_Wrapper):" % name
for (key, value) in forgetter.__dict__.items():
if key.find('__') == 0:
continue
nice = pprint.pformat(value)
# Get some indention
nice = nice.replace('\n', '\n ' + ' '*len(key))
print ' %s = ' % key, nice
print ""
print '''
# Prepare them all. We need to send in our local
# namespace.
forgetSQL.prepareClasses(locals())
'''
else:
prepareClasses(forgetters)
return forgetters | Generates python code (or class objects if code is false)
based on SQL queries on the table names given in the list
tables.
code -- if given -- should be an dictionary containing these
keys to be inserted into generated code:
'database': database name
'module': database module name
'connect': string to be inserted into module.connect() | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L1001-L1122 | [
"def prepareClasses(locals):\n \"\"\"Fix _userClasses and some stuff in classes.\n\n Traverses locals, which is a locals() dictionary from\n the namespace where Forgetter subclasses have been\n defined, and resolves names in _userClasses to real\n class-references.\n\n Normally you would call forgettSQL.prepareClasses(locals())\n after defining all classes in your local module.\n prepareClasses will only touch objects in the name space\n that is a subclassed of Forgetter.\n \"\"\"\n for (name, forgetter) in locals.items():\n if not (type(forgetter) is types.TypeType and\n issubclass(forgetter, Forgetter)):\n # Only care about Forgetter objects\n continue\n\n # Resolve classes\n for (key, userclass) in forgetter._userClasses.items():\n if type(userclass) is types.StringType:\n # resolve from locals\n resolved = locals[userclass]\n forgetter._userClasses[key] = resolved\n\n forgetter._tables = {}\n # Update all fields with proper names\n for (field, sqlfield) in forgetter._sqlFields.items():\n forgetter._sqlFields[field] = forgetter._checkTable(sqlfield)\n\n newLinks = []\n for linkpair in forgetter._sqlLinks:\n (link1, link2) = linkpair\n link1=forgetter._checkTable(link1)\n link2=forgetter._checkTable(link2)\n newLinks.append((link1, link2))\n\n forgetter._sqlLinks = newLinks\n forgetter._prepared = True\n"
] | #!/usr/bin/env python
# *-* encoding: utf8
#
# Copyright (c) 2002-2015 Stian Soiland
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Stian Soiland-Reyes <stian@soiland-reyes.com>
# URL: https://github.com/stain/forgetSQL
# License: LGPL 2.1 or later
#
"""forgetSQL is a Python module for accessing SQL databases by creating
classes that maps SQL tables to objects, normally one class pr. SQL
table. The idea is to forget everything about SQL and just worrying
about normal classes and objects. """
__version__ = "0.6.0-SNAPSHOT"
import exceptions
import time
import re
import types
import sys
import weakref
import pprint
try:
from mx import DateTime
except:
DateTime = None
try:
True,False
except NameError:
(True,False) = (1==1, 0==1)
class NotFound(exceptions.Exception):
    """Raised when a row for the requested primary key does not exist."""
    pass
class Forgetter(object):
    """SQL to object database wrapper.

    Given a well-defined database, by subclassing Forgetter
    and supplying some attributes, you may wrap your SQL tables
    into objects that are easier to program with.

    You must define all fields in the database table that you want
    to expose, and you may refine the names to suit your
    object oriented programming style. (ie. customerID -> customer)

    Objects will be created without loading from database,
    loading will occur when you try to read or write some of the
    attributes defined as a SQL field. If you change some attributes the
    object will be saved to the database by save() or garbage
    collection. (be aware that GC in Py >= 2.2 is not immediate)

    If you want to create new objects, just supply them with blank
    ID-fields, and _nextSequence() will be called to fetch a new
    ID used for insertion.

    The rule is one class pr. table, although it is possible
    to join several tables into one class, as long as the
    identificator is unique.

    By defining _userClasses you can resolve links to other
    tables, a field in this table would be an id in another
    table, ie. another class. In practical use this means that
    behind attributes pointing to other classes (tables)
    you will find instances of that class.

    Short example usage of forgetter objects::

        # Process all
        for user in User.getAllIterator():
            # Access attributes
            print user.name
            print "Employed at:"
            # Access the Employed-class/table
            print user.employed.name, user.employed.address
            # fire him, setting employed reference to SQL NULL
            user.employed = None

        # Retrieve some ID
        shop = Shop(552)
        shop.name = 'Corrected name'
        shop.save() # Save now instead of waiting for garbage collector

        # Include SQL where-statements in selections
        myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))

    Requirements:

    The attributes 'cursor' and '_dbModule' should be set from the
    outside. The cursor should be DB 2.0 compliant, preferably with
    autocommit turned on. (Transactions are not within the scope of this
    module yet)

    Python 2.2 (iterators, methodclasses)
    """
    # How long to keep objects in the instance cache (seconds)
    _timeout = 60
    # Will be True once prepare() is called
    _prepared = False
    # The default table containing our fields
    # _sqlTable = 'shop'
    _sqlTable = ''
    # A mapping between our fields and the database fields.
    #
    # You must include all fields needed here. You may specify
    # other names if you want to make the sql name more appropriate
    # for object oriented programming. (Like calling a field 'location'
    # instead of 'location_id', because we wrap the location in a separate
    # object and don't really care about the id)
    #
    # You may reference other tables with a dot, all
    # other db fields will be related to _sqlTable.
    # If you reference other tables, don't forget to
    # modify _sqlLinks.
    #
    # _sqlFields = {
    #     'id': 'shop_id',
    #     'name': 'name',
    #     'location': 'location_id',
    #     'chain': 'shop_chain_id',
    #     'address': 'address.address_id',
    # }
    _sqlFields = {}
    # A list of attribute names (in the object, not database)
    # that are the primary key in the database. Normally
    # 'id' is sufficient. It is legal to have
    # multiple fields as primary key, but it won't work
    # properly with _userClasses and getChildren().
    #
    # If your table is a link table or something, ALL fields
    # should be in _sqlPrimary. (all fields are needed to define
    # a unique row to be deleted/updated)
    _sqlPrimary = ('id',)
    # When using several tables, you should include a
    # 'link' statement, displaying which fields link the
    # two tables together. Note that these are sql names.
    # _sqlLinks = (
    #     ('shop_id', 'address.shop_id'),
    # )
    _sqlLinks = ()
    # The name of the sequence used by _nextSequence
    # - if None, a guess will be made based on _sqlTable
    # and _sqlPrimary.
    _sqlSequence = None
    # Order by this attribute by default, if specified
    # _orderBy = 'name' - this could also be a tuple
    _orderBy = None
    # _userClasses can be used to trigger creation of a field
    # with an instance of the class. The given database field
    # will be sent to the constructor as an objectID
    # (ie. as self.id in this object) (ie. the class does not
    # necessarily need to be a subclass of Forgetter)
    #
    # This means that the attribute will be an instance of that
    # class, not the ID. The object will not be loaded from the
    # database until you try to read any of its attributes,
    # though. (to prevent unnecessary database overload and
    # recursions)
    #
    # Notice that _userClasses must be a name resolvable, ie.
    # from the same module as your other classes.
    # _userClasses = {
    #     'location': 'Location',
    #     'chain': 'Chain',
    #     'address': 'Address',
    # }
    _userClasses = {}
    # If you want userClasses to work properly with strings instead of
    # instances, you must also 'prepare' your classes to resolve the
    # names. This must be done from the same module you are defining the
    # classes: forgetSQL.prepareClasses(locals())
    # A list of fields that are suitable for a textual
    # representation (typically a one liner).
    #
    # Fields will be joined together with spaces or
    # similar.
    # _shortView = ('name',)
    _shortView = ()
    # Descriptions for the fields (ie. labels)
    # Note that these fields will be translated with the _ function.
    # If a field is undescribed, a capitalized version of the field name
    # will be presented.
    # _descriptions = {
    #     'name': 'Full name',
    #     'description': 'Description of thingie',
    # }
    _descriptions = {}
def cursor(cls):
    """Return a DB-API 2.0 cursor for this forgetter.

    This default implementation imports a project-local ``database``
    module and delegates to ``database.cursor()``.  Applications are
    expected to override or reassign this classmethod.

    Raises:
        RuntimeError: if no database connection could be made.
    """
    try:
        import database
        return database.cursor()
    except Exception:
        # The original code raised a plain string here; string
        # exceptions are invalid in Python >= 2.6, so raise a real
        # exception with the same message instead.
        raise RuntimeError(
            "cursor method undefined, no database connection could be made")
cursor = classmethod(cursor)
# A reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not the string "MySQLdb".
#
_dbModule = None
# By default, autosave changes when the object is garbage collected.
# NOTE: This could give weird side effects as you need to keep track
# of when objects are garbage collected. Instead, use .save()
# explicitly when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
    """Return a cached live instance for this primary key when possible.

    A per-class weakref cache maps constructor arguments to instances.
    A cached instance is reused only when it is still alive and at most
    cls._timeout seconds old; otherwise a fresh (unloaded) instance is
    created and cached.

    The original implementation abused string exceptions as a 'goto'
    (a TypeError in Python >= 2.6); this version uses plain control
    flow.  It also no longer forwards *args to object.__new__, which
    is deprecated/invalid.
    """
    if not hasattr(cls, '_cache'):
        cls._cache = {}
    entry = cls._cache.get(args)
    if entry is not None:
        (ref, created) = entry
        cached = ref()
        # Reuse only a live, sufficiently fresh instance.
        if cached is not None and (time.time() - created) <= cls._timeout:
            return cached
    # Cache miss, collected instance or expired entry: build a new
    # object (load() is NOT called here) and remember it weakly so the
    # cache never keeps instances alive on its own.
    fresh = object.__new__(cls)
    cls._cache[args] = (weakref.ref(fresh), time.time())
    return fresh
def __init__(self, *id):
    """Initialize, possibly with a database id.

    A forgetter with a multivalue primary key (ie. _sqlPrimary more
    than 1 in length) may be initialized by passing several
    parameters to this constructor. Note that the object will not be
    loaded before you call load() (or touch a SQL-mapped attribute).
    """
    # Value cache for SQL fields; reset() rebuilds it.
    self._values = {}
    self.reset()
    if not id:
        # No ID given: this is a new, unsaved object.
        self._resetID()
    else:
        self._setID(id)
def _setID(self, id):
    """Set the ID, ie. the values for the primary key fields.

    ``id`` can be either a list/tuple following the order of
    _sqlPrimary, or a single plain value (which requires a 1-length
    _sqlPrimary).  Values are written straight into __dict__ to
    bypass __setattr__'s change tracking.
    """
    if type(id) in (types.ListType, types.TupleType):
        try:
            for key in self._sqlPrimary:
                # id[0] raises IndexError when the sequence is too short.
                value = id[0]
                self.__dict__[key] = value
                id = id[1:] # rest, go recursive
        except IndexError:
            # NOTE(review): raising a plain string is invalid in
            # Python >= 2.6; should be a real exception class.
            raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    elif len(self._sqlPrimary) <= 1:
        # It's a simple value
        key = self._sqlPrimary[0]
        self.__dict__[key] = id
    else:
        raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    self._new = False
def _getID(self):
    """Get the ID values as a list ordered by _sqlPrimary.

    Primary key fields holding Forgetter instances are collapsed to
    their (single) ID value; new referenced objects are saved first
    so they have an ID at all.
    """
    id = []
    for key in self._sqlPrimary:
        value = self.__dict__[key]
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                # NOTE(review): string exception -- invalid in Python >= 2.6.
                raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
        id.append(value)
    return id
def _resetID(self):
    """Blank out every primary key field and flag the object as new."""
    blanks = tuple([None] * len(self._sqlPrimary))
    self._setID(blanks)
    self._new = True
def _validID(self):
    """Return true when every primary key field has a value (no Nones)."""
    return None not in self._getID()
def __getattr__(self, key):
    """Lazy attribute access for SQL-mapped fields.

    The first read triggers load(); afterwards values come from the
    field cache.  Unknown names raise AttributeError as usual.
    """
    if key not in self._sqlFields:
        raise AttributeError(key)
    if not self._updated:
        # Not loaded yet (or cache expired) -- fetch from database.
        self.load()
    return self._values[key]
def __setattr__(self, key, value):
    """Set an attribute, normally a SQL field value.

    SQL-mapped fields (except primary keys) are stored in the
    _values cache and mark the object as changed; everything else --
    including primary keys -- goes straight into __dict__.
    """
    if key not in self._sqlPrimary and self._sqlFields.has_key(key):
        if not self._updated:
            # Load existing data first so a later save() does not
            # write back blank fields.
            self.load()
        self._values[key] = value
        self._changed = time.time()
    else:
        # It's a normal thingie
        self.__dict__[key] = value
def __del__(self):
    """Save the object on deletion (only when _autosave is on).

    Be aware of this. If you want to undo some change, use reset()
    first.

    Be aware of Python 2.2's garbage collector, that
    might run in the background. This means that
    unless you call save() changes might not
    be done immediately in the database.

    Not calling save() also means that you cannot catch
    errors caused by wrong insertion/update (ie. wrong
    datatype for a field)
    """
    if not self._autosave:
        return
    try:
        self.save()
    except Exception, e:
        # Deliberately swallowed: raising from __del__ only prints a
        # warning, and __del__ may run during interpreter shutdown
        # when modules are half torn down.
        pass
def _checkTable(cls, field):
    """Split a field from _sqlFields into table and column.

    Registers the table in cls._tables (used later to build FROM
    clauses), and returns a fully qualified 'table.column'
    (default table: cls._sqlTable).
    """
    # Get table part
    try:
        (table, field) = field.split('.')
    except ValueError:
        # No dot -> the column lives in our own table.
        table = cls._sqlTable
    # clean away white space
    table = table.strip()
    field = field.strip()
    # register table
    cls._tables[table] = None
    # and return in proper shape
    return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
    """Return the object to a pristine, unloaded state.

    Discards any unsaved changes (others may already hold references
    to this object and expect something else!) and re-creates the
    value cache with every SQL field present but set to None.
    Override this method if you add properties not defined in
    _sqlFields.
    """
    self._resetID()
    self._new = None
    self._updated = None
    self._changed = None
    # One None-valued slot per SQL field.
    self._values = dict.fromkeys(self._sqlFields.keys())
def load(self, id=None):
    """Load from database. Old values will be discarded.

    If ``id`` is given, the object is first reset and re-pointed at
    that primary key.
    """
    if id is not None:
        # We are asked to change our ID to something else
        self.reset()
        self._setID(id)
    if not self._new and self._validID():
        self._loadDB()
    # Timestamp even when nothing was fetched, so lazy loading does
    # not retry on every attribute access.
    self._updated = time.time()
def save(self):
    """Save to database if anything has changed since last load.

    Returns True when a write was performed, False otherwise.
    """
    # Save when: brand new, or changed with a valid ID, or changed
    # after the last load.
    if ( self._new or
         (self._validID() and self._changed) or
         (self._updated and self._changed > self._updated) ):
        # Don't save if we have not loaded existing data!
        self._saveDB()
        return True
    return False
def delete(self):
    """Delete this object's row in the database.

    The object will then be reset and ready for use
    again with a new id.
    """
    (sql, ) = self._prepareSQL("DELETE")
    curs = self.cursor()
    curs.execute(sql, self._getID())
    curs.close()
    self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
    """Return a sql for the given operation.

    Possible operations:
      SELECT     read data for this id
      SELECTALL  read data for all ids
      INSERT     insert data, create new id
      UPDATE     update data for this id
      DELETE     remove data for this id

    SQL will be built by data from _sqlFields, and will
    contain 0 or several %s for you to sprintf-format in later:
      SELECT    --> len(cls._sqlPrimary)
      SELECTALL --> 0 %s
      INSERT    --> len(cls._sqlFields) %s (including id)
      UPDATE    --> len(cls._sqlFields) %s (including id)
      DELETE    --> len(cls._sqlPrimary)

    (Note: INSERT and UPDATE will only change values in _sqlTable, so
    the actual number of fields for substitutions might be lower
    than len(cls._sqlFields))

    For INSERT you should use cls._nextSequence() to retrieve
    a new 'id' number. Note that if your sequences are not named
    tablename_primarykey_seq (ie. for table 'blapp' with primary key
    'john_id', sequence name blapp_john_id_seq) you must give the sequence
    name as an optional argument to _nextSequence)

    Additional note: cls._nextSequence() MUST be overloaded
    for multi _sqlPrimary classes. Return a tuple.

    Return values will always be tuples:
      SELECT    --> (sql, fields)
      SELECTALL --> (sql, fields)
      INSERT    --> (sql, fields)
      UPDATE    --> (sql, fields)
      DELETE    --> (sql,)  -- for consistency

    fields will be object properties as a list, ie. the keys from
    cls._sqlFields. The purpose of this list is to give the programmer
    an idea of which order the keys are inserted in the SQL, giving
    help for retrieving (SELECT, SELECTALL) or inserting for %s
    (INSERT, DELETE).

    Why? Well, the keys are stored in a hash, and we cannot be sure
    about the order of hash.keys() from time to time, not even with
    the same instance.

    Optional where-parameter applies to SELECT, SELECTALL and DELETE.
    where should be a list or string of where clauses.
    """
    # Normalize parameter for later comparisons
    operation = operation.upper()
    # Convert where to a list if it is a string
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if orderBy is None:
        orderBy = cls._orderBy
    if operation in ('SELECT', 'SELECTALL'):
        # Get the object fields and sql fields in the same
        # order to be able to reconstruct later.
        fields = []
        sqlfields = []
        for (field, sqlfield) in cls._sqlFields.items():
            if selectfields is None or field in selectfields:
                fields.append(field)
                sqlfields.append(sqlfield)
        if not fields:
            # dirrrrrty!
            # NOTE(review): string exception -- invalid in Python >= 2.6.
            raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
        sql = "SELECT\n "
        sql += ', '.join(sqlfields)
        sql += "\nFROM\n "
        tables = cls._tables.keys()
        if not tables:
            raise "REALITY ERROR: No tables defined"
        sql += ', '.join(tables)
        # Join conditions between the involved tables.
        tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
        # this MUST be here.
        # NOTE(review): '<>' is Python 2-only syntax.
        if operation <> 'SELECTALL':
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
        if where:
            tempWhere += where
        if(tempWhere):
            # Make sure to use parentheses in case someone has used
            # ORs in the WHERE-list..
            sql += "\nWHERE\n ("
            sql += ') AND\n ('.join(tempWhere)
            sql += ')'
        if operation == 'SELECTALL' and orderBy:
            sql += '\nORDER BY\n '
            if type(orderBy) in (types.TupleType, types.ListType):
                # Map attribute names to their SQL columns.
                orderBy = [cls._sqlFields[x] for x in orderBy]
                orderBy = ',\n '.join(orderBy)
            else:
                orderBy = cls._sqlFields[orderBy]
            sql += orderBy
        return (sql, fields)
    elif operation in ('INSERT', 'UPDATE'):
        if operation == 'UPDATE':
            sql = 'UPDATE %s SET\n ' % cls._sqlTable
        else:
            sql = 'INSERT INTO %s (\n ' % cls._sqlTable
        set = []
        fields = []
        sqlfields = []
        for (field, sqlfield) in cls._sqlFields.items():
            if operation == 'UPDATE' and field in cls._sqlPrimary:
                continue
            # Only fields living in our own table are written.
            if sqlfield.find(cls._sqlTable + '.') == 0:
                # It's a local field, chop of the table part
                sqlfield = sqlfield[len(cls._sqlTable)+1:]
                fields.append(field)
                sqlfields.append(sqlfield)
                set.append(sqlfield + '=%s')
        if operation == 'UPDATE':
            sql += ',\n '.join(set)
            sql += '\nWHERE\n '
            tempWhere = []
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
                fields.append(key)
            sql += ' AND\n '.join(tempWhere)
        else:
            sql += ',\n '.join(sqlfields)
            sql += ')\nVALUES (\n '
            sql += ',\n '.join(('%s',) * len(sqlfields))
            sql += ')'
        return (sql, fields)
    elif operation == 'DELETE':
        sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
        if where:
            sql += " AND\n ".join(where)
        else:
            # NOTE(review): the outer loop looks redundant and would
            # duplicate the WHERE clause for multi-key primaries.
            for key in cls._sqlPrimary:
                tempWhere = []
                for key in cls._sqlPrimary:
                    tempWhere.append(cls._sqlFields[key] + "=%s")
                sql += ' AND\n '.join(tempWhere)
        return (sql, )
    else:
        # NOTE(review): Python 2-only raise form.
        raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
    """Return a new sequence number for insertion in self._sqlTable.

    Uses the PostgreSQL nextval() function. Note that if your
    sequences are not named tablename_primarykey_seq (ie. for table
    'blapp' with primary key 'john_id', sequence name
    blapp_john_id_seq) you must give the full sequence name as an
    optional argument to _nextSequence)
    """
    if not name:
        name = cls._sqlSequence
    if not name:
        # Assume it's tablename_primarykey_seq
        # NOTE(review): '<>' and string raise are Python 2-only.
        if len(cls._sqlPrimary) <> 1:
            raise "Could not guess sequence name for multi-primary-key"
        primary = cls._sqlPrimary[0]
        name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
        # Don't have . as a tablename or column name! =)
    curs = cls.cursor()
    curs.execute("SELECT nextval('%s')" % name)
    value = curs.fetchone()[0]
    curs.close()
    return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
    """Load values from a database row, described by fields.

    ``fields`` should be the attribute names that will be set.
    Note that userclasses will be created (but not loaded).
    """
    position = 0
    for elem in fields:
        value = result[position]
        valueType = cursor.description[position][1]
        if hasattr(self._dbModule, 'BOOLEAN') and \
           valueType == self._dbModule.BOOLEAN and \
           (value is not True or value is not False):
            # convert to a python boolean
            # NOTE(review): '(value is not True or value is not False)'
            # is always true; 'and' was probably intended.
            value = value and True or False
        if value and self._userClasses.has_key(elem):
            userClass = self._userClasses[elem]
            # create an instance (lazy: it will load itself on access)
            value = userClass(value)
        self._values[elem] = value
        position += 1
def _loadDB(self):
    """Connect to the database to load myself.

    Raises NotFound when the primary key is blank or no row matches.
    """
    if not self._validID():
        raise NotFound, self._getID()
    (sql, fields) = self._prepareSQL("SELECT")
    curs = self.cursor()
    curs.execute(sql, self._getID())
    result = curs.fetchone()
    if not result:
        curs.close()
        raise NotFound, self._getID()
    self._loadFromRow(result, fields, curs)
    curs.close()
    # Mark the cache fresh as of now.
    self._updated = time.time()
def _saveDB(self):
    """Insert or update into the database.

    Note that every field will be updated, not just the changed
    ones.
    """
    # We're a "fresh" copy now
    self._updated = time.time()
    if self._new:
        operation = 'INSERT'
        if not self._validID():
            self._setID(self._nextSequence())
        # Note that we assign this ID to our self
        # BEFORE possibly saving any of our attribute
        # objects that might be new as well. This means
        # that they might have references to us, as long
        # as the database does not require our existence
        # yet.
        #
        # Since mysql does not have Sequences, this will
        # not work as smoothly there. See class
        # MysqlForgetter below.
    else:
        operation = 'UPDATE'
    (sql, fields) = self._prepareSQL(operation)
    values = []
    for field in fields:
        value = getattr(self, field)
        # First some dirty datatype hacks
        if DateTime and type(value) == DateTime.DateTimeType:
            # stupid psycopg does not support it's own return type..
            # lovely..
            value = str(value)
        if DateTime and type(value) == DateTime.DateTimeDeltaType:
            # Format delta as days, hours, minutes seconds
            # NOTE: includes value.second directly to get the
            # whole floating number
            value = value.strftime("%d %H:%M:") + str(value.second)
        if value is True or value is False:
            # We must store booleans as 't' and 'f' ...
            value = value and 't' or 'f'
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                # NOTE(review): string exception -- invalid in
                # Python >= 2.6.
                raise "Unsupported: Can't reference multiple-primary-key: %s" % value
        values.append(value)
    cursor = self.cursor()
    cursor.execute(sql, values)
    # cursor.commit()
    cursor.close()
    self._new = False
    self._changed = None
def getAll(cls, where=None, orderBy=None):
    """Retrieve all the objects.

    If a list of ``where`` clauses is given, they will be AND-ed
    and will limit the search.

    This will not load everything out from the database, but will
    create a large amount of objects with only the ID inserted. The
    data will be loaded from the objects when needed by the regular
    load()-autocall.
    """
    ids = cls.getAllIDs(where, orderBy=orderBy)
    # Instantiate a lot of them; multi-key IDs are tuples that must
    # be unpacked into the constructor.
    if len(cls._sqlPrimary) > 1:
        return [cls(*id) for id in ids]
    else:
        return [cls(id) for id in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
                   useObject=None, orderBy=None):
    """Retrieve every object as an iterator.

    Possibly limited by the ``where`` list of clauses that will be
    AND-ed.

    Since an iterator is returned, only ``buffer`` rows are loaded
    from the database at once. This is useful if you need
    to process all objects.

    If ``useObject`` is given, this object is returned each time, but
    with new data. This can be used to avoid creating many new
    objects when only one object is needed each time.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    fetchedAt = time.time()
    curs.execute(sql)
    # We might start eating memory at this point
    def getNext(rows=[]):
        # NOTE: the mutable default argument is deliberate here --
        # it acts as a persistent fetch buffer shared between calls.
        forgetter = cls
        if not rows:
            rows += curs.fetchmany(buffer)
        if not rows:
            curs.close()
            # Returning the sentinel ends iter(getNext, None) below.
            return None
        row = rows[0]
        del rows[0]
        try:
            idPositions = [fields.index(key) for key in cls._sqlPrimary]
        except ValueError:
            # NOTE(review): string exception -- invalid in Python >= 2.6.
            raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
        ids = [row[pos] for pos in idPositions]
        if useObject:
            result = useObject
            result.reset()
            result._setID(ids)
        else:
            result = forgetter(*ids)
        result._loadFromRow(row, fields, curs)
        result._updated = fetchedAt
        return result
    return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
class MysqlForgetter(Forgetter):
"""MySQL-compatible Forgetter"""
def _saveDB(self):
"""Overloaded - we don't have nextval() in mysql"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
if not self._validID():
if not len(self._getID()) == 1:
raise "Can't retrieve auto-inserted ID for multiple-primary-key"
# Here's the mysql magic to get the new ID
self._setID(cursor.insert_id())
cursor.close()
self._new = False
def prepareClasses(locals):
"""Fix _userClasses and some stuff in classes.
Traverses locals, which is a locals() dictionary from
the namespace where Forgetter subclasses have been
defined, and resolves names in _userClasses to real
class-references.
Normally you would call forgettSQL.prepareClasses(locals())
after defining all classes in your local module.
prepareClasses will only touch objects in the name space
that is a subclassed of Forgetter.
"""
for (name, forgetter) in locals.items():
if not (type(forgetter) is types.TypeType and
issubclass(forgetter, Forgetter)):
# Only care about Forgetter objects
continue
# Resolve classes
for (key, userclass) in forgetter._userClasses.items():
if type(userclass) is types.StringType:
# resolve from locals
resolved = locals[userclass]
forgetter._userClasses[key] = resolved
forgetter._tables = {}
# Update all fields with proper names
for (field, sqlfield) in forgetter._sqlFields.items():
forgetter._sqlFields[field] = forgetter._checkTable(sqlfield)
newLinks = []
for linkpair in forgetter._sqlLinks:
(link1, link2) = linkpair
link1=forgetter._checkTable(link1)
link2=forgetter._checkTable(link2)
newLinks.append((link1, link2))
forgetter._sqlLinks = newLinks
forgetter._prepared = True
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter._setID | python | def _setID(self, id):
if type(id) in (types.ListType, types.TupleType):
try:
for key in self._sqlPrimary:
value = id[0]
self.__dict__[key] = value
id = id[1:] # rest, go revursive
except IndexError:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
elif len(self._sqlPrimary) <= 1:
# It's a simple value
key = self._sqlPrimary[0]
self.__dict__[key] = id
else:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
self._new = False | Set the ID, ie. the values for primary keys.
id can be either a list, following the
_sqlPrimary, or some other type, that will be set
as the singleton ID (requires 1-length sqlPrimary). | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L279-L300 | null | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
try:
import database
return database.cursor()
except:
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
if not hasattr(cls, '_cache'):
cls._cache = {}
try: # to implement 'goto' in Python.. UGH
if not cls._cache.has_key(args):
# unknown
raise "NotFound"
(ref, updated) = cls._cache[args]
realObject = ref()
if realObject is None:
# No more real references to it, dead object
raise "NotFound"
age = time.time() - updated
if age > cls._timeout:
# Too old!
raise "NotFound"
updated = time.time()
except "NotFound":
# We'll need to create it
realObject = object.__new__(cls, *args)
ref = weakref.ref(realObject)
updated = time.time()
# store a weak reference
cls._cache[args] = (ref, updated)
return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.
A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length), may be initalized by using several parameters
to this constructor. Note that the object will not be loaded
before you call load().
"""
self._values = {}
self.reset()
if not id:
self._resetID()
else:
self._setID(id)
def _getID(self):
"""Get the ID values as a tuple annotated by sqlPrimary"""
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
def __getattr__(self, key):
"""Get an attribute, normally a SQL field value.
Will be called when an unknown key is to be
retrieved, ie. most likely one of our database
fields.
"""
if self._sqlFields.has_key(key):
if not self._updated:
self.load()
return self._values[key]
else:
raise AttributeError, key
def __setattr__(self, key, value):
"""Set an attribute, normally a SQL field value.
Will be called whenever something needs to be set, so
we store the value as a SQL-thingie unless the key
is not listed in sqlFields.
"""
if key not in self._sqlPrimary and self._sqlFields.has_key(key):
if not self._updated:
self.load()
self._values[key] = value
self._changed = time.time()
else:
# It's a normal thingie
self.__dict__[key] = value
def __del__(self):
"""Save the object on deletion.
Be aware of this. If you want to undo some change, use reset()
first.
Be aware of Python 2.2's garbage collector, that
might run in the background. This means that
unless you call save() changes might not
be done immediately in the database.
Not calling save() also means that you cannot catch
errors caused by wrong insertion/update (ie. wrong
datatype for a field)
"""
if not self._autosave:
return
try:
self.save()
except Exception, e:
pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded).
"""
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1
def _loadDB(self):
"""Connect to the database to load myself"""
if not self._validID():
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
self._updated = time.time()
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
useObject=None, orderBy=None):
"""Retrieve every object as an iterator.
Possibly limitted by the where list of clauses that will be
AND-ed.
Since an iterator is returned, only ``buffer`` rows are loaded
from the database at once. This is useful if you need
to process all objects.
If useObject is given, this object is returned each time, but
with new data. This can be used to avoid creating many new
objects when only one object is needed each time.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
fetchedAt = time.time()
curs.execute(sql)
# We might start eating memory at this point
def getNext(rows=[]):
forgetter = cls
if not rows:
rows += curs.fetchmany(buffer)
if not rows:
curs.close()
return None
row = rows[0]
del rows[0]
try:
idPositions = [fields.index(key) for key in cls._sqlPrimary]
except ValueError:
raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
ids = [row[pos] for pos in idPositions]
if useObject:
result = useObject
result.reset()
result._setID(ids)
else:
result = forgetter(*ids)
result._loadFromRow(row, fields, curs)
result._updated = fetchedAt
return result
return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
    """Equality: same class *name* and same primary-key values.

    NOTE(review): compares class names rather than classes, and
    assumes ``obj`` has ``_getID`` whenever the names match -- confirm
    callers never compare against an unrelated same-named class.
    No ``__ne__`` is defined, so ``!=`` falls back to identity on
    Python 2 -- verify that is intended.
    """
    return self.__class__.__name__ == obj.__class__.__name__ \
           and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter._getID | python | def _getID(self):
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id | Get the ID values as a tuple annotated by sqlPrimary | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L302-L317 | null | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 compliant, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more appropriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a separate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
    """Return a DB-API cursor from a module named ``database``.

    This default is a placeholder -- users are expected to override
    ``cursor`` (and ``_dbModule``) from the outside.
    """
    try:
        import database
        return database.cursor()
    except:
        # NOTE(review): bare except hides the real import/connection
        # error, and string exceptions only work on Python <= 2.5.
        raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
    """Instance cache: reuse a live, non-expired instance per ID.

    A class-level dict maps constructor args to (weakref, timestamp);
    dead references and entries older than ``_timeout`` seconds cause
    a fresh instance to be created.
    """
    if not hasattr(cls, '_cache'):
        cls._cache = {}
    try: # to implement 'goto' in Python.. UGH
        # NOTE(review): raising/catching the *string* "NotFound" is a
        # Python <= 2.5 control-flow hack, unrelated to the NotFound
        # exception class used elsewhere.
        if not cls._cache.has_key(args):
            # unknown
            raise "NotFound"
        (ref, updated) = cls._cache[args]
        realObject = ref()
        if realObject is None:
            # No more real references to it, dead object
            raise "NotFound"
        age = time.time() - updated
        if age > cls._timeout:
            # Too old!
            raise "NotFound"
        updated = time.time()
    except "NotFound":
        # We'll need to create it
        realObject = object.__new__(cls, *args)
        ref = weakref.ref(realObject)
        updated = time.time()
    # store a weak reference so caching never keeps objects alive
    cls._cache[args] = (ref, updated)
    return realObject
def __init__(self, *id):
    """Initialize, possibly with a database id.

    A forgetter with a multivalue primary key (len(_sqlPrimary) > 1)
    may be initialized with several positional arguments.  The object
    is NOT loaded from the database here; loading happens lazily via
    attribute access or an explicit load().
    """
    self._values = {}
    self.reset()
    if not id:
        # No id given: brand-new object, primary keys left as None.
        self._resetID()
    else:
        self._setID(id)
def _setID(self, id):
    """Set the ID, i.e. the values of the primary-key fields.

    ``id`` may be a list/tuple matching _sqlPrimary positionally, or
    (for a single-field primary key) any other value used directly.
    Marks the instance as not-new.  Raises on too few id fields.
    """
    if type(id) in (types.ListType, types.TupleType):
        try:
            # Consume one value per primary-key field, in order.
            for key in self._sqlPrimary:
                value = id[0]
                self.__dict__[key] = value
                id = id[1:] # rest, go revursive
        except IndexError:
            # NOTE(review): string exceptions only work on Python <= 2.5.
            raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    elif len(self._sqlPrimary) <= 1:
        # It's a simple value
        key = self._sqlPrimary[0]
        self.__dict__[key] = id
    else:
        raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    self._new = False
def _resetID(self):
    """Clear every primary-key field and flag the instance as new."""
    blanks = tuple([None for _key in self._sqlPrimary])
    self._setID(blanks)
    self._new = True
def _validID(self):
    """True when every primary-key field has a value (none are None)."""
    return None not in self._getID()
def __getattr__(self, key):
    """Lazy access to SQL-backed attributes.

    Only called for names missing from __dict__, i.e. the mapped SQL
    fields stored in self._values.  Triggers a database load() on
    first access; unknown names raise AttributeError as usual.
    """
    if self._sqlFields.has_key(key):
        if not self._updated:
            # Never loaded (or explicitly invalidated): fetch now.
            self.load()
        return self._values[key]
    else:
        raise AttributeError, key
def __setattr__(self, key, value):
    """Intercept writes to SQL-backed attributes.

    Non-primary mapped fields are stored in self._values and the
    change timestamp is updated (a pending load is forced first so a
    later save does not clobber unloaded columns).  Everything else
    -- including primary-key fields -- goes straight into __dict__.
    """
    if key not in self._sqlPrimary and self._sqlFields.has_key(key):
        if not self._updated:
            self.load()
        self._values[key] = value
        self._changed = time.time()
    else:
        # It's a normal thingie
        self.__dict__[key] = value
def __del__(self):
    """Autosave on garbage collection when _autosave is enabled.

    Be aware: with Python 2.2+ background GC, changes might not hit
    the database immediately, and errors from a failed save (e.g. bad
    datatypes) cannot be caught by the caller.  Call save() explicitly
    when you care; call reset() first to discard unwanted changes.
    """
    if not self._autosave:
        return
    try:
        self.save()
    except Exception, e:
        # Deliberately swallowed: raising from __del__ is unsafe.
        # (Python 2 "except E, e" syntax kept byte-identical.)
        pass
def _checkTable(cls, field):
    """Split a _sqlFields value into table and column.

    Registers the table in cls._tables (used later to build FROM
    clauses) and returns the fully qualified ``table.column``; fields
    without a dot default to cls._sqlTable.
    """
    # Get table part
    try:
        (table, field) = field.split('.')
    except ValueError:
        # No dot: the field lives in the default table.
        table = cls._sqlTable
    # clean away white space
    table = table.strip()
    field = field.strip()
    # register table (dict used as an ordered-enough set)
    cls._tables[table] = None
    # and return in proper shape
    return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
    """Reset all fields, almost like creating a new object.

    Forgets unsaved changes!  Other holders of this instance may not
    expect that.  Override when subclasses add state not listed in
    _sqlFields.
    """
    self._resetID()
    self._new = None
    self._updated = None
    self._changed = None
    self._values = {}
    # initially create fields so every mapped attribute resolves to None
    for field in self._sqlFields.keys():
        self._values[field] = None
def load(self, id=None):
    """Load from database. Old values will be discarded.

    With ``id`` given, the object is re-pointed at that row first.
    New objects (no row yet) and objects with incomplete primary keys
    skip the actual database read.
    """
    if id is not None:
        # We are asked to change our ID to something else
        self.reset()
        self._setID(id)
    if not self._new and self._validID():
        self._loadDB()
    # Stamp even when nothing was read, so __getattr__ stops retrying.
    # NOTE(review): placement inferred from mangled indentation -- confirm.
    self._updated = time.time()
def save(self):
    """Write to the database when there is anything new to write.

    Saves brand-new objects, objects with a valid ID and pending
    changes, and objects changed after their last load.  Returns True
    when a save was performed, False otherwise.
    """
    needs_save = self._new
    if not needs_save:
        needs_save = self._validID() and self._changed
    if not needs_save:
        # Don't save if we have not loaded existing data!
        needs_save = self._updated and self._changed > self._updated
    if needs_save:
        self._saveDB()
        return True
    return False
def delete(self):
    """Delete my row from the database, then reset for reuse.

    After deletion the instance is reset() and can be given a new id.
    """
    (sql, ) = self._prepareSQL("DELETE")
    curs = self.cursor()
    # Primary-key values are bound as DB-API parameters.
    curs.execute(sql, self._getID())
    curs.close()
    self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
    """Build SQL for ``operation`` and return it with the field order.

    Operations: SELECT (one row by primary key), SELECTALL (every
    row), INSERT, UPDATE, DELETE.  The generated SQL contains %s
    placeholders for the caller to bind:

      SELECT / DELETE -> len(cls._sqlPrimary) placeholders
      SELECTALL       -> none
      INSERT / UPDATE -> one per field written (UPDATE also appends
                         the primary keys for its WHERE clause)

    Returns ``(sql, fields)`` for SELECT/SELECTALL/INSERT/UPDATE and
    ``(sql,)`` for DELETE.  ``fields`` lists the attribute names in
    placeholder/column order, because _sqlFields is a dict whose key
    order is otherwise unspecified.  ``where`` may be a string or a
    sequence of extra clauses (AND-ed) for SELECT, SELECTALL and
    DELETE.  For INSERT, use cls._nextSequence() to obtain a new id
    first (override it for multi-field primary keys).
    """
    # Normalize parameter for later comparisons
    operation = operation.upper()
    # Convert where to a list if it is a string (Python 2 str/unicode)
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if orderBy is None:
        orderBy = cls._orderBy
    if operation in ('SELECT', 'SELECTALL'):
        # Get the object fields and sql fields in the same
        # order to be able to reconstruct later.
        fields = []
        sqlfields = []
        for (field, sqlfield) in cls._sqlFields.items():
            if selectfields is None or field in selectfields:
                fields.append(field)
                sqlfields.append(sqlfield)
        if not fields:
            # dirrrrrty!  NOTE(review): string exception, Python <= 2.5.
            raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
        sql = "SELECT\n "
        sql += ', '.join(sqlfields)
        sql += "\nFROM\n "
        tables = cls._tables.keys()
        if not tables:
            raise "REALITY ERROR: No tables defined"
        sql += ', '.join(tables)
        # Join conditions linking the involved tables.
        tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
        # this MUST be here.
        if operation <> 'SELECTALL':
            # Plain SELECT: restrict to one row by primary key.
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
        if where:
            tempWhere += where
        if(tempWhere):
            # Make sure to use parentheses in case someone has used
            # ORs in the WHERE-list..
            sql += "\nWHERE\n ("
            sql += ') AND\n ('.join(tempWhere)
            sql += ')'
        if operation == 'SELECTALL' and orderBy:
            sql += '\nORDER BY\n '
            if type(orderBy) in (types.TupleType, types.ListType):
                orderBy = [cls._sqlFields[x] for x in orderBy]
                orderBy = ',\n '.join(orderBy)
            else:
                orderBy = cls._sqlFields[orderBy]
            sql += orderBy
        return (sql, fields)
    elif operation in ('INSERT', 'UPDATE'):
        if operation == 'UPDATE':
            sql = 'UPDATE %s SET\n ' % cls._sqlTable
        else:
            sql = 'INSERT INTO %s (\n ' % cls._sqlTable
        # NOTE(review): "set" shadows the builtin; kept as-is.
        set = []
        fields = []
        sqlfields = []
        for (field, sqlfield) in cls._sqlFields.items():
            if operation == 'UPDATE' and field in cls._sqlPrimary:
                # Primary keys go in the WHERE clause, not in SET.
                continue
            if sqlfield.find(cls._sqlTable + '.') == 0:
                # It's a local field, chop of the table part
                sqlfield = sqlfield[len(cls._sqlTable)+1:]
            fields.append(field)
            sqlfields.append(sqlfield)
            set.append(sqlfield + '=%s')
        if operation == 'UPDATE':
            sql += ',\n '.join(set)
            sql += '\nWHERE\n '
            tempWhere = []
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
                # Primary keys are bound last, after the SET values.
                fields.append(key)
            sql += ' AND\n '.join(tempWhere)
        else:
            sql += ',\n '.join(sqlfields)
            sql += ')\nVALUES (\n '
            sql += ',\n '.join(('%s',) * len(sqlfields))
            sql += ')'
        return (sql, fields)
    elif operation == 'DELETE':
        sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
        if where:
            sql += " AND\n ".join(where)
        else:
            # NOTE(review): the outer loop re-appends the whole joined
            # clause once per primary-key field -- looks redundant (and
            # wrong for multi-field keys); kept byte-identical.
            for key in cls._sqlPrimary:
                tempWhere = []
                for key in cls._sqlPrimary:
                    tempWhere.append(cls._sqlFields[key] + "=%s")
                sql += ' AND\n '.join(tempWhere)
        return (sql, )
    else:
        # NOTE(review): Python 2 two-argument raise with a string.
        raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
    """Return a new sequence number for insertion into _sqlTable.

    Uses cls._sqlSequence when set; otherwise guesses the PostgreSQL
    convention ``<table>_<primary>_seq``.  Pass ``name`` explicitly
    when your sequence is named differently.  Guessing fails for
    multi-field primary keys.
    """
    if not name:
        name = cls._sqlSequence
    if not name:
        # Assume it's tablename_primarykey_seq
        if len(cls._sqlPrimary) <> 1:
            raise "Could not guess sequence name for multi-primary-key"
        primary = cls._sqlPrimary[0]
        name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
        # Don't have . as a tablename or column name! =)
    curs = cls.cursor()
    # PostgreSQL-specific: nextval() advances and returns the sequence.
    curs.execute("SELECT nextval('%s')" % name)
    value = curs.fetchone()[0]
    curs.close()
    return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
    """Populate self._values from one database row.

    ``fields`` lists the attribute names in the same order as the
    columns of ``result``; ``cursor.description`` supplies each
    column's type.  BOOLEAN columns are coerced to Python bools, and
    fields listed in _userClasses are wrapped in an (unloaded)
    instance of that class constructed from the stored foreign ID.
    """
    position = 0
    for elem in fields:
        value = result[position]
        valueType = cursor.description[position][1]
        # BUGFIX: the original guard used "or", making the last clause
        # a tautology ("x is not True or x is not False" holds for
        # every x).  The intent is to coerce only values that are not
        # already bools; since the coercion below is idempotent for
        # bools, observable behaviour is unchanged.
        if hasattr(self._dbModule, 'BOOLEAN') and \
           valueType == self._dbModule.BOOLEAN and \
           (value is not True and value is not False):
            # convert to a python boolean
            value = value and True or False
        if value and self._userClasses.has_key(elem):
            userClass = self._userClasses[elem]
            # create an instance holding just the foreign ID (lazy load)
            value = userClass(value)
        self._values[elem] = value
        position += 1
def _loadDB(self):
    """Connect to the database and load my row.

    Raises NotFound when the primary key is incomplete or no matching
    row exists.  (Python 2 two-argument raise syntax kept.)
    """
    if not self._validID():
        raise NotFound, self._getID()
    (sql, fields) = self._prepareSQL("SELECT")
    curs = self.cursor()
    curs.execute(sql, self._getID())
    result = curs.fetchone()
    if not result:
        curs.close()
        raise NotFound, self._getID()
    self._loadFromRow(result, fields, curs)
    curs.close()
    self._updated = time.time()
def _saveDB(self):
    """INSERT (new objects) or UPDATE (existing) into the database.

    Every mapped field is written, not just the changed ones.  Values
    are massaged first: mx.DateTime values become strings, booleans
    become 't'/'f', and referenced Forgetter objects are saved if new
    and replaced by their single primary-key value.
    """
    # We're a "fresh" copy now
    self._updated = time.time()
    if self._new:
        operation = 'INSERT'
        if not self._validID():
            self._setID(self._nextSequence())
        # Note that we assign this ID to our self
        # BEFORE possibly saving any of our attribute
        # objects that might be new as well. This means
        # that they might have references to us, as long
        # as the database does not require our existence
        # yet.
        #
        # Since mysql does not have Sequences, this will
        # not work as smoothly there. See class
        # MysqlForgetter below.
    else:
        operation = 'UPDATE'
    (sql, fields) = self._prepareSQL(operation)
    values = []
    for field in fields:
        value = getattr(self, field)
        # First some dirty datatype hacks
        if DateTime and type(value) == DateTime.DateTimeType:
            # stupid psycopg does not support it's own return type..
            # lovely..
            value = str(value)
        if DateTime and type(value) == DateTime.DateTimeDeltaType:
            # Format delta as days, hours, minutes seconds
            # NOTE: includes value.second directly to get the
            # whole floating number
            value = value.strftime("%d %H:%M:") + str(value.second)
        if value is True or value is False:
            # We must store booleans as 't' and 'f' ...
            value = value and 't' or 'f'
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                # NOTE(review): string exception, Python <= 2.5 only.
                raise "Unsupported: Can't reference multiple-primary-key: %s" % value
        values.append(value)
    cursor = self.cursor()
    cursor.execute(sql, values)
    # cursor.commit()
    cursor.close()
    self._new = False
    self._changed = None
def getAll(cls, where=None, orderBy=None):
    """Retrieve all the objects, optionally limited by AND-ed ``where``.

    Only the IDs are fetched here; each returned instance carries just
    its primary key and loads the rest lazily via the regular
    load()-autocall.
    """
    ids = cls.getAllIDs(where, orderBy=orderBy)
    # Instantiate a lot of them
    if len(cls._sqlPrimary) > 1:
        # Multi-field keys come back as tuples: splat into constructor.
        return [cls(*id) for id in ids]
    else:
        return [cls(id) for id in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
                   useObject=None, orderBy=None):
    """Retrieve every object as an iterator.

    Possibly limited by the AND-ed ``where`` clauses.  Only ``buffer``
    rows are fetched from the database at a time, so all objects can
    be processed without loading everything at once.  With
    ``useObject`` given, that single object is re-filled and yielded
    for every row instead of creating new instances.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    fetchedAt = time.time()
    curs.execute(sql)
    # We might start eating memory at this point
    def getNext(rows=[]):
        # NOTE: the mutable default is deliberate here -- it is the
        # per-iterator row buffer, fresh for each getAllIterator call.
        forgetter = cls
        if not rows:
            rows += curs.fetchmany(buffer)
        if not rows:
            # Exhausted: close and return the iter() sentinel.
            curs.close()
            return None
        row = rows[0]
        del rows[0]
        try:
            idPositions = [fields.index(key) for key in cls._sqlPrimary]
        except ValueError:
            # NOTE(review): string exception, Python <= 2.5 only.
            raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
        ids = [row[pos] for pos in idPositions]
        if useObject:
            result = useObject
            result.reset()
            result._setID(ids)
        else:
            result = forgetter(*ids)
        result._loadFromRow(row, fields, curs)
        result._updated = fetchedAt
        return result
    # iter(callable, sentinel): keeps calling getNext until it returns None.
    return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
    """Retrieve all IDs, optionally limited by AND-ed ``where`` clauses.

    Returns a list of primary-key values; with a multi-field
    _sqlPrimary each entry is a tuple instead of a scalar.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where,
                                    cls._sqlPrimary, orderBy=orderBy)
    curs = cls.cursor()
    curs.execute(sql)
    # We might start eating memory at this point
    rows = curs.fetchall()
    curs.close()
    result = []
    idPositions = [fields.index(key) for key in cls._sqlPrimary]
    for row in rows:
        ids = [row[pos] for pos in idPositions]
        if len(idPositions) > 1:
            ids = tuple(ids)
        else:
            ids = ids[0]
        result.append((ids))
    return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
    """Return (id, description) tuples for every matching row.

    ``description`` is built from the cls._shortView columns joined
    with SEPERATOR.  With a multi-field _sqlPrimary the id element is
    a tuple.  (Parameter name "SEPERATOR" kept: it is public API.)
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    curs.execute(sql)
    # We might start eating memory at this point
    rows = curs.fetchall()
    curs.close()
    result = []
    idPositions = [fields.index(key) for key in cls._sqlPrimary]
    shortPos = [fields.index(short) for short in cls._shortView]
    for row in rows:
        ids = [row[pos] for pos in idPositions]
        if len(idPositions) > 1:
            ids = tuple(ids)
        else:
            ids = ids[0]
        text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
        result.append((ids, text))
    return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter._resetID | python | def _resetID(self):
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True | Reset all ID fields. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L319-L323 | [
"def _setID(self, id):\n \"\"\"Set the ID, ie. the values for primary keys.\n\n id can be either a list, following the\n _sqlPrimary, or some other type, that will be set\n as the singleton ID (requires 1-length sqlPrimary).\n \"\"\"\n if type(id) in (types.ListType, types.TupleType):\n try:\n for key in self._sqlPrimary:\n value = id[0]\n self.__dict__[key] = value\n id = id[1:] # rest, go revursive\n except IndexError:\n raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)\n elif len(self._sqlPrimary) <= 1:\n # It's a simple value\n key = self._sqlPrimary[0]\n self.__dict__[key] = id\n else:\n raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)\n self._new = False\n"
] | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
try:
import database
return database.cursor()
except:
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
    """Create or reuse an instance keyed on the constructor arguments.

    A per-class weakref cache maps the id arguments to live
    instances, so the same database row yields the same Python
    object.  Cache entries older than cls._timeout seconds, or whose
    target has been garbage collected, are recreated.

    NOTE(review): the control flow abuses *string exceptions*
    (raise "NotFound" / except "NotFound") as a goto; this only works
    on Python < 2.6 -- later interpreters raise TypeError here.
    """
    if not hasattr(cls, '_cache'):
        # Lazily create the per-class cache on first instantiation.
        cls._cache = {}
    try: # to implement 'goto' in Python.. UGH
        if not cls._cache.has_key(args):
            # unknown
            raise "NotFound"
        (ref, updated) = cls._cache[args]
        realObject = ref()
        if realObject is None:
            # No more real references to it, dead object
            raise "NotFound"
        age = time.time() - updated
        if age > cls._timeout:
            # Too old!
            raise "NotFound"
        updated = time.time()
    except "NotFound":
        # We'll need to create it
        realObject = object.__new__(cls, *args)
        ref = weakref.ref(realObject)
        updated = time.time()
    # store a weak reference
    cls._cache[args] = (ref, updated)
    return realObject
def __init__(self, *id):
    """Initialize, possibly with a database id.

    A forgetter with multivalue primary key (ie. _sqlPrimary more
    than 1 in length), may be initialized by using several parameters
    to this constructor. Note that the object will not be loaded
    before you call load() (or access a SQL field attribute).
    """
    self._values = {}
    self.reset()
    if not id:
        # No id given: brand-new object; primary key stays None until
        # the first save() allocates one.
        self._resetID()
    else:
        self._setID(id)
def _setID(self, id):
    """Set the ID, ie. the values for primary keys.

    id can be either a list, following the
    _sqlPrimary, or some other type, that will be set
    as the singleton ID (requires 1-length sqlPrimary).

    NOTE(review): raises *string exceptions* on arity mismatch;
    these become TypeError on Python >= 2.6.
    """
    if type(id) in (types.ListType, types.TupleType):
        try:
            for key in self._sqlPrimary:
                value = id[0]
                # Write through __dict__ to bypass our own __setattr__.
                self.__dict__[key] = value
                id = id[1:] # rest, go recursive
        except IndexError:
            raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    elif len(self._sqlPrimary) <= 1:
        # It's a simple value
        key = self._sqlPrimary[0]
        self.__dict__[key] = id
    else:
        raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    # An id is now assigned, so the object is not considered new.
    self._new = False
def _getID(self):
    """Get the ID values annotated by _sqlPrimary.

    Returns a *list* (despite historical docs saying tuple).  If a
    primary-key field holds another Forgetter, that object is saved
    if new and its (single) id value is used instead.
    """
    id = []
    for key in self._sqlPrimary:
        # Read via __dict__ to bypass __getattr__/lazy loading.
        value = self.__dict__[key]
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
        id.append(value)
    return id
def _validID(self):
    """True when every primary-key field carries a value (not None)."""
    return None not in self._getID()
def __getattr__(self, key):
    """Lazily resolve SQL field values as attributes.

    Invoked only for names not found the normal way.  Any name listed
    in _sqlFields triggers a database load() on first access and is
    answered from the _values cache; everything else raises
    AttributeError.
    """
    if key in self._sqlFields:
        if not self._updated:
            # First access: fetch the row from the database.
            self.load()
        return self._values[key]
    else:
        # Modern raise form; 'raise AttributeError, key' is a
        # syntax error on Python 3.
        raise AttributeError(key)
def __setattr__(self, key, value):
    """Intercept writes to SQL field attributes.

    Writes to names in _sqlFields (except primary keys) are stored in
    the _values cache and the object is marked dirty; anything else
    is set as a plain instance attribute.
    """
    if key not in self._sqlPrimary and key in self._sqlFields:
        if not self._updated:
            # Load the current row first so unrelated fields are not
            # clobbered when the object is later saved.
            self.load()
        self._values[key] = value
        # Time-stamp the change so save() knows a write is needed.
        self._changed = time.time()
    else:
        # It's a normal attribute (or a primary key): set directly.
        self.__dict__[key] = value
def __del__(self):
    """Autosave on garbage collection (best effort).

    If _autosave is true, pending changes are flushed with save()
    when the object is collected.  Use reset() first to discard
    changes, or call save() explicitly if you need to see errors:
    every exception here is deliberately swallowed, because raising
    from __del__ is unsafe.

    Note: with Python >= 2.2 garbage collection may run later in the
    background, so changes are not guaranteed to hit the database
    immediately.
    """
    if not self._autosave:
        return
    try:
        self.save()
    except Exception:
        # Deliberate best-effort: __del__ must never propagate.
        # (Old code used 'except Exception, e' and never read e.)
        pass
def _checkTable(cls, field):
    """Qualify a _sqlFields entry as ``table.column``.

    Entries without an explicit table part default to cls._sqlTable.
    As a side effect the table name is registered in cls._tables so
    SELECTs know which tables participate.
    """
    try:
        tablename, column = field.split('.')
    except ValueError:
        # No dot (or more than one): treat the whole string as a
        # column of the default table.
        tablename, column = cls._sqlTable, field
    # Clean away surrounding white space.
    tablename = tablename.strip()
    column = column.strip()
    # Register the table for later joins.
    cls._tables[tablename] = None
    # Return the fully qualified name.
    return tablename + '.' + column
_checkTable = classmethod(_checkTable)
def reset(self):
    """Return the forgetter to a pristine state, as if newly created.

    Note: forgets changes you have made that were not saved to the
    database!  (Others might already hold references to this object
    and expect the old state.)  Override this method if you add
    properties not defined in _sqlFields.
    """
    self._resetID()
    self._new = None
    self._updated = None
    self._changed = None
    # Every exposed SQL field starts out as None.
    self._values = dict.fromkeys(self._sqlFields)
def load(self, id=None):
    """Load from database. Old values will be discarded.

    If *id* is given, the object is reset and re-targeted at that
    primary key first.  New (unsaved) objects and objects with an
    incomplete primary key are not fetched; _updated is stamped so
    the lazy-load machinery does not retry on every attribute access.
    """
    if id is not None:
        # We are asked to change our ID to something else
        self.reset()
        self._setID(id)
    if not self._new and self._validID():
        self._loadDB()
    self._updated = time.time()
def save(self):
    """Persist pending changes; return True if a write happened.

    A write is issued when the object is new, or has a valid id and
    recorded changes, or was changed after its last refresh.  Nothing
    is written for unloaded, unchanged data.
    """
    must_write = (
        self._new
        or (self._validID() and self._changed)
        or (self._updated and self._changed > self._updated)
    )
    if not must_write:
        # Don't save if we have not loaded existing data!
        return False
    self._saveDB()
    return True
def delete(self):
    """Delete this object's row from the database immediately.

    Executes a DELETE keyed on the primary key, then reset()s the
    object so it can be reused with a new id.
    """
    (sql, ) = self._prepareSQL("DELETE")
    curs = self.cursor()
    # Primary-key values fill in the %s placeholders of the DELETE.
    curs.execute(sql, self._getID())
    curs.close()
    self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
    """Build SQL for the given operation.

    Possible operations, and the number of %s placeholders left for
    the caller to fill in via cursor.execute():

      SELECT     read data for one id   -- len(cls._sqlPrimary)
      SELECTALL  read data for all ids  -- 0
      INSERT     insert a new row       -- one per local field
      UPDATE     update one row         -- local fields + primary key
      DELETE     remove one row         -- len(cls._sqlPrimary)

    INSERT and UPDATE only touch columns of cls._sqlTable, so the
    placeholder count may be lower than len(cls._sqlFields).
    For INSERT use cls._nextSequence() to obtain a fresh id first
    (must be overloaded for multi-key _sqlPrimary).

    Return values are always tuples:

      SELECT/SELECTALL/INSERT/UPDATE -> (sql, fields)
      DELETE                         -> (sql,)

    ``fields`` lists the attribute names in the exact order their
    placeholders occur in the SQL -- dict iteration order is
    otherwise unspecified, so callers need this list to line up
    values correctly.

    The optional ``where`` (a string or a sequence of clauses, which
    are AND-ed) applies to SELECT, SELECTALL and DELETE.
    """
    # Normalize parameter for later comparisons.
    operation = operation.upper()
    # Convert where to a list if it is a string.
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if orderBy is None:
        orderBy = cls._orderBy
    if operation in ('SELECT', 'SELECTALL'):
        # Collect object fields and sql fields in the same order so
        # result rows can be mapped back onto attributes later.
        fields = []
        sqlfields = []
        for (field, sqlfield) in cls._sqlFields.items():
            if selectfields is None or field in selectfields:
                fields.append(field)
                sqlfields.append(sqlfield)
        if not fields:
            # Proper exception instead of a (py<2.6) string exception.
            raise ValueError(
                "No fields defined, cannot create SQL. "
                "Maybe sqlPrimary is invalid? Fields asked: %s "
                "My fields: %s" % (selectfields, cls._sqlFields))
        sql = "SELECT\n "
        sql += ', '.join(sqlfields)
        sql += "\nFROM\n "
        tables = cls._tables.keys()
        if not tables:
            raise ValueError("REALITY ERROR: No tables defined")
        sql += ', '.join(tables)
        # Join conditions between the participating tables.
        tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
        # this MUST be here.
        if operation != 'SELECTALL':
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
        if where:
            tempWhere += where
        if tempWhere:
            # Make sure to use parentheses in case someone has used
            # ORs in the WHERE-list..
            sql += "\nWHERE\n ("
            sql += ') AND\n ('.join(tempWhere)
            sql += ')'
        if operation == 'SELECTALL' and orderBy:
            sql += '\nORDER BY\n '
            if type(orderBy) in (types.TupleType, types.ListType):
                orderBy = [cls._sqlFields[x] for x in orderBy]
                orderBy = ',\n '.join(orderBy)
            else:
                orderBy = cls._sqlFields[orderBy]
            sql += orderBy
        return (sql, fields)
    elif operation in ('INSERT', 'UPDATE'):
        if operation == 'UPDATE':
            sql = 'UPDATE %s SET\n ' % cls._sqlTable
        else:
            sql = 'INSERT INTO %s (\n ' % cls._sqlTable
        # Renamed from 'set' -- the old name shadowed the builtin.
        assignments = []
        fields = []
        sqlfields = []
        for (field, sqlfield) in cls._sqlFields.items():
            if operation == 'UPDATE' and field in cls._sqlPrimary:
                continue
            if sqlfield.find(cls._sqlTable + '.') == 0:
                # It's a local field: chop off the table part and
                # include it in the statement.
                sqlfield = sqlfield[len(cls._sqlTable)+1:]
                fields.append(field)
                sqlfields.append(sqlfield)
                assignments.append(sqlfield + '=%s')
        if operation == 'UPDATE':
            sql += ',\n '.join(assignments)
            sql += '\nWHERE\n '
            tempWhere = []
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
                # Primary-key values come after the SET values.
                fields.append(key)
            sql += ' AND\n '.join(tempWhere)
        else:
            sql += ',\n '.join(sqlfields)
            sql += ')\nVALUES (\n '
            sql += ',\n '.join(('%s',) * len(sqlfields))
            sql += ')'
        return (sql, fields)
    elif operation == 'DELETE':
        sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
        if where:
            sql += " AND\n ".join(where)
        else:
            # BUGFIX: the old code wrapped this in an extra
            # 'for key in cls._sqlPrimary' loop, appending the whole
            # joined condition once per primary-key field and
            # producing invalid SQL for multi-key tables.
            tempWhere = []
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
            sql += ' AND\n '.join(tempWhere)
        return (sql, )
    else:
        raise ValueError("Unknown operation: %s" % operation)
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
    """Return a new sequence number for insertion in cls._sqlTable.

    The sequence name is, in order of preference: the *name*
    argument, cls._sqlSequence, or the guessed default
    ``tablename_primarykey_seq`` (ie. for table 'blapp' with primary
    key 'john_id': blapp_john_id_seq).

    NOTE: uses PostgreSQL's nextval(), so other backends must
    override this (see the MySQL subclass elsewhere in the file).
    """
    if not name:
        name = cls._sqlSequence
    if not name:
        # Assume it's tablename_primarykey_seq
        if len(cls._sqlPrimary) <> 1:
            raise "Could not guess sequence name for multi-primary-key"
        primary = cls._sqlPrimary[0]
        name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
        # Don't have . as a tablename or column name! =)
    curs = cls.cursor()
    curs.execute("SELECT nextval('%s')" % name)
    value = curs.fetchone()[0]
    curs.close()
    return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
    """Load from a database row, described by fields.

    ``fields`` should be the attribute names that
    will be set. Note that userclasses will be
    created (but not loaded until attribute access).
    """
    position = 0
    for elem in fields:
        value = result[position]
        # DB API type code of this column, used for boolean mapping.
        valueType = cursor.description[position][1]
        if hasattr(self._dbModule, 'BOOLEAN') and \
           valueType == self._dbModule.BOOLEAN and \
           (value is not True or value is not False):
            # convert to a python boolean
            # NOTE(review): the condition above is always true
            # (probably meant 'and'); harmless, since re-normalising
            # an existing bool is a no-op.
            value = value and True or False
        if value and self._userClasses.has_key(elem):
            userClass = self._userClasses[elem]
            # create an instance (lazy: loads on first attribute read)
            value = userClass(value)
        self._values[elem] = value
        position += 1
def _loadDB(self):
    """Connect to the database to load myself.

    Raises NotFound when the primary key is incomplete or the row
    does not exist.  (NOTE(review): NotFound is defined elsewhere in
    the file -- presumably an exception class; confirm before
    modernising the raise syntax.)
    """
    if not self._validID():
        raise NotFound, self._getID()
    (sql, fields) = self._prepareSQL("SELECT")
    curs = self.cursor()
    curs.execute(sql, self._getID())
    result = curs.fetchone()
    if not result:
        curs.close()
        raise NotFound, self._getID()
    self._loadFromRow(result, fields, curs)
    curs.close()
    # Freshly loaded; timestamp for the lazy-load machinery.
    self._updated = time.time()
def _saveDB(self):
    """Insert or update this object's row in the database.

    Note that every exposed field is written, not just the changed
    ones.  New objects without a valid id get one allocated from
    _nextSequence() before the INSERT.
    """
    # We're a "fresh" copy now
    self._updated = time.time()
    if self._new:
        operation = 'INSERT'
        if not self._validID():
            self._setID(self._nextSequence())
        # Note that we assign this ID to our self
        # BEFORE possibly saving any of our attribute
        # objects that might be new as well. This means
        # that they might have references to us, as long
        # as the database does not require our existence
        # yet.
        #
        # Since mysql does not have Sequences, this will
        # not work as smoothly there. See class
        # MysqlForgetter below.
    else:
        operation = 'UPDATE'
    (sql, fields) = self._prepareSQL(operation)
    values = []
    # Build the value list in the exact placeholder order reported
    # by _prepareSQL.
    for field in fields:
        value = getattr(self, field)
        # First some dirty datatype hacks
        if DateTime and type(value) == DateTime.DateTimeType:
            # stupid psycopg does not support it's own return type..
            # lovely..
            value = str(value)
        if DateTime and type(value) == DateTime.DateTimeDeltaType:
            # Format delta as days, hours, minutes seconds
            # NOTE: includes value.second directly to get the
            # whole floating number
            value = value.strftime("%d %H:%M:") + str(value.second)
        if value is True or value is False:
            # We must store booleans as 't' and 'f' ...
            value = value and 't' or 'f'
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                raise "Unsupported: Can't reference multiple-primary-key: %s" % value
        values.append(value)
    cursor = self.cursor()
    cursor.execute(sql, values)
    # cursor.commit()
    cursor.close()
    # Written out: no longer new, no pending changes.
    self._new = False
    self._changed = None
def getAll(cls, where=None, orderBy=None):
    """Retrieve all matching objects of this class.

    An optional list of ``where`` clauses is AND-ed to limit the
    search.  Only the ids are fetched here; each returned object
    loads its row lazily on first attribute access.
    """
    ids = cls.getAllIDs(where, orderBy=orderBy)
    multi_key = len(cls._sqlPrimary) > 1
    if multi_key:
        # Each id is a tuple matching _sqlPrimary; splat into ctor.
        return [cls(*one) for one in ids]
    return [cls(one) for one in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
                   useObject=None, orderBy=None):
    """Retrieve every object as an iterator.

    Possibly limited by the where list of clauses that will be
    AND-ed.

    Since an iterator is returned, only ``buffer`` rows are fetched
    from the database at once. This is useful if you need to process
    all objects.

    If useObject is given, that object is returned each time, but
    with new data. This avoids creating many new objects when only
    one object is needed at a time.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    fetchedAt = time.time()
    curs.execute(sql)
    # We might start eating memory at this point
    def getNext(rows=[]):
        # NOTE: the mutable default argument is used deliberately as
        # the closure's persistent row buffer between calls.
        forgetter = cls
        if not rows:
            rows += curs.fetchmany(buffer)
        if not rows:
            # Exhausted: close the cursor and end the iteration.
            # NOTE(review): the cursor is only closed if the caller
            # drains the iterator completely.
            curs.close()
            return None
        row = rows[0]
        del rows[0]
        try:
            idPositions = [fields.index(key) for key in cls._sqlPrimary]
        except ValueError:
            raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
        ids = [row[pos] for pos in idPositions]
        if useObject:
            # Recycle the caller's object instead of allocating.
            result = useObject
            result.reset()
            result._setID(ids)
        else:
            result = forgetter(*ids)
        result._loadFromRow(row, fields, curs)
        result._updated = fetchedAt
        return result
    # iter(callable, sentinel): call getNext until it returns None.
    return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
    """Retrieve all the IDs, possibly matching the where clauses.

    ``where`` should be a list of clauses that will be joined with
    AND.  Note that the results are tuples when this table has a
    multivalue _sqlPrimary, plain values otherwise.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where,
                    cls._sqlPrimary, orderBy=orderBy)
    curs = cls.cursor()
    curs.execute(sql)
    # We might start eating memory at this point
    rows = curs.fetchall()
    curs.close()
    result = []
    # Where in each row the primary-key columns live.
    idPositions = [fields.index(key) for key in cls._sqlPrimary]
    for row in rows:
        ids = [row[pos] for pos in idPositions]
        if len(idPositions) > 1:
            ids = tuple(ids)
        else:
            ids = ids[0]
        result.append((ids))
    return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
    """Retrieve a list of all possible instances of this class.

    The list is composed of tuples in the format (id, description) -
    where description is a string composed by the fields from
    cls._shortView, joined with SEPERATOR.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    curs.execute(sql)
    # We might start eating memory at this point
    rows = curs.fetchall()
    curs.close()
    result = []
    # Column positions for the primary key and the display fields.
    idPositions = [fields.index(key) for key in cls._sqlPrimary]
    shortPos = [fields.index(short) for short in cls._shortView]
    for row in rows:
        ids = [row[pos] for pos in idPositions]
        if len(idPositions) > 1:
            ids = tuple(ids)
        else:
            ids = ids[0]
        text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
        result.append((ids, text))
    return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
    """Return the children that link to me.

    That means that I have to be listed in their _userClasses
    somehow. If field is specified, that field in my children is
    used as the pointer to me. Use this if you have multiple fields
    referring to my class.
    """
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if not field:
        # Guess the linking field: the first _userClasses entry
        # whose class matches mine.
        for (i_field, i_class) in forgetter._userClasses.items():
            if isinstance(self, i_class):
                field = i_field
                break # first one found is ok :=)
    if not field:
        raise "No field found, check forgetter's _userClasses"
    sqlname = forgetter._sqlFields[field]
    myID = self._getID()[0] # assuming single-primary !
    # NOTE(review): the id is interpolated straight into the SQL --
    # only safe while primary keys are trusted/numeric values.
    whereList = ["%s='%s'" % (sqlname, myID)]
    if where:
        whereList.extend(where)
    return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
                        orderBy=None, useObject=None):
    """Like getChildren, except that it returns an
    iterator, like getAllIterator (so only a buffer of rows is held
    in memory at a time).
    """
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if not field:
        # Guess the linking field from the child's _userClasses.
        for (i_field, i_class) in forgetter._userClasses.items():
            if isinstance(self, i_class):
                field = i_field
                break # first one found is ok :=)
    if not field:
        raise "No field found, check forgetter's _userClasses"
    sqlname = forgetter._sqlFields[field]
    myID = self._getID()[0] # assuming single-primary !
    # NOTE(review): id interpolated into SQL, see getChildren.
    whereList = ["%s='%s'" % (sqlname, myID)]
    if where:
        whereList.extend(where)
    return forgetter.getAllIterator(whereList, useObject=useObject,
                                    orderBy=orderBy)
def __repr__(self):
    """Debug representation: class name plus primary-key values."""
    return '%s %s' % (self.__class__.__name__, self._getID())
def __str__(self):
    """Human-readable one-liner built from the _shortView fields.

    Falls back to the primary-key fields when _shortView is empty.
    """
    shortView = self._shortView or self._sqlPrimary
    short = [str(getattr(self, short)) for short in shortView]
    text = ', '.join(short)
    # return repr(self) + ': ' + text
    return text
def __eq__(self, obj):
    """Equality: same class name and equal primary-key values.

    Comparing against None or any object without a _getID method
    now returns False instead of raising AttributeError.
    (NOTE(review): no __ne__ appears to be defined, so '!=' may not
    be the negation of this -- confirm against the rest of the file.)
    """
    if obj is None or not hasattr(obj, '_getID'):
        # Guard added: the old code crashed here with AttributeError.
        return False
    return self.__class__.__name__ == obj.__class__.__name__ \
           and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter._checkTable | python | def _checkTable(cls, field):
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field | Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable) | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L381-L398 | null | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 compliant, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joined together with spaces or
# similar.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
    """Return a DB API 2.0 cursor for database operations.

    The default implementation delegates to a site-provided
    ``database`` module; subclasses normally override this
    classmethod (or assign ``cursor``) to supply their own
    connection handling.

    Raises RuntimeError if no database connection can be made.
    """
    try:
        import database
        return database.cursor()
    except Exception:
        # Raise a real exception object instead of a string
        # exception (string exceptions are a TypeError on
        # Python >= 2.6).
        raise RuntimeError(
            "cursor method undefined, no database connection could be made")
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
    """Create or reuse an instance keyed on the constructor arguments.

    A per-class weakref cache maps the id arguments to live
    instances, so the same database row yields the same Python
    object.  Cache entries older than cls._timeout seconds, or whose
    target has been garbage collected, are recreated.

    NOTE(review): the control flow abuses *string exceptions*
    (raise "NotFound" / except "NotFound") as a goto; this only works
    on Python < 2.6 -- later interpreters raise TypeError here.
    """
    if not hasattr(cls, '_cache'):
        # Lazily create the per-class cache on first instantiation.
        cls._cache = {}
    try: # to implement 'goto' in Python.. UGH
        if not cls._cache.has_key(args):
            # unknown
            raise "NotFound"
        (ref, updated) = cls._cache[args]
        realObject = ref()
        if realObject is None:
            # No more real references to it, dead object
            raise "NotFound"
        age = time.time() - updated
        if age > cls._timeout:
            # Too old!
            raise "NotFound"
        updated = time.time()
    except "NotFound":
        # We'll need to create it
        realObject = object.__new__(cls, *args)
        ref = weakref.ref(realObject)
        updated = time.time()
    # store a weak reference
    cls._cache[args] = (ref, updated)
    return realObject
def __init__(self, *id):
    """Initialize, possibly with a database id.

    A forgetter with multivalue primary key (ie. _sqlPrimary more
    than 1 in length), may be initialized by using several parameters
    to this constructor. Note that the object will not be loaded
    before you call load() (or access a SQL field attribute).
    """
    self._values = {}
    self.reset()
    if not id:
        # No id given: brand-new object; primary key stays None until
        # the first save() allocates one.
        self._resetID()
    else:
        self._setID(id)
def _setID(self, id):
    """Set the ID, ie. the values for primary keys.

    id can be either a list, following the
    _sqlPrimary, or some other type, that will be set
    as the singleton ID (requires 1-length sqlPrimary).

    NOTE(review): raises *string exceptions* on arity mismatch;
    these become TypeError on Python >= 2.6.
    """
    if type(id) in (types.ListType, types.TupleType):
        try:
            for key in self._sqlPrimary:
                value = id[0]
                # Write through __dict__ to bypass our own __setattr__.
                self.__dict__[key] = value
                id = id[1:] # rest, go recursive
        except IndexError:
            raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    elif len(self._sqlPrimary) <= 1:
        # It's a simple value
        key = self._sqlPrimary[0]
        self.__dict__[key] = id
    else:
        raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    # An id is now assigned, so the object is not considered new.
    self._new = False
def _getID(self):
    """Get the ID values annotated by _sqlPrimary.

    Returns a *list* (despite historical docs saying tuple).  If a
    primary-key field holds another Forgetter, that object is saved
    if new and its (single) id value is used instead.
    """
    id = []
    for key in self._sqlPrimary:
        # Read via __dict__ to bypass __getattr__/lazy loading.
        value = self.__dict__[key]
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
        id.append(value)
    return id
def _resetID(self):
    """Blank out every primary-key field and mark the object as new."""
    blanks = (None,) * len(self._sqlPrimary)
    self._setID(blanks)
    # _setID clears the flag, so re-flag as new afterwards.
    self._new = True
def _validID(self):
    """True when every primary-key field carries a value (not None)."""
    return None not in self._getID()
def __getattr__(self, key):
    """Lazily resolve SQL field values as attributes.

    Invoked only for names not found the normal way.  Any name listed
    in _sqlFields triggers a database load() on first access and is
    answered from the _values cache; everything else raises
    AttributeError.
    """
    if key in self._sqlFields:
        if not self._updated:
            # First access: fetch the row from the database.
            self.load()
        return self._values[key]
    else:
        # Modern raise form; 'raise AttributeError, key' is a
        # syntax error on Python 3.
        raise AttributeError(key)
def __setattr__(self, key, value):
    """Intercept writes to SQL field attributes.

    Writes to names in _sqlFields (except primary keys) are stored in
    the _values cache and the object is marked dirty; anything else
    is set as a plain instance attribute.
    """
    if key not in self._sqlPrimary and key in self._sqlFields:
        if not self._updated:
            # Load the current row first so unrelated fields are not
            # clobbered when the object is later saved.
            self.load()
        self._values[key] = value
        # Time-stamp the change so save() knows a write is needed.
        self._changed = time.time()
    else:
        # It's a normal attribute (or a primary key): set directly.
        self.__dict__[key] = value
def __del__(self):
    """Autosave on garbage collection (best effort).

    If _autosave is true, pending changes are flushed with save()
    when the object is collected.  Use reset() first to discard
    changes, or call save() explicitly if you need to see errors:
    every exception here is deliberately swallowed, because raising
    from __del__ is unsafe.

    Note: with Python >= 2.2 garbage collection may run later in the
    background, so changes are not guaranteed to hit the database
    immediately.
    """
    if not self._autosave:
        return
    try:
        self.save()
    except Exception:
        # Deliberate best-effort: __del__ must never propagate.
        # (Old code used 'except Exception, e' and never read e.)
        pass
_checkTable = classmethod(_checkTable)
def reset(self):
    """Return the forgetter to a pristine state, as if newly created.

    Note: forgets changes you have made that were not saved to the
    database!  (Others might already hold references to this object
    and expect the old state.)  Override this method if you add
    properties not defined in _sqlFields.
    """
    self._resetID()
    self._new = None
    self._updated = None
    self._changed = None
    # Every exposed SQL field starts out as None.
    self._values = dict.fromkeys(self._sqlFields)
def load(self, id=None):
    """Load from database. Old values will be discarded.

    If *id* is given, the object is reset and re-targeted at that
    primary key first.  New (unsaved) objects and objects with an
    incomplete primary key are not fetched; _updated is stamped so
    the lazy-load machinery does not retry on every attribute access.
    """
    if id is not None:
        # We are asked to change our ID to something else
        self.reset()
        self._setID(id)
    if not self._new and self._validID():
        self._loadDB()
    self._updated = time.time()
def save(self):
    """Persist pending changes; return True if a write happened.

    A write is issued when the object is new, or has a valid id and
    recorded changes, or was changed after its last refresh.  Nothing
    is written for unloaded, unchanged data.
    """
    must_write = (
        self._new
        or (self._validID() and self._changed)
        or (self._updated and self._changed > self._updated)
    )
    if not must_write:
        # Don't save if we have not loaded existing data!
        return False
    self._saveDB()
    return True
def delete(self):
    """Delete this object's row from the database immediately.

    Executes a DELETE keyed on the primary key, then reset()s the
    object so it can be reused with a new id.
    """
    (sql, ) = self._prepareSQL("DELETE")
    curs = self.cursor()
    # Primary-key values fill in the %s placeholders of the DELETE.
    curs.execute(sql, self._getID())
    curs.close()
    self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
    """Return a new sequence number for insertion in self._sqlTable.
    Note that if your sequences are not named
    tablename_primarykey_seq (ie. for table 'blapp' with primary
    key 'john_id', sequence name blapp_john_id_seq) you must give
    the full sequence name as an optional argument to _nextSequence)
    """
    if not name:
        # Explicit class-level override takes precedence over guessing
        name = cls._sqlSequence
    if not name:
        # Assume it's tablename_primarykey_seq
        if len(cls._sqlPrimary) <> 1:
            raise "Could not guess sequence name for multi-primary-key"
        primary = cls._sqlPrimary[0]
        name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
        # Don't have . as a tablename or column name! =)
    # NOTE(review): nextval() is PostgreSQL-specific, not portable SQL --
    # confirm the target database before reusing this class elsewhere.
    curs = cls.cursor()
    curs.execute("SELECT nextval('%s')" % name)
    value = curs.fetchone()[0]
    curs.close()
    return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
    """Populate self._values from one database row.

    ``result`` -- the row tuple as returned by the cursor
    ``fields`` -- attribute names, in the same order as the row columns
    ``cursor`` -- the originating cursor; its .description supplies the
                  column type used for boolean detection

    Fields listed in _userClasses are wrapped in their user class
    (instantiated with the raw ID, but not loaded).
    """
    position = 0
    for elem in fields:
        value = result[position]
        valueType = cursor.description[position][1]
        # BUGFIX: the original guard used "or", which is true for EVERY
        # value (nothing is both True and False), so already-boolean
        # values were pointlessly re-converted.  Only coerce values that
        # are not already Python booleans.
        if hasattr(self._dbModule, 'BOOLEAN') and \
           valueType == self._dbModule.BOOLEAN and \
           value is not True and value is not False:
            # convert to a python boolean
            value = value and True or False
        if value and self._userClasses.has_key(elem):
            userClass = self._userClasses[elem]
            # create an instance (lazy -- loaded on first attribute access)
            value = userClass(value)
        self._values[elem] = value
        position += 1
def _loadDB(self):
    """Connect to the database to load myself"""
    if not self._validID():
        # Cannot SELECT a row without a complete primary key
        raise NotFound, self._getID()
    (sql, fields) = self._prepareSQL("SELECT")
    curs = self.cursor()
    curs.execute(sql, self._getID())
    result = curs.fetchone()
    if not result:
        # No such row in the database
        curs.close()
        raise NotFound, self._getID()
    self._loadFromRow(result, fields, curs)
    curs.close()
    # Mark the in-memory copy as fresh
    self._updated = time.time()
def _saveDB(self):
    """Insert or update into the database.
    Note that every field will be updated, not just the changed
    one.
    """
    # We're a "fresh" copy now
    self._updated = time.time()
    if self._new:
        operation = 'INSERT'
        if not self._validID():
            self._setID(self._nextSequence())
        # Note that we assign this ID to our self
        # BEFORE possibly saving any of our attribute
        # objects that might be new as well. This means
        # that they might have references to us, as long
        # as the database does not require our existence
        # yet.
        #
        # Since mysql does not have Sequences, this will
        # not work as smoothly there. See class
        # MysqlForgetter below.
    else:
        operation = 'UPDATE'
    (sql, fields) = self._prepareSQL(operation)
    # Build the bind-parameter list in the exact order dictated by ``fields``
    values = []
    for field in fields:
        value = getattr(self, field)
        # First some dirty datatype hacks
        if DateTime and type(value) == DateTime.DateTimeType:
            # stupid psycopg does not support it's own return type..
            # lovely..
            value = str(value)
        if DateTime and type(value) == DateTime.DateTimeDeltaType:
            # Format delta as days, hours, minutes seconds
            # NOTE: includes value.second directly to get the
            # whole floating number
            value = value.strftime("%d %H:%M:") + str(value.second)
        if value is True or value is False:
            # We must store booleans as 't' and 'f' ...
            value = value and 't' or 'f'
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                raise "Unsupported: Can't reference multiple-primary-key: %s" % value
        values.append(value)
    cursor = self.cursor()
    cursor.execute(sql, values)
    # cursor.commit()
    cursor.close()
    # Row now exists and matches memory: clear the dirty markers
    self._new = False
    self._changed = None
def getAll(cls, where=None, orderBy=None):
    """Return a list of (lazily loaded) instances, one per matching row.

    ``where`` may hold extra SQL clauses that are AND-ed together.
    Only primary keys are fetched up front; full data is pulled in by
    the usual load()-on-first-access machinery.
    """
    multiKey = len(cls._sqlPrimary) > 1
    instances = []
    for rowID in cls.getAllIDs(where, orderBy=orderBy):
        if multiKey:
            instances.append(cls(*rowID))
        else:
            instances.append(cls(rowID))
    return instances
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
                   useObject=None, orderBy=None):
    """Retrieve every object as an iterator.
    Possibly limitted by the where list of clauses that will be
    AND-ed.
    Since an iterator is returned, only ``buffer`` rows are loaded
    from the database at once. This is useful if you need
    to process all objects.
    If useObject is given, this object is returned each time, but
    with new data. This can be used to avoid creating many new
    objects when only one object is needed each time.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    fetchedAt = time.time()
    curs.execute(sql)
    # We might start eating memory at this point
    def getNext(rows=[]):
        # NOTE: the mutable default argument is DELIBERATE -- it acts as
        # a persistent fetch buffer shared across the repeated calls made
        # by iter(getNext, None) below.  Do not "fix" it.
        forgetter = cls
        if not rows:
            # Buffer empty: pull the next batch from the cursor
            rows += curs.fetchmany(buffer)
        if not rows:
            # Exhausted: closing and returning None ends iter(getNext, None)
            curs.close()
            return None
        row = rows[0]
        del rows[0]
        try:
            idPositions = [fields.index(key) for key in cls._sqlPrimary]
        except ValueError:
            raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
        ids = [row[pos] for pos in idPositions]
        if useObject:
            # Recycle the caller-supplied instance instead of allocating
            result = useObject
            result.reset()
            result._setID(ids)
        else:
            result = forgetter(*ids)
        result._loadFromRow(row, fields, curs)
        result._updated = fetchedAt
        return result
    return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
    """Fetch only the primary-key values of every matching row.

    Single-key classes yield a flat list; multi-key classes yield a
    list of tuples.  ``where`` clauses (AND-ed) may narrow the result.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where,
                                    cls._sqlPrimary, orderBy=orderBy)
    cursor = cls.cursor()
    cursor.execute(sql)
    # Everything is materialized here -- may be large
    rows = cursor.fetchall()
    cursor.close()
    keyIndexes = [fields.index(key) for key in cls._sqlPrimary]
    single = len(keyIndexes) == 1
    collected = []
    for row in rows:
        if single:
            collected.append(row[keyIndexes[0]])
        else:
            collected.append(tuple([row[pos] for pos in keyIndexes]))
    return collected
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
    """List (id, description) tuples for every matching row.

    The description joins the _shortView columns with SEPERATOR.
    Single-key classes yield scalar ids, multi-key classes tuples.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    cursor = cls.cursor()
    cursor.execute(sql)
    # Everything is materialized here -- may be large
    rows = cursor.fetchall()
    cursor.close()
    keyIndexes = [fields.index(key) for key in cls._sqlPrimary]
    shortIndexes = [fields.index(short) for short in cls._shortView]
    single = len(keyIndexes) == 1
    texts = []
    for row in rows:
        if single:
            rowID = row[keyIndexes[0]]
        else:
            rowID = tuple([row[pos] for pos in keyIndexes])
        label = SEPERATOR.join([str(row[pos]) for pos in shortIndexes])
        texts.append((rowID, label))
    return texts
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
    """Return the children that links to me.
    That means that I have to be listed in their _userClasses
    somehow. If field is specified, that field in my children is
    used as the pointer to me. Use this if you have multiple fields
    referring to my class.
    """
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if not field:
        # Guess which child attribute points back at our class
        for (i_field, i_class) in forgetter._userClasses.items():
            if isinstance(self, i_class):
                field = i_field
                break # first one found is ok :=)
    if not field:
        raise "No field found, check forgetter's _userClasses"
    sqlname = forgetter._sqlFields[field]
    myID = self._getID()[0] # assuming single-primary !
    # NOTE(review): the ID is interpolated into the SQL, not bound as a
    # parameter -- fine for numeric sequence IDs, unsafe otherwise.
    whereList = ["%s='%s'" % (sqlname, myID)]
    if where:
        whereList.extend(where)
    return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
                        orderBy=None, useObject=None):
    """Like getChildren, except that it returns an
    iterator (see getAllIterator), so children are fetched in
    batches instead of all at once.
    """
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if not field:
        # Guess which child attribute points back at our class
        for (i_field, i_class) in forgetter._userClasses.items():
            if isinstance(self, i_class):
                field = i_field
                break # first one found is ok :=)
    if not field:
        raise "No field found, check forgetter's _userClasses"
    sqlname = forgetter._sqlFields[field]
    myID = self._getID()[0] # assuming single-primary !
    # NOTE(review): ID interpolated, not parameterized -- see getChildren
    whereList = ["%s='%s'" % (sqlname, myID)]
    if where:
        whereList.extend(where)
    return forgetter.getAllIterator(whereList, useObject=useObject,
                                    orderBy=orderBy)
def __repr__(self):
    # e.g. "User [42]" -- class name followed by the primary-key list
    suffix = ' %s' % self._getID()
    return self.__class__.__name__ + suffix
def __str__(self):
    """Comma-joined _shortView attribute values (primary key as fallback)."""
    attrNames = self._shortView or self._sqlPrimary
    parts = []
    for attrName in attrNames:
        parts.append(str(getattr(self, attrName)))
    return ', '.join(parts)
def __eq__(self, obj):
    """Equal when both objects share a class name and identical IDs."""
    sameClass = self.__class__.__name__ == obj.__class__.__name__
    # Short-circuit: only compare IDs when the classes already match
    return sameClass and self._getID() == obj._getID()
# --- extraction metadata (cleaned up; was raw dataset residue) ---------
# repository: stain/forgetSQL
# path: lib/forgetSQL.py   symbol: Forgetter.reset   split: train
# url: https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L402-L417
# (the reset() source and docstring quoted here duplicate the method
#  defined in the class below)


class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
    """Return a DB-API 2.0 cursor.

    Users are expected to override/replace this classmethod with one
    that returns a cursor from their own connection; the fallback below
    only works when a project-local ``database`` module with a
    cursor() factory is importable.
    """
    try:
        import database
        return database.cursor()
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still escape; raise a real exception instead of the original
        # string exception (invalid since Python 2.6).
        raise RuntimeError(
            "cursor method undefined, no database connection could be made")
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
# (consulted by _loadFromRow to recognize BOOLEAN columns;
#  None disables that conversion)
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
    """Return a live cached instance for *args* if fresh enough,
    otherwise allocate a new, uninitialized instance.

    A per-class dict of weak references keyed on the constructor
    arguments serves as the cache; entries older than cls._timeout
    seconds (or whose referent has been collected) are replaced.
    """
    if not hasattr(cls, '_cache'):
        cls._cache = {}
    realObject = None
    cached = cls._cache.get(args)
    if cached is not None:
        (ref, cachedAt) = cached
        candidate = ref()
        # Reuse only while the weakref is alive and the entry unexpired
        if candidate is not None and time.time() - cachedAt <= cls._timeout:
            realObject = candidate
    if realObject is None:
        # BUGFIX: the original used string exceptions as a 'goto'
        # (raise "NotFound" / except "NotFound"), which is a TypeError on
        # Python >= 2.6, and passed *args to object.__new__, which also
        # breaks on modern Pythons.  Plain conditionals do the same job.
        realObject = object.__new__(cls)
        ref = weakref.ref(realObject)
    # (Re)store the weak reference with a fresh timestamp
    cls._cache[args] = (ref, time.time())
    return realObject
def __init__(self, *id):
    """Initialize, possibly with a database id.
    A forgetter with multivalue primary key (ie. _sqlPrimary more
    than 1 in length), may be initalized by using several parameters
    to this constructor. Note that the object will not be loaded
    before you call load().
    """
    # NOTE(review): __new__ may hand back a cached instance, in which
    # case this __init__ runs again and wipes its state -- confirm this
    # re-initialization is intended before relying on cache hits.
    self._values = {}
    self.reset()
    if not id:
        # No key given: a brand-new (unsaved) object
        self._resetID()
    else:
        self._setID(id)
def _setID(self, id):
    """Set the ID, ie. the values for primary keys.
    id can be either a list, following the
    _sqlPrimary, or some other type, that will be set
    as the singleton ID (requires 1-length sqlPrimary).
    """
    # Writes go straight to __dict__ to bypass __setattr__'s
    # field-tracking (primary keys are not stored in _values).
    if type(id) in (types.ListType, types.TupleType):
        try:
            for key in self._sqlPrimary:
                value = id[0]
                self.__dict__[key] = value
                id = id[1:] # rest, go revursive
        except IndexError:
            raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    elif len(self._sqlPrimary) <= 1:
        # It's a simple value
        key = self._sqlPrimary[0]
        self.__dict__[key] = id
    else:
        raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    # Having an explicit ID means we refer to an existing row
    self._new = False
def _getID(self):
    """Get the ID values as a tuple annotated by sqlPrimary"""
    id = []
    for key in self._sqlPrimary:
        # Read from __dict__ directly: primary keys bypass _values
        value = self.__dict__[key]
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
        id.append(value)
    return id
def _resetID(self):
    """Blank every primary-key attribute and flag the object as new."""
    blanks = tuple([None] * len(self._sqlPrimary))
    self._setID(blanks)
    # _setID flips _new to False; a blank ID means "not yet in the DB"
    self._new = True
def _validID(self):
    """True when every primary-key attribute holds a non-None value."""
    return None not in self._getID()
def __getattr__(self, key):
    """Get an attribute, normally a SQL field value.
    Will be called when an unknown key is to be
    retrieved, ie. most likely one of our database
    fields.
    """
    # Only invoked for attributes NOT found the normal way, so plain
    # instance/class attributes never reach this code.
    if self._sqlFields.has_key(key):
        if not self._updated:
            # Lazy load: first field access pulls the row from the DB
            self.load()
        return self._values[key]
    else:
        raise AttributeError, key
def __setattr__(self, key, value):
    """Set an attribute, normally a SQL field value.
    Will be called whenever something needs to be set, so
    we store the value as a SQL-thingie unless the key
    is not listed in sqlFields.
    """
    if key not in self._sqlPrimary and self._sqlFields.has_key(key):
        if not self._updated:
            # Load first so a later save() writes a complete row
            self.load()
        self._values[key] = value
        # Timestamp marks the object dirty for save()
        self._changed = time.time()
    else:
        # It's a normal thingie
        # (primary keys and non-field attributes go straight to __dict__)
        self.__dict__[key] = value
def __del__(self):
    """Save the object on deletion.
    Be aware of this. If you want to undo some change, use reset()
    first.
    Be aware of Python 2.2's garbage collector, that
    might run in the background. This means that
    unless you call save() changes might not
    be done immediately in the database.
    Not calling save() also means that you cannot catch
    errors caused by wrong insertion/update (ie. wrong
    datatype for a field)
    """
    # Per-class switch: classes may opt out of autosave-on-GC entirely
    if not self._autosave:
        return
    try:
        self.save()
    except Exception, e:
        # Deliberate best-effort: exceptions cannot usefully propagate
        # out of a destructor, so failed autosaves are dropped silently.
        pass
def _checkTable(cls, field):
    """Qualify *field* as "table.column" and remember its table.

    A bare column name is assumed to belong to cls._sqlTable.  As a
    side effect the table is registered in cls._tables, which is later
    used to build FROM clauses.
    """
    try:
        (table, field) = field.split('.')
    except ValueError:
        # No table part present: fall back to our own table
        table = cls._sqlTable
    cleanTable = table.strip()
    cleanField = field.strip()
    # Record the table for FROM-clause generation
    cls._tables[cleanTable] = None
    return cleanTable + '.' + cleanField
_checkTable = classmethod(_checkTable)
def load(self, id=None):
    """(Re)read this object from the database, dropping old values.

    When ``id`` is given, the object is reset and re-targeted at that
    primary key before loading.
    """
    if id is not None:
        # Re-point this instance at a different row
        self.reset()
        self._setID(id)
    # Only objects that already exist in the database can be loaded
    if self._new or not self._validID():
        return
    self._loadDB()
    self._updated = time.time()
def save(self):
    """Write pending changes to the database.

    Returns True when a save was performed, False when nothing
    needed saving.  Unloaded existing objects are never written.
    """
    mustSave = self._new
    if not mustSave:
        mustSave = self._validID() and self._changed
    if not mustSave:
        mustSave = self._updated and self._changed > self._updated
    if not mustSave:
        return False
    self._saveDB()
    return True
def delete(self):
    """Mark this object for deletion in the database.
    The object will then be reset and ready for use
    again with a new id.
    """
    (sql, ) = self._prepareSQL("DELETE")
    curs = self.cursor()
    # The DELETE statement carries one %s placeholder per primary-key column
    curs.execute(sql, self._getID())
    curs.close()
    # Forget everything locally; the instance can be reused for a new row
    self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
    """Return a new sequence number for insertion in self._sqlTable.
    Note that if your sequences are not named
    tablename_primarykey_seq (ie. for table 'blapp' with primary
    key 'john_id', sequence name blapp_john_id_seq) you must give
    the full sequence name as an optional argument to _nextSequence)
    """
    if not name:
        # Explicit class-level override takes precedence over guessing
        name = cls._sqlSequence
    if not name:
        # Assume it's tablename_primarykey_seq
        if len(cls._sqlPrimary) <> 1:
            raise "Could not guess sequence name for multi-primary-key"
        primary = cls._sqlPrimary[0]
        name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
        # Don't have . as a tablename or column name! =)
    # NOTE(review): nextval() is PostgreSQL-specific, not portable SQL --
    # confirm the target database before reusing this class elsewhere.
    curs = cls.cursor()
    curs.execute("SELECT nextval('%s')" % name)
    value = curs.fetchone()[0]
    curs.close()
    return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
    """Populate self._values from one database row.

    ``result`` -- the row tuple as returned by the cursor
    ``fields`` -- attribute names, in the same order as the row columns
    ``cursor`` -- the originating cursor; its .description supplies the
                  column type used for boolean detection

    Fields listed in _userClasses are wrapped in their user class
    (instantiated with the raw ID, but not loaded).
    """
    position = 0
    for elem in fields:
        value = result[position]
        valueType = cursor.description[position][1]
        # BUGFIX: the original guard used "or", which is true for EVERY
        # value (nothing is both True and False), so already-boolean
        # values were pointlessly re-converted.  Only coerce values that
        # are not already Python booleans.
        if hasattr(self._dbModule, 'BOOLEAN') and \
           valueType == self._dbModule.BOOLEAN and \
           value is not True and value is not False:
            # convert to a python boolean
            value = value and True or False
        if value and self._userClasses.has_key(elem):
            userClass = self._userClasses[elem]
            # create an instance (lazy -- loaded on first attribute access)
            value = userClass(value)
        self._values[elem] = value
        position += 1
def _loadDB(self):
    """Connect to the database to load myself"""
    if not self._validID():
        # Cannot SELECT a row without a complete primary key
        raise NotFound, self._getID()
    (sql, fields) = self._prepareSQL("SELECT")
    curs = self.cursor()
    curs.execute(sql, self._getID())
    result = curs.fetchone()
    if not result:
        # No such row in the database
        curs.close()
        raise NotFound, self._getID()
    self._loadFromRow(result, fields, curs)
    curs.close()
    # Mark the in-memory copy as fresh
    self._updated = time.time()
def _saveDB(self):
    """Insert or update into the database.

    Note that every field will be updated, not just the changed
    one.
    """
    # We're a "fresh" copy now
    self._updated = time.time()
    if self._new:
        operation = 'INSERT'
        if not self._validID():
            self._setID(self._nextSequence())
        # Note that we assign this ID to our self
        # BEFORE possibly saving any of our attribute
        # objects that might be new as well. This means
        # that they might have references to us, as long
        # as the database does not require our existence
        # yet.
        #
        # Since mysql does not have Sequences, this will
        # not work as smoothly there. See class
        # MysqlForgetter below.
    else:
        operation = 'UPDATE'
    (sql, fields) = self._prepareSQL(operation)
    values = []
    for field in fields:
        value = getattr(self, field)
        # First some dirty datatype hacks
        if DateTime and type(value) == DateTime.DateTimeType:
            # stupid psycopg does not support it's own return type..
            # lovely..
            value = str(value)
        if DateTime and type(value) == DateTime.DateTimeDeltaType:
            # Format delta as days, hours, minutes seconds
            # NOTE: includes value.second directly to get the
            # whole floating number
            value = value.strftime("%d %H:%M:") + str(value.second)
        if value is True or value is False:
            # We must store booleans as 't' and 'f' ...
            value = value and 't' or 'f'
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                # NOTE(review): string exception -- raises TypeError on
                # Python >= 2.6 instead of this message
                raise "Unsupported: Can't reference multiple-primary-key: %s" % value
        values.append(value)
    cursor = self.cursor()
    cursor.execute(sql, values)
    # cursor.commit()
    cursor.close()
    # mark as persisted and unmodified
    self._new = False
    self._changed = None
def getAll(cls, where=None, orderBy=None):
    """Retrieve all the objects.

    If a list of ``where`` clauses are given, they will be AND-ed
    and will limit the search.

    Only the IDs are fetched up front; each returned instance loads
    the rest of its fields lazily through the regular load()-autocall.
    """
    ids = cls.getAllIDs(where, orderBy=orderBy)
    multiKey = len(cls._sqlPrimary) > 1
    instances = []
    for id in ids:
        if multiKey:
            instances.append(cls(*id))
        else:
            instances.append(cls(id))
    return instances
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
                   useObject=None, orderBy=None):
    """Retrieve every object as an iterator.

    Possibly limitted by the where list of clauses that will be
    AND-ed.

    Since an iterator is returned, only ``buffer`` rows are loaded
    from the database at once. This is useful if you need
    to process all objects.

    If useObject is given, this object is returned each time, but
    with new data. This can be used to avoid creating many new
    objects when only one object is needed each time.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    fetchedAt = time.time()
    curs.execute(sql)
    # We might start eating memory at this point
    def getNext(rows=[]):
        # NOTE: the mutable default is deliberate -- it is the
        # per-iterator row buffer, created fresh on each
        # getAllIterator() call
        forgetter = cls
        if not rows:
            rows += curs.fetchmany(buffer)
            if not rows:
                # exhausted: returning the sentinel stops iter() below
                curs.close()
                return None
        row = rows[0]
        del rows[0]
        try:
            idPositions = [fields.index(key) for key in cls._sqlPrimary]
        except ValueError:
            # NOTE(review): string exception -- raises TypeError on
            # Python >= 2.6 instead of this message
            raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
        ids = [row[pos] for pos in idPositions]
        if useObject:
            # recycle the caller-supplied instance instead of creating
            result = useObject
            result.reset()
            result._setID(ids)
        else:
            result = forgetter(*ids)
        result._loadFromRow(row, fields, curs)
        result._updated = fetchedAt
        return result
    # two-argument iter(): call getNext until it returns None
    return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
    """Retrieve all the IDs, possibly matching the where clauses.

    ``where`` should be a list of where clauses that will be joined
    with AND. Note that the result elements are tuples if this table
    has a multivalue _sqlPrimary, plain values otherwise.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where,
                                    cls._sqlPrimary, orderBy=orderBy)
    curs = cls.cursor()
    curs.execute(sql)
    # We might start eating memory at this point
    rows = curs.fetchall()
    curs.close()
    result = []
    # map each primary-key attribute to its column position
    idPositions = [fields.index(key) for key in cls._sqlPrimary]
    for row in rows:
        ids = [row[pos] for pos in idPositions]
        if len(idPositions) > 1:
            ids = tuple(ids)
        else:
            # single-column key: unwrap to the bare value
            ids = ids[0]
        result.append((ids))
    return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
    """Retrieve a list of all possible instances of this class.

    The list is composed of tuples in the format (id, description) -
    where description is a string composed by the fields from
    cls._shortView, joined with SEPERATOR.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    curs.execute(sql)
    # We might start eating memory at this point
    rows = curs.fetchall()
    curs.close()
    result = []
    # column positions of the primary key and of the display fields
    idPositions = [fields.index(key) for key in cls._sqlPrimary]
    shortPos = [fields.index(short) for short in cls._shortView]
    for row in rows:
        ids = [row[pos] for pos in idPositions]
        if len(idPositions) > 1:
            ids = tuple(ids)
        else:
            # single-column key: unwrap to the bare value
            ids = ids[0]
        text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
        result.append((ids, text))
    return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
    """Return the children that links to me.

    That means that I have to be listed in their _userClasses
    somehow. If field is specified, that field in my children is
    used as the pointer to me. Use this if you have multiple fields
    referring to my class.
    """
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if not field:
        # pick the first child field whose user class matches us
        for (i_field, i_class) in forgetter._userClasses.items():
            if isinstance(self, i_class):
                field = i_field
                break # first one found is ok :=)
    if not field:
        # was a string exception -- a TypeError on Python >= 2.6
        raise ValueError("No field found, check forgetter's _userClasses")
    sqlname = forgetter._sqlFields[field]
    myID = self._getID()[0] # assuming single-primary !
    # NOTE(review): myID is interpolated straight into SQL -- safe only
    # while IDs come from our own sequences; parameterize if IDs can
    # ever be user-supplied
    whereList = ["%s='%s'" % (sqlname, myID)]
    if where:
        whereList.extend(where)
    return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
                        orderBy=None, useObject=None):
    """Like getChildren, except that it returns an iterator
    (see getAllIterator). useObject may be supplied to recycle one
    instance for every row.
    """
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if not field:
        # pick the first child field whose user class matches us
        for (i_field, i_class) in forgetter._userClasses.items():
            if isinstance(self, i_class):
                field = i_field
                break # first one found is ok :=)
    if not field:
        # was a string exception -- a TypeError on Python >= 2.6
        raise ValueError("No field found, check forgetter's _userClasses")
    sqlname = forgetter._sqlFields[field]
    myID = self._getID()[0] # assuming single-primary !
    # NOTE(review): myID is interpolated straight into SQL --
    # parameterize if IDs can ever be user-supplied
    whereList = ["%s='%s'" % (sqlname, myID)]
    if where:
        whereList.extend(where)
    return forgetter.getAllIterator(whereList, useObject=useObject,
                                    orderBy=orderBy)
def __repr__(self):
    """Debug representation: class name followed by the primary key."""
    return '%s %s' % (self.__class__.__name__, self._getID())
def __str__(self):
    """Short human-readable view: the _shortView fields (or the
    primary key when _shortView is empty) joined by ', '."""
    shown = self._shortView or self._sqlPrimary
    parts = [str(getattr(self, attr)) for attr in shown]
    return ', '.join(parts)
def __eq__(self, obj):
    """Simple comparison of objects: same class name and same ID."""
    return self.__class__.__name__ == obj.__class__.__name__ \
           and self._getID() == obj._getID()

def __ne__(self, obj):
    """Inverse of __eq__.

    Without this, ``!=`` on Python 2 fell back to identity comparison
    even though ``==`` was overridden above.
    """
    return not self.__eq__(obj)
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter.load | python | def load(self, id=None):
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time() | Load from database. Old values will be discarded. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L419-L427 | [
"def _setID(self, id):\n \"\"\"Set the ID, ie. the values for primary keys.\n\n id can be either a list, following the\n _sqlPrimary, or some other type, that will be set\n as the singleton ID (requires 1-length sqlPrimary).\n \"\"\"\n if type(id) in (types.ListType, types.TupleType):\n try:\n for key in self._sqlPrimary:\n value = id[0]\n self.__dict__[key] = value\n id = id[1:] # rest, go revursive\n except IndexError:\n raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)\n elif len(self._sqlPrimary) <= 1:\n # It's a simple value\n key = self._sqlPrimary[0]\n self.__dict__[key] = id\n else:\n raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)\n self._new = False\n",
"def _validID(self):\n \"\"\"Is all ID fields with values, ie. not None?\"\"\"\n return not None in self._getID()\n",
"def reset(self):\n \"\"\"Reset all fields, almost like creating a new object.\n\n Note: Forgets changes you have made not saved to database!\n (Remember: Others might reference the object already, expecting\n something else!) Override this method if you add properties not\n defined in _sqlFields.\n \"\"\"\n self._resetID()\n self._new = None\n self._updated = None\n self._changed = None\n self._values = {}\n # initially create fields\n for field in self._sqlFields.keys():\n self._values[field] = None\n"
class Forgetter(object):
    """SQL to object database wrapper.

    Given a welldefined database, by subclassing Forgetter
    and supplying some attributes, you may wrap your SQL tables
    into objects that are easier to program with.

    You must define all fields in the database table that you want
    to expose, and you may refine the names to suit your
    object oriented programming style. (ie. customerID -> customer)

    Objects will be created without loading from database,
    loading will occur when you try to read or write some of the
    attributes defined as a SQL field. If you change some attributes the
    object will be saved to the database by save() or garbage
    collection. (be aware that GC in Py >= 2.2 is not immediate)

    If you want to create new objects, just supply them with blank
    ID-fields, and _nextSequence() will be called to fetch a new
    ID used for insertion.

    The rule is one class pr. table, although it is possible
    to join several table into one class, as long as the
    identificator is unique.

    By defining _userClasses you can resolve links to other
    tables, a field in this table would be an id in another
    table, ie. another class. In practical use this means that
    behind attributes pointing to other classes (tables)
    you will find instances of that class.

    Short example usage of forgetter objects::

        # Process all
        for user in User.getAllIterator():
            # Access attributes
            print user.name
            print "Employed at:"
            # Access the Employed-class/table
            print user.employed.name, user.employed.address
            # fire him, setting employed reference to SQL NULL
            user.employed = None

        # Retrieve some ID
        shop = Shop(552)
        shop.name = 'Corrected name'
        shop.save() # Save now instead of waiting for garbage collactor

        # Include SQL where-statements in selections
        myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))

    Requirements:
    The attributes 'cursor' and '_dbModule' should be set from the
    outside. The cursor should be DB 2.0 complient, preferably with
    autocommit turned on. (Transactions are not within the scope of this
    module yet)

    Python 2.2 (iterators, methodclasses)
    """
    # How long to keep objects in cache, in seconds (used by __new__)
    _timeout = 60
    # Will be True once prepare() is called
    _prepared = False
    # The default table containing our fields
    # _sqlTable = 'shop'
    _sqlTable = ''
    # A mapping between our fields and the database fields.
    #
    # You must include all fields needed here. You may specify
    # other names if you want to make the sql name more approriate
    # for object oriented programming. (Like calling a field 'location'
    # instead of 'location_id', because we wrap the location in a seperate
    # object and don't really care about the id)
    #
    # You may reference to other tables with a dot, all
    # other db fields will be related to _sqlTable.
    # If you reference other tables, don't forget to
    # modify _sqlLinks.
    #
    # _sqlFields = {
    #   'id': 'shop_id',
    #   'name': 'name',
    #   'location': 'location_id',
    #   'chain': 'shop_chain_id',
    #   'address': 'address.address_id',
    # }
    _sqlFields = {}
    # A list of attribute names (in the object, not database)
    # that are the primary key in the database. Normally
    # 'id' is sufficient. It is legal to have
    # multiple fields as primary key, but it won't work
    # properly with _userClasses and getChildren().
    #
    # If your table is a link table or something, ALL fields
    # should be in _sqlPrimary. (all fields are needed to define
    # a unique row to be deleted/updated)
    _sqlPrimary = ('id',)
    # When using several tables, you should include a
    # 'link' statement, displaying which fields link the
    # two tables together. Note that these are sql names.
    # _sqlLinks = (
    #   ('shop_id', 'address.shop_id'),
    # )
    _sqlLinks = ()
    # The name of the sequence used by _nextSequence
    # - if None, a guess will be made based on _sqlTable
    # and _sqlPrimary.
    _sqlSequence = None
    # Order by this attribute by default, if specified
    # _orderBy = 'name' - this could also be a tuple
    _orderBy = None
    # _userClasses can be used to trigger creation of a field
    # with an instance of the class. The given database field
    # will be sent to the constructor as an objectID
    # (ie. as self.id in this object) (ie. the class does not
    # neccessary need to be a subclass of Forgetter)
    #
    # This means that the attribute will be an instance of that
    # class, not the ID. The object will not be loaded from the
    # database until you try to read any of it's attributes,
    # though. (to prevent unneccessary database overload and
    # recursions)
    #
    # Notice that _userClasses must be a name resolvable, ie.
    # from the same module as your other classes.
    # _userClasses = {
    #   'location': 'Location',
    #   'chain': 'Chain',
    #   'address': 'Address',
    # }
    _userClasses = {}
    # If you want userClasses to work properly with strings instead of
    # instances, you must also 'prepare' your classes to resolve the
    # names. This must be done from the same module you are defining the
    # classes: forgetSQL.prepareClasses(locals())
    # A list of fields that are suitable for a textual
    # representation (typical a one liner).
    #
    # Fields will be joint together with spaces or
    # simular.
    # _shortView = ('name')
    _shortView = ()
    # Description for the fields (ie. labels)
    # Note that these fields will be translated with the _ function.
    # If a field is undescribe, a capitalized version of the field name
    # will be presented.
    #_descriptions = {
    #   'name': 'Full name',
    #   'description': 'Description of thingie',
    #}
    _descriptions = {}
def cursor(cls):
    """Return a DB-API 2.0 cursor.

    Users are expected to override/replace this classmethod with one
    that talks to their real database connection; the default only
    works when a ``database`` module with a cursor() function exists.
    """
    try:
        import database
        return database.cursor()
    except Exception:
        # was a bare except + string exception (TypeError on Py >= 2.6)
        raise NotImplementedError(
            "cursor method undefined, no database connection could be made")
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
# (used by _loadFromRow for driver-specific type checks)
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on. (checked in __del__)
_autosave = True
def __new__(cls, *args):
    """Return a cached instance for ``args`` (the primary key) when a
    live, fresh one exists; otherwise create and cache a new one.

    The cache holds weak references, so instances die normally once
    callers drop them; entries older than cls._timeout are ignored.
    """
    if not hasattr(cls, '_cache'):
        cls._cache = {}
    # was: string-exception control flow (raise "NotFound" /
    # except "NotFound") -- on Python >= 2.6 raising a string is a
    # TypeError, so every cache miss crashed
    cached = None
    entry = cls._cache.get(args)
    if entry is not None:
        (ref, stored) = entry
        candidate = ref()
        # reuse only if the weakref is still alive and not too old
        if candidate is not None and time.time() - stored <= cls._timeout:
            cached = candidate
    if cached is None:
        # object.__new__ takes no extra arguments; the original passed
        # *args along, which modern Pythons reject
        cached = object.__new__(cls)
        # store a weak reference with its creation time
        cls._cache[args] = (weakref.ref(cached), time.time())
    return cached
def __init__(self, *id):
    """Initialize, possibly with a database id.

    A forgetter with a multivalue primary key (_sqlPrimary longer
    than 1) takes one constructor argument per key column. Nothing is
    fetched from the database until load() (or the first attribute
    access) happens.
    """
    self._values = {}
    self.reset()
    if id:
        self._setID(id)
    else:
        self._resetID()
def _setID(self, id):
    """Set the ID, ie. the values for primary keys.

    id can be either a list/tuple following _sqlPrimary, or a single
    value (which requires a 1-length _sqlPrimary).
    """
    if type(id) in (types.ListType, types.TupleType):
        try:
            for key in self._sqlPrimary:
                value = id[0]
                # bypass __setattr__ -- primary keys live in __dict__
                self.__dict__[key] = value
                id = id[1:] # rest, go recursive
        except IndexError:
            # was a string exception -- a TypeError on Python >= 2.6
            raise ValueError(
                'Not enough id fields, required: %s' % len(self._sqlPrimary))
    elif len(self._sqlPrimary) <= 1:
        # It's a simple value
        key = self._sqlPrimary[0]
        self.__dict__[key] = id
    else:
        raise ValueError(
            'Not enough id fields, required: %s' % len(self._sqlPrimary))
    self._new = False
def _getID(self):
    """Get the ID values as a list ordered by _sqlPrimary.

    Forgetter-valued key parts are saved (if new) and replaced by
    their single-column ID.
    """
    id = []
    for key in self._sqlPrimary:
        value = self.__dict__[key]
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except ValueError:
                # was a bare except + string exception (TypeError on
                # Python >= 2.6); only the 1-tuple unpack can fail here
                raise ValueError(
                    "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value))
        id.append(value)
    return id
def _resetID(self):
    """Blank every primary-key field and mark the object as new."""
    # reuse _setID with one None per key column
    blanks = (None,) * len(self._sqlPrimary)
    self._setID(blanks)
    self._new = True
def _validID(self):
    """True when every primary-key field has a value (no Nones)."""
    return None not in self._getID()
def __getattr__(self, key):
    """Return a SQL field value, lazily loading from the database.

    Only keys listed in _sqlFields are served; anything else raises
    AttributeError as usual.
    """
    if key not in self._sqlFields:
        raise AttributeError(key)
    if not self._updated:
        # never loaded (or reset) -- fetch our row first
        self.load()
    return self._values[key]
def __setattr__(self, key, value):
    """Store a SQL field value (marking the object changed), or fall
    back to a plain attribute for anything not in _sqlFields.

    Primary-key attributes are also stored as plain attributes --
    they live in __dict__, not in _values.
    """
    if key in self._sqlFields and key not in self._sqlPrimary:
        if not self._updated:
            # load existing data before overwriting a single field
            self.load()
        self._values[key] = value
        self._changed = time.time()
    else:
        # It's a normal thingie
        self.__dict__[key] = value
def __del__(self):
    """Autosave on garbage collection (unless _autosave is off).

    Be aware that with Python >= 2.2's collector this may happen long
    after the last reference went away, and that errors raised by the
    save cannot be reported or caught -- call save() explicitly when
    you need either guarantee. Use reset() first to discard changes.
    """
    if not self._autosave:
        return
    try:
        self.save()
    except Exception:
        # a destructor must never raise; best-effort save only
        pass
def _checkTable(cls, field):
    """Split a _sqlFields value into table and column.

    Registers the table in cls._tables and returns the fully
    qualified ``table.column`` (defaulting to cls._sqlTable when no
    table part is given).
    """
    try:
        table, field = field.split('.')
    except ValueError:
        # no (single) dot -- the field belongs to our own table
        table = cls._sqlTable
    table = table.strip()
    field = field.strip()
    # register table
    cls._tables[table] = None
    # and return in proper shape
    return '%s.%s' % (table, field)
_checkTable = classmethod(_checkTable)
def reset(self):
    """Forget unsaved changes and blank every field, as if the object
    were freshly created.

    Note: anything else holding a reference to this object sees the
    wiped state too. Override when adding properties that are not
    defined in _sqlFields.
    """
    self._resetID()
    self._new = None
    self._updated = None
    self._changed = None
    # one None slot per exposed SQL field
    self._values = dict.fromkeys(self._sqlFields, None)
def save(self):
    """Write to the database if anything changed since the last load.

    Returns True when a save actually happened, False otherwise.
    """
    dirty = (self._new
             or (self._validID() and self._changed)
             or (self._updated and self._changed > self._updated))
    if dirty:
        # Don't save if we have not loaded existing data!
        self._saveDB()
        return True
    return False
def delete(self):
    """Delete my row from the database.

    The instance is then reset() and ready for reuse with a new id.
    """
    (sql, ) = self._prepareSQL("DELETE")
    cur = self.cursor()
    cur.execute(sql, self._getID())
    cur.close()
    self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
    """Return a sql for the given operation.

    Possible operations:
      SELECT     read data for this id
      SELECTALL  read data for all ids
      INSERT     insert data, create new id
      UPDATE     update data for this id
      DELETE     remove data for this id

    SQL will be built by data from _sqlFields, and will contain 0 or
    several %s for you to sprintf-format in later:
      SELECT    --> len(cls._sqlPrimary)
      SELECTALL --> 0 %s
      INSERT    --> len(cls._sqlFields) %s (including id)
      UPDATE    --> len(cls._sqlFields) %s (including id)
      DELETE    --> len(cls._sqlPrimary)
    (Note: INSERT and UPDATE will only change values in _sqlTable, so
    the actual number of fields for substitutions might be lower
    than len(cls._sqlFields))

    For INSERT you should use cls._nextSequence() to retrieve
    a new 'id' number. Note that if your sequences are not named
    tablename_primarykey_seq you must give the sequence name as an
    optional argument to _nextSequence. cls._nextSequence() MUST be
    overloaded for multi-_sqlPrimary classes (return a tuple).

    Return values will always be tuples:
      SELECT    --> (sql, fields)
      SELECTALL --> (sql, fields)
      INSERT    --> (sql, fields)
      UPDATE    --> (sql, fields)
      DELETE    --> (sql,)  -- for consistency
    ``fields`` lists the object properties (keys from cls._sqlFields)
    in the exact order their %s placeholders appear -- dict key order
    is not guaranteed, so callers need this list for substitution.

    The optional where parameter applies to SELECT, SELECTALL and
    DELETE; it may be a string or a sequence of clauses ANDed together.
    """
    # Normalize parameter for later comparisons
    operation = operation.upper()
    # Convert where to a list if it is a string
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if orderBy is None:
        orderBy = cls._orderBy
    if operation in ('SELECT', 'SELECTALL'):
        # Get the object fields and sql fields in the same
        # order to be able to reconstruct later.
        fields = []
        sqlfields = []
        for (field, sqlfield) in cls._sqlFields.items():
            if selectfields is None or field in selectfields:
                fields.append(field)
                sqlfields.append(sqlfield)
        if not fields:
            # was a string exception -- a TypeError on Python >= 2.6
            raise ValueError("""ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields))
        sql = "SELECT\n "
        sql += ', '.join(sqlfields)
        sql += "\nFROM\n "
        tables = cls._tables.keys()
        if not tables:
            # was a string exception -- a TypeError on Python >= 2.6
            raise ValueError("REALITY ERROR: No tables defined")
        sql += ', '.join(tables)
        # join conditions for multi-table classes
        tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
        # this MUST be here.
        if operation != 'SELECTALL':  # was Py2-only '<>'
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
        if where:
            tempWhere += where
        if tempWhere:
            # Make sure to use parentheses in case someone has used
            # ORs in the WHERE-list..
            sql += "\nWHERE\n ("
            sql += ') AND\n ('.join(tempWhere)
            sql += ')'
        if operation == 'SELECTALL' and orderBy:
            sql += '\nORDER BY\n '
            if type(orderBy) in (types.TupleType, types.ListType):
                orderBy = [cls._sqlFields[x] for x in orderBy]
                orderBy = ',\n '.join(orderBy)
            else:
                orderBy = cls._sqlFields[orderBy]
            sql += orderBy
        return (sql, fields)
    elif operation in ('INSERT', 'UPDATE'):
        if operation == 'UPDATE':
            sql = 'UPDATE %s SET\n ' % cls._sqlTable
        else:
            sql = 'INSERT INTO %s (\n ' % cls._sqlTable
        assignments = []  # was named 'set', shadowing the builtin
        fields = []
        sqlfields = []
        for (field, sqlfield) in cls._sqlFields.items():
            if operation == 'UPDATE' and field in cls._sqlPrimary:
                continue
            if sqlfield.find(cls._sqlTable + '.') == 0:
                # It's a local field, chop off the table part
                sqlfield = sqlfield[len(cls._sqlTable)+1:]
                fields.append(field)
                sqlfields.append(sqlfield)
                assignments.append(sqlfield + '=%s')
        if operation == 'UPDATE':
            sql += ',\n '.join(assignments)
            sql += '\nWHERE\n '
            tempWhere = []
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
                fields.append(key)
            sql += ' AND\n '.join(tempWhere)
        else:
            sql += ',\n '.join(sqlfields)
            sql += ')\nVALUES (\n '
            sql += ',\n '.join(('%s',) * len(sqlfields))
            sql += ')'
        return (sql, fields)
    elif operation == 'DELETE':
        sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
        if where:
            sql += " AND\n ".join(where)
        else:
            # was wrapped in a redundant outer loop over _sqlPrimary,
            # which appended the whole clause once per key column and
            # produced broken SQL for multi-column primary keys
            tempWhere = []
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
            sql += ' AND\n '.join(tempWhere)
        return (sql, )
    else:
        # was a string exception -- a TypeError on Python >= 2.6
        raise ValueError("Unknown operation: %s" % operation)
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
    """Return a new sequence number for insertion in self._sqlTable.

    Note that if your sequences are not named
    tablename_primarykey_seq (ie. for table 'blapp' with primary
    key 'john_id', sequence name blapp_john_id_seq) you must give
    the full sequence name as an optional argument to _nextSequence)
    """
    if not name:
        name = cls._sqlSequence
    if not name:
        # Assume it's tablename_primarykey_seq
        if len(cls._sqlPrimary) != 1:  # was Py2-only '<>'
            # was a string exception -- a TypeError on Python >= 2.6,
            # so the intended message never reached the caller
            raise ValueError(
                "Could not guess sequence name for multi-primary-key")
        primary = cls._sqlPrimary[0]
        name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.', '_'))
        # Don't have . as a tablename or column name! =)
    curs = cls.cursor()
    curs.execute("SELECT nextval('%s')" % name)
    value = curs.fetchone()[0]
    curs.close()
    return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
    """Load from a database row, described by fields.

    ``fields`` should be the attribute names that will be set.
    Note that userclasses will be created (but not loaded).
    """
    position = 0
    for elem in fields:
        value = result[position]
        valueType = cursor.description[position][1]
        # 'and', not 'or': with 'or' the third clause was a tautology
        # and the coercion below ran even for proper booleans
        if hasattr(self._dbModule, 'BOOLEAN') and \
           valueType == self._dbModule.BOOLEAN and \
           (value is not True and value is not False):
            # convert to a python boolean
            # NOTE(review): a NULL boolean column becomes False here,
            # losing its NULL-ness -- confirm this is intended
            value = value and True or False
        if value and elem in self._userClasses:
            userClass = self._userClasses[elem]
            # create an instance; the raw column value becomes its ID
            # and the real data is loaded lazily on attribute access
            value = userClass(value)
        self._values[elem] = value
        position += 1
def _loadDB(self):
    """Connect to the database to load myself.

    Raises NotFound when the primary key is incomplete or no row
    matches it.
    """
    # Refuse to query while any primary-key part is still None
    if not self._validID():
        raise NotFound, self._getID()
    (sql, fields) = self._prepareSQL("SELECT")
    curs = self.cursor()
    curs.execute(sql, self._getID())
    result = curs.fetchone()
    if not result:
        # no row matched our primary key
        curs.close()
        raise NotFound, self._getID()
    self._loadFromRow(result, fields, curs)
    curs.close()
    # remember fetch time for the freshness checks in save()/__new__
    self._updated = time.time()
def _saveDB(self):
    """Insert or update into the database.

    Note that every field will be updated, not just the changed
    one.
    """
    # We're a "fresh" copy now
    self._updated = time.time()
    if self._new:
        operation = 'INSERT'
        if not self._validID():
            self._setID(self._nextSequence())
        # Note that we assign this ID to our self
        # BEFORE possibly saving any of our attribute
        # objects that might be new as well. This means
        # that they might have references to us, as long
        # as the database does not require our existence
        # yet.
        #
        # Since mysql does not have Sequences, this will
        # not work as smoothly there. See class
        # MysqlForgetter below.
    else:
        operation = 'UPDATE'
    (sql, fields) = self._prepareSQL(operation)
    values = []
    for field in fields:
        value = getattr(self, field)
        # First some dirty datatype hacks
        if DateTime and type(value) == DateTime.DateTimeType:
            # stupid psycopg does not support it's own return type..
            # lovely..
            value = str(value)
        if DateTime and type(value) == DateTime.DateTimeDeltaType:
            # Format delta as days, hours, minutes seconds
            # NOTE: includes value.second directly to get the
            # whole floating number
            value = value.strftime("%d %H:%M:") + str(value.second)
        if value is True or value is False:
            # We must store booleans as 't' and 'f' ...
            value = value and 't' or 'f'
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                # NOTE(review): string exception -- raises TypeError on
                # Python >= 2.6 instead of this message
                raise "Unsupported: Can't reference multiple-primary-key: %s" % value
        values.append(value)
    cursor = self.cursor()
    cursor.execute(sql, values)
    # cursor.commit()
    cursor.close()
    # mark as persisted and unmodified
    self._new = False
    self._changed = None
def getAll(cls, where=None, orderBy=None):
    """Retrieve all the objects.

    If a list of ``where`` clauses are given, they will be AND-ed
    and will limit the search.

    Only the IDs are fetched up front; each returned instance loads
    the rest of its fields lazily through the regular load()-autocall.
    """
    ids = cls.getAllIDs(where, orderBy=orderBy)
    multiKey = len(cls._sqlPrimary) > 1
    instances = []
    for id in ids:
        if multiKey:
            instances.append(cls(*id))
        else:
            instances.append(cls(id))
    return instances
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
                   useObject=None, orderBy=None):
    """Retrieve every object as an iterator.

    Possibly limitted by the where list of clauses that will be
    AND-ed.

    Since an iterator is returned, only ``buffer`` rows are loaded
    from the database at once. This is useful if you need
    to process all objects.

    If useObject is given, this object is returned each time, but
    with new data. This can be used to avoid creating many new
    objects when only one object is needed each time.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    fetchedAt = time.time()
    curs.execute(sql)
    # We might start eating memory at this point
    def getNext(rows=[]):
        # NOTE: the mutable default is deliberate -- it is the
        # per-iterator row buffer, created fresh on each
        # getAllIterator() call
        forgetter = cls
        if not rows:
            rows += curs.fetchmany(buffer)
            if not rows:
                # exhausted: returning the sentinel stops iter() below
                curs.close()
                return None
        row = rows[0]
        del rows[0]
        try:
            idPositions = [fields.index(key) for key in cls._sqlPrimary]
        except ValueError:
            # NOTE(review): string exception -- raises TypeError on
            # Python >= 2.6 instead of this message
            raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
        ids = [row[pos] for pos in idPositions]
        if useObject:
            # recycle the caller-supplied instance instead of creating
            result = useObject
            result.reset()
            result._setID(ids)
        else:
            result = forgetter(*ids)
        result._loadFromRow(row, fields, curs)
        result._updated = fetchedAt
        return result
    # two-argument iter(): call getNext until it returns None
    return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
    """Retrieve all the IDs, possibly matching the where clauses.

    ``where`` should be a list of where clauses that will be joined
    with AND. Note that the result elements are tuples if this table
    has a multivalue _sqlPrimary, plain values otherwise.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where,
                                    cls._sqlPrimary, orderBy=orderBy)
    curs = cls.cursor()
    curs.execute(sql)
    # We might start eating memory at this point
    rows = curs.fetchall()
    curs.close()
    result = []
    # map each primary-key attribute to its column position
    idPositions = [fields.index(key) for key in cls._sqlPrimary]
    for row in rows:
        ids = [row[pos] for pos in idPositions]
        if len(idPositions) > 1:
            ids = tuple(ids)
        else:
            # single-column key: unwrap to the bare value
            ids = ids[0]
        result.append((ids))
    return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
    """Retrieve a list of all possible instances of this class.

    The list is composed of tuples in the format (id, description) -
    where description is a string composed by the fields from
    cls._shortView, joined with SEPERATOR.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    curs.execute(sql)
    # We might start eating memory at this point
    rows = curs.fetchall()
    curs.close()
    result = []
    # column positions of the primary key and of the display fields
    idPositions = [fields.index(key) for key in cls._sqlPrimary]
    shortPos = [fields.index(short) for short in cls._shortView]
    for row in rows:
        ids = [row[pos] for pos in idPositions]
        if len(idPositions) > 1:
            ids = tuple(ids)
        else:
            # single-column key: unwrap to the bare value
            ids = ids[0]
        text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
        result.append((ids, text))
    return result
getAllText = classmethod(getAllText)
    def getChildren(self, forgetter, field=None, where=None, orderBy=None):
        """Return the children (instances of ``forgetter``) that link to me.

        That means that I have to be listed in their _userClasses
        somehow.  If ``field`` is specified, that field in my children
        is used as the pointer to me.  Use this if you have multiple
        fields referring to my class.  Extra ``where`` clauses are
        AND-ed in.  Assumes this class has a single-column primary key.
        """
        if type(where) in (types.StringType, types.UnicodeType):
            where = (where,)
        if not field:
            # Find the first field in the child class that refers to my class
            for (i_field, i_class) in forgetter._userClasses.items():
                if isinstance(self, i_class):
                    field = i_field
                    break # first one found is ok :=)
        if not field:
            raise "No field found, check forgetter's _userClasses"
        sqlname = forgetter._sqlFields[field]
        myID = self._getID()[0] # assuming single-primary !
        whereList = ["%s='%s'" % (sqlname, myID)]
        if where:
            whereList.extend(where)
        return forgetter.getAll(whereList, orderBy=orderBy)
    def getChildrenIterator(self, forgetter, field=None, where=None,
                            orderBy=None, useObject=None):
        """Like getChildren, except that it returns an iterator, like
        getAllIterator.  ``useObject``, when given, is recycled for each
        row instead of creating new instances.
        """
        if type(where) in (types.StringType, types.UnicodeType):
            where = (where,)
        if not field:
            # Find the first field in the child class that refers to my class
            for (i_field, i_class) in forgetter._userClasses.items():
                if isinstance(self, i_class):
                    field = i_field
                    break # first one found is ok :=)
        if not field:
            raise "No field found, check forgetter's _userClasses"
        sqlname = forgetter._sqlFields[field]
        myID = self._getID()[0] # assuming single-primary !
        whereList = ["%s='%s'" % (sqlname, myID)]
        if where:
            whereList.extend(where)
        return forgetter.getAllIterator(whereList, useObject=useObject,
                                        orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter.save | python | def save(self):
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False | Save to database if anything has changed since last load | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L429-L437 | [
"def _validID(self):\n \"\"\"Is all ID fields with values, ie. not None?\"\"\"\n return not None in self._getID()\n",
"def _saveDB(self):\n \"\"\"Insert or update into the database.\n\n Note that every field will be updated, not just the changed\n one.\n \"\"\"\n # We're a \"fresh\" copy now\n self._updated = time.time()\n if self._new:\n operation = 'INSERT'\n if not self._validID():\n self._setID(self._nextSequence())\n # Note that we assign this ID to our self\n # BEFORE possibly saving any of our attribute\n # objects that might be new as well. This means\n # that they might have references to us, as long\n # as the database does not require our existence\n # yet.\n #\n # Since mysql does not have Sequences, this will\n # not work as smoothly there. See class\n # MysqlForgetter below.\n else:\n operation = 'UPDATE'\n (sql, fields) = self._prepareSQL(operation)\n values = []\n for field in fields:\n value = getattr(self, field)\n # First some dirty datatype hacks\n if DateTime and type(value) == DateTime.DateTimeType:\n # stupid psycopg does not support it's own return type..\n # lovely..\n value = str(value)\n if DateTime and type(value) == DateTime.DateTimeDeltaType:\n # Format delta as days, hours, minutes seconds\n # NOTE: includes value.second directly to get the\n # whole floating number\n value = value.strftime(\"%d %H:%M:\") + str(value.second)\n if value is True or value is False:\n # We must store booleans as 't' and 'f' ...\n value = value and 't' or 'f'\n if isinstance(value, Forgetter):\n # It's another object, we store only the ID\n if value._new:\n # It's a new object too, it must be saved!\n value.save()\n try:\n (value,) = value._getID()\n except:\n raise \"Unsupported: Can't reference multiple-primary-key: %s\" % value\n values.append(value)\n cursor = self.cursor()\n cursor.execute(sql, values)\n # cursor.commit()\n cursor.close()\n self._new = False\n self._changed = None\n"
] | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
    # other names if you want to make the sql name more appropriate
    # for object oriented programming. (Like calling a field 'location'
    # instead of 'location_id', because we wrap the location in a separate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
    # Fields will be joined together with spaces or
    # similar.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
    def cursor(cls):
        """Return a DB-API cursor.

        The default implementation tries to import a site-specific
        ``database`` module and use its cursor() factory; deployments
        normally override this classmethod instead.
        """
        try:
            import database
            return database.cursor()
        except:
            raise "cursor method undefined, no database connection could be made"
    cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
    def __new__(cls, *args):
        """Return a cached instance for ``args`` (the primary key values)
        when one is still alive and younger than cls._timeout; otherwise
        allocate a fresh object and cache a weak reference to it.

        NOTE(review): uses Python 2 string exceptions as a local 'goto';
        this construct is rejected by Python >= 2.6.
        """
        if not hasattr(cls, '_cache'):
            # Lazily created per-class cache: args -> (weakref, timestamp)
            cls._cache = {}
        try: # to implement 'goto' in Python.. UGH
            if not cls._cache.has_key(args):
                # unknown
                raise "NotFound"
            (ref, updated) = cls._cache[args]
            realObject = ref()
            if realObject is None:
                # No more real references to it, dead object
                raise "NotFound"
            age = time.time() - updated
            if age > cls._timeout:
                # Too old!
                raise "NotFound"
            updated = time.time()
        except "NotFound":
            # We'll need to create it
            realObject = object.__new__(cls, *args)
            ref = weakref.ref(realObject)
            updated = time.time()
            # store a weak reference
            cls._cache[args] = (ref, updated)
        return realObject
    def __init__(self, *id):
        """Initialize, possibly with a database id.

        A forgetter with multivalue primary key (ie. _sqlPrimary more
        than 1 in length) may be initialized by passing several
        parameters to this constructor.  Note that the object will not
        be loaded from the database before you call load() or access a
        SQL field attribute.
        """
        self._values = {}
        self.reset()
        if not id:
            self._resetID()
        else:
            self._setID(id)
    def _setID(self, id):
        """Set the ID, ie. the values for primary keys.

        ``id`` can be either a list/tuple following _sqlPrimary, or any
        other value that will be set as the singleton ID (requires a
        1-length _sqlPrimary).  Writes go straight into __dict__ to
        bypass the __setattr__ SQL-field machinery.
        """
        if type(id) in (types.ListType, types.TupleType):
            try:
                for key in self._sqlPrimary:
                    value = id[0]
                    self.__dict__[key] = value
                    id = id[1:] # rest, go revursive
            except IndexError:
                raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
        elif len(self._sqlPrimary) <= 1:
            # It's a simple value
            key = self._sqlPrimary[0]
            self.__dict__[key] = id
        else:
            raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
        self._new = False
    def _getID(self):
        """Get the ID values as a list ordered by _sqlPrimary.

        Key fields that hold other Forgetter instances are collapsed to
        their (single-column) primary key value, saving them first when
        they are new.
        """
        id = []
        for key in self._sqlPrimary:
            value = self.__dict__[key]
            if isinstance(value, Forgetter):
                # It's another object, we store only the ID
                if value._new:
                    # It's a new object too, it must be saved!
                    value.save()
                try:
                    (value,) = value._getID()
                except:
                    raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
            id.append(value)
        return id
    def _resetID(self):
        """Reset all ID fields to None and mark the object as new."""
        # Dirty.. .=))
        self._setID((None,) * len(self._sqlPrimary))
        self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
    def __getattr__(self, key):
        """Lazy access to SQL field values.

        Called only when normal attribute lookup fails, ie. for the
        database fields kept in self._values.  Triggers a load() from
        the database on first access.
        """
        if self._sqlFields.has_key(key):
            if not self._updated:
                self.load()
            return self._values[key]
        else:
            raise AttributeError, key
    def __setattr__(self, key, value):
        """Set an attribute, normally a SQL field value.

        Non-primary SQL fields are stored in self._values and stamp
        self._changed (loading existing data first so a later save
        writes a complete row); everything else goes straight into
        __dict__.
        """
        if key not in self._sqlPrimary and self._sqlFields.has_key(key):
            if not self._updated:
                self.load()
            self._values[key] = value
            self._changed = time.time()
        else:
            # It's a normal thingie
            self.__dict__[key] = value
    def __del__(self):
        """Save the object on deletion (when _autosave is set).

        Be aware of this. If you want to undo some change, use reset()
        first.
        Be aware of Python 2.2's garbage collector, that
        might run in the background. This means that
        unless you call save() changes might not
        be done immediately in the database.
        Not calling save() also means that you cannot catch
        errors caused by wrong insertion/update (ie. wrong
        datatype for a field)
        """
        if not self._autosave:
            return
        try:
            self.save()
        except Exception, e:
            # Destructors must not propagate; save errors are lost here
            pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
    def load(self, id=None):
        """Load from database. Old values will be discarded.

        If ``id`` is given, the object is reset and re-pointed at that
        primary key first.  Nothing is loaded for new objects or
        objects with an incomplete ID.
        """
        if id is not None:
            # We are asked to change our ID to something else
            self.reset()
            self._setID(id)
        if not self._new and self._validID():
            self._loadDB()
            self._updated = time.time()
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value
_nextSequence = classmethod(_nextSequence)
    def _loadFromRow(self, result, fields, cursor):
        """Load from a database row, described by fields.

        ``fields`` should be the attribute names that
        will be set. Note that userclasses will be
        created (but not loaded).
        """
        position = 0
        for elem in fields:
            value = result[position]
            valueType = cursor.description[position][1]
            # NOTE(review): the 'or' makes the last clause always true, so
            # every BOOLEAN-typed value is coerced; presumably 'and' was
            # intended, but the coercion is idempotent for real booleans.
            if hasattr(self._dbModule, 'BOOLEAN') and \
               valueType == self._dbModule.BOOLEAN and \
               (value is not True or value is not False):
                # convert to a python boolean
                value = value and True or False
            if value and self._userClasses.has_key(elem):
                userClass = self._userClasses[elem]
                # create an instance (lazy; only the ID is set here)
                value = userClass(value)
            self._values[elem] = value
            position += 1
    def _loadDB(self):
        """Connect to the database to load myself.

        Raises NotFound when the ID is incomplete or no matching row
        exists.
        """
        if not self._validID():
            raise NotFound, self._getID()
        (sql, fields) = self._prepareSQL("SELECT")
        curs = self.cursor()
        curs.execute(sql, self._getID())
        result = curs.fetchone()
        if not result:
            curs.close()
            raise NotFound, self._getID()
        self._loadFromRow(result, fields, curs)
        curs.close()
        self._updated = time.time()
    def _saveDB(self):
        """Insert or update into the database.

        Note that every field will be updated, not just the changed
        one.  Values are converted to SQL-friendly forms on the way out
        (DateTime objects to strings, booleans to 't'/'f', referenced
        Forgetters to their primary key).
        """
        # We're a "fresh" copy now
        self._updated = time.time()
        if self._new:
            operation = 'INSERT'
            if not self._validID():
                self._setID(self._nextSequence())
                # Note that we assign this ID to our self
                # BEFORE possibly saving any of our attribute
                # objects that might be new as well. This means
                # that they might have references to us, as long
                # as the database does not require our existence
                # yet.
                #
                # Since mysql does not have Sequences, this will
                # not work as smoothly there. See class
                # MysqlForgetter below.
        else:
            operation = 'UPDATE'
        (sql, fields) = self._prepareSQL(operation)
        values = []
        for field in fields:
            value = getattr(self, field)
            # First some dirty datatype hacks
            if DateTime and type(value) == DateTime.DateTimeType:
                # stupid psycopg does not support it's own return type..
                # lovely..
                value = str(value)
            if DateTime and type(value) == DateTime.DateTimeDeltaType:
                # Format delta as days, hours, minutes seconds
                # NOTE: includes value.second directly to get the
                # whole floating number
                value = value.strftime("%d %H:%M:") + str(value.second)
            if value is True or value is False:
                # We must store booleans as 't' and 'f' ...
                value = value and 't' or 'f'
            if isinstance(value, Forgetter):
                # It's another object, we store only the ID
                if value._new:
                    # It's a new object too, it must be saved!
                    value.save()
                try:
                    (value,) = value._getID()
                except:
                    raise "Unsupported: Can't reference multiple-primary-key: %s" % value
            values.append(value)
        cursor = self.cursor()
        cursor.execute(sql, values)
        # cursor.commit()
        cursor.close()
        self._new = False
        self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
    def getAllIterator(cls, where=None, buffer=100,
                       useObject=None, orderBy=None):
        """Retrieve every object as an iterator.

        Possibly limited by the where list of clauses that will be
        AND-ed.
        Since an iterator is returned, only ``buffer`` rows are loaded
        from the database at once. This is useful if you need
        to process all objects.
        If useObject is given, this object is returned each time, but
        with new data. This can be used to avoid creating many new
        objects when only one object is needed each time.
        """
        (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
        curs = cls.cursor()
        fetchedAt = time.time()
        curs.execute(sql)
        # We might start eating memory at this point
        # The mutable default is deliberate here: each call to
        # getAllIterator defines a fresh getNext, so 'rows' acts as a
        # private fetch buffer for this iterator only.
        def getNext(rows=[]):
            forgetter = cls
            if not rows:
                rows += curs.fetchmany(buffer)
            if not rows:
                curs.close()
                return None
            row = rows[0]
            del rows[0]
            try:
                idPositions = [fields.index(key) for key in cls._sqlPrimary]
            except ValueError:
                raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
            ids = [row[pos] for pos in idPositions]
            if useObject:
                result = useObject
                result.reset()
                result._setID(ids)
            else:
                result = forgetter(*ids)
            result._loadFromRow(row, fields, curs)
            result._updated = fetchedAt
            return result
        # iter() with a sentinel: calls getNext() until it returns None
        return iter(getNext, None)
    getAllIterator = classmethod(getAllIterator)
    def getAllIDs(cls, where=None, orderBy=None):
        """Retrieve all the IDs, possibly matching the where clauses.

        Where should be some list of where clauses that will be joined
        with AND).  Note that the elements of the result are tuples if
        this table has a multivalue _sqlPrimary, plain values otherwise.
        """
        (sql, fields) = cls._prepareSQL("SELECTALL", where,
                                        cls._sqlPrimary, orderBy=orderBy)
        curs = cls.cursor()
        curs.execute(sql)
        # We might start eating memory at this point
        rows = curs.fetchall()
        curs.close()
        result = []
        # Column positions of the primary key field(s)
        idPositions = [fields.index(key) for key in cls._sqlPrimary]
        for row in rows:
            ids = [row[pos] for pos in idPositions]
            if len(idPositions) > 1:
                ids = tuple(ids)
            else:
                ids = ids[0]
            result.append((ids))
        return result
    getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
    def getChildrenIterator(self, forgetter, field=None, where=None,
                            orderBy=None, useObject=None):
        """Like getChildren, except that it returns an iterator, like
        getAllIterator.  ``useObject``, when given, is recycled for each
        row instead of creating new instances.
        """
        if type(where) in (types.StringType, types.UnicodeType):
            where = (where,)
        if not field:
            # Find the first field in the child class that refers to my class
            for (i_field, i_class) in forgetter._userClasses.items():
                if isinstance(self, i_class):
                    field = i_field
                    break # first one found is ok :=)
        if not field:
            raise "No field found, check forgetter's _userClasses"
        sqlname = forgetter._sqlFields[field]
        myID = self._getID()[0] # assuming single-primary !
        whereList = ["%s='%s'" % (sqlname, myID)]
        if where:
            whereList.extend(where)
        return forgetter.getAllIterator(whereList, useObject=useObject,
                                        orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
    def __str__(self):
        """Human-readable one-liner: the _shortView field values
        (falling back to the primary key) joined with ', '."""
        shortView = self._shortView or self._sqlPrimary
        short = [str(getattr(self, short)) for short in shortView]
        text = ', '.join(short)
        # return repr(self) + ': ' + text
        return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter.delete | python | def delete(self):
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset() | Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L439-L449 | [
"def cursor(cls):\n try:\n import database\n return database.cursor()\n except:\n raise \"cursor method undefined, no database connection could be made\"\n",
"def _getID(self):\n \"\"\"Get the ID values as a tuple annotated by sqlPrimary\"\"\"\n id = []\n for key in self._sqlPrimary:\n value = self.__dict__[key]\n if isinstance(value, Forgetter):\n # It's another object, we store only the ID\n if value._new:\n # It's a new object too, it must be saved!\n value.save()\n try:\n (value,) = value._getID()\n except:\n raise \"Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s \" % (key, self.__class__, value.__class__, value)\n id.append(value)\n return id\n",
"def reset(self):\n \"\"\"Reset all fields, almost like creating a new object.\n\n Note: Forgets changes you have made not saved to database!\n (Remember: Others might reference the object already, expecting\n something else!) Override this method if you add properties not\n defined in _sqlFields.\n \"\"\"\n self._resetID()\n self._new = None\n self._updated = None\n self._changed = None\n self._values = {}\n # initially create fields\n for field in self._sqlFields.keys():\n self._values[field] = None\n",
" def _prepareSQL(cls, operation=\"SELECT\", where=None, selectfields=None, orderBy=None):\n \"\"\"Return a sql for the given operation.\n\n Possible operations:\n SELECT read data for this id\n SELECTALL read data for all ids\n INSERT insert data, create new id\n UPDATE update data for this id\n DELETE remove data for this id\n\n SQL will be built by data from _sqlFields, and will\n contain 0 or several %s for you to sprintf-format in later:\n\n SELECT --> len(cls._sqlPrimary)\n SELECTALL --> 0 %s\n INSERT --> len(cls._sqlFields) %s (including id)\n UPDATE --> len(cls._sqlFields) %s (including id)\n DELETE --> len(cls._sqlPrimary)\n\n (Note: INSERT and UPDATE will only change values in _sqlTable, so\n the actual number of fields for substitutions might be lower\n than len(cls._sqlFields) )\n\n For INSERT you should use cls._nextSequence() to retrieve\n a new 'id' number. Note that if your sequences are not named\n tablename_primarykey_seq (ie. for table 'blapp' with primary key\n 'john_id', sequence name blapp_john_id_seq) you must give the sequence\n name as an optional argument to _nextSequence)\n\n Additional note: cls._nextSequence() MUST be overloaded\n for multi _sqlPrimary classes. Return a tuple.\n\n Return values will always be tuples:\n SELECT --> (sql, fields)\n SELECTALL -> sql, fields)\n INSERT -> (sql, fields)\n UPDATE -> (sql, fields)\n DELETE -> (sql,) -- for consistency\n\n fields will be object properties as a list, ie. the keys from\n cls._sqlFields. The purpose of this list is to give the programmer\n an idea of which order the keys are inserted in the SQL, giving\n help for retreiving (SELECT, SELECTALL) or inserting for %s\n (INSERT, DELETE).\n\n Why? 
Well, the keys are stored in a hash, and we cannot be sure\n about the order of hash.keys() from time to time, not even with\n the same instance.\n\n Optional where-parameter applies to SELECT, SELECTALL and DELETE.\n where should be a list or string of where clauses.\n\n \"\"\"\n # Normalize parameter for later comparissions\n operation = operation.upper()\n # Convert where to a list if it is a string\n if type(where) in (types.StringType, types.UnicodeType):\n where = (where,)\n if orderBy is None:\n orderBy = cls._orderBy\n\n if operation in ('SELECT', 'SELECTALL'):\n # Get the object fields and sql fields in the same\n # order to be able to reconstruct later.\n fields = []\n sqlfields = []\n for (field, sqlfield) in cls._sqlFields.items():\n if selectfields is None or field in selectfields:\n fields.append(field)\n sqlfields.append(sqlfield)\n if not fields:\n # dirrrrrty!\n raise \"\"\"ERROR: No fields defined, cannot create SQL.\nMaybe sqlPrimary is invalid?\nFields asked: %s\nMy fields: %s\"\"\" % (selectfields, cls._sqlFields)\n\n sql = \"SELECT\\n \"\n sql += ', '.join(sqlfields)\n sql += \"\\nFROM\\n \"\n tables = cls._tables.keys()\n if not tables:\n raise \"REALITY ERROR: No tables defined\"\n sql += ', '.join(tables)\n tempWhere = [\"%s=%s\" % linkPair for linkPair in cls._sqlLinks]\n # this MUST be here.\n if operation <> 'SELECTALL':\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n if where:\n tempWhere += where\n if(tempWhere):\n # Make sure to use paranteses in case someone has used\n # ORs in the WHERE-list..\n sql += \"\\nWHERE\\n (\"\n sql += ') AND\\n ('.join(tempWhere)\n sql += ')'\n if operation == 'SELECTALL' and orderBy:\n sql += '\\nORDER BY\\n '\n if type(orderBy) in (types.TupleType, types.ListType):\n orderBy = [cls._sqlFields[x] for x in orderBy]\n orderBy = ',\\n '.join(orderBy)\n else:\n orderBy = cls._sqlFields[orderBy]\n sql += orderBy\n return (sql, fields)\n\n elif operation in ('INSERT', 
'UPDATE'):\n if operation == 'UPDATE':\n sql = 'UPDATE %s SET\\n ' % cls._sqlTable\n else:\n sql = 'INSERT INTO %s (\\n ' % cls._sqlTable\n\n set = []\n fields = []\n sqlfields = []\n for (field, sqlfield) in cls._sqlFields.items():\n if operation == 'UPDATE' and field in cls._sqlPrimary:\n continue\n if sqlfield.find(cls._sqlTable + '.') == 0:\n # It's a local field, chop of the table part\n sqlfield = sqlfield[len(cls._sqlTable)+1:]\n fields.append(field)\n sqlfields.append(sqlfield)\n set.append(sqlfield + '=%s')\n if operation == 'UPDATE':\n sql += ',\\n '.join(set)\n sql += '\\nWHERE\\n '\n tempWhere = []\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n fields.append(key)\n sql += ' AND\\n '.join(tempWhere)\n else:\n sql += ',\\n '.join(sqlfields)\n sql += ')\\nVALUES (\\n '\n sql += ',\\n '.join(('%s',) * len(sqlfields))\n sql += ')'\n\n return (sql, fields)\n\n elif operation == 'DELETE':\n sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '\n if where:\n sql += \" AND\\n \".join(where)\n else:\n for key in cls._sqlPrimary:\n tempWhere = []\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n sql += ' AND\\n '.join(tempWhere)\n return (sql, )\n else:\n raise \"Unknown operation\", operation\n"
] | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
try:
import database
return database.cursor()
except:
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
if not hasattr(cls, '_cache'):
cls._cache = {}
try: # to implement 'goto' in Python.. UGH
if not cls._cache.has_key(args):
# unknown
raise "NotFound"
(ref, updated) = cls._cache[args]
realObject = ref()
if realObject is None:
# No more real references to it, dead object
raise "NotFound"
age = time.time() - updated
if age > cls._timeout:
# Too old!
raise "NotFound"
updated = time.time()
except "NotFound":
# We'll need to create it
realObject = object.__new__(cls, *args)
ref = weakref.ref(realObject)
updated = time.time()
# store a weak reference
cls._cache[args] = (ref, updated)
return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.
A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length), may be initalized by using several parameters
to this constructor. Note that the object will not be loaded
before you call load().
"""
self._values = {}
self.reset()
if not id:
self._resetID()
else:
self._setID(id)
def _setID(self, id):
"""Set the ID, ie. the values for primary keys.
id can be either a list, following the
_sqlPrimary, or some other type, that will be set
as the singleton ID (requires 1-length sqlPrimary).
"""
if type(id) in (types.ListType, types.TupleType):
try:
for key in self._sqlPrimary:
value = id[0]
self.__dict__[key] = value
id = id[1:] # rest, go revursive
except IndexError:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
elif len(self._sqlPrimary) <= 1:
# It's a simple value
key = self._sqlPrimary[0]
self.__dict__[key] = id
else:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
self._new = False
def _getID(self):
"""Get the ID values as a tuple annotated by sqlPrimary"""
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
def __getattr__(self, key):
"""Get an attribute, normally a SQL field value.
Will be called when an unknown key is to be
retrieved, ie. most likely one of our database
fields.
"""
if self._sqlFields.has_key(key):
if not self._updated:
self.load()
return self._values[key]
else:
raise AttributeError, key
def __setattr__(self, key, value):
"""Set an attribute, normally a SQL field value.
Will be called whenever something needs to be set, so
we store the value as a SQL-thingie unless the key
is not listed in sqlFields.
"""
if key not in self._sqlPrimary and self._sqlFields.has_key(key):
if not self._updated:
self.load()
self._values[key] = value
self._changed = time.time()
else:
# It's a normal thingie
self.__dict__[key] = value
def __del__(self):
"""Save the object on deletion.
Be aware of this. If you want to undo some change, use reset()
first.
Be aware of Python 2.2's garbage collector, that
might run in the background. This means that
unless you call save() changes might not
be done immediately in the database.
Not calling save() also means that you cannot catch
errors caused by wrong insertion/update (ie. wrong
datatype for a field)
"""
if not self._autosave:
return
try:
self.save()
except Exception, e:
pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded).
"""
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1
def _loadDB(self):
"""Connect to the database to load myself"""
if not self._validID():
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
self._updated = time.time()
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
useObject=None, orderBy=None):
"""Retrieve every object as an iterator.
Possibly limitted by the where list of clauses that will be
AND-ed.
Since an iterator is returned, only ``buffer`` rows are loaded
from the database at once. This is useful if you need
to process all objects.
If useObject is given, this object is returned each time, but
with new data. This can be used to avoid creating many new
objects when only one object is needed each time.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
fetchedAt = time.time()
curs.execute(sql)
# We might start eating memory at this point
def getNext(rows=[]):
forgetter = cls
if not rows:
rows += curs.fetchmany(buffer)
if not rows:
curs.close()
return None
row = rows[0]
del rows[0]
try:
idPositions = [fields.index(key) for key in cls._sqlPrimary]
except ValueError:
raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
ids = [row[pos] for pos in idPositions]
if useObject:
result = useObject
result.reset()
result._setID(ids)
else:
result = forgetter(*ids)
result._loadFromRow(row, fields, curs)
result._updated = fetchedAt
return result
return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
    """Equal iff same class name and same primary key values.

    NOTE(review): no matching __ne__ is defined, so under Python 2
    ``!=`` does not use this; __hash__ is also left inherited and is
    therefore inconsistent with __eq__.  Confirm before relying on
    these objects as dict keys.
    """
    return self.__class__.__name__ == obj.__class__.__name__ \
           and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter._prepareSQL | python | def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation | Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retrieving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L451-L604 | null | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several tables into one class, as long as the
identifier is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 compliant, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
    """Return a DB-API 2.0 cursor.

    Meant to be overridden (or reassigned) by the application.  The
    fallback imports a module named ``database`` and asks it for a
    cursor.

    Raises RuntimeError when no database connection can be made.
    """
    try:
        import database
    except ImportError:
        # The original raised a string exception (illegal in
        # Python >= 2.6) and used a bare except that also masked real
        # errors from database.cursor(); only the import failure is
        # translated now, everything else propagates.
        raise RuntimeError(
            "cursor method undefined, no database connection could be made")
    return database.cursor()
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
    """Create or reuse an instance for the given primary-key args.

    A per-class weak-reference cache maps ID tuples to live instances;
    a cached instance is reused only while it is still alive and no
    older than cls._timeout seconds.

    Fixes over the original:
    - string exceptions ("NotFound") were used for control flow; they
      are illegal in Python >= 2.6
    - object.__new__ was called with *args, a TypeError in modern
      Python (extra args are only consumed by __init__)
    - the refreshed timestamp was computed on a cache hit but never
      stored back, so hits did not actually extend freshness
    """
    if not hasattr(cls, '_cache'):
        cls._cache = {}
    realObject = None
    entry = cls._cache.get(args)
    if entry is not None:
        (ref, updated) = entry
        cached = ref()  # weakref -> None once the object is collected
        if cached is not None and time.time() - updated <= cls._timeout:
            realObject = cached
    if realObject is None:
        # Unknown, dead or stale: build a fresh instance.
        realObject = object.__new__(cls)
        ref = weakref.ref(realObject)
    # Store/refresh the weak reference and its timestamp.
    cls._cache[args] = (ref, time.time())
    return realObject
def __init__(self, *id):
    """Initialize, possibly with a database id.

    A forgetter with a multivalue primary key (_sqlPrimary longer than
    1) may be initialized with several positional parameters.  Note
    that the object will not be loaded from the database before you
    call load() (or touch a mapped attribute).
    """
    self._values = {}
    self.reset()
    if not id:
        self._resetID()
    else:
        self._setID(id)
def _setID(self, id):
    """Set the ID, i.e. the values for the primary key fields.

    id can be either a list/tuple matching _sqlPrimary positionally,
    or a plain value (which requires a single-column _sqlPrimary).
    """
    if type(id) in (types.ListType, types.TupleType):
        try:
            for key in self._sqlPrimary:
                value = id[0]
                self.__dict__[key] = value
                id = id[1:]  # consume the rest, pseudo-recursively
        except IndexError:
            raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    elif len(self._sqlPrimary) <= 1:
        # It's a simple value
        key = self._sqlPrimary[0]
        self.__dict__[key] = id
    else:
        raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    # Having an explicit ID means we refer to an (assumed) existing row.
    self._new = False
def _getID(self):
    """Return the primary key values as a list, ordered by _sqlPrimary.

    A value that is itself a Forgetter is collapsed to its own single
    primary key value; unsaved referenced objects are save()d first so
    they have an ID to use.
    """
    id = []
    for key in self._sqlPrimary:
        value = self.__dict__[key]
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
        id.append(value)
    return id
def _resetID(self):
    """Blank out all primary key fields and mark the instance as new."""
    # Dirty.. .=))  -- fan one None out over every primary key column.
    self._setID((None,) * len(self._sqlPrimary))
    self._new = True
def _validID(self):
    """True when every primary key field has a value (none are None)."""
    return None not in self._getID()
def __getattr__(self, key):
    """Lazy read access for SQL-mapped attributes.

    Only called for keys not found the normal way, i.e. our mapped
    database fields.  Triggers a load() on first access.
    """
    if self._sqlFields.has_key(key):
        if not self._updated:
            self.load()
        return self._values[key]
    else:
        raise AttributeError, key
def __setattr__(self, key, value):
    """Write access; SQL-mapped fields are stored in self._values.

    Primary key fields and non-mapped attributes go straight into
    __dict__; mapped fields are loaded first (so a later save() writes
    back a complete row) and the instance is marked changed.
    """
    if key not in self._sqlPrimary and self._sqlFields.has_key(key):
        if not self._updated:
            self.load()
        self._values[key] = value
        self._changed = time.time()
    else:
        # It's a normal thingie
        self.__dict__[key] = value
def __del__(self):
    """Autosave on garbage collection (when _autosave is true).

    Be aware of this.  If you want to undo some change, use reset()
    first.  With a garbage collector running in the background, changes
    might not reach the database until much later -- and any save error
    (e.g. wrong datatype for a field) cannot be reported to the caller.
    Call save() explicitly when you need immediacy or error handling.
    """
    if not self._autosave:
        return
    try:
        self.save()
    except Exception, e:
        # Exceptions raised during GC / interpreter shutdown cannot be
        # handled meaningfully, so they are deliberately swallowed.
        pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
    """Return a new sequence number for insertion in cls._sqlTable.

    The sequence name is, in order of preference: the ``name``
    argument, cls._sqlSequence, or the guessed
    ``<table>_<primarykey>_seq`` (PostgreSQL naming convention).  Pass
    the real name explicitly if your sequences are named differently.
    """
    if not name:
        name = cls._sqlSequence
    if not name:
        # Assume it's tablename_primarykey_seq
        if len(cls._sqlPrimary) <> 1:
            raise "Could not guess sequence name for multi-primary-key"
        primary = cls._sqlPrimary[0]
        name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
        # Don't have . as a tablename or column name! =)
    curs = cls.cursor()
    # NOTE(review): name is interpolated straight into the SQL; safe
    # only because it comes from class configuration, never user input.
    curs.execute("SELECT nextval('%s')" % name)
    value = curs.fetchone()[0]
    curs.close()
    return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded).
"""
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1
def _loadDB(self):
"""Connect to the database to load myself"""
if not self._validID():
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
self._updated = time.time()
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
                   useObject=None, orderBy=None):
    """Retrieve every object as an iterator.

    Possibly limited by the ``where`` list of clauses (AND-ed).  Only
    ``buffer`` rows are fetched from the database at a time, keeping
    memory bounded while processing all objects.

    If useObject is given, that same object is returned each time,
    re-populated with the next row's data; this avoids creating one
    instance per row when only one is needed at a time.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    fetchedAt = time.time()
    curs.execute(sql)
    # We might start eating memory at this point
    def getNext(rows=[]):
        # NOTE: the mutable default argument is used deliberately as a
        # fetch buffer that persists between getNext() calls.
        forgetter = cls
        if not rows:
            rows += curs.fetchmany(buffer)
        if not rows:
            curs.close()
            return None
        row = rows[0]
        del rows[0]
        try:
            idPositions = [fields.index(key) for key in cls._sqlPrimary]
        except ValueError:
            raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
        ids = [row[pos] for pos in idPositions]
        if useObject:
            result = useObject
            result.reset()
            result._setID(ids)
        else:
            result = forgetter(*ids)
        result._loadFromRow(row, fields, curs)
        result._updated = fetchedAt
        return result
    # iter(callable, sentinel): keep calling getNext until it
    # returns None.
    return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter._nextSequence | python | def _nextSequence(cls, name=None):
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value | Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence) | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L608-L629 | null | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
try:
import database
return database.cursor()
except:
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
if not hasattr(cls, '_cache'):
cls._cache = {}
try: # to implement 'goto' in Python.. UGH
if not cls._cache.has_key(args):
# unknown
raise "NotFound"
(ref, updated) = cls._cache[args]
realObject = ref()
if realObject is None:
# No more real references to it, dead object
raise "NotFound"
age = time.time() - updated
if age > cls._timeout:
# Too old!
raise "NotFound"
updated = time.time()
except "NotFound":
# We'll need to create it
realObject = object.__new__(cls, *args)
ref = weakref.ref(realObject)
updated = time.time()
# store a weak reference
cls._cache[args] = (ref, updated)
return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.
A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length), may be initalized by using several parameters
to this constructor. Note that the object will not be loaded
before you call load().
"""
self._values = {}
self.reset()
if not id:
self._resetID()
else:
self._setID(id)
def _setID(self, id):
"""Set the ID, ie. the values for primary keys.
id can be either a list, following the
_sqlPrimary, or some other type, that will be set
as the singleton ID (requires 1-length sqlPrimary).
"""
if type(id) in (types.ListType, types.TupleType):
try:
for key in self._sqlPrimary:
value = id[0]
self.__dict__[key] = value
id = id[1:] # rest, go revursive
except IndexError:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
elif len(self._sqlPrimary) <= 1:
# It's a simple value
key = self._sqlPrimary[0]
self.__dict__[key] = id
else:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
self._new = False
def _getID(self):
"""Get the ID values as a tuple annotated by sqlPrimary"""
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
def __getattr__(self, key):
"""Get an attribute, normally a SQL field value.
Will be called when an unknown key is to be
retrieved, ie. most likely one of our database
fields.
"""
if self._sqlFields.has_key(key):
if not self._updated:
self.load()
return self._values[key]
else:
raise AttributeError, key
def __setattr__(self, key, value):
"""Set an attribute, normally a SQL field value.
Will be called whenever something needs to be set, so
we store the value as a SQL-thingie unless the key
is not listed in sqlFields.
"""
if key not in self._sqlPrimary and self._sqlFields.has_key(key):
if not self._updated:
self.load()
self._values[key] = value
self._changed = time.time()
else:
# It's a normal thingie
self.__dict__[key] = value
def __del__(self):
"""Save the object on deletion.
Be aware of this. If you want to undo some change, use reset()
first.
Be aware of Python 2.2's garbage collector, that
might run in the background. This means that
unless you call save() changes might not
be done immediately in the database.
Not calling save() also means that you cannot catch
errors caused by wrong insertion/update (ie. wrong
datatype for a field)
"""
if not self._autosave:
return
try:
self.save()
except Exception, e:
pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded).
"""
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1
def _loadDB(self):
"""Connect to the database to load myself"""
if not self._validID():
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
self._updated = time.time()
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
useObject=None, orderBy=None):
"""Retrieve every object as an iterator.
Possibly limitted by the where list of clauses that will be
AND-ed.
Since an iterator is returned, only ``buffer`` rows are loaded
from the database at once. This is useful if you need
to process all objects.
If useObject is given, this object is returned each time, but
with new data. This can be used to avoid creating many new
objects when only one object is needed each time.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
fetchedAt = time.time()
curs.execute(sql)
# We might start eating memory at this point
def getNext(rows=[]):
forgetter = cls
if not rows:
rows += curs.fetchmany(buffer)
if not rows:
curs.close()
return None
row = rows[0]
del rows[0]
try:
idPositions = [fields.index(key) for key in cls._sqlPrimary]
except ValueError:
raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
ids = [row[pos] for pos in idPositions]
if useObject:
result = useObject
result.reset()
result._setID(ids)
else:
result = forgetter(*ids)
result._loadFromRow(row, fields, curs)
result._updated = fetchedAt
return result
return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter._loadFromRow | python | def _loadFromRow(self, result, fields, cursor):
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1 | Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded). | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L633-L655 | null | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
try:
import database
return database.cursor()
except:
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
if not hasattr(cls, '_cache'):
cls._cache = {}
try: # to implement 'goto' in Python.. UGH
if not cls._cache.has_key(args):
# unknown
raise "NotFound"
(ref, updated) = cls._cache[args]
realObject = ref()
if realObject is None:
# No more real references to it, dead object
raise "NotFound"
age = time.time() - updated
if age > cls._timeout:
# Too old!
raise "NotFound"
updated = time.time()
except "NotFound":
# We'll need to create it
realObject = object.__new__(cls, *args)
ref = weakref.ref(realObject)
updated = time.time()
# store a weak reference
cls._cache[args] = (ref, updated)
return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.
A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length), may be initalized by using several parameters
to this constructor. Note that the object will not be loaded
before you call load().
"""
self._values = {}
self.reset()
if not id:
self._resetID()
else:
self._setID(id)
def _setID(self, id):
"""Set the ID, ie. the values for primary keys.
id can be either a list, following the
_sqlPrimary, or some other type, that will be set
as the singleton ID (requires 1-length sqlPrimary).
"""
if type(id) in (types.ListType, types.TupleType):
try:
for key in self._sqlPrimary:
value = id[0]
self.__dict__[key] = value
id = id[1:] # rest, go revursive
except IndexError:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
elif len(self._sqlPrimary) <= 1:
# It's a simple value
key = self._sqlPrimary[0]
self.__dict__[key] = id
else:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
self._new = False
def _getID(self):
"""Get the ID values as a tuple annotated by sqlPrimary"""
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
def __getattr__(self, key):
"""Get an attribute, normally a SQL field value.
Will be called when an unknown key is to be
retrieved, ie. most likely one of our database
fields.
"""
if self._sqlFields.has_key(key):
if not self._updated:
self.load()
return self._values[key]
else:
raise AttributeError, key
def __setattr__(self, key, value):
"""Set an attribute, normally a SQL field value.
Will be called whenever something needs to be set, so
we store the value as a SQL-thingie unless the key
is not listed in sqlFields.
"""
if key not in self._sqlPrimary and self._sqlFields.has_key(key):
if not self._updated:
self.load()
self._values[key] = value
self._changed = time.time()
else:
# It's a normal thingie
self.__dict__[key] = value
def __del__(self):
"""Save the object on deletion.
Be aware of this. If you want to undo some change, use reset()
first.
Be aware of Python 2.2's garbage collector, that
might run in the background. This means that
unless you call save() changes might not
be done immediately in the database.
Not calling save() also means that you cannot catch
errors caused by wrong insertion/update (ie. wrong
datatype for a field)
"""
if not self._autosave:
return
try:
self.save()
except Exception, e:
pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
    """Return the SQL for the given operation.

    Possible operations:
      SELECT     read data for this id
      SELECTALL  read data for all ids
      INSERT     insert data, create new id
      UPDATE     update data for this id
      DELETE     remove data for this id

    SQL is built from _sqlFields and contains %s placeholders for
    later parameter binding:
      SELECT    --> len(cls._sqlPrimary)
      SELECTALL --> 0
      INSERT    --> len(cls._sqlFields) (including id)
      UPDATE    --> len(cls._sqlFields) (including id)
      DELETE    --> len(cls._sqlPrimary)
    (INSERT and UPDATE only touch columns of _sqlTable, so the actual
    placeholder count might be lower than len(cls._sqlFields).)

    For INSERT use cls._nextSequence() to retrieve a new id first;
    see _nextSequence about sequence naming.  _nextSequence() MUST be
    overloaded for multi-column primary keys.

    Return values are always tuples:
      SELECT/SELECTALL/INSERT/UPDATE --> (sql, fields)
      DELETE                         --> (sql,)
    ``fields`` lists the attribute names (keys of cls._sqlFields) in
    the exact order their placeholders occur in the SQL -- dict key
    ordering cannot be relied upon, so callers need this list.

    Optional ``where`` (a string or sequence of clauses, AND-ed)
    applies to SELECT, SELECTALL and DELETE.

    Raises ValueError for an unknown operation or an unusable field
    configuration.  (These were string exceptions before, which have
    been invalid since Python 2.6.)
    """
    # Normalize parameter for later comparisons
    operation = operation.upper()
    # Convert where to a tuple if it is a single string
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if orderBy is None:
        orderBy = cls._orderBy
    if operation in ('SELECT', 'SELECTALL'):
        # Get the object fields and sql fields in the same
        # order to be able to reconstruct later.
        fields = []
        sqlfields = []
        for (field, sqlfield) in cls._sqlFields.items():
            if selectfields is None or field in selectfields:
                fields.append(field)
                sqlfields.append(sqlfield)
        if not fields:
            raise ValueError("""ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields))
        sql = "SELECT\n  "
        sql += ', '.join(sqlfields)
        sql += "\nFROM\n  "
        tables = cls._tables.keys()
        if not tables:
            raise ValueError("REALITY ERROR: No tables defined")
        sql += ', '.join(tables)
        # join conditions linking the involved tables; this MUST be here.
        tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
        if operation != 'SELECTALL':
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
        if where:
            tempWhere += where
        if tempWhere:
            # Make sure to use parentheses in case someone has used
            # ORs in the WHERE-list..
            sql += "\nWHERE\n  ("
            sql += ') AND\n  ('.join(tempWhere)
            sql += ')'
        if operation == 'SELECTALL' and orderBy:
            sql += '\nORDER BY\n  '
            if type(orderBy) in (types.TupleType, types.ListType):
                orderBy = [cls._sqlFields[x] for x in orderBy]
                orderBy = ',\n  '.join(orderBy)
            else:
                orderBy = cls._sqlFields[orderBy]
            sql += orderBy
        return (sql, fields)
    elif operation in ('INSERT', 'UPDATE'):
        if operation == 'UPDATE':
            sql = 'UPDATE %s SET\n  ' % cls._sqlTable
        else:
            sql = 'INSERT INTO %s (\n  ' % cls._sqlTable
        set = []
        fields = []
        sqlfields = []
        for (field, sqlfield) in cls._sqlFields.items():
            if operation == 'UPDATE' and field in cls._sqlPrimary:
                # the primary key goes into WHERE, not SET
                continue
            if sqlfield.find(cls._sqlTable + '.') == 0:
                # It's a local field, chop off the table part
                sqlfield = sqlfield[len(cls._sqlTable)+1:]
                fields.append(field)
                sqlfields.append(sqlfield)
                set.append(sqlfield + '=%s')
        if operation == 'UPDATE':
            sql += ',\n  '.join(set)
            sql += '\nWHERE\n  '
            tempWhere = []
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
                fields.append(key)
            sql += ' AND\n  '.join(tempWhere)
        else:
            sql += ',\n  '.join(sqlfields)
            sql += ')\nVALUES (\n  '
            sql += ',\n  '.join(('%s',) * len(sqlfields))
            sql += ')'
        return (sql, fields)
    elif operation == 'DELETE':
        sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
        if where:
            sql += " AND\n  ".join(where)
        else:
            # Build the primary-key restriction exactly once.
            # (Previously this block was wrapped in an extra loop over
            # _sqlPrimary, appending the whole clause once per key
            # column and producing broken SQL for multi-column keys.)
            tempWhere = []
            for key in cls._sqlPrimary:
                tempWhere.append(cls._sqlFields[key] + "=%s")
            sql += ' AND\n  '.join(tempWhere)
        return (sql, )
    else:
        raise ValueError("Unknown operation: %s" % (operation,))
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
    """Return a new sequence number for insertion in self._sqlTable.

    Note that if your sequences are not named
    tablename_primarykey_seq (ie. for table 'blapp' with primary
    key 'john_id', sequence name blapp_john_id_seq) you must give
    the full sequence name as an optional argument to _nextSequence)

    NOTE(review): uses nextval(), so this presumably targets
    PostgreSQL-style sequences (the _saveDB comments mention MySQL
    lacks them) -- backends without sequences must override this.
    Python 2 only ('<>' operator, string exception).
    """
    if not name:
        # fall back to the class-configured sequence name
        name = cls._sqlSequence
    if not name:
        # Assume it's tablename_primarykey_seq
        if len(cls._sqlPrimary) <> 1:
            raise "Could not guess sequence name for multi-primary-key"
        primary = cls._sqlPrimary[0]
        name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
        # Don't have . as a tablename or column name! =)
    curs = cls.cursor()
    # name is interpolated directly into the SQL -- it comes from class
    # configuration, not user input
    curs.execute("SELECT nextval('%s')" % name)
    value = curs.fetchone()[0]
    curs.close()
    return value
_nextSequence = classmethod(_nextSequence)
def _loadDB(self):
    """Connect to the database to load myself.

    Fetches one row via _prepareSQL("SELECT") and hands it to
    _loadFromRow, then stamps self._updated with the load time.
    Raises NotFound when the primary key is unset or matches no row.
    Python 2 only ('raise X, y' syntax).
    """
    if not self._validID():
        # primary key not fully set -- nothing to look up
        raise NotFound, self._getID()
    (sql, fields) = self._prepareSQL("SELECT")
    curs = self.cursor()
    curs.execute(sql, self._getID())
    result = curs.fetchone()
    if not result:
        # no matching row in the database
        curs.close()
        raise NotFound, self._getID()
    self._loadFromRow(result, fields, curs)
    curs.close()
    self._updated = time.time()
def _saveDB(self):
    """Insert or update into the database.

    Note that every field will be updated, not just the changed
    one.

    New objects (self._new) are INSERTed, drawing a fresh id from
    _nextSequence() if needed; existing ones are UPDATEd.  Each value
    is massaged into a form the DB driver accepts before execution.
    """
    # We're a "fresh" copy now
    self._updated = time.time()
    if self._new:
        operation = 'INSERT'
        if not self._validID():
            self._setID(self._nextSequence())
        # Note that we assign this ID to our self
        # BEFORE possibly saving any of our attribute
        # objects that might be new as well. This means
        # that they might have references to us, as long
        # as the database does not require our existence
        # yet.
        #
        # Since mysql does not have Sequences, this will
        # not work as smoothly there. See class
        # MysqlForgetter below.
    else:
        operation = 'UPDATE'
    (sql, fields) = self._prepareSQL(operation)
    values = []
    for field in fields:
        value = getattr(self, field)
        # First some dirty datatype hacks
        if DateTime and type(value) == DateTime.DateTimeType:
            # stupid psycopg does not support it's own return type..
            # lovely..
            value = str(value)
        if DateTime and type(value) == DateTime.DateTimeDeltaType:
            # Format delta as days, hours, minutes seconds
            # NOTE: includes value.second directly to get the
            # whole floating number
            value = value.strftime("%d %H:%M:") + str(value.second)
        if value is True or value is False:
            # We must store booleans as 't' and 'f' ...
            value = value and 't' or 'f'
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                # NOTE(review): bare except hides the real error, and a
                # string exception is invalid since Python 2.6
                raise "Unsupported: Can't reference multiple-primary-key: %s" % value
        values.append(value)
    cursor = self.cursor()
    cursor.execute(sql, values)
    # cursor.commit()
    cursor.close()
    self._new = False
    self._changed = None
def getAll(cls, where=None, orderBy=None):
    """Fetch every object, optionally restricted by AND-ed where clauses.

    Only the IDs are read from the database here; each returned
    object lazily loads the rest of its fields through the regular
    load()-autocall on first attribute access.
    """
    ids = cls.getAllIDs(where, orderBy=orderBy)
    multi_key = len(cls._sqlPrimary) > 1
    instances = []
    for id in ids:
        if multi_key:
            instances.append(cls(*id))
        else:
            instances.append(cls(id))
    return instances
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
                   useObject=None, orderBy=None):
    """Retrieve every object as an iterator.

    Possibly limitted by the where list of clauses that will be
    AND-ed.

    Since an iterator is returned, only ``buffer`` rows are loaded
    from the database at once. This is useful if you need
    to process all objects.

    If useObject is given, this object is returned each time, but
    with new data. This can be used to avoid creating many new
    objects when only one object is needed each time.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    fetchedAt = time.time()
    curs.execute(sql)
    # We might start eating memory at this point
    def getNext(rows=[]):
        # NOTE: the mutable default argument is deliberate here -- it
        # acts as a persistent fetch buffer shared between calls of
        # this closure.  A fresh list is created for every
        # getAllIterator() call, so iterators do not interfere.
        forgetter = cls
        if not rows:
            rows += curs.fetchmany(buffer)
        if not rows:
            # exhausted -- returning None stops iter(getNext, None)
            curs.close()
            return None
        row = rows[0]
        del rows[0]
        try:
            idPositions = [fields.index(key) for key in cls._sqlPrimary]
        except ValueError:
            # NOTE(review): string exception, invalid since Python 2.6
            raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
        ids = [row[pos] for pos in idPositions]
        if useObject:
            # recycle the caller-supplied object instead of allocating
            result = useObject
            result.reset()
            result._setID(ids)
        else:
            result = forgetter(*ids)
        result._loadFromRow(row, fields, curs)
        result._updated = fetchedAt
        return result
    # iter(callable, sentinel): call getNext() until it returns None
    return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
    """Fetch the primary key of every row matching the where clauses.

    ``where`` is a list of clauses joined with AND.  Returns a list
    of ids; with a multi-column primary key each element is a tuple
    instead of a single value.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where,
                                    cls._sqlPrimary, orderBy=orderBy)
    curs = cls.cursor()
    curs.execute(sql)
    # We might start eating memory at this point
    rows = curs.fetchall()
    curs.close()
    idPositions = [fields.index(key) for key in cls._sqlPrimary]
    single_key = len(idPositions) == 1
    result = []
    for row in rows:
        if single_key:
            result.append(row[idPositions[0]])
        else:
            result.append(tuple([row[pos] for pos in idPositions]))
    return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
    """Fetch (id, description) pairs for every row matching ``where``.

    ``description`` is the _shortView columns joined with SEPERATOR;
    ids are tuples when the primary key spans several columns.
    """
    (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
    curs = cls.cursor()
    curs.execute(sql)
    # We might start eating memory at this point
    rows = curs.fetchall()
    curs.close()
    idPositions = [fields.index(key) for key in cls._sqlPrimary]
    shortPos = [fields.index(short) for short in cls._shortView]
    single_key = len(idPositions) == 1
    result = []
    for row in rows:
        if single_key:
            ids = row[idPositions[0]]
        else:
            ids = tuple([row[pos] for pos in idPositions])
        label = SEPERATOR.join([str(row[pos]) for pos in shortPos])
        result.append((ids, label))
    return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
    """Return the children that links to me.

    That means that I have to be listed in their _userClasses
    somehow. If field is specified, that field in my children is
    used as the pointer to me. Use this if you have multiple fields
    referring to my class.
    """
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if not field:
        # guess the referring field from the child's _userClasses
        for (i_field, i_class) in forgetter._userClasses.items():
            if isinstance(self, i_class):
                field = i_field
                break # first one found is ok :=)
    if not field:
        # NOTE(review): string exception, invalid since Python 2.6
        raise "No field found, check forgetter's _userClasses"
    sqlname = forgetter._sqlFields[field]
    myID = self._getID()[0] # assuming single-primary !
    # NOTE(review): myID is interpolated directly into the SQL -- safe
    # only for trusted ids; parameter binding would be preferable
    whereList = ["%s='%s'" % (sqlname, myID)]
    if where:
        whereList.extend(where)
    return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
                        orderBy=None, useObject=None):
    """Like getChildren, except that it returns an
    iterator, like getAllIterator.

    Only ``buffer``-sized batches are fetched at a time (see
    getAllIterator); useObject may be passed to recycle one object
    for every returned row.
    """
    if type(where) in (types.StringType, types.UnicodeType):
        where = (where,)
    if not field:
        # guess the referring field from the child's _userClasses
        for (i_field, i_class) in forgetter._userClasses.items():
            if isinstance(self, i_class):
                field = i_field
                break # first one found is ok :=)
    if not field:
        # NOTE(review): string exception, invalid since Python 2.6
        raise "No field found, check forgetter's _userClasses"
    sqlname = forgetter._sqlFields[field]
    myID = self._getID()[0] # assuming single-primary !
    # NOTE(review): direct SQL interpolation of myID, as in getChildren
    whereList = ["%s='%s'" % (sqlname, myID)]
    if where:
        whereList.extend(where)
    return forgetter.getAllIterator(whereList, useObject=useObject,
                                    orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter._loadDB | python | def _loadDB(self):
if not self._validID():
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
self._updated = time.time() | Connect to the database to load myself | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L657-L670 | null | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
    """Return a fresh DB-API cursor.

    The default implementation imports a project-local ``database``
    module and uses its cursor() factory; applications are expected
    to override or rebind this classmethod with their own connection
    handling (see the surrounding class documentation).
    """
    try:
        import database
        return database.cursor()
    except:
        # NOTE(review): bare except hides the real failure, and string
        # exceptions are invalid since Python 2.6 -- consider raising a
        # proper exception carrying the original error instead.
        raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
    """Return a cached instance for these constructor args when possible.

    A per-class dict of weak references keyed on the constructor
    arguments serves as an object cache.  Entries whose referent has
    been garbage collected, or that are older than cls._timeout
    seconds, are replaced by a fresh instance.  The string exception
    "NotFound" is (ab)used as a local 'goto' to fall through to the
    creation path -- Python 2 only (string exceptions cannot be
    raised/caught in Python 2.6+, and dict.has_key is gone in 3).
    """
    if not hasattr(cls, '_cache'):
        cls._cache = {}
    try: # to implement 'goto' in Python.. UGH
        if not cls._cache.has_key(args):
            # unknown
            raise "NotFound"
        (ref, updated) = cls._cache[args]
        realObject = ref()
        if realObject is None:
            # No more real references to it, dead object
            raise "NotFound"
        age = time.time() - updated
        if age > cls._timeout:
            # Too old!
            raise "NotFound"
        updated = time.time()
    except "NotFound":
        # We'll need to create it
        realObject = object.__new__(cls, *args)
        ref = weakref.ref(realObject)
        updated = time.time()
        # store a weak reference
        cls._cache[args] = (ref, updated)
    return realObject
def __init__(self, *id):
    """Create a forgetter, optionally bound to a database id.

    A multi-column primary key (len(_sqlPrimary) > 1) takes one
    constructor argument per key column.  No database access happens
    here; data is fetched lazily by load().
    """
    self._values = {}
    self.reset()
    if id:
        self._setID(id)
    else:
        self._resetID()
def _setID(self, id):
    """Set the ID, ie. the values for primary keys.

    id can be either a list, following the
    _sqlPrimary, or some other type, that will be set
    as the singleton ID (requires 1-length sqlPrimary).
    Marks the object as not new.  Python 2 only (types.ListType,
    string exceptions).
    """
    if type(id) in (types.ListType, types.TupleType):
        try:
            # consume one id value per primary key column
            for key in self._sqlPrimary:
                value = id[0]
                self.__dict__[key] = value
                id = id[1:] # rest, go revursive
        except IndexError:
            # NOTE(review): string exception, invalid since Python 2.6
            raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    elif len(self._sqlPrimary) <= 1:
        # It's a simple value
        key = self._sqlPrimary[0]
        self.__dict__[key] = id
    else:
        raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
    self._new = False
def _getID(self):
    """Get the ID values as a list ordered by _sqlPrimary.

    NOTE(review): the original docstring said "tuple", but a list is
    returned.  References to other Forgetters are flattened to their
    single id value, saving them first if they are new.
    """
    id = []
    for key in self._sqlPrimary:
        value = self.__dict__[key]
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except:
                # NOTE(review): bare except masks the real error; a
                # ValueError from the unpack is what is expected here
                raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
        id.append(value)
    return id
def _resetID(self):
    """Blank every primary key column and flag the object as new."""
    blanks = (None,) * len(self._sqlPrimary)
    self._setID(blanks)
    self._new = True
def _validID(self):
    """True when every primary key column has a value (no Nones)."""
    return None not in self._getID()
def __getattr__(self, key):
    """Get an attribute, normally a SQL field value.

    Will be called when an unknown key is to be
    retrieved, ie. most likely one of our database
    fields.  Triggers a lazy load() on first field access.
    Python 2 only (has_key, 'raise X, y' syntax).
    """
    if self._sqlFields.has_key(key):
        if not self._updated:
            # field data not fetched yet -- load lazily
            self.load()
        return self._values[key]
    else:
        raise AttributeError, key
def __setattr__(self, key, value):
    """Set an attribute, normally a SQL field value.

    Will be called whenever something needs to be set, so
    we store the value as a SQL-thingie unless the key
    is not listed in sqlFields.  Setting a field marks the
    object as changed (for save()); primary key columns and
    non-field attributes go straight into __dict__.
    """
    if key not in self._sqlPrimary and self._sqlFields.has_key(key):
        if not self._updated:
            # make sure existing data is loaded before overwriting
            self.load()
        self._values[key] = value
        self._changed = time.time()
    else:
        # It's a normal thingie
        self.__dict__[key] = value
def __del__(self):
    """Autosave on garbage collection when _autosave is enabled.

    Be aware: with Python 2.2+'s collector this may run long after
    the last reference is dropped, so changes are not guaranteed to
    hit the database promptly, and any save error (e.g. a wrong
    datatype for a field) is silently swallowed here.  Call save()
    explicitly when you need either guarantee, and reset() first if
    you want to discard changes instead.
    """
    if not self._autosave:
        return
    try:
        self.save()
    except Exception:
        # 'except Exception, e:' was Python-2-only syntax and bound an
        # unused name; exceptions in __del__ cannot propagate usefully,
        # so they are deliberately ignored.
        pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
    """Load from a database row, described by fields.

    ``fields`` should be the attribute names that
    will be set. Note that userclasses will be
    created (but not loaded).  ``cursor.description`` is consulted
    to detect the driver's BOOLEAN column type, if any.
    """
    position = 0
    for elem in fields:
        value = result[position]
        valueType = cursor.description[position][1]
        # NOTE(review): '(value is not True or value is not False)' is
        # always true for any value -- 'and' was probably intended.
        # Harmless in practice: re-coercing a bool yields the same bool.
        if hasattr(self._dbModule, 'BOOLEAN') and \
           valueType == self._dbModule.BOOLEAN and \
           (value is not True or value is not False):
            # convert to a python boolean
            value = value and True or False
        if value and self._userClasses.has_key(elem):
            userClass = self._userClasses[elem]
            # create an instance (lazy -- only the id is set here)
            value = userClass(value)
        self._values[elem] = value
        position += 1
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
    def getAllIterator(cls, where=None, buffer=100,
                       useObject=None, orderBy=None):
        """Iterate over every matching object, fetching lazily.

        Rows are pulled from the cursor ``buffer`` at a time, so the
        whole result set is never materialized at once.  ``where`` is an
        optional sequence of SQL clauses AND-ed together.  When
        ``useObject`` is given, that single instance is recycled for
        every row (saves allocations, but do not keep references to it
        across iterations).
        """
        (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
        curs = cls.cursor()
        fetchedAt = time.time()
        curs.execute(sql)
        # We might start eating memory at this point
        def getNext(rows=[]):
            # NOTE: the mutable default argument is deliberate -- it is
            # the fetch buffer shared between successive calls to this
            # closure (a fresh list per getAllIterator() call).
            forgetter = cls
            if not rows:
                rows += curs.fetchmany(buffer)
                if not rows:
                    # Result set exhausted; the sentinel None stops the
                    # two-argument iter() below.
                    curs.close()
                    return None
            row = rows[0]
            del rows[0]
            try:
                idPositions = [fields.index(key) for key in cls._sqlPrimary]
            except ValueError:
                raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
            ids = [row[pos] for pos in idPositions]
            if useObject:
                result = useObject
                result.reset()
                result._setID(ids)
            else:
                result = forgetter(*ids)
            result._loadFromRow(row, fields, curs)
            result._updated = fetchedAt
            return result
        # iter(callable, sentinel): call getNext() until it returns None
        return iter(getNext, None)
    getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
    def getChildrenIterator(self, forgetter, field=None, where=None,
                            orderBy=None, useObject=None):
        """Like getChildren(), but returning a lazy iterator.

        ``forgetter`` is the child class whose rows reference me via its
        _userClasses; ``field`` names the referring attribute explicitly
        when several of its fields point to my class.  Extra ``where``
        clauses are AND-ed in.  See getAllIterator() for the
        ``useObject`` recycling semantics.  Assumes my primary key is a
        single column.
        """
        if type(where) in (types.StringType, types.UnicodeType):
            where = (where,)
        if not field:
            # Guess the referring field: first _userClasses entry whose
            # class matches mine
            for (i_field, i_class) in forgetter._userClasses.items():
                if isinstance(self, i_class):
                    field = i_field
                    break # first one found is ok :=)
        if not field:
            raise "No field found, check forgetter's _userClasses"
        sqlname = forgetter._sqlFields[field]
        myID = self._getID()[0] # assuming single-primary !
        whereList = ["%s='%s'" % (sqlname, myID)]
        if where:
            whereList.extend(where)
        return forgetter.getAllIterator(whereList, useObject=useObject,
                                        orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter._saveDB | python | def _saveDB(self):
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None | Insert or update into the database.
Note that every field will be updated, not just the changed
one. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L672-L728 | [
"def cursor(cls):\n try:\n import database\n return database.cursor()\n except:\n raise \"cursor method undefined, no database connection could be made\"\n",
"def _validID(self):\n \"\"\"Is all ID fields with values, ie. not None?\"\"\"\n return not None in self._getID()\n",
" def _prepareSQL(cls, operation=\"SELECT\", where=None, selectfields=None, orderBy=None):\n \"\"\"Return a sql for the given operation.\n\n Possible operations:\n SELECT read data for this id\n SELECTALL read data for all ids\n INSERT insert data, create new id\n UPDATE update data for this id\n DELETE remove data for this id\n\n SQL will be built by data from _sqlFields, and will\n contain 0 or several %s for you to sprintf-format in later:\n\n SELECT --> len(cls._sqlPrimary)\n SELECTALL --> 0 %s\n INSERT --> len(cls._sqlFields) %s (including id)\n UPDATE --> len(cls._sqlFields) %s (including id)\n DELETE --> len(cls._sqlPrimary)\n\n (Note: INSERT and UPDATE will only change values in _sqlTable, so\n the actual number of fields for substitutions might be lower\n than len(cls._sqlFields) )\n\n For INSERT you should use cls._nextSequence() to retrieve\n a new 'id' number. Note that if your sequences are not named\n tablename_primarykey_seq (ie. for table 'blapp' with primary key\n 'john_id', sequence name blapp_john_id_seq) you must give the sequence\n name as an optional argument to _nextSequence)\n\n Additional note: cls._nextSequence() MUST be overloaded\n for multi _sqlPrimary classes. Return a tuple.\n\n Return values will always be tuples:\n SELECT --> (sql, fields)\n SELECTALL -> sql, fields)\n INSERT -> (sql, fields)\n UPDATE -> (sql, fields)\n DELETE -> (sql,) -- for consistency\n\n fields will be object properties as a list, ie. the keys from\n cls._sqlFields. The purpose of this list is to give the programmer\n an idea of which order the keys are inserted in the SQL, giving\n help for retreiving (SELECT, SELECTALL) or inserting for %s\n (INSERT, DELETE).\n\n Why? 
Well, the keys are stored in a hash, and we cannot be sure\n about the order of hash.keys() from time to time, not even with\n the same instance.\n\n Optional where-parameter applies to SELECT, SELECTALL and DELETE.\n where should be a list or string of where clauses.\n\n \"\"\"\n # Normalize parameter for later comparissions\n operation = operation.upper()\n # Convert where to a list if it is a string\n if type(where) in (types.StringType, types.UnicodeType):\n where = (where,)\n if orderBy is None:\n orderBy = cls._orderBy\n\n if operation in ('SELECT', 'SELECTALL'):\n # Get the object fields and sql fields in the same\n # order to be able to reconstruct later.\n fields = []\n sqlfields = []\n for (field, sqlfield) in cls._sqlFields.items():\n if selectfields is None or field in selectfields:\n fields.append(field)\n sqlfields.append(sqlfield)\n if not fields:\n # dirrrrrty!\n raise \"\"\"ERROR: No fields defined, cannot create SQL.\nMaybe sqlPrimary is invalid?\nFields asked: %s\nMy fields: %s\"\"\" % (selectfields, cls._sqlFields)\n\n sql = \"SELECT\\n \"\n sql += ', '.join(sqlfields)\n sql += \"\\nFROM\\n \"\n tables = cls._tables.keys()\n if not tables:\n raise \"REALITY ERROR: No tables defined\"\n sql += ', '.join(tables)\n tempWhere = [\"%s=%s\" % linkPair for linkPair in cls._sqlLinks]\n # this MUST be here.\n if operation <> 'SELECTALL':\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n if where:\n tempWhere += where\n if(tempWhere):\n # Make sure to use paranteses in case someone has used\n # ORs in the WHERE-list..\n sql += \"\\nWHERE\\n (\"\n sql += ') AND\\n ('.join(tempWhere)\n sql += ')'\n if operation == 'SELECTALL' and orderBy:\n sql += '\\nORDER BY\\n '\n if type(orderBy) in (types.TupleType, types.ListType):\n orderBy = [cls._sqlFields[x] for x in orderBy]\n orderBy = ',\\n '.join(orderBy)\n else:\n orderBy = cls._sqlFields[orderBy]\n sql += orderBy\n return (sql, fields)\n\n elif operation in ('INSERT', 
'UPDATE'):\n if operation == 'UPDATE':\n sql = 'UPDATE %s SET\\n ' % cls._sqlTable\n else:\n sql = 'INSERT INTO %s (\\n ' % cls._sqlTable\n\n set = []\n fields = []\n sqlfields = []\n for (field, sqlfield) in cls._sqlFields.items():\n if operation == 'UPDATE' and field in cls._sqlPrimary:\n continue\n if sqlfield.find(cls._sqlTable + '.') == 0:\n # It's a local field, chop of the table part\n sqlfield = sqlfield[len(cls._sqlTable)+1:]\n fields.append(field)\n sqlfields.append(sqlfield)\n set.append(sqlfield + '=%s')\n if operation == 'UPDATE':\n sql += ',\\n '.join(set)\n sql += '\\nWHERE\\n '\n tempWhere = []\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n fields.append(key)\n sql += ' AND\\n '.join(tempWhere)\n else:\n sql += ',\\n '.join(sqlfields)\n sql += ')\\nVALUES (\\n '\n sql += ',\\n '.join(('%s',) * len(sqlfields))\n sql += ')'\n\n return (sql, fields)\n\n elif operation == 'DELETE':\n sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '\n if where:\n sql += \" AND\\n \".join(where)\n else:\n for key in cls._sqlPrimary:\n tempWhere = []\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n sql += ' AND\\n '.join(tempWhere)\n return (sql, )\n else:\n raise \"Unknown operation\", operation\n"
] | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
    outside. The cursor should be DB API 2.0 compliant, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
    # Fields will be joined together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
    def cursor(cls):
        """Return a DB API cursor; meant to be overridden from outside.

        The fallback tries a project-local ``database`` module; the bare
        except deliberately turns *any* failure into the (string)
        exception below.
        """
        try:
            import database
            return database.cursor()
        except:
            raise "cursor method undefined, no database connection could be made"
    cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
    def __new__(cls, *args):
        """Return a cached instance for these IDs when a fresh one exists.

        Instances are cached per ID-tuple in cls._cache as weak
        references and expire after cls._timeout seconds.

        WARNING: cache misses are signalled with *string* exceptions
        ("NotFound") as a poor man's goto; catching a string exception
        only works on Python versions where string exceptions still
        exist (pre-2.6).
        """
        if not hasattr(cls, '_cache'):
            cls._cache = {}
        try: # to implement 'goto' in Python.. UGH
            if not cls._cache.has_key(args):
                # unknown
                raise "NotFound"
            (ref, updated) = cls._cache[args]
            realObject = ref()
            if realObject is None:
                # No more real references to it, dead object
                raise "NotFound"
            age = time.time() - updated
            if age > cls._timeout:
                # Too old!
                raise "NotFound"
            updated = time.time()
        except "NotFound":
            # We'll need to create it
            realObject = object.__new__(cls, *args)
            ref = weakref.ref(realObject)
            updated = time.time()
        # store a weak reference
        cls._cache[args] = (ref, updated)
        return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.
A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length), may be initalized by using several parameters
to this constructor. Note that the object will not be loaded
before you call load().
"""
self._values = {}
self.reset()
if not id:
self._resetID()
else:
self._setID(id)
    def _setID(self, id):
        """Bind this object to the given primary key value(s).

        ``id`` may be a list/tuple matching _sqlPrimary in order, or a
        single bare value when the primary key has one column.  Values
        are written straight into __dict__, bypassing __setattr__ (so no
        lazy load is triggered).  Marks the object as not new.

        Raises a (string) exception when too few values are supplied.
        """
        if type(id) in (types.ListType, types.TupleType):
            try:
                for key in self._sqlPrimary:
                    value = id[0]
                    self.__dict__[key] = value
                    id = id[1:] # rest, go revursive
            except IndexError:
                raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
        elif len(self._sqlPrimary) <= 1:
            # It's a simple value
            key = self._sqlPrimary[0]
            self.__dict__[key] = id
        else:
            raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
        self._new = False
    def _getID(self):
        """Return the primary key values as a list, ordered by _sqlPrimary.

        Forgetter-valued key parts are flattened to their own single ID
        (saving them first when they are new).
        """
        id = []
        for key in self._sqlPrimary:
            value = self.__dict__[key]
            if isinstance(value, Forgetter):
                # It's another object, we store only the ID
                if value._new:
                    # It's a new object too, it must be saved!
                    value.save()
                try:
                    (value,) = value._getID()
                except:
                    raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
            id.append(value)
        return id
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
    def __getattr__(self, key):
        """Lazy read of a SQL-mapped attribute.

        Only invoked for keys missing from __dict__ (primary key fields
        live in __dict__ and never reach here).  First access on an
        unloaded object triggers load().
        """
        if self._sqlFields.has_key(key):
            if not self._updated:
                self.load()
            return self._values[key]
        else:
            raise AttributeError, key
    def __setattr__(self, key, value):
        """Write an attribute, routing SQL-mapped fields into _values.

        Primary key fields and non-SQL attributes go straight into
        __dict__ (which also keeps this method from recursing).  A
        mapped field first forces load() so a half-loaded row is never
        saved, then records the change time for save() to notice.
        """
        if key not in self._sqlPrimary and self._sqlFields.has_key(key):
            if not self._updated:
                self.load()
            self._values[key] = value
            self._changed = time.time()
        else:
            # It's a normal thingie
            self.__dict__[key] = value
    def __del__(self):
        """Autosave on garbage collection when _autosave is set.

        Any save error is deliberately swallowed: raising from __del__
        would only yield an ignored-exception warning.  Call save()
        explicitly to see failures, or reset() first to discard changes.
        Note: with Python 2.2+ GC, collection (and thus this save) may
        happen much later than the last reference drop.
        """
        if not self._autosave:
            return
        try:
            self.save()
        except Exception, e:
            pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
    def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
        """Build the SQL statement (with %s placeholders) for ``operation``.

        Supported operations and return values:

        SELECT    -> (sql, fields)  placeholders for _sqlPrimary
        SELECTALL -> (sql, fields)  no placeholders
        INSERT    -> (sql, fields)  one placeholder per listed field
        UPDATE    -> (sql, fields)  ditto, primary key fields last
        DELETE    -> (sql,)         placeholders for _sqlPrimary

        ``fields`` lists the attribute names in the exact order their
        placeholders appear -- necessary because _sqlFields is a dict
        whose key order is otherwise unspecified.  INSERT and UPDATE
        only touch columns of cls._sqlTable, so the placeholder count
        may be lower than len(cls._sqlFields).  ``where`` (a string or a
        sequence of clauses, AND-ed) applies to SELECT, SELECTALL and
        DELETE.  For INSERT, fetch the new id with cls._nextSequence()
        first (must be overridden for multi-column primary keys).
        """
        # Normalize parameter for later comparissions
        operation = operation.upper()
        # Convert where to a list if it is a string
        if type(where) in (types.StringType, types.UnicodeType):
            where = (where,)
        if orderBy is None:
            orderBy = cls._orderBy
        if operation in ('SELECT', 'SELECTALL'):
            # Get the object fields and sql fields in the same
            # order to be able to reconstruct later.
            fields = []
            sqlfields = []
            for (field, sqlfield) in cls._sqlFields.items():
                if selectfields is None or field in selectfields:
                    fields.append(field)
                    sqlfields.append(sqlfield)
            if not fields:
                # dirrrrrty!
                raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
            sql = "SELECT\n  "
            sql += ', '.join(sqlfields)
            sql += "\nFROM\n  "
            tables = cls._tables.keys()
            if not tables:
                raise "REALITY ERROR: No tables defined"
            sql += ', '.join(tables)
            # Join conditions linking the involved tables
            tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
            # this MUST be here.
            if operation <> 'SELECTALL':
                for key in cls._sqlPrimary:
                    tempWhere.append(cls._sqlFields[key] + "=%s")
            if where:
                tempWhere += where
            if(tempWhere):
                # Make sure to use paranteses in case someone has used
                # ORs in the WHERE-list..
                sql += "\nWHERE\n  ("
                sql += ') AND\n  ('.join(tempWhere)
                sql += ')'
            if operation == 'SELECTALL' and orderBy:
                sql += '\nORDER BY\n  '
                if type(orderBy) in (types.TupleType, types.ListType):
                    orderBy = [cls._sqlFields[x] for x in orderBy]
                    orderBy = ',\n  '.join(orderBy)
                else:
                    orderBy = cls._sqlFields[orderBy]
                sql += orderBy
            return (sql, fields)
        elif operation in ('INSERT', 'UPDATE'):
            if operation == 'UPDATE':
                sql = 'UPDATE %s SET\n  ' % cls._sqlTable
            else:
                sql = 'INSERT INTO %s (\n  ' % cls._sqlTable
            set = []
            fields = []
            sqlfields = []
            for (field, sqlfield) in cls._sqlFields.items():
                if operation == 'UPDATE' and field in cls._sqlPrimary:
                    continue
                if sqlfield.find(cls._sqlTable + '.') == 0:
                    # It's a local field, chop of the table part
                    sqlfield = sqlfield[len(cls._sqlTable)+1:]
                    fields.append(field)
                    sqlfields.append(sqlfield)
                    set.append(sqlfield + '=%s')
            if operation == 'UPDATE':
                sql += ',\n  '.join(set)
                sql += '\nWHERE\n  '
                tempWhere = []
                for key in cls._sqlPrimary:
                    tempWhere.append(cls._sqlFields[key] + "=%s")
                    # Primary key values are appended last, matching the
                    # WHERE placeholders
                    fields.append(key)
                sql += ' AND\n  '.join(tempWhere)
            else:
                sql += ',\n  '.join(sqlfields)
                sql += ')\nVALUES (\n  '
                sql += ',\n  '.join(('%s',) * len(sqlfields))
                sql += ')'
            return (sql, fields)
        elif operation == 'DELETE':
            sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
            if where:
                sql += " AND\n  ".join(where)
            else:
                for key in cls._sqlPrimary:
                    tempWhere = []
                    for key in cls._sqlPrimary:
                        tempWhere.append(cls._sqlFields[key] + "=%s")
                    sql += ' AND\n  '.join(tempWhere)
            return (sql, )
        else:
            raise "Unknown operation", operation
    _prepareSQL = classmethod(_prepareSQL)
    def _nextSequence(cls, name=None):
        """Fetch a fresh primary key value from a sequence (PostgreSQL nextval).

        ``name`` (or cls._sqlSequence) overrides the guessed sequence
        name ``<table>_<primarykey>_seq``.  Guessing only works for
        single-column primary keys; multi-key classes must override
        this method.
        """
        if not name:
            name = cls._sqlSequence
        if not name:
            # Assume it's tablename_primarykey_seq
            if len(cls._sqlPrimary) <> 1:
                raise "Could not guess sequence name for multi-primary-key"
            primary = cls._sqlPrimary[0]
            name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
            # Don't have . as a tablename or column name! =)
        curs = cls.cursor()
        curs.execute("SELECT nextval('%s')" % name)
        value = curs.fetchone()[0]
        curs.close()
        return value
    _nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded).
"""
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1
    def _loadDB(self):
        """Fetch my row from the database and fill in the attributes.

        Raises NotFound (old two-argument raise syntax) when the ID is
        incomplete or no row matches it.
        """
        if not self._validID():
            raise NotFound, self._getID()
        (sql, fields) = self._prepareSQL("SELECT")
        curs = self.cursor()
        curs.execute(sql, self._getID())
        result = curs.fetchone()
        if not result:
            curs.close()
            raise NotFound, self._getID()
        self._loadFromRow(result, fields, curs)
        curs.close()
        self._updated = time.time()
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
    def getAllIterator(cls, where=None, buffer=100,
                       useObject=None, orderBy=None):
        """Iterate over every matching object, fetching lazily.

        Rows are pulled from the cursor ``buffer`` at a time, so the
        whole result set is never materialized at once.  ``where`` is an
        optional sequence of SQL clauses AND-ed together.  When
        ``useObject`` is given, that single instance is recycled for
        every row (saves allocations, but do not keep references to it
        across iterations).
        """
        (sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
        curs = cls.cursor()
        fetchedAt = time.time()
        curs.execute(sql)
        # We might start eating memory at this point
        def getNext(rows=[]):
            # NOTE: the mutable default argument is deliberate -- it is
            # the fetch buffer shared between successive calls to this
            # closure (a fresh list per getAllIterator() call).
            forgetter = cls
            if not rows:
                rows += curs.fetchmany(buffer)
                if not rows:
                    # Result set exhausted; the sentinel None stops the
                    # two-argument iter() below.
                    curs.close()
                    return None
            row = rows[0]
            del rows[0]
            try:
                idPositions = [fields.index(key) for key in cls._sqlPrimary]
            except ValueError:
                raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
            ids = [row[pos] for pos in idPositions]
            if useObject:
                result = useObject
                result.reset()
                result._setID(ids)
            else:
                result = forgetter(*ids)
            result._loadFromRow(row, fields, curs)
            result._updated = fetchedAt
            return result
        # iter(callable, sentinel): call getNext() until it returns None
        return iter(getNext, None)
    getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrieve all the IDs, possibly matching the ``where`` clauses.

``where`` should be a list of clauses that will be joined
with AND. Note that the elements of the result are tuples if this
table has a multi-column _sqlPrimary, plain values otherwise.
"""
# Select only the primary-key columns (third argument limits fields).
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
# Map each primary-key attribute to its column position in the result set.
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
# NOTE: `(ids)` is just a parenthesized expression, not a tuple.
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve (id, description) tuples for all matching rows.

The description is a string composed of the fields named in
cls._shortView, joined with ``SEPERATOR``. The id is a tuple for
multi-column primary keys, a plain value otherwise. Useful for
building selection lists / dropdowns without instantiating objects.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
# Column positions of the primary key and of the displayable fields.
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the child objects (of class ``forgetter``) that link to me.

That means that I have to be listed in their _userClasses
somehow. If ``field`` is specified, that field in the children is
used as the pointer to me; use this when the child class has
multiple fields referring to my class.
"""
# Accept a single clause string by normalizing it to a one-tuple.
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
# Auto-detect the child field whose declared class matches me.
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
# NOTE(review): Python 2 string exception — not portable to Python 3.
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
# NOTE(review): myID is interpolated directly into the SQL —
# injection risk if primary-key values can be untrusted strings.
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren(), but returns an iterator (see getAllIterator).

Only a buffer of rows is held in memory at a time; if ``useObject``
is given, that single object is recycled for every row.
"""
# Accept a single clause string by normalizing it to a one-tuple.
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
# Auto-detect the child field whose declared class matches me.
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
# NOTE(review): Python 2 string exception — not portable to Python 3.
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
# NOTE(review): myID interpolated directly into SQL — injection risk
# if primary-key values can be untrusted strings.
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
    """Debug representation: class name followed by the primary-key values."""
    cls_name = self.__class__.__name__
    return '%s %s' % (cls_name, self._getID())
def __str__(self):
"""Human-readable one-liner: the _shortView fields joined by ', '.

Falls back to the primary-key attribute(s) when _shortView is empty.
"""
shortView = self._shortView or self._sqlPrimary
# NOTE: the loop variable shadows the name `short` used below — harmless
# but worth knowing when reading.
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
    """Two forgetters are equal when class name and primary-key values match."""
    if self.__class__.__name__ != obj.__class__.__name__:
        return False
    return self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter.getAll | python | def getAll(cls, where=None, orderBy=None):
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids] | Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L730-L746 | [
"def getAllIDs(cls, where=None, orderBy=None):\n \"\"\"Retrive all the IDs, possibly matching the where clauses.\n\n Where should be some list of where clauses that will be joined\n with AND). Note that the result might be tuples if this table\n has a multivalue _sqlPrimary.\n \"\"\"\n (sql, fields) = cls._prepareSQL(\"SELECTALL\", where,\n cls._sqlPrimary, orderBy=orderBy)\n curs = cls.cursor()\n curs.execute(sql)\n # We might start eating memory at this point\n rows = curs.fetchall()\n curs.close()\n result = []\n idPositions = [fields.index(key) for key in cls._sqlPrimary]\n for row in rows:\n ids = [row[pos] for pos in idPositions]\n if len(idPositions) > 1:\n ids = tuple(ids)\n else:\n ids = ids[0]\n result.append((ids))\n return result\n"
] | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
"""Return a DB-API cursor; subclasses/applications should override this.

Default behavior tries a project-local ``database`` module.
"""
try:
import database
return database.cursor()
except:
# NOTE(review): bare except hides the real ImportError/connection
# error; and this is a Python 2 string exception (TypeError in py3).
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
if not hasattr(cls, '_cache'):
cls._cache = {}
try: # to implement 'goto' in Python.. UGH
if not cls._cache.has_key(args):
# unknown
raise "NotFound"
(ref, updated) = cls._cache[args]
realObject = ref()
if realObject is None:
# No more real references to it, dead object
raise "NotFound"
age = time.time() - updated
if age > cls._timeout:
# Too old!
raise "NotFound"
updated = time.time()
except "NotFound":
# We'll need to create it
realObject = object.__new__(cls, *args)
ref = weakref.ref(realObject)
updated = time.time()
# store a weak reference
cls._cache[args] = (ref, updated)
return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.

A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length) may be initialized by using several parameters
to this constructor. Note that the object will not be loaded
from the database before you call load() (or read an attribute).
"""
# Set directly so __setattr__'s lazy-load machinery has a dict to use.
self._values = {}
self.reset()
if not id:
# No ID given: this is a fresh object (marked _new by _resetID).
self._resetID()
else:
self._setID(id)
def _setID(self, id):
"""Set the ID, ie. the values for primary keys.

``id`` can be either a list/tuple following _sqlPrimary, or some
other type that will be set as the singleton ID (requires a
1-length _sqlPrimary). Marks the object as not-new.
"""
if type(id) in (types.ListType, types.TupleType):
try:
for key in self._sqlPrimary:
value = id[0]
# Write via __dict__ to bypass __setattr__'s lazy-load logic.
self.__dict__[key] = value
id = id[1:] # consume the rest, one key at a time
except IndexError:
# NOTE(review): Python 2 string exception — TypeError in py3.
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
elif len(self._sqlPrimary) <= 1:
# It's a simple (scalar) value
key = self._sqlPrimary[0]
self.__dict__[key] = id
else:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
self._new = False
def _getID(self):
"""Get the primary-key values as a list ordered by _sqlPrimary.

If a key attribute is itself a Forgetter, it is saved first (if
new) and replaced by its single primary-key value.
"""
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved to get an ID!
value.save()
try:
# Only single-key references can be flattened to one value.
(value,) = value._getID()
except:
# NOTE(review): Python 2 string exception — TypeError in py3.
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id
def _resetID(self):
"""Reset all ID fields to None and mark the object as new."""
# Set every primary-key attribute to None in one go.
self._setID((None,) * len(self._sqlPrimary))
# _setID just set _new=False; override — a blank ID means "new object".
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
# Precedence note: this parses as `not (None in ...)`.
return not None in self._getID()
def __getattr__(self, key):
"""Get an attribute, normally a SQL field value.

Only called for keys not found the normal way — ie. most likely
one of our database fields. Triggers a lazy load() on first access.
"""
if self._sqlFields.has_key(key):
if not self._updated:
# Not loaded from the database yet: autoload now.
self.load()
return self._values[key]
else:
# NOTE: py2 `raise Exc, arg` syntax (raise AttributeError(key) in py3).
raise AttributeError, key
def __setattr__(self, key, value):
"""Set an attribute, normally a SQL field value.

Called for every assignment: SQL fields (except primary keys) are
stored in self._values and the object is marked changed; anything
else goes straight into __dict__.
"""
if key not in self._sqlPrimary and self._sqlFields.has_key(key):
if not self._updated:
# Load existing data first so a later save() writes full rows.
self.load()
self._values[key] = value
# Timestamp the change so save() knows there is dirty state.
self._changed = time.time()
else:
# It's a normal (non-SQL) attribute.
self.__dict__[key] = value
def __del__(self):
"""Autosave the object on garbage collection (if _autosave is set).

Be aware of this. If you want to undo a change, call reset()
first. With Python 2.2+'s background GC, changes may not hit the
database immediately unless you call save() explicitly — and only
an explicit save() lets you catch insertion/update errors.
"""
if not self._autosave:
return
try:
self.save()
except Exception, e:
# Deliberately swallowed: exceptions raised during interpreter
# teardown / GC cannot be usefully propagated from __del__.
pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.

Note: forgets changes you have made but not saved to the database!
(Others might already hold a reference to this object, expecting
something else.) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
# Clear the bookkeeping timestamps/flags used by load()/save().
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create (empty) slots for every mapped SQL field
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load field values from the database; old values are discarded.

If ``id`` is given, the object is reset and re-pointed at that
primary key before loading.
"""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to the database if anything changed since last load.

Returns True when a write was issued, False when nothing needed
saving.
"""
# Save when: brand new; or loaded-with-valid-ID and dirty; or the
# change happened after the last refresh from the database.
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Delete this object's row in the database.

The object is then reset and ready for reuse with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
# Primary-key values are passed as DB-API parameters to the WHERE clause.
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Populate self._values from one database row.

``fields`` lists the attribute names in the row's column order.
Values mapped in _userClasses are wrapped in an instance of that
class (created but not loaded — lazy as usual).
"""
position = 0
for elem in fields:
value = result[position]
# DB-API: description[pos][1] is the column's type code.
valueType = cursor.description[position][1]
# NOTE(review): `(value is not True or value is not False)` is always
# true for any value, so every BOOLEAN column is converted. Harmless,
# since the and/or conversion below maps True->True, False->False.
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean (pre-bool `x and True or False` idiom)
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance holding only the foreign-key value as its ID
value = userClass(value)
self._values[elem] = value
position += 1
def _loadDB(self):
"""Connect to the database and load this object's row.

Raises NotFound when the ID is blank or no matching row exists.
"""
if not self._validID():
# NOTE: py2 `raise Exc, arg` syntax (raise NotFound(...) in py3).
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
# Timestamp the refresh so lazy-load logic knows we are up to date.
self._updated = time.time()
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
useObject=None, orderBy=None):
"""Retrieve every object as an iterator.
Possibly limitted by the where list of clauses that will be
AND-ed.
Since an iterator is returned, only ``buffer`` rows are loaded
from the database at once. This is useful if you need
to process all objects.
If useObject is given, this object is returned each time, but
with new data. This can be used to avoid creating many new
objects when only one object is needed each time.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
fetchedAt = time.time()
curs.execute(sql)
# We might start eating memory at this point
def getNext(rows=[]):
forgetter = cls
if not rows:
rows += curs.fetchmany(buffer)
if not rows:
curs.close()
return None
row = rows[0]
del rows[0]
try:
idPositions = [fields.index(key) for key in cls._sqlPrimary]
except ValueError:
raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
ids = [row[pos] for pos in idPositions]
if useObject:
result = useObject
result.reset()
result._setID(ids)
else:
result = forgetter(*ids)
result._loadFromRow(row, fields, curs)
result._updated = fetchedAt
return result
return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter.getAllIterator | python | def getAllIterator(cls, where=None, buffer=100,
useObject=None, orderBy=None):
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
fetchedAt = time.time()
curs.execute(sql)
# We might start eating memory at this point
def getNext(rows=[]):
forgetter = cls
if not rows:
rows += curs.fetchmany(buffer)
if not rows:
curs.close()
return None
row = rows[0]
del rows[0]
try:
idPositions = [fields.index(key) for key in cls._sqlPrimary]
except ValueError:
raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
ids = [row[pos] for pos in idPositions]
if useObject:
result = useObject
result.reset()
result._setID(ids)
else:
result = forgetter(*ids)
result._loadFromRow(row, fields, curs)
result._updated = fetchedAt
return result
return iter(getNext, None) | Retrieve every object as an iterator.
Possibly limitted by the where list of clauses that will be
AND-ed.
Since an iterator is returned, only ``buffer`` rows are loaded
from the database at once. This is useful if you need
to process all objects.
If useObject is given, this object is returned each time, but
with new data. This can be used to avoid creating many new
objects when only one object is needed each time. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L751-L797 | [
"def cursor(cls):\n try:\n import database\n return database.cursor()\n except:\n raise \"cursor method undefined, no database connection could be made\"\n",
" def _prepareSQL(cls, operation=\"SELECT\", where=None, selectfields=None, orderBy=None):\n \"\"\"Return a sql for the given operation.\n\n Possible operations:\n SELECT read data for this id\n SELECTALL read data for all ids\n INSERT insert data, create new id\n UPDATE update data for this id\n DELETE remove data for this id\n\n SQL will be built by data from _sqlFields, and will\n contain 0 or several %s for you to sprintf-format in later:\n\n SELECT --> len(cls._sqlPrimary)\n SELECTALL --> 0 %s\n INSERT --> len(cls._sqlFields) %s (including id)\n UPDATE --> len(cls._sqlFields) %s (including id)\n DELETE --> len(cls._sqlPrimary)\n\n (Note: INSERT and UPDATE will only change values in _sqlTable, so\n the actual number of fields for substitutions might be lower\n than len(cls._sqlFields) )\n\n For INSERT you should use cls._nextSequence() to retrieve\n a new 'id' number. Note that if your sequences are not named\n tablename_primarykey_seq (ie. for table 'blapp' with primary key\n 'john_id', sequence name blapp_john_id_seq) you must give the sequence\n name as an optional argument to _nextSequence)\n\n Additional note: cls._nextSequence() MUST be overloaded\n for multi _sqlPrimary classes. Return a tuple.\n\n Return values will always be tuples:\n SELECT --> (sql, fields)\n SELECTALL -> sql, fields)\n INSERT -> (sql, fields)\n UPDATE -> (sql, fields)\n DELETE -> (sql,) -- for consistency\n\n fields will be object properties as a list, ie. the keys from\n cls._sqlFields. The purpose of this list is to give the programmer\n an idea of which order the keys are inserted in the SQL, giving\n help for retreiving (SELECT, SELECTALL) or inserting for %s\n (INSERT, DELETE).\n\n Why? 
Well, the keys are stored in a hash, and we cannot be sure\n about the order of hash.keys() from time to time, not even with\n the same instance.\n\n Optional where-parameter applies to SELECT, SELECTALL and DELETE.\n where should be a list or string of where clauses.\n\n \"\"\"\n # Normalize parameter for later comparissions\n operation = operation.upper()\n # Convert where to a list if it is a string\n if type(where) in (types.StringType, types.UnicodeType):\n where = (where,)\n if orderBy is None:\n orderBy = cls._orderBy\n\n if operation in ('SELECT', 'SELECTALL'):\n # Get the object fields and sql fields in the same\n # order to be able to reconstruct later.\n fields = []\n sqlfields = []\n for (field, sqlfield) in cls._sqlFields.items():\n if selectfields is None or field in selectfields:\n fields.append(field)\n sqlfields.append(sqlfield)\n if not fields:\n # dirrrrrty!\n raise \"\"\"ERROR: No fields defined, cannot create SQL.\nMaybe sqlPrimary is invalid?\nFields asked: %s\nMy fields: %s\"\"\" % (selectfields, cls._sqlFields)\n\n sql = \"SELECT\\n \"\n sql += ', '.join(sqlfields)\n sql += \"\\nFROM\\n \"\n tables = cls._tables.keys()\n if not tables:\n raise \"REALITY ERROR: No tables defined\"\n sql += ', '.join(tables)\n tempWhere = [\"%s=%s\" % linkPair for linkPair in cls._sqlLinks]\n # this MUST be here.\n if operation <> 'SELECTALL':\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n if where:\n tempWhere += where\n if(tempWhere):\n # Make sure to use paranteses in case someone has used\n # ORs in the WHERE-list..\n sql += \"\\nWHERE\\n (\"\n sql += ') AND\\n ('.join(tempWhere)\n sql += ')'\n if operation == 'SELECTALL' and orderBy:\n sql += '\\nORDER BY\\n '\n if type(orderBy) in (types.TupleType, types.ListType):\n orderBy = [cls._sqlFields[x] for x in orderBy]\n orderBy = ',\\n '.join(orderBy)\n else:\n orderBy = cls._sqlFields[orderBy]\n sql += orderBy\n return (sql, fields)\n\n elif operation in ('INSERT', 
'UPDATE'):\n if operation == 'UPDATE':\n sql = 'UPDATE %s SET\\n ' % cls._sqlTable\n else:\n sql = 'INSERT INTO %s (\\n ' % cls._sqlTable\n\n set = []\n fields = []\n sqlfields = []\n for (field, sqlfield) in cls._sqlFields.items():\n if operation == 'UPDATE' and field in cls._sqlPrimary:\n continue\n if sqlfield.find(cls._sqlTable + '.') == 0:\n # It's a local field, chop of the table part\n sqlfield = sqlfield[len(cls._sqlTable)+1:]\n fields.append(field)\n sqlfields.append(sqlfield)\n set.append(sqlfield + '=%s')\n if operation == 'UPDATE':\n sql += ',\\n '.join(set)\n sql += '\\nWHERE\\n '\n tempWhere = []\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n fields.append(key)\n sql += ' AND\\n '.join(tempWhere)\n else:\n sql += ',\\n '.join(sqlfields)\n sql += ')\\nVALUES (\\n '\n sql += ',\\n '.join(('%s',) * len(sqlfields))\n sql += ')'\n\n return (sql, fields)\n\n elif operation == 'DELETE':\n sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '\n if where:\n sql += \" AND\\n \".join(where)\n else:\n for key in cls._sqlPrimary:\n tempWhere = []\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n sql += ' AND\\n '.join(tempWhere)\n return (sql, )\n else:\n raise \"Unknown operation\", operation\n"
] | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
try:
import database
return database.cursor()
except:
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
if not hasattr(cls, '_cache'):
cls._cache = {}
try: # to implement 'goto' in Python.. UGH
if not cls._cache.has_key(args):
# unknown
raise "NotFound"
(ref, updated) = cls._cache[args]
realObject = ref()
if realObject is None:
# No more real references to it, dead object
raise "NotFound"
age = time.time() - updated
if age > cls._timeout:
# Too old!
raise "NotFound"
updated = time.time()
except "NotFound":
# We'll need to create it
realObject = object.__new__(cls, *args)
ref = weakref.ref(realObject)
updated = time.time()
# store a weak reference
cls._cache[args] = (ref, updated)
return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.
A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length), may be initalized by using several parameters
to this constructor. Note that the object will not be loaded
before you call load().
"""
self._values = {}
self.reset()
if not id:
self._resetID()
else:
self._setID(id)
def _setID(self, id):
"""Set the ID, ie. the values for primary keys.
id can be either a list, following the
_sqlPrimary, or some other type, that will be set
as the singleton ID (requires 1-length sqlPrimary).
"""
if type(id) in (types.ListType, types.TupleType):
try:
for key in self._sqlPrimary:
value = id[0]
self.__dict__[key] = value
id = id[1:] # rest, go revursive
except IndexError:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
elif len(self._sqlPrimary) <= 1:
# It's a simple value
key = self._sqlPrimary[0]
self.__dict__[key] = id
else:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
self._new = False
def _getID(self):
"""Get the ID values as a tuple annotated by sqlPrimary"""
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
def __getattr__(self, key):
"""Get an attribute, normally a SQL field value.
Will be called when an unknown key is to be
retrieved, ie. most likely one of our database
fields.
"""
if self._sqlFields.has_key(key):
if not self._updated:
self.load()
return self._values[key]
else:
raise AttributeError, key
def __setattr__(self, key, value):
"""Set an attribute, normally a SQL field value.
Will be called whenever something needs to be set, so
we store the value as a SQL-thingie unless the key
is not listed in sqlFields.
"""
if key not in self._sqlPrimary and self._sqlFields.has_key(key):
if not self._updated:
self.load()
self._values[key] = value
self._changed = time.time()
else:
# It's a normal thingie
self.__dict__[key] = value
def __del__(self):
"""Save the object on deletion.
Be aware of this. If you want to undo some change, use reset()
first.
Be aware of Python 2.2's garbage collector, that
might run in the background. This means that
unless you call save() changes might not
be done immediately in the database.
Not calling save() also means that you cannot catch
errors caused by wrong insertion/update (ie. wrong
datatype for a field)
"""
if not self._autosave:
return
try:
self.save()
except Exception, e:
pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded).
"""
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1
def _loadDB(self):
"""Connect to the database to load myself"""
if not self._validID():
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
self._updated = time.time()
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter.getAllIDs | python | def getAllIDs(cls, where=None, orderBy=None):
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result | Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L801-L824 | [
"def cursor(cls):\n try:\n import database\n return database.cursor()\n except:\n raise \"cursor method undefined, no database connection could be made\"\n",
" def _prepareSQL(cls, operation=\"SELECT\", where=None, selectfields=None, orderBy=None):\n \"\"\"Return a sql for the given operation.\n\n Possible operations:\n SELECT read data for this id\n SELECTALL read data for all ids\n INSERT insert data, create new id\n UPDATE update data for this id\n DELETE remove data for this id\n\n SQL will be built by data from _sqlFields, and will\n contain 0 or several %s for you to sprintf-format in later:\n\n SELECT --> len(cls._sqlPrimary)\n SELECTALL --> 0 %s\n INSERT --> len(cls._sqlFields) %s (including id)\n UPDATE --> len(cls._sqlFields) %s (including id)\n DELETE --> len(cls._sqlPrimary)\n\n (Note: INSERT and UPDATE will only change values in _sqlTable, so\n the actual number of fields for substitutions might be lower\n than len(cls._sqlFields) )\n\n For INSERT you should use cls._nextSequence() to retrieve\n a new 'id' number. Note that if your sequences are not named\n tablename_primarykey_seq (ie. for table 'blapp' with primary key\n 'john_id', sequence name blapp_john_id_seq) you must give the sequence\n name as an optional argument to _nextSequence)\n\n Additional note: cls._nextSequence() MUST be overloaded\n for multi _sqlPrimary classes. Return a tuple.\n\n Return values will always be tuples:\n SELECT --> (sql, fields)\n SELECTALL -> sql, fields)\n INSERT -> (sql, fields)\n UPDATE -> (sql, fields)\n DELETE -> (sql,) -- for consistency\n\n fields will be object properties as a list, ie. the keys from\n cls._sqlFields. The purpose of this list is to give the programmer\n an idea of which order the keys are inserted in the SQL, giving\n help for retreiving (SELECT, SELECTALL) or inserting for %s\n (INSERT, DELETE).\n\n Why? 
Well, the keys are stored in a hash, and we cannot be sure\n about the order of hash.keys() from time to time, not even with\n the same instance.\n\n Optional where-parameter applies to SELECT, SELECTALL and DELETE.\n where should be a list or string of where clauses.\n\n \"\"\"\n # Normalize parameter for later comparissions\n operation = operation.upper()\n # Convert where to a list if it is a string\n if type(where) in (types.StringType, types.UnicodeType):\n where = (where,)\n if orderBy is None:\n orderBy = cls._orderBy\n\n if operation in ('SELECT', 'SELECTALL'):\n # Get the object fields and sql fields in the same\n # order to be able to reconstruct later.\n fields = []\n sqlfields = []\n for (field, sqlfield) in cls._sqlFields.items():\n if selectfields is None or field in selectfields:\n fields.append(field)\n sqlfields.append(sqlfield)\n if not fields:\n # dirrrrrty!\n raise \"\"\"ERROR: No fields defined, cannot create SQL.\nMaybe sqlPrimary is invalid?\nFields asked: %s\nMy fields: %s\"\"\" % (selectfields, cls._sqlFields)\n\n sql = \"SELECT\\n \"\n sql += ', '.join(sqlfields)\n sql += \"\\nFROM\\n \"\n tables = cls._tables.keys()\n if not tables:\n raise \"REALITY ERROR: No tables defined\"\n sql += ', '.join(tables)\n tempWhere = [\"%s=%s\" % linkPair for linkPair in cls._sqlLinks]\n # this MUST be here.\n if operation <> 'SELECTALL':\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n if where:\n tempWhere += where\n if(tempWhere):\n # Make sure to use paranteses in case someone has used\n # ORs in the WHERE-list..\n sql += \"\\nWHERE\\n (\"\n sql += ') AND\\n ('.join(tempWhere)\n sql += ')'\n if operation == 'SELECTALL' and orderBy:\n sql += '\\nORDER BY\\n '\n if type(orderBy) in (types.TupleType, types.ListType):\n orderBy = [cls._sqlFields[x] for x in orderBy]\n orderBy = ',\\n '.join(orderBy)\n else:\n orderBy = cls._sqlFields[orderBy]\n sql += orderBy\n return (sql, fields)\n\n elif operation in ('INSERT', 
'UPDATE'):\n if operation == 'UPDATE':\n sql = 'UPDATE %s SET\\n ' % cls._sqlTable\n else:\n sql = 'INSERT INTO %s (\\n ' % cls._sqlTable\n\n set = []\n fields = []\n sqlfields = []\n for (field, sqlfield) in cls._sqlFields.items():\n if operation == 'UPDATE' and field in cls._sqlPrimary:\n continue\n if sqlfield.find(cls._sqlTable + '.') == 0:\n # It's a local field, chop of the table part\n sqlfield = sqlfield[len(cls._sqlTable)+1:]\n fields.append(field)\n sqlfields.append(sqlfield)\n set.append(sqlfield + '=%s')\n if operation == 'UPDATE':\n sql += ',\\n '.join(set)\n sql += '\\nWHERE\\n '\n tempWhere = []\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n fields.append(key)\n sql += ' AND\\n '.join(tempWhere)\n else:\n sql += ',\\n '.join(sqlfields)\n sql += ')\\nVALUES (\\n '\n sql += ',\\n '.join(('%s',) * len(sqlfields))\n sql += ')'\n\n return (sql, fields)\n\n elif operation == 'DELETE':\n sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '\n if where:\n sql += \" AND\\n \".join(where)\n else:\n for key in cls._sqlPrimary:\n tempWhere = []\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n sql += ' AND\\n '.join(tempWhere)\n return (sql, )\n else:\n raise \"Unknown operation\", operation\n"
] | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
try:
import database
return database.cursor()
except:
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
if not hasattr(cls, '_cache'):
cls._cache = {}
try: # to implement 'goto' in Python.. UGH
if not cls._cache.has_key(args):
# unknown
raise "NotFound"
(ref, updated) = cls._cache[args]
realObject = ref()
if realObject is None:
# No more real references to it, dead object
raise "NotFound"
age = time.time() - updated
if age > cls._timeout:
# Too old!
raise "NotFound"
updated = time.time()
except "NotFound":
# We'll need to create it
realObject = object.__new__(cls, *args)
ref = weakref.ref(realObject)
updated = time.time()
# store a weak reference
cls._cache[args] = (ref, updated)
return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.
A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length), may be initalized by using several parameters
to this constructor. Note that the object will not be loaded
before you call load().
"""
self._values = {}
self.reset()
if not id:
self._resetID()
else:
self._setID(id)
def _setID(self, id):
"""Set the ID, ie. the values for primary keys.
id can be either a list, following the
_sqlPrimary, or some other type, that will be set
as the singleton ID (requires 1-length sqlPrimary).
"""
if type(id) in (types.ListType, types.TupleType):
try:
for key in self._sqlPrimary:
value = id[0]
self.__dict__[key] = value
id = id[1:] # rest, go revursive
except IndexError:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
elif len(self._sqlPrimary) <= 1:
# It's a simple value
key = self._sqlPrimary[0]
self.__dict__[key] = id
else:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
self._new = False
def _getID(self):
"""Get the ID values as a tuple annotated by sqlPrimary"""
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
def __getattr__(self, key):
"""Get an attribute, normally a SQL field value.
Will be called when an unknown key is to be
retrieved, ie. most likely one of our database
fields.
"""
if self._sqlFields.has_key(key):
if not self._updated:
self.load()
return self._values[key]
else:
raise AttributeError, key
def __setattr__(self, key, value):
"""Set an attribute, normally a SQL field value.
Will be called whenever something needs to be set, so
we store the value as a SQL-thingie unless the key
is not listed in sqlFields.
"""
if key not in self._sqlPrimary and self._sqlFields.has_key(key):
if not self._updated:
self.load()
self._values[key] = value
self._changed = time.time()
else:
# It's a normal thingie
self.__dict__[key] = value
def __del__(self):
"""Save the object on deletion.
Be aware of this. If you want to undo some change, use reset()
first.
Be aware of Python 2.2's garbage collector, that
might run in the background. This means that
unless you call save() changes might not
be done immediately in the database.
Not calling save() also means that you cannot catch
errors caused by wrong insertion/update (ie. wrong
datatype for a field)
"""
if not self._autosave:
return
try:
self.save()
except Exception, e:
pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded).
"""
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1
def _loadDB(self):
"""Connect to the database to load myself"""
if not self._validID():
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
self._updated = time.time()
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
useObject=None, orderBy=None):
"""Retrieve every object as an iterator.
Possibly limitted by the where list of clauses that will be
AND-ed.
Since an iterator is returned, only ``buffer`` rows are loaded
from the database at once. This is useful if you need
to process all objects.
If useObject is given, this object is returned each time, but
with new data. This can be used to avoid creating many new
objects when only one object is needed each time.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
fetchedAt = time.time()
curs.execute(sql)
# We might start eating memory at this point
def getNext(rows=[]):
forgetter = cls
if not rows:
rows += curs.fetchmany(buffer)
if not rows:
curs.close()
return None
row = rows[0]
del rows[0]
try:
idPositions = [fields.index(key) for key in cls._sqlPrimary]
except ValueError:
raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
ids = [row[pos] for pos in idPositions]
if useObject:
result = useObject
result.reset()
result._setID(ids)
else:
result = forgetter(*ids)
result._loadFromRow(row, fields, curs)
result._updated = fetchedAt
return result
return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter.getAllText | python | def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result | Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L828-L852 | [
"def cursor(cls):\n try:\n import database\n return database.cursor()\n except:\n raise \"cursor method undefined, no database connection could be made\"\n",
" def _prepareSQL(cls, operation=\"SELECT\", where=None, selectfields=None, orderBy=None):\n \"\"\"Return a sql for the given operation.\n\n Possible operations:\n SELECT read data for this id\n SELECTALL read data for all ids\n INSERT insert data, create new id\n UPDATE update data for this id\n DELETE remove data for this id\n\n SQL will be built by data from _sqlFields, and will\n contain 0 or several %s for you to sprintf-format in later:\n\n SELECT --> len(cls._sqlPrimary)\n SELECTALL --> 0 %s\n INSERT --> len(cls._sqlFields) %s (including id)\n UPDATE --> len(cls._sqlFields) %s (including id)\n DELETE --> len(cls._sqlPrimary)\n\n (Note: INSERT and UPDATE will only change values in _sqlTable, so\n the actual number of fields for substitutions might be lower\n than len(cls._sqlFields) )\n\n For INSERT you should use cls._nextSequence() to retrieve\n a new 'id' number. Note that if your sequences are not named\n tablename_primarykey_seq (ie. for table 'blapp' with primary key\n 'john_id', sequence name blapp_john_id_seq) you must give the sequence\n name as an optional argument to _nextSequence)\n\n Additional note: cls._nextSequence() MUST be overloaded\n for multi _sqlPrimary classes. Return a tuple.\n\n Return values will always be tuples:\n SELECT --> (sql, fields)\n SELECTALL -> sql, fields)\n INSERT -> (sql, fields)\n UPDATE -> (sql, fields)\n DELETE -> (sql,) -- for consistency\n\n fields will be object properties as a list, ie. the keys from\n cls._sqlFields. The purpose of this list is to give the programmer\n an idea of which order the keys are inserted in the SQL, giving\n help for retreiving (SELECT, SELECTALL) or inserting for %s\n (INSERT, DELETE).\n\n Why? 
Well, the keys are stored in a hash, and we cannot be sure\n about the order of hash.keys() from time to time, not even with\n the same instance.\n\n Optional where-parameter applies to SELECT, SELECTALL and DELETE.\n where should be a list or string of where clauses.\n\n \"\"\"\n # Normalize parameter for later comparissions\n operation = operation.upper()\n # Convert where to a list if it is a string\n if type(where) in (types.StringType, types.UnicodeType):\n where = (where,)\n if orderBy is None:\n orderBy = cls._orderBy\n\n if operation in ('SELECT', 'SELECTALL'):\n # Get the object fields and sql fields in the same\n # order to be able to reconstruct later.\n fields = []\n sqlfields = []\n for (field, sqlfield) in cls._sqlFields.items():\n if selectfields is None or field in selectfields:\n fields.append(field)\n sqlfields.append(sqlfield)\n if not fields:\n # dirrrrrty!\n raise \"\"\"ERROR: No fields defined, cannot create SQL.\nMaybe sqlPrimary is invalid?\nFields asked: %s\nMy fields: %s\"\"\" % (selectfields, cls._sqlFields)\n\n sql = \"SELECT\\n \"\n sql += ', '.join(sqlfields)\n sql += \"\\nFROM\\n \"\n tables = cls._tables.keys()\n if not tables:\n raise \"REALITY ERROR: No tables defined\"\n sql += ', '.join(tables)\n tempWhere = [\"%s=%s\" % linkPair for linkPair in cls._sqlLinks]\n # this MUST be here.\n if operation <> 'SELECTALL':\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n if where:\n tempWhere += where\n if(tempWhere):\n # Make sure to use paranteses in case someone has used\n # ORs in the WHERE-list..\n sql += \"\\nWHERE\\n (\"\n sql += ') AND\\n ('.join(tempWhere)\n sql += ')'\n if operation == 'SELECTALL' and orderBy:\n sql += '\\nORDER BY\\n '\n if type(orderBy) in (types.TupleType, types.ListType):\n orderBy = [cls._sqlFields[x] for x in orderBy]\n orderBy = ',\\n '.join(orderBy)\n else:\n orderBy = cls._sqlFields[orderBy]\n sql += orderBy\n return (sql, fields)\n\n elif operation in ('INSERT', 
'UPDATE'):\n if operation == 'UPDATE':\n sql = 'UPDATE %s SET\\n ' % cls._sqlTable\n else:\n sql = 'INSERT INTO %s (\\n ' % cls._sqlTable\n\n set = []\n fields = []\n sqlfields = []\n for (field, sqlfield) in cls._sqlFields.items():\n if operation == 'UPDATE' and field in cls._sqlPrimary:\n continue\n if sqlfield.find(cls._sqlTable + '.') == 0:\n # It's a local field, chop of the table part\n sqlfield = sqlfield[len(cls._sqlTable)+1:]\n fields.append(field)\n sqlfields.append(sqlfield)\n set.append(sqlfield + '=%s')\n if operation == 'UPDATE':\n sql += ',\\n '.join(set)\n sql += '\\nWHERE\\n '\n tempWhere = []\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n fields.append(key)\n sql += ' AND\\n '.join(tempWhere)\n else:\n sql += ',\\n '.join(sqlfields)\n sql += ')\\nVALUES (\\n '\n sql += ',\\n '.join(('%s',) * len(sqlfields))\n sql += ')'\n\n return (sql, fields)\n\n elif operation == 'DELETE':\n sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '\n if where:\n sql += \" AND\\n \".join(where)\n else:\n for key in cls._sqlPrimary:\n tempWhere = []\n for key in cls._sqlPrimary:\n tempWhere.append(cls._sqlFields[key] + \"=%s\")\n sql += ' AND\\n '.join(tempWhere)\n return (sql, )\n else:\n raise \"Unknown operation\", operation\n"
] | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
try:
import database
return database.cursor()
except:
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
if not hasattr(cls, '_cache'):
cls._cache = {}
try: # to implement 'goto' in Python.. UGH
if not cls._cache.has_key(args):
# unknown
raise "NotFound"
(ref, updated) = cls._cache[args]
realObject = ref()
if realObject is None:
# No more real references to it, dead object
raise "NotFound"
age = time.time() - updated
if age > cls._timeout:
# Too old!
raise "NotFound"
updated = time.time()
except "NotFound":
# We'll need to create it
realObject = object.__new__(cls, *args)
ref = weakref.ref(realObject)
updated = time.time()
# store a weak reference
cls._cache[args] = (ref, updated)
return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.
A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length), may be initalized by using several parameters
to this constructor. Note that the object will not be loaded
before you call load().
"""
self._values = {}
self.reset()
if not id:
self._resetID()
else:
self._setID(id)
def _setID(self, id):
"""Set the ID, ie. the values for primary keys.
id can be either a list, following the
_sqlPrimary, or some other type, that will be set
as the singleton ID (requires 1-length sqlPrimary).
"""
if type(id) in (types.ListType, types.TupleType):
try:
for key in self._sqlPrimary:
value = id[0]
self.__dict__[key] = value
id = id[1:] # rest, go revursive
except IndexError:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
elif len(self._sqlPrimary) <= 1:
# It's a simple value
key = self._sqlPrimary[0]
self.__dict__[key] = id
else:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
self._new = False
def _getID(self):
"""Get the ID values as a tuple annotated by sqlPrimary"""
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
def __getattr__(self, key):
"""Get an attribute, normally a SQL field value.
Will be called when an unknown key is to be
retrieved, ie. most likely one of our database
fields.
"""
if self._sqlFields.has_key(key):
if not self._updated:
self.load()
return self._values[key]
else:
raise AttributeError, key
def __setattr__(self, key, value):
"""Set an attribute, normally a SQL field value.
Will be called whenever something needs to be set, so
we store the value as a SQL-thingie unless the key
is not listed in sqlFields.
"""
if key not in self._sqlPrimary and self._sqlFields.has_key(key):
if not self._updated:
self.load()
self._values[key] = value
self._changed = time.time()
else:
# It's a normal thingie
self.__dict__[key] = value
def __del__(self):
"""Save the object on deletion.
Be aware of this. If you want to undo some change, use reset()
first.
Be aware of Python 2.2's garbage collector, that
might run in the background. This means that
unless you call save() changes might not
be done immediately in the database.
Not calling save() also means that you cannot catch
errors caused by wrong insertion/update (ie. wrong
datatype for a field)
"""
if not self._autosave:
return
try:
self.save()
except Exception, e:
pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded).
"""
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1
def _loadDB(self):
"""Connect to the database to load myself"""
if not self._validID():
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
self._updated = time.time()
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
useObject=None, orderBy=None):
"""Retrieve every object as an iterator.
Possibly limitted by the where list of clauses that will be
AND-ed.
Since an iterator is returned, only ``buffer`` rows are loaded
from the database at once. This is useful if you need
to process all objects.
If useObject is given, this object is returned each time, but
with new data. This can be used to avoid creating many new
objects when only one object is needed each time.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
fetchedAt = time.time()
curs.execute(sql)
# We might start eating memory at this point
def getNext(rows=[]):
forgetter = cls
if not rows:
rows += curs.fetchmany(buffer)
if not rows:
curs.close()
return None
row = rows[0]
del rows[0]
try:
idPositions = [fields.index(key) for key in cls._sqlPrimary]
except ValueError:
raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
ids = [row[pos] for pos in idPositions]
if useObject:
result = useObject
result.reset()
result._setID(ids)
else:
result = forgetter(*ids)
result._loadFromRow(row, fields, curs)
result._updated = fetchedAt
return result
return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
getAllText = classmethod(getAllText)
def getChildren(self, forgetter, field=None, where=None, orderBy=None):
"""Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class.
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | Forgetter.getChildren | python | def getChildren(self, forgetter, field=None, where=None, orderBy=None):
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAll(whereList, orderBy=orderBy) | Return the children that links to me.
That means that I have to be listed in their _userClasses
somehow. If field is specified, that field in my children is
used as the pointer to me. Use this if you have multiple fields
referring to my class. | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L856-L880 | [
"def _getID(self):\n \"\"\"Get the ID values as a tuple annotated by sqlPrimary\"\"\"\n id = []\n for key in self._sqlPrimary:\n value = self.__dict__[key]\n if isinstance(value, Forgetter):\n # It's another object, we store only the ID\n if value._new:\n # It's a new object too, it must be saved!\n value.save()\n try:\n (value,) = value._getID()\n except:\n raise \"Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s \" % (key, self.__class__, value.__class__, value)\n id.append(value)\n return id\n"
] | class Forgetter(object):
"""SQL to object database wrapper.
Given a welldefined database, by subclassing Forgetter
and supplying some attributes, you may wrap your SQL tables
into objects that are easier to program with.
You must define all fields in the database table that you want
to expose, and you may refine the names to suit your
object oriented programming style. (ie. customerID -> customer)
Objects will be created without loading from database,
loading will occur when you try to read or write some of the
attributes defined as a SQL field. If you change some attributes the
object will be saved to the database by save() or garbage
collection. (be aware that GC in Py >= 2.2 is not immediate)
If you want to create new objects, just supply them with blank
ID-fields, and _nextSequence() will be called to fetch a new
ID used for insertion.
The rule is one class pr. table, although it is possible
to join several table into one class, as long as the
identificator is unique.
By defining _userClasses you can resolve links to other
tables, a field in this table would be an id in another
table, ie. another class. In practical use this means that
behind attributes pointing to other classes (tables)
you will find instances of that class.
Short example usage of forgetter objects::
# Process all
for user in User.getAllIterator():
# Access attributes
print user.name
print "Employed at:"
# Access the Employed-class/table
print user.employed.name, user.employed.address
# fire him, setting employed reference to SQL NULL
user.employed = None
# Retrieve some ID
shop = Shop(552)
shop.name = 'Corrected name'
shop.save() # Save now instead of waiting for garbage collactor
# Include SQL where-statements in selections
myIDs = User.getAllIDs(("name='soiland'", 'salary > 5'))
Requirements:
The attributes 'cursor' and '_dbModule' should be set from the
outside. The cursor should be DB 2.0 complient, preferably with
autocommit turned on. (Transactions are not within the scope of this
module yet)
Python 2.2 (iterators, methodclasses)
"""
# How long to keep objects in cache?
_timeout = 60
# Will be True once prepare() is called
_prepared = False
# The default table containing our fields
# _sqlTable = 'shop'
_sqlTable = ''
# A mapping between our fields and the database fields.
#
# You must include all fields needed here. You may specify
# other names if you want to make the sql name more approriate
# for object oriented programming. (Like calling a field 'location'
# instead of 'location_id', because we wrap the location in a seperate
# object and don't really care about the id)
#
# You may reference to other tables with a dot, all
# other db fields will be related to _sqlTable.
# If you reference other tables, don't forget to
# modify _sqlLinks.
#
# _sqlFields = {
# 'id': 'shop_id',
# 'name': 'name',
# 'location': 'location_id',
# 'chain': 'shop_chain_id',
# 'address': 'address.address_id',
# }
_sqlFields = {}
# A list of attribute names (in the object, not database)
# that are the primary key in the database. Normally
# 'id' is sufficient. It is legal to have
# multiple fields as primary key, but it won't work
# properly with _userClasses and getChildren().
#
# If your table is a link table or something, ALL fields
# should be in _sqlPrimary. (all fields are needed to define
# a unique row to be deleted/updated)
_sqlPrimary = ('id',)
# When using several tables, you should include a
# 'link' statement, displaying which fields link the
# two tables together. Note that these are sql names.
# _sqlLinks = (
# ('shop_id', 'address.shop_id'),
# )
_sqlLinks = ()
# The name of the sequence used by _nextSequence
# - if None, a guess will be made based on _sqlTable
# and _sqlPrimary.
_sqlSequence = None
# Order by this attribute by default, if specified
# _orderBy = 'name' - this could also be a tuple
_orderBy = None
# _userClasses can be used to trigger creation of a field
# with an instance of the class. The given database field
# will be sent to the constructor as an objectID
# (ie. as self.id in this object) (ie. the class does not
# neccessary need to be a subclass of Forgetter)
#
# This means that the attribute will be an instance of that
# class, not the ID. The object will not be loaded from the
# database until you try to read any of it's attributes,
# though. (to prevent unneccessary database overload and
# recursions)
#
# Notice that _userClasses must be a name resolvable, ie.
# from the same module as your other classes.
# _userClasses = {
# 'location': 'Location',
# 'chain': 'Chain',
# 'address': 'Address',
# }
_userClasses = {}
# If you want userClasses to work properly with strings instead of
# instances, you must also 'prepare' your classes to resolve the
# names. This must be done from the same module you are defining the
# classes: forgetSQL.prepareClasses(locals())
# A list of fields that are suitable for a textual
# representation (typical a one liner).
#
# Fields will be joint together with spaces or
# simular.
# _shortView = ('name')
_shortView = ()
# Description for the fields (ie. labels)
# Note that these fields will be translated with the _ function.
# If a field is undescribe, a capitalized version of the field name
# will be presented.
#_descriptions = {
# 'name': 'Full name',
# 'description': 'Description of thingie',
#}
_descriptions = {}
def cursor(cls):
try:
import database
return database.cursor()
except:
raise "cursor method undefined, no database connection could be made"
cursor = classmethod(cursor)
# a reference to the database module object used, ie.
# MySQLdb, psycopg etc.
# Use MyClass._dbModule = MySQLdb - not "MySQLdb"
#
_dbModule = None
# By default, autosave change when object is garbage collected.
# NOTE: This could give weird side affects as you need to keep track
# on when objects are garbage collected. Instead, use .save()
# explicitely when you want to save. By default, for backward
# compatibility, autosave is on.
_autosave = True
def __new__(cls, *args):
if not hasattr(cls, '_cache'):
cls._cache = {}
try: # to implement 'goto' in Python.. UGH
if not cls._cache.has_key(args):
# unknown
raise "NotFound"
(ref, updated) = cls._cache[args]
realObject = ref()
if realObject is None:
# No more real references to it, dead object
raise "NotFound"
age = time.time() - updated
if age > cls._timeout:
# Too old!
raise "NotFound"
updated = time.time()
except "NotFound":
# We'll need to create it
realObject = object.__new__(cls, *args)
ref = weakref.ref(realObject)
updated = time.time()
# store a weak reference
cls._cache[args] = (ref, updated)
return realObject
def __init__(self, *id):
"""Initialize, possibly with a database id.
A forgetter with multivalue primary key (ie. _sqlPrimary more
than 1 in length), may be initalized by using several parameters
to this constructor. Note that the object will not be loaded
before you call load().
"""
self._values = {}
self.reset()
if not id:
self._resetID()
else:
self._setID(id)
def _setID(self, id):
"""Set the ID, ie. the values for primary keys.
id can be either a list, following the
_sqlPrimary, or some other type, that will be set
as the singleton ID (requires 1-length sqlPrimary).
"""
if type(id) in (types.ListType, types.TupleType):
try:
for key in self._sqlPrimary:
value = id[0]
self.__dict__[key] = value
id = id[1:] # rest, go revursive
except IndexError:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
elif len(self._sqlPrimary) <= 1:
# It's a simple value
key = self._sqlPrimary[0]
self.__dict__[key] = id
else:
raise 'Not enough id fields, required: %s' % len(self._sqlPrimary)
self._new = False
def _getID(self):
"""Get the ID values as a tuple annotated by sqlPrimary"""
id = []
for key in self._sqlPrimary:
value = self.__dict__[key]
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Part %s of %s primary key is a reference to %s, with multiple-primary-key %s " % (key, self.__class__, value.__class__, value)
id.append(value)
return id
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True
def _validID(self):
"""Is all ID fields with values, ie. not None?"""
return not None in self._getID()
def __getattr__(self, key):
"""Get an attribute, normally a SQL field value.
Will be called when an unknown key is to be
retrieved, ie. most likely one of our database
fields.
"""
if self._sqlFields.has_key(key):
if not self._updated:
self.load()
return self._values[key]
else:
raise AttributeError, key
def __setattr__(self, key, value):
"""Set an attribute, normally a SQL field value.
Will be called whenever something needs to be set, so
we store the value as a SQL-thingie unless the key
is not listed in sqlFields.
"""
if key not in self._sqlPrimary and self._sqlFields.has_key(key):
if not self._updated:
self.load()
self._values[key] = value
self._changed = time.time()
else:
# It's a normal thingie
self.__dict__[key] = value
def __del__(self):
"""Save the object on deletion.
Be aware of this. If you want to undo some change, use reset()
first.
Be aware of Python 2.2's garbage collector, that
might run in the background. This means that
unless you call save() changes might not
be done immediately in the database.
Not calling save() also means that you cannot catch
errors caused by wrong insertion/update (ie. wrong
datatype for a field)
"""
if not self._autosave:
return
try:
self.save()
except Exception, e:
pass
def _checkTable(cls, field):
"""Split a field from _sqlFields into table, column.
Registers the table in cls._tables, and returns a fully
qualified table.column (default table: cls._sqlTable)
"""
# Get table part
try:
(table, field) = field.split('.')
except ValueError:
table = cls._sqlTable
# clean away white space
table = table.strip()
field = field.strip()
# register table
cls._tables[table] = None
# and return in proper shape
return table + '.' + field
_checkTable = classmethod(_checkTable)
def reset(self):
"""Reset all fields, almost like creating a new object.
Note: Forgets changes you have made not saved to database!
(Remember: Others might reference the object already, expecting
something else!) Override this method if you add properties not
defined in _sqlFields.
"""
self._resetID()
self._new = None
self._updated = None
self._changed = None
self._values = {}
# initially create fields
for field in self._sqlFields.keys():
self._values[field] = None
def load(self, id=None):
"""Load from database. Old values will be discarded."""
if id is not None:
# We are asked to change our ID to something else
self.reset()
self._setID(id)
if not self._new and self._validID():
self._loadDB()
self._updated = time.time()
def save(self):
"""Save to database if anything has changed since last load"""
if ( self._new or
(self._validID() and self._changed) or
(self._updated and self._changed > self._updated) ):
# Don't save if we have not loaded existing data!
self._saveDB()
return True
return False
def delete(self):
"""Mark this object for deletion in the database.
The object will then be reset and ready for use
again with a new id.
"""
(sql, ) = self._prepareSQL("DELETE")
curs = self.cursor()
curs.execute(sql, self._getID())
curs.close()
self.reset()
def _prepareSQL(cls, operation="SELECT", where=None, selectfields=None, orderBy=None):
"""Return a sql for the given operation.
Possible operations:
SELECT read data for this id
SELECTALL read data for all ids
INSERT insert data, create new id
UPDATE update data for this id
DELETE remove data for this id
SQL will be built by data from _sqlFields, and will
contain 0 or several %s for you to sprintf-format in later:
SELECT --> len(cls._sqlPrimary)
SELECTALL --> 0 %s
INSERT --> len(cls._sqlFields) %s (including id)
UPDATE --> len(cls._sqlFields) %s (including id)
DELETE --> len(cls._sqlPrimary)
(Note: INSERT and UPDATE will only change values in _sqlTable, so
the actual number of fields for substitutions might be lower
than len(cls._sqlFields) )
For INSERT you should use cls._nextSequence() to retrieve
a new 'id' number. Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary key
'john_id', sequence name blapp_john_id_seq) you must give the sequence
name as an optional argument to _nextSequence)
Additional note: cls._nextSequence() MUST be overloaded
for multi _sqlPrimary classes. Return a tuple.
Return values will always be tuples:
SELECT --> (sql, fields)
SELECTALL -> sql, fields)
INSERT -> (sql, fields)
UPDATE -> (sql, fields)
DELETE -> (sql,) -- for consistency
fields will be object properties as a list, ie. the keys from
cls._sqlFields. The purpose of this list is to give the programmer
an idea of which order the keys are inserted in the SQL, giving
help for retreiving (SELECT, SELECTALL) or inserting for %s
(INSERT, DELETE).
Why? Well, the keys are stored in a hash, and we cannot be sure
about the order of hash.keys() from time to time, not even with
the same instance.
Optional where-parameter applies to SELECT, SELECTALL and DELETE.
where should be a list or string of where clauses.
"""
# Normalize parameter for later comparissions
operation = operation.upper()
# Convert where to a list if it is a string
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if orderBy is None:
orderBy = cls._orderBy
if operation in ('SELECT', 'SELECTALL'):
# Get the object fields and sql fields in the same
# order to be able to reconstruct later.
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if selectfields is None or field in selectfields:
fields.append(field)
sqlfields.append(sqlfield)
if not fields:
# dirrrrrty!
raise """ERROR: No fields defined, cannot create SQL.
Maybe sqlPrimary is invalid?
Fields asked: %s
My fields: %s""" % (selectfields, cls._sqlFields)
sql = "SELECT\n "
sql += ', '.join(sqlfields)
sql += "\nFROM\n "
tables = cls._tables.keys()
if not tables:
raise "REALITY ERROR: No tables defined"
sql += ', '.join(tables)
tempWhere = ["%s=%s" % linkPair for linkPair in cls._sqlLinks]
# this MUST be here.
if operation <> 'SELECTALL':
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
if where:
tempWhere += where
if(tempWhere):
# Make sure to use paranteses in case someone has used
# ORs in the WHERE-list..
sql += "\nWHERE\n ("
sql += ') AND\n ('.join(tempWhere)
sql += ')'
if operation == 'SELECTALL' and orderBy:
sql += '\nORDER BY\n '
if type(orderBy) in (types.TupleType, types.ListType):
orderBy = [cls._sqlFields[x] for x in orderBy]
orderBy = ',\n '.join(orderBy)
else:
orderBy = cls._sqlFields[orderBy]
sql += orderBy
return (sql, fields)
elif operation in ('INSERT', 'UPDATE'):
if operation == 'UPDATE':
sql = 'UPDATE %s SET\n ' % cls._sqlTable
else:
sql = 'INSERT INTO %s (\n ' % cls._sqlTable
set = []
fields = []
sqlfields = []
for (field, sqlfield) in cls._sqlFields.items():
if operation == 'UPDATE' and field in cls._sqlPrimary:
continue
if sqlfield.find(cls._sqlTable + '.') == 0:
# It's a local field, chop of the table part
sqlfield = sqlfield[len(cls._sqlTable)+1:]
fields.append(field)
sqlfields.append(sqlfield)
set.append(sqlfield + '=%s')
if operation == 'UPDATE':
sql += ',\n '.join(set)
sql += '\nWHERE\n '
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
fields.append(key)
sql += ' AND\n '.join(tempWhere)
else:
sql += ',\n '.join(sqlfields)
sql += ')\nVALUES (\n '
sql += ',\n '.join(('%s',) * len(sqlfields))
sql += ')'
return (sql, fields)
elif operation == 'DELETE':
sql = 'DELETE FROM ' + cls._sqlTable + ' WHERE '
if where:
sql += " AND\n ".join(where)
else:
for key in cls._sqlPrimary:
tempWhere = []
for key in cls._sqlPrimary:
tempWhere.append(cls._sqlFields[key] + "=%s")
sql += ' AND\n '.join(tempWhere)
return (sql, )
else:
raise "Unknown operation", operation
_prepareSQL = classmethod(_prepareSQL)
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) <> 1:
raise "Could not guess sequence name for multi-primary-key"
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value
_nextSequence = classmethod(_nextSequence)
def _loadFromRow(self, result, fields, cursor):
"""Load from a database row, described by fields.
``fields`` should be the attribute names that
will be set. Note that userclasses will be
created (but not loaded).
"""
position = 0
for elem in fields:
value = result[position]
valueType = cursor.description[position][1]
if hasattr(self._dbModule, 'BOOLEAN') and \
valueType == self._dbModule.BOOLEAN and \
(value is not True or value is not False):
# convert to a python boolean
value = value and True or False
if value and self._userClasses.has_key(elem):
userClass = self._userClasses[elem]
# create an instance
value = userClass(value)
self._values[elem] = value
position += 1
def _loadDB(self):
"""Connect to the database to load myself"""
if not self._validID():
raise NotFound, self._getID()
(sql, fields) = self._prepareSQL("SELECT")
curs = self.cursor()
curs.execute(sql, self._getID())
result = curs.fetchone()
if not result:
curs.close()
raise NotFound, self._getID()
self._loadFromRow(result, fields, curs)
curs.close()
self._updated = time.time()
def _saveDB(self):
"""Insert or update into the database.
Note that every field will be updated, not just the changed
one.
"""
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
if not self._validID():
self._setID(self._nextSequence())
# Note that we assign this ID to our self
# BEFORE possibly saving any of our attribute
# objects that might be new as well. This means
# that they might have references to us, as long
# as the database does not require our existence
# yet.
#
# Since mysql does not have Sequences, this will
# not work as smoothly there. See class
# MysqlForgetter below.
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
# First some dirty datatype hacks
if DateTime and type(value) == DateTime.DateTimeType:
# stupid psycopg does not support it's own return type..
# lovely..
value = str(value)
if DateTime and type(value) == DateTime.DateTimeDeltaType:
# Format delta as days, hours, minutes seconds
# NOTE: includes value.second directly to get the
# whole floating number
value = value.strftime("%d %H:%M:") + str(value.second)
if value is True or value is False:
# We must store booleans as 't' and 'f' ...
value = value and 't' or 'f'
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Unsupported: Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
cursor.close()
self._new = False
self._changed = None
def getAll(cls, where=None, orderBy=None):
"""Retrieve all the objects.
If a list of ``where`` clauses are given, they will be AND-ed
and will limit the search.
This will not load everything out from the database, but will
create a large amount of objects with only the ID inserted. The
data will be loaded from the objects when needed by the regular
load()-autocall.
"""
ids = cls.getAllIDs(where, orderBy=orderBy)
# Instansiate a lot of them
if len(cls._sqlPrimary) > 1:
return [cls(*id) for id in ids]
else:
return [cls(id) for id in ids]
getAll = classmethod(getAll)
def getAllIterator(cls, where=None, buffer=100,
useObject=None, orderBy=None):
"""Retrieve every object as an iterator.
Possibly limitted by the where list of clauses that will be
AND-ed.
Since an iterator is returned, only ``buffer`` rows are loaded
from the database at once. This is useful if you need
to process all objects.
If useObject is given, this object is returned each time, but
with new data. This can be used to avoid creating many new
objects when only one object is needed each time.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
fetchedAt = time.time()
curs.execute(sql)
# We might start eating memory at this point
def getNext(rows=[]):
forgetter = cls
if not rows:
rows += curs.fetchmany(buffer)
if not rows:
curs.close()
return None
row = rows[0]
del rows[0]
try:
idPositions = [fields.index(key) for key in cls._sqlPrimary]
except ValueError:
raise "Bad sqlPrimary, should be a list or tuple: %s" % cls._sqlPrimary
ids = [row[pos] for pos in idPositions]
if useObject:
result = useObject
result.reset()
result._setID(ids)
else:
result = forgetter(*ids)
result._loadFromRow(row, fields, curs)
result._updated = fetchedAt
return result
return iter(getNext, None)
getAllIterator = classmethod(getAllIterator)
def getAllIDs(cls, where=None, orderBy=None):
"""Retrive all the IDs, possibly matching the where clauses.
Where should be some list of where clauses that will be joined
with AND). Note that the result might be tuples if this table
has a multivalue _sqlPrimary.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where,
cls._sqlPrimary, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
result.append((ids))
return result
getAllIDs = classmethod(getAllIDs)
def getAllText(cls, where=None, SEPERATOR=' ', orderBy=None):
"""Retrieve a list of of all possible instances of this class.
The list is composed of tuples in the format (id, description) -
where description is a string composed by the fields from
cls._shortView, joint with SEPERATOR.
"""
(sql, fields) = cls._prepareSQL("SELECTALL", where, orderBy=orderBy)
curs = cls.cursor()
curs.execute(sql)
# We might start eating memory at this point
rows = curs.fetchall()
curs.close()
result = []
idPositions = [fields.index(key) for key in cls._sqlPrimary]
shortPos = [fields.index(short) for short in cls._shortView]
for row in rows:
ids = [row[pos] for pos in idPositions]
if len(idPositions) > 1:
ids = tuple(ids)
else:
ids = ids[0]
text = SEPERATOR.join([str(row[pos]) for pos in shortPos])
result.append((ids, text))
return result
getAllText = classmethod(getAllText)
def getChildrenIterator(self, forgetter, field=None, where=None,
orderBy=None, useObject=None):
"""Like getChildren, except that it returns an
iterator, like getAllIterator. An iterator should
"""
if type(where) in (types.StringType, types.UnicodeType):
where = (where,)
if not field:
for (i_field, i_class) in forgetter._userClasses.items():
if isinstance(self, i_class):
field = i_field
break # first one found is ok :=)
if not field:
raise "No field found, check forgetter's _userClasses"
sqlname = forgetter._sqlFields[field]
myID = self._getID()[0] # assuming single-primary !
whereList = ["%s='%s'" % (sqlname, myID)]
if where:
whereList.extend(where)
return forgetter.getAllIterator(whereList, useObject=useObject,
orderBy=orderBy)
def __repr__(self):
return self.__class__.__name__ + ' %s' % self._getID()
def __str__(self):
shortView = self._shortView or self._sqlPrimary
short = [str(getattr(self, short)) for short in shortView]
text = ', '.join(short)
# return repr(self) + ': ' + text
return text
def __eq__(self, obj):
"""Simple comparsion of objects."""
return self.__class__.__name__ == obj.__class__.__name__ \
and self._getID() == obj._getID()
|
stain/forgetSQL | lib/forgetSQL.py | MysqlForgetter._saveDB | python | def _saveDB(self):
# We're a "fresh" copy now
self._updated = time.time()
if self._new:
operation = 'INSERT'
else:
operation = 'UPDATE'
(sql, fields) = self._prepareSQL(operation)
values = []
for field in fields:
value = getattr(self, field)
if isinstance(value, Forgetter):
# It's another object, we store only the ID
if value._new:
# It's a new object too, it must be saved!
value.save()
try:
(value,) = value._getID()
except:
raise "Can't reference multiple-primary-key: %s" % value
values.append(value)
cursor = self.cursor()
cursor.execute(sql, values)
# cursor.commit()
if not self._validID():
if not len(self._getID()) == 1:
raise "Can't retrieve auto-inserted ID for multiple-primary-key"
# Here's the mysql magic to get the new ID
self._setID(cursor.insert_id())
cursor.close()
self._new = False | Overloaded - we don't have nextval() in mysql | train | https://github.com/stain/forgetSQL/blob/2e13f983020b121fd75a95fcafce3ea75573fb6b/lib/forgetSQL.py#L925-L957 | null | class MysqlForgetter(Forgetter):
"""MySQL-compatible Forgetter"""
|
schapman1974/tinymongo | setup.py | parse_md_to_rst | python | def parse_md_to_rst(file):
try:
from m2r import parse_from_file
return parse_from_file(file).replace(
"artwork/", "http://198.27.119.65/"
)
except ImportError:
# m2r may not be installed in user environment
return read(file) | Read Markdown file and convert to ReStructured Text. | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/setup.py#L14-L23 | [
"def read(*names, **kwargs):\n \"\"\"Read a file.\"\"\"\n return io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get('encoding', 'utf8')\n ).read()\n"
] | import io
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def read(*names, **kwargs):
"""Read a file."""
return io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
class PyTest(TestCommand):
"""PyTest cmdclass hook for test-at-buildtime functionality
http://doc.pytest.org/en/latest/goodpractices.html#manual-integration
"""
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = [
'tests/',
'-rx'
] #load defaults here
def run_tests(self):
import shlex
#import here, cause outside the eggs aren't loaded
import pytest
pytest_commands = []
try: #read commandline
pytest_commands = shlex.split(self.pytest_args)
except AttributeError: #use defaults
pytest_commands = self.pytest_args
errno = pytest.main(pytest_commands)
exit(errno)
setup(
name='tinymongo',
packages=find_packages(),
version='0.2.1',
description='A flat file drop in replacement for mongodb. Requires Tinydb',
author='Stephen Chapman, Jason Jones',
author_email='schapman1974@gmail.com',
url='https://github.com/schapman1974/tinymongo',
download_url='https://github.com/schapman1974/tinymongo/archive/master.zip',
keywords=['mongodb', 'drop-in', 'database', 'tinydb'],
long_description=parse_md_to_rst("README.md"),
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=[
'tinydb>=3.2.1',
'tinydb_serialization>=1.0.4',
'pymongo>=3.4.0'
],
tests_require=[
'pytest>=3.2.0',
'py>=1.4.33'
],
cmdclass={
'test':PyTest
}
)
|
schapman1974/tinymongo | tinymongo/results.py | DeleteResult.deleted_count | python | def deleted_count(self):
if isinstance(self.raw_result, list):
return len(self.raw_result)
else:
return self.raw_result | The number of documents deleted. | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/results.py#L107-L112 | null | class DeleteResult(_WriteResult):
"""The return type for :meth:`~tinymongo.TinyMongoCollection.delete_one`
and :meth:`~tinymongo.TinyMongoCollection.delete_many`"""
__slots__ = ("__raw_result", "__acknowledged")
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(DeleteResult, self).__init__(acknowledged)
@property
def raw_result(self):
"""The raw result document returned by the server."""
return self.__raw_result
@property
|
schapman1974/tinymongo | tinymongo/tinymongo.py | generate_id | python | def generate_id():
# TODO: Use six.string_type to Py3 compat
try:
return unicode(uuid1()).replace(u"-", u"")
except NameError:
return str(uuid1()).replace(u"-", u"") | Generate new UUID | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L806-L812 | null | """Acts like a Pymongo client to TinyDB"""
# coding: utf-8
from __future__ import absolute_import
import copy
from functools import reduce
import logging
import os
from math import ceil
from operator import itemgetter
from uuid import uuid1
from tinydb import Query, TinyDB, where
from .results import (
InsertOneResult,
InsertManyResult,
UpdateResult,
DeleteResult
)
from .errors import DuplicateKeyError
try:
basestring
except NameError:
basestring = str
logger = logging.getLogger(__name__)
def Q(query, key):
return reduce(lambda partial_query, field: partial_query[field], key.split('.'), query)
class TinyMongoClient(object):
    """Pymongo-style client whose databases live as TinyDB json files
    inside one container folder."""

    def __init__(self, foldername=u"tinydb", **kwargs):
        """Remember the container folder and create it when missing."""
        self._foldername = foldername
        try:
            os.mkdir(foldername)
        except OSError as x:
            # the folder usually already exists; log and carry on
            logger.info('{}'.format(x))

    @property
    def _storage(self):
        """Storage class handed to every TinyDB instance.

        Override in a subclass to plug in middlewares or serializers
        (e.g. ``tinydb-serialization``'s ``SerializationMiddleware``)::

            class CustomClient(TinyMongoClient):
                @property
                def _storage(self):
                    serialization = SerializationMiddleware()
                    serialization.register_serializer(
                        DateTimeSerializer(), 'TinyDate')
                    return serialization
        """
        return TinyDB.DEFAULT_STORAGE

    def __getitem__(self, key):
        """Open (or create) the database named *key*."""
        return TinyMongoDatabase(key, self._foldername, self._storage)

    def __getattr__(self, name):
        """Attribute access is an alias for item access: ``client.mydb``."""
        return self[name]

    def close(self):
        """Nothing to release; kept for pymongo API compatibility."""
        pass
class TinyMongoDatabase(object):
    """Pymongo-style database backed by a single TinyDB json file."""

    def __init__(self, database, foldername, storage):
        """Open (creating if needed) ``<foldername>/<database>.json``."""
        self._foldername = foldername
        json_path = os.path.join(foldername, database + u".json")
        self.tinydb = TinyDB(json_path, storage=storage)

    def __getitem__(self, name):
        """Open (or create) the collection named *name*."""
        return TinyMongoCollection(name, self)

    def __getattr__(self, name):
        """Attribute access is an alias for item access: ``db.users``."""
        return self[name]

    def collection_names(self):
        """Names of every table (collection) stored in this database."""
        return list(self.tinydb.tables())
class TinyMongoCollection(object):
    """A named collection of documents, lazily backed by a TinyDB table,
    exposing the usual pymongo-style operations."""

    def __init__(self, table, parent=None):
        """Record the table name and the owning database.

        The underlying TinyDB table is NOT opened here; it is created on
        first use (see ``build_table``).
        """
        self.tablename = table
        self.table = None
        self.parent = parent

    def __repr__(self):
        """A collection displays as its bare name."""
        return self.tablename
def __getattr__(self, name):
    """Unknown attribute access just makes sure the backing table exists
    and returns the collection itself (mimics pymongo dotted access)."""
    if self.table is None:
        self.build_table()
    return self
def build_table(self):
    """Create/open the TinyDB table that stores this collection's docs."""
    database = self.parent.tinydb
    self.table = database.table(self.tablename)
def count(self):
    """Number of documents in the collection (delegates to ``find()``)."""
    return self.find().count()
def drop(self, **kwargs):
    """Remove this collection from the database.

    ``**kwargs`` is accepted only for pymongo's optional "writeConcern"
    argument and is ignored.  Returns True on success, False when the
    backing table was never built.
    """
    if not self.table:
        return False
    self.parent.tinydb.purge_table(self.tablename)
    return True
def insert(self, docs, *args, **kwargs):
    """Legacy ``insert``: a list goes to insert_many, anything else to
    insert_one."""
    handler = self.insert_many if isinstance(docs, list) else self.insert_one
    return handler(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
    """Insert a single document.

    An existing '_id' is kept (the doc is mutated to carry one either way);
    otherwise a fresh id is generated.  Unless
    ``bypass_document_validation=True`` is passed, inserting a duplicate
    '_id' raises DuplicateKeyError.

    :param doc: the document (must be a dict)
    :return: InsertOneResult
    """
    if self.table is None:
        self.build_table()
    if not isinstance(doc, dict):
        raise ValueError(u'"doc" must be a dict')
    _id = doc[u'_id'] = doc.get('_id') or generate_id()
    if kwargs.get('bypass_document_validation') is True:
        # caller explicitly skipped the duplicate-_id check
        eid = self.table.insert(doc)
        return InsertOneResult(eid=eid, inserted_id=_id)
    if self.find_one({'_id': _id}) is not None:
        raise DuplicateKeyError(
            u'_id:{0} already exists in collection:{1}'.format(
                _id, self.tablename
            )
        )
    eid = self.table.insert(doc)
    return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
    """Insert several documents at once.

    Ids are kept or generated exactly as in insert_one.  Unless
    ``bypass_document_validation=True`` is passed, a duplicate '_id'
    (against the stored docs or within *docs* itself) raises
    DuplicateKeyError.

    :param docs: a list of documents
    :return: InsertManyResult
    """
    if self.table is None:
        self.build_table()
    if not isinstance(docs, list):
        raise ValueError(u'"insert_many" requires a list input')
    validate = kwargs.get('bypass_document_validation') is not True
    # fetch every stored _id in one pass to keep I/O down
    seen = [d['_id'] for d in self.find({})] if validate else []
    _ids = []
    for doc in docs:
        _id = doc[u'_id'] = doc.get('_id') or generate_id()
        if validate:
            if _id in seen:
                raise DuplicateKeyError(
                    u'_id:{0} already exists in collection:{1}'.format(
                        _id, self.tablename
                    )
                )
            seen.append(_id)
        _ids.append(_id)
    eids = self.table.insert_multiple(docs)
    return InsertManyResult(
        eids=list(eids),
        inserted_ids=list(_ids)
    )
def parse_query(self, query):
    """Translate a mongo-style filter dict into one composite tinydb Query.

    All conditions produced by parse_condition are AND-folded together.
    """
    logger.debug(u'query to parse2: {}'.format(query))
    if query == {} or query is None:
        # an always-true condition: matches every record
        return Query()._id != u'-1'  # noqa
    combined = None
    for cond in self.parse_condition(query):
        combined = cond if combined is None else combined & cond
    logger.debug(u'new query item2: {}'.format(combined))
    return combined
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
# Each mongo operator key AND-folds one tinydb condition into
# `conditions`.  When the operator sits under $not, the comparison is
# inverted and applied to `last_prev_key` (the field two levels up).
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
# bare $not on a scalar is treated as a plain inequality
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
# shield doubled backslashes behind a placeholder, drop the
# remaining single escapes, then restore literal backslashes
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
# these take list values; handled in the list branch below
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
# recurse into a nested operator dict, carrying the field name down
# NOTE(review): the loop variable shadows the method name here
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
# AND-fold the condition of every sub-spec
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
# OR-fold the condition of every sub-spec
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
# plain list value: match records that contain this element
yield Q(q, prev_key).any([value])
else:
# scalar case: emit whatever has been accumulated so far
yield conditions
def update(self, query, doc, *args, **kwargs):
    """Backwards-compatible ``update``: accepts one doc or a list of docs
    (applying update_one to each)."""
    if not isinstance(doc, list):
        return self.update_one(query, doc, *args, **kwargs)
    return [
        self.update_one(query, item, *args, **kwargs)
        for item in doc
    ]
def update_one(self, query, doc):
    """Update the documents matching the mongo-style *query*.

    A ``$set`` wrapper, when present, is unwrapped first.  On failure the
    raw result is None.

    :param query: dictionary representing the mongo query
    :param doc: dictionary representing the item to be updated
    :return: UpdateResult
    """
    if self.table is None:
        self.build_table()
    if u"$set" in doc:
        doc = doc[u"$set"]
    allcond = self.parse_query(query)
    try:
        result = self.table.update(doc, allcond)
    except Exception:
        # was a bare `except:` which also swallowed KeyboardInterrupt /
        # SystemExit; narrow to Exception and keep a trace of the failure
        # TODO: check table.update result
        # check what pymongo does in that case
        logger.exception(u'update_one failed')
        result = None
    return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
         *args, **kwargs):
    """Return a TinyMongoCursor over every document matching *filter*.

    With no filter the whole table is returned; sort/skip/limit are
    applied by the cursor itself.
    """
    if self.table is None:
        self.build_table()
    if filter is None:
        matches = self.table.all()
    else:
        try:
            matches = self.table.search(self.parse_query(filter))
        except (AttributeError, TypeError):
            matches = []
    return TinyMongoCursor(matches, sort=sort, skip=skip, limit=limit)
def find_one(self, filter=None):
    """Return the first document matching *filter*, or None."""
    if self.table is None:
        self.build_table()
    condition = self.parse_query(filter)
    return self.table.get(condition)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
    """Legacy ``remove``: delete_many by default, delete_one when *multi*
    is falsy."""
    if not multi:
        return self.delete_one(spec_or_id)
    return self.delete_many(spec_or_id)
def delete_one(self, query):
    """Delete the first document matching the mongo-style *query*.

    :param query: dictionary representing the mongo query
    :return: DeleteResult

    When nothing matches, an empty removal is reported instead of
    crashing (the previous code raised TypeError on ``None[u'_id']``),
    matching pymongo's "deleted nothing" behaviour.
    """
    item = self.find_one(query)
    if item is None:
        # no match: nothing was removed
        return DeleteResult(raw_result=[])
    result = self.table.remove(where(u'_id') == item[u'_id'])
    return DeleteResult(raw_result=result)
def delete_many(self, query):
    """Delete every document matching the mongo-style *query*.

    :param query: dictionary representing the mongo query
    :return: DeleteResult
    """
    removed = [
        self.table.remove(where(u'_id') == doc[u'_id'])
        for doc in self.find(query)
    ]
    if query == {}:
        # full wipe: reset TinyDB's id counter for docs order consistency
        self.table._last_id = 0
    return DeleteResult(raw_result=removed)
class TinyMongoCursor(object):
    """Iterable cursor over an in-memory list of result documents."""

    def __init__(self, cursordat, sort=None, skip=None, limit=None):
        """Wrap *cursordat* and apply optional sort and pagination."""
        self.cursordat = cursordat
        self.cursorpos = -1
        # NOTE: with a non-empty result set this starts out pointing at
        # the *last* record (index -1) until next() advances the cursor
        self.currentrec = self.cursordat[self.cursorpos] if self.cursordat else None
        if sort:
            self.sort(sort)
        self.paginate(skip, limit)

    def __getitem__(self, key):
        """Integer key: positional record; any other key: a field of the
        current record."""
        if isinstance(key, int):
            return self.cursordat[key]
        return self.currentrec[key]
def paginate(self, skip, limit):
    """Apply skip/limit slicing to the cached record list, in place."""
# nothing to do for an empty result set or when no limit was requested
if not self.count() or not limit:
return
skip = skip or 0
# number of pages when taking `limit` records at a time
pages = int(ceil(self.count() / float(limit)))
# map each page's start offset to its end offset
limits = {}
last = 0
for i in range(pages):
current = limit * i
limits[last] = current
last = current
# example with count == 62 and limit == 20:
# limits == {0: 20, 20: 40, 40: 60}
# NOTE(review): `skip` is only honoured when it lands exactly on a page
# boundary; any other skip value slices through to the full count.
if limit and limit < self.count():
limit = limits.get(skip, self.count())
self.cursordat = self.cursordat[skip: limit]
def _order(self, value, is_reverse=None):
"""Parsing data to a sortable form
By giving each data type an ID(int), and assemble with the value
into a sortable tuple.
"""
# helper: a dict becomes a tuple of (type-id, key, value) triples, so
# two dicts compare type-first, then key, then value
def _dict_parser(dict_doc):
""" dict ordered by:
valueType_N -> key_N -> value_N
"""
result = list()
for key in dict_doc:
data = self._order(dict_doc[key])
res = (data[0], key, data[1])
result.append(res)
return tuple(result)
# helper: a list is normalised element-wise, recursively
def _list_parser(list_doc):
"""list will iter members to compare
"""
result = list()
for member in list_doc:
result.append(self._order(member))
return result
# (TODO) include more data type
# type-id ladder used below: 0 None/unsortable, 1 numbers, 2 strings,
# 3 dicts, 4 lists, 5 bools
if value is None or not isinstance(value, (dict,
list,
basestring,
bool,
float,
int)):
# not support/sortable value type
value = (0, None)
elif isinstance(value, bool):
# bool must be tested before int: bool is an int subclass
value = (5, value)
elif isinstance(value, (int, float)):
value = (1, value)
elif isinstance(value, basestring):
value = (2, value)
elif isinstance(value, dict):
value = (3, _dict_parser(value))
elif isinstance(value, list):
if len(value) == 0:
# [] less then None
value = [(-1, [])]
else:
value = _list_parser(value)
if is_reverse is not None:
# list will firstly compare with other doc by it's smallest
# or largest member
value = max(value) if is_reverse else min(value)
else:
# if the smallest or largest member is a list
# then compaer with it's sub-member in list index order
value = (4, tuple(value))
return value
def sort(self, key_or_list, direction=None):
"""
Sorts a cursor object based on the input
:param key_or_list: a list/tuple containing the sort specification,
i.e. ('user_number': -1), or a basestring
:param direction: sorting direction, 1 or -1, needed if key_or_list
is a basestring
:return:
"""
# checking input format
sort_specifier = list()
if isinstance(key_or_list, list):
if direction is not None:
raise ValueError('direction can not be set separately '
'if sorting by multiple fields.')
for pair in key_or_list:
if not (isinstance(pair, list) or isinstance(pair, tuple)):
raise TypeError('key pair should be a list or tuple.')
if not len(pair) == 2:
raise ValueError('Need to be (key, direction) pair')
if not isinstance(pair[0], basestring):
raise TypeError('first item in each key pair must '
'be a string')
if not isinstance(pair[1], int) or not abs(pair[1]) == 1:
raise TypeError('bad sort specification.')
sort_specifier = key_or_list
elif isinstance(key_or_list, basestring):
if direction is not None:
if not isinstance(direction, int) or not abs(direction) == 1:
raise TypeError('bad sort specification.')
else:
# default ASCENDING
direction = 1
sort_specifier = [(key_or_list, direction)]
else:
raise ValueError('Wrong input, pass a field name and a direction,'
' or pass a list of (key, direction) pairs.')
# sorting
# multi-key sort: each (key, direction) pass re-sorts the data, with
# "section" ids carried over from the previous pass so earlier keys win
_cursordat = self.cursordat
total = len(_cursordat)
pre_sect_stack = list()
for pair in sort_specifier:
# direction -1 maps to is_reverse True
is_reverse = bool(1-pair[1])
value_stack = list()
for index, data in enumerate(_cursordat):
# get field value
# walk the dotted field path; not_found flags a missing field
not_found = None
for key in pair[0].split('.'):
not_found = True
if isinstance(data, dict) and key in data:
data = copy.deepcopy(data[key])
not_found = False
elif isinstance(data, list):
if not is_reverse and len(data) == 1:
# MongoDB treat [{data}] as {data}
# when finding fields
if isinstance(data[0], dict) and key in data[0]:
data = copy.deepcopy(data[0][key])
not_found = False
elif is_reverse:
# MongoDB will keep finding field in reverse mode
for _d in data:
if isinstance(_d, dict) and key in _d:
data = copy.deepcopy(_d[key])
not_found = False
break
if not_found:
break
# parsing data for sorting
if not_found:
# treat no match as None
data = None
value = self._order(data, is_reverse)
# read previous section
pre_sect = pre_sect_stack[index] if pre_sect_stack else 0
# inverse if in reverse mode
# for keeping order as ASCENDING after sort
pre_sect = (total - pre_sect) if is_reverse else pre_sect
_ind = (total - index) if is_reverse else index
value_stack.append((pre_sect, value, _ind))
# sorting cursor data
value_stack.sort(reverse=is_reverse)
ordereddat = list()
sect_stack = list()
sect_id = -1
last_dat = None
for dat in value_stack:
# restore if in reverse mode
_ind = (total - dat[-1]) if is_reverse else dat[-1]
ordereddat.append(_cursordat[_ind])
# define section
# maintain the sorting result in next level sorting
# records with equal sort values share one section id
if not dat[1] == last_dat:
sect_id += 1
sect_stack.append(sect_id)
last_dat = dat[1]
# save result for next level sorting
_cursordat = ordereddat
pre_sect_stack = sect_stack
# done
self.cursordat = _cursordat
# returns self so calls can be chained
return self
def hasNext(self):
    """True when a subsequent next() call will yield another record."""
    return self.cursorpos + 1 < len(self.cursordat)
def next(self):
    """Advance the cursor one position and return the record there."""
    position = self.cursorpos + 1
    self.cursorpos = position
    return self.cursordat[position]
def count(self, with_limit_and_skip=False):
    """Number of records held by this cursor.

    ``with_limit_and_skip`` is accepted for pymongo compatibility only;
    pagination was already applied to the cached list.
    """
    return len(self.cursordat)
class TinyGridFS(object):
    """Placeholder GridFS shim for tinyDB."""

    def __init__(self, *args, **kwargs):
        # no database attached until GridFS() is called
        self.database = None

    def GridFS(self, tinydatabase):
        """TODO: Must implement yet"""
        self.database = tinydatabase
        return self
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.build_table | python | def build_table(self):
self.table = self.parent.tinydb.table(self.tablename) | Builds a new tinydb table at the parent database
:return: | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L143-L148 | null | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.drop | python | def drop(self, **kwargs):
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False | Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist. | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L157-L168 | null | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.insert | python | def insert(self, docs, *args, **kwargs):
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs) | Backwards compatibility with insert | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L170-L175 | [
"def insert_one(self, doc, *args, **kwargs):\n \"\"\"\n Inserts one document into the collection\n If contains '_id' key it is used, else it is generated.\n :param doc: the document\n :return: InsertOneResult\n \"\"\"\n if self.table is None:\n self.build_table()\n\n if not isinstance(doc, dict):\n raise ValueError(u'\"doc\" must be a dict')\n\n _id = doc[u'_id'] = doc.get('_id') or generate_id()\n\n bypass_document_validation = kwargs.get('bypass_document_validation')\n if bypass_document_validation is True:\n # insert doc without validation of duplicated `_id`\n eid = self.table.insert(doc)\n else:\n existing = self.find_one({'_id': _id})\n if existing is None:\n eid = self.table.insert(doc)\n else:\n raise DuplicateKeyError(\n u'_id:{0} already exists in collection:{1}'.format(\n _id, self.tablename\n )\n )\n\n return InsertOneResult(eid=eid, inserted_id=_id)\n",
"def insert_many(self, docs, *args, **kwargs):\n \"\"\"\n Inserts several documents into the collection\n :param docs: a list of documents\n :return: InsertManyResult\n \"\"\"\n if self.table is None:\n self.build_table()\n\n if not isinstance(docs, list):\n raise ValueError(u'\"insert_many\" requires a list input')\n\n bypass_document_validation = kwargs.get('bypass_document_validation')\n\n if bypass_document_validation is not True:\n # get all _id in once, to reduce I/O. (without projection)\n existing = [doc['_id'] for doc in self.find({})]\n\n _ids = list()\n for doc in docs:\n\n _id = doc[u'_id'] = doc.get('_id') or generate_id()\n\n if bypass_document_validation is not True:\n if _id in existing:\n raise DuplicateKeyError(\n u'_id:{0} already exists in collection:{1}'.format(\n _id, self.tablename\n )\n )\n existing.append(_id)\n\n _ids.append(_id)\n\n results = self.table.insert_multiple(docs)\n\n return InsertManyResult(\n eids=[eid for eid in results],\n inserted_ids=[inserted_id for inserted_id in _ids]\n )\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.insert_one | python | def insert_one(self, doc, *args, **kwargs):
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id) | Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L177-L207 | [
"def generate_id():\n \"\"\"Generate new UUID\"\"\"\n # TODO: Use six.string_type to Py3 compat\n try:\n return unicode(uuid1()).replace(u\"-\", u\"\")\n except NameError:\n return str(uuid1()).replace(u\"-\", u\"\")\n",
"def build_table(self):\n \"\"\"\n Builds a new tinydb table at the parent database\n :return:\n \"\"\"\n self.table = self.parent.tinydb.table(self.tablename)\n",
"def find_one(self, filter=None):\n \"\"\"\n Finds one matching query element\n\n :param query: dictionary representing the mongo query\n :return: the resulting document (if found)\n \"\"\"\n\n if self.table is None:\n self.build_table()\n\n allcond = self.parse_query(filter)\n\n return self.table.get(allcond)\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.insert_many | python | def insert_many(self, docs, *args, **kwargs):
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
) | Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L209-L248 | [
"def generate_id():\n \"\"\"Generate new UUID\"\"\"\n # TODO: Use six.string_type to Py3 compat\n try:\n return unicode(uuid1()).replace(u\"-\", u\"\")\n except NameError:\n return str(uuid1()).replace(u\"-\", u\"\")\n",
"def build_table(self):\n \"\"\"\n Builds a new tinydb table at the parent database\n :return:\n \"\"\"\n self.table = self.parent.tinydb.table(self.tablename)\n",
"def find(self, filter=None, sort=None, skip=None, limit=None,\n *args, **kwargs):\n \"\"\"\n Finds all matching results\n\n :param query: dictionary representing the mongo query\n :return: cursor containing the search results\n \"\"\"\n if self.table is None:\n self.build_table()\n\n if filter is None:\n result = self.table.all()\n else:\n allcond = self.parse_query(filter)\n\n try:\n result = self.table.search(allcond)\n except (AttributeError, TypeError):\n result = []\n\n result = TinyMongoCursor(\n result,\n sort=sort,\n skip=skip,\n limit=limit\n )\n\n return result\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.parse_query | python | def parse_query(self, query):
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q | Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query() | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L250-L274 | [
"def parse_condition(self, query, prev_key=None, last_prev_key=None):\n \"\"\"\n Creates a recursive generator for parsing some types of Query()\n conditions\n\n :param query: Query object\n :param prev_key: The key at the next-higher level\n :return: generator object, the last of which will be the complete\n Query() object containing all conditions\n \"\"\"\n # use this to determine gt/lt/eq on prev_query\n logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))\n\n q = Query()\n conditions = None\n\n # deal with the {'name': value} case by injecting a previous key\n if not prev_key:\n temp_query = copy.deepcopy(query)\n k, v = temp_query.popitem()\n prev_key = k\n\n # deal with the conditions\n for key, value in query.items():\n logger.debug(u'conditions: {} {}'.format(key, value))\n\n if key == u'$gte':\n conditions = (\n Q(q, prev_key) >= value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) >= value)) if prev_key != \"$not\" \\\n else (q[last_prev_key] < value)\n elif key == u'$gt':\n conditions = (\n Q(q, prev_key) > value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) > value)) if prev_key != \"$not\" \\\n else (q[last_prev_key] <= value)\n elif key == u'$lte':\n conditions = (\n Q(q, prev_key) <= value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) <= value)) if prev_key != \"$not\" \\\n else (q[last_prev_key] > value)\n elif key == u'$lt':\n conditions = (\n Q(q, prev_key) < value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) < value)) if prev_key != \"$not\" \\\n else (q[last_prev_key] >= value)\n elif key == u'$ne':\n conditions = (\n Q(q, prev_key) != value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) != value))if prev_key != \"$not\" \\\n else (q[last_prev_key] == value)\n elif key == u'$not':\n if not isinstance(value, dict) and not 
isinstance(value, list):\n conditions = (\n Q(q, prev_key) != value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) != value)) \\\n if prev_key != \"$not\" else (q[last_prev_key] >= value)\n else:\n # let the value's condition be parsed below\n pass\n elif key == u'$regex':\n value = value.replace('\\\\\\\\\\\\', '|||')\n value = value.replace('\\\\\\\\', '|||')\n regex = value.replace('\\\\', '')\n regex = regex.replace('|||', '\\\\')\n currCond = (where(prev_key).matches(regex))\n conditions = currCond if not conditions else (conditions & currCond)\n elif key in ['$and', '$or', '$in', '$all']:\n pass\n else:\n\n\n # don't want to use the previous key if this is a secondary key\n # (fixes multiple item query that includes $ codes)\n if not isinstance(value, dict) and not isinstance(value, list):\n conditions = (\n (Q(q, key) == value) | (Q(q, key).any([value]))\n ) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))\n prev_key = key\n\n logger.debug(u'c: {}'.format(conditions))\n if isinstance(value, dict):\n # yield from self.parse_condition(value, key)\n for parse_condition in self.parse_condition(value, key, prev_key):\n yield parse_condition\n elif isinstance(value, list):\n if key == '$and':\n grouped_conditions = None\n for spec in value:\n for parse_condition in self.parse_condition(spec):\n grouped_conditions = (\n parse_condition\n if not grouped_conditions\n else grouped_conditions & parse_condition\n )\n yield grouped_conditions\n elif key == '$or':\n grouped_conditions = None\n for spec in value:\n for parse_condition in self.parse_condition(spec):\n grouped_conditions = (\n parse_condition\n if not grouped_conditions\n else grouped_conditions | parse_condition\n )\n yield grouped_conditions\n elif key == '$in':\n # use `any` to find with list, before comparing to single string\n grouped_conditions = Q(q, prev_key).any(value)\n for val in value:\n for parse_condition in 
self.parse_condition({prev_key : val}):\n grouped_conditions = (\n parse_condition\n if not grouped_conditions\n else grouped_conditions | parse_condition\n )\n yield grouped_conditions\n elif key == '$all':\n yield Q(q, prev_key).all(value)\n else:\n yield Q(q, prev_key).any([value])\n else:\n yield conditions\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.parse_condition | python | def parse_condition(self, query, prev_key=None, last_prev_key=None):
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions | Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L276-L404 | [
"def Q(query, key):\n return reduce(lambda partial_query, field: partial_query[field], key.split('.'), query)\n",
"def parse_condition(self, query, prev_key=None, last_prev_key=None):\n \"\"\"\n Creates a recursive generator for parsing some types of Query()\n conditions\n\n :param query: Query object\n :param prev_key: The key at the next-higher level\n :return: generator object, the last of which will be the complete\n Query() object containing all conditions\n \"\"\"\n # use this to determine gt/lt/eq on prev_query\n logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))\n\n q = Query()\n conditions = None\n\n # deal with the {'name': value} case by injecting a previous key\n if not prev_key:\n temp_query = copy.deepcopy(query)\n k, v = temp_query.popitem()\n prev_key = k\n\n # deal with the conditions\n for key, value in query.items():\n logger.debug(u'conditions: {} {}'.format(key, value))\n\n if key == u'$gte':\n conditions = (\n Q(q, prev_key) >= value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) >= value)) if prev_key != \"$not\" \\\n else (q[last_prev_key] < value)\n elif key == u'$gt':\n conditions = (\n Q(q, prev_key) > value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) > value)) if prev_key != \"$not\" \\\n else (q[last_prev_key] <= value)\n elif key == u'$lte':\n conditions = (\n Q(q, prev_key) <= value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) <= value)) if prev_key != \"$not\" \\\n else (q[last_prev_key] > value)\n elif key == u'$lt':\n conditions = (\n Q(q, prev_key) < value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) < value)) if prev_key != \"$not\" \\\n else (q[last_prev_key] >= value)\n elif key == u'$ne':\n conditions = (\n Q(q, prev_key) != value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) != value))if prev_key != \"$not\" \\\n else (q[last_prev_key] == value)\n elif key == u'$not':\n if not isinstance(value, dict) and not 
isinstance(value, list):\n conditions = (\n Q(q, prev_key) != value\n ) if not conditions and prev_key != \"$not\" \\\n else (conditions & (Q(q, prev_key) != value)) \\\n if prev_key != \"$not\" else (q[last_prev_key] >= value)\n else:\n # let the value's condition be parsed below\n pass\n elif key == u'$regex':\n value = value.replace('\\\\\\\\\\\\', '|||')\n value = value.replace('\\\\\\\\', '|||')\n regex = value.replace('\\\\', '')\n regex = regex.replace('|||', '\\\\')\n currCond = (where(prev_key).matches(regex))\n conditions = currCond if not conditions else (conditions & currCond)\n elif key in ['$and', '$or', '$in', '$all']:\n pass\n else:\n\n\n # don't want to use the previous key if this is a secondary key\n # (fixes multiple item query that includes $ codes)\n if not isinstance(value, dict) and not isinstance(value, list):\n conditions = (\n (Q(q, key) == value) | (Q(q, key).any([value]))\n ) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))\n prev_key = key\n\n logger.debug(u'c: {}'.format(conditions))\n if isinstance(value, dict):\n # yield from self.parse_condition(value, key)\n for parse_condition in self.parse_condition(value, key, prev_key):\n yield parse_condition\n elif isinstance(value, list):\n if key == '$and':\n grouped_conditions = None\n for spec in value:\n for parse_condition in self.parse_condition(spec):\n grouped_conditions = (\n parse_condition\n if not grouped_conditions\n else grouped_conditions & parse_condition\n )\n yield grouped_conditions\n elif key == '$or':\n grouped_conditions = None\n for spec in value:\n for parse_condition in self.parse_condition(spec):\n grouped_conditions = (\n parse_condition\n if not grouped_conditions\n else grouped_conditions | parse_condition\n )\n yield grouped_conditions\n elif key == '$in':\n # use `any` to find with list, before comparing to single string\n grouped_conditions = Q(q, prev_key).any(value)\n for val in value:\n for parse_condition in 
self.parse_condition({prev_key : val}):\n grouped_conditions = (\n parse_condition\n if not grouped_conditions\n else grouped_conditions | parse_condition\n )\n yield grouped_conditions\n elif key == '$all':\n yield Q(q, prev_key).all(value)\n else:\n yield Q(q, prev_key).any([value])\n else:\n yield conditions\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.update | python | def update(self, query, doc, *args, **kwargs):
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs) | BAckwards compatibility with update | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L406-L414 | [
"def update_one(self, query, doc):\n \"\"\"\n Updates one element of the collection\n\n :param query: dictionary representing the mongo query\n :param doc: dictionary representing the item to be updated\n :return: UpdateResult\n \"\"\"\n if self.table is None:\n self.build_table()\n\n if u\"$set\" in doc:\n doc = doc[u\"$set\"]\n\n allcond = self.parse_query(query)\n\n try:\n result = self.table.update(doc, allcond)\n except:\n # TODO: check table.update result\n # check what pymongo does in that case\n result = None\n\n return UpdateResult(raw_result=result)\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.update_one | python | def update_one(self, query, doc):
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result) | Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L416-L439 | [
"def build_table(self):\n \"\"\"\n Builds a new tinydb table at the parent database\n :return:\n \"\"\"\n self.table = self.parent.tinydb.table(self.tablename)\n",
"def parse_query(self, query):\n \"\"\"\n Creates a tinydb Query() object from the query dict\n\n :param query: object containing the dictionary representation of the\n query\n :return: composite Query()\n \"\"\"\n logger.debug(u'query to parse2: {}'.format(query))\n\n # this should find all records\n if query == {} or query is None:\n return Query()._id != u'-1' # noqa\n\n q = None\n # find the final result of the generator\n for c in self.parse_condition(query):\n if q is None:\n q = c\n else:\n q = q & c\n\n logger.debug(u'new query item2: {}'.format(q))\n\n return q\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.find | python | def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result | Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L441-L469 | [
"def build_table(self):\n \"\"\"\n Builds a new tinydb table at the parent database\n :return:\n \"\"\"\n self.table = self.parent.tinydb.table(self.tablename)\n",
"def parse_query(self, query):\n \"\"\"\n Creates a tinydb Query() object from the query dict\n\n :param query: object containing the dictionary representation of the\n query\n :return: composite Query()\n \"\"\"\n logger.debug(u'query to parse2: {}'.format(query))\n\n # this should find all records\n if query == {} or query is None:\n return Query()._id != u'-1' # noqa\n\n q = None\n # find the final result of the generator\n for c in self.parse_condition(query):\n if q is None:\n q = c\n else:\n q = q & c\n\n logger.debug(u'new query item2: {}'.format(q))\n\n return q\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.find_one | python | def find_one(self, filter=None):
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond) | Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found) | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L471-L484 | [
"def build_table(self):\n \"\"\"\n Builds a new tinydb table at the parent database\n :return:\n \"\"\"\n self.table = self.parent.tinydb.table(self.tablename)\n",
"def parse_query(self, query):\n \"\"\"\n Creates a tinydb Query() object from the query dict\n\n :param query: object containing the dictionary representation of the\n query\n :return: composite Query()\n \"\"\"\n logger.debug(u'query to parse2: {}'.format(query))\n\n # this should find all records\n if query == {} or query is None:\n return Query()._id != u'-1' # noqa\n\n q = None\n # find the final result of the generator\n for c in self.parse_condition(query):\n if q is None:\n q = c\n else:\n q = q & c\n\n logger.debug(u'new query item2: {}'.format(q))\n\n return q\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.remove | python | def remove(self, spec_or_id, multi=True, *args, **kwargs):
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id) | Backwards compatibility with remove | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L486-L490 | [
"def delete_one(self, query):\n \"\"\"\n Deletes one document from the collection\n\n :param query: dictionary representing the mongo query\n :return: DeleteResult\n \"\"\"\n item = self.find_one(query)\n result = self.table.remove(where(u'_id') == item[u'_id'])\n\n return DeleteResult(raw_result=result)\n",
"def delete_many(self, query):\n \"\"\"\n Removes all items matching the mongo query\n\n :param query: dictionary representing the mongo query\n :return: DeleteResult\n \"\"\"\n items = self.find(query)\n result = [\n self.table.remove(where(u'_id') == item[u'_id'])\n for item in items\n ]\n\n if query == {}:\n # need to reset TinyDB's index for docs order consistency\n self.table._last_id = 0\n\n return DeleteResult(raw_result=result)\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
def delete_many(self, query):
"""
Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.delete_one | python | def delete_one(self, query):
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result) | Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L492-L502 | [
"def find_one(self, filter=None):\n \"\"\"\n Finds one matching query element\n\n :param query: dictionary representing the mongo query\n :return: the resulting document (if found)\n \"\"\"\n\n if self.table is None:\n self.build_table()\n\n allcond = self.parse_query(filter)\n\n return self.table.get(allcond)\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
    def insert_one(self, doc, *args, **kwargs):
        """
        Insert a single document into the collection.

        If ``doc`` already carries a truthy ``'_id'`` it is kept; otherwise
        a fresh id is generated and written back into ``doc`` (the caller's
        dict is mutated, mirroring pymongo).  Note that a falsy ``_id``
        (e.g. ``0`` or ``''``) is also replaced because of the ``or``.

        :param doc: the document (must be a dict)
        :param kwargs: ``bypass_document_validation=True`` skips the
            duplicate ``_id`` check
        :return: InsertOneResult
        :raises ValueError: if ``doc`` is not a dict
        :raises DuplicateKeyError: if the ``_id`` already exists and
            validation is not bypassed
        """
        if self.table is None:
            self.build_table()
        if not isinstance(doc, dict):
            raise ValueError(u'"doc" must be a dict')
        # keep an existing truthy _id, else generate one; write it back
        # into the caller's document
        _id = doc[u'_id'] = doc.get('_id') or generate_id()
        bypass_document_validation = kwargs.get('bypass_document_validation')
        if bypass_document_validation is True:
            # insert doc without validation of duplicated `_id`
            eid = self.table.insert(doc)
        else:
            # the extra lookup makes the duplicate check O(n) per insert
            existing = self.find_one({'_id': _id})
            if existing is None:
                eid = self.table.insert(doc)
            else:
                raise DuplicateKeyError(
                    u'_id:{0} already exists in collection:{1}'.format(
                        _id, self.tablename
                    )
                )
        return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
    def parse_query(self, query):
        """
        Build a composite TinyDB ``Query`` from a mongo-style query dict.

        Every condition yielded by :meth:`parse_condition` is AND-ed
        together.  An empty or ``None`` query becomes a condition that
        matches every record.

        :param query: dict with the mongo-style query (may be None)
        :return: composite TinyDB ``Query`` condition
        """
        logger.debug(u'query to parse2: {}'.format(query))
        # this should find all records (every _id differs from '-1')
        if query == {} or query is None:
            return Query()._id != u'-1'  # noqa
        q = None
        # AND together every condition produced by the generator
        for c in self.parse_condition(query):
            if q is None:
                q = c
            else:
                q = q & c
        logger.debug(u'new query item2: {}'.format(q))
        return q
    def parse_condition(self, query, prev_key=None, last_prev_key=None):
        """
        Recursively translate a mongo-style condition dict into TinyDB
        ``Query`` conditions.

        This is a generator: callers (see :meth:`parse_query`) consume every
        yielded condition and AND them together.

        :param query: mongo-style condition dict
        :param prev_key: the field name one level up, i.e. the field the
            current operator dict applies to
        :param last_prev_key: the field name two levels up; used to invert
            comparisons nested under ``$not``
        :return: generator of TinyDB ``Query`` conditions
        """
        # use this to determine gt/lt/eq on prev_query
        logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
        q = Query()
        conditions = None
        # deal with the {'name': value} case by injecting a previous key
        if not prev_key:
            temp_query = copy.deepcopy(query)
            k, v = temp_query.popitem()
            prev_key = k
        # deal with the conditions
        for key, value in query.items():
            logger.debug(u'conditions: {} {}'.format(key, value))
            # Each comparison operator below follows the same pattern:
            #   - first condition:        plain comparison on prev_key
            #   - further conditions:     AND-ed onto the accumulator
            #   - nested under $not:      the comparison is inverted and
            #     applied to the grandparent key (last_prev_key)
            if key == u'$gte':
                conditions = (
                    Q(q, prev_key) >= value
                ) if not conditions and prev_key != "$not" \
                    else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
                    else (q[last_prev_key] < value)
            elif key == u'$gt':
                conditions = (
                    Q(q, prev_key) > value
                ) if not conditions and prev_key != "$not" \
                    else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
                    else (q[last_prev_key] <= value)
            elif key == u'$lte':
                conditions = (
                    Q(q, prev_key) <= value
                ) if not conditions and prev_key != "$not" \
                    else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
                    else (q[last_prev_key] > value)
            elif key == u'$lt':
                conditions = (
                    Q(q, prev_key) < value
                ) if not conditions and prev_key != "$not" \
                    else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
                    else (q[last_prev_key] >= value)
            elif key == u'$ne':
                conditions = (
                    Q(q, prev_key) != value
                ) if not conditions and prev_key != "$not" \
                    else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
                    else (q[last_prev_key] == value)
            elif key == u'$not':
                # a scalar under $not is a straight inequality; a dict/list
                # is handled by the recursion below with prev_key == "$not"
                if not isinstance(value, dict) and not isinstance(value, list):
                    conditions = (
                        Q(q, prev_key) != value
                    ) if not conditions and prev_key != "$not" \
                        else (conditions & (Q(q, prev_key) != value)) \
                        if prev_key != "$not" else (q[last_prev_key] >= value)
                else:
                    # let the value's condition be parsed below
                    pass
            elif key == u'$regex':
                # de-mangle double-escaped backslashes before stripping the
                # remaining escapes, then match with TinyDB's regex test
                value = value.replace('\\\\\\', '|||')
                value = value.replace('\\\\', '|||')
                regex = value.replace('\\', '')
                regex = regex.replace('|||', '\\')
                currCond = (where(prev_key).matches(regex))
                conditions = currCond if not conditions else (conditions & currCond)
            elif key in ['$and', '$or', '$in', '$all']:
                # logical/membership operators are handled in the list
                # branch below
                pass
            else:
                # don't want to use the previous key if this is a secondary key
                # (fixes multiple item query that includes $ codes)
                if not isinstance(value, dict) and not isinstance(value, list):
                    conditions = (
                        (Q(q, key) == value) | (Q(q, key).any([value]))
                    ) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
                prev_key = key
            logger.debug(u'c: {}'.format(conditions))
            if isinstance(value, dict):
                # yield from self.parse_condition(value, key)
                for parse_condition in self.parse_condition(value, key, prev_key):
                    yield parse_condition
            elif isinstance(value, list):
                if key == '$and':
                    grouped_conditions = None
                    for spec in value:
                        for parse_condition in self.parse_condition(spec):
                            grouped_conditions = (
                                parse_condition
                                if not grouped_conditions
                                else grouped_conditions & parse_condition
                            )
                    yield grouped_conditions
                elif key == '$or':
                    grouped_conditions = None
                    for spec in value:
                        for parse_condition in self.parse_condition(spec):
                            grouped_conditions = (
                                parse_condition
                                if not grouped_conditions
                                else grouped_conditions | parse_condition
                            )
                    yield grouped_conditions
                elif key == '$in':
                    # use `any` to find with list, before comparing to single string
                    grouped_conditions = Q(q, prev_key).any(value)
                    for val in value:
                        for parse_condition in self.parse_condition({prev_key : val}):
                            grouped_conditions = (
                                parse_condition
                                if not grouped_conditions
                                else grouped_conditions | parse_condition
                            )
                    yield grouped_conditions
                elif key == '$all':
                    yield Q(q, prev_key).all(value)
                else:
                    # plain list value: match when the field contains it
                    yield Q(q, prev_key).any([value])
            else:
                yield conditions
    def update(self, query, doc, *args, **kwargs):
        """Backwards-compatible pymongo-style update.

        A list of documents is applied one by one via :meth:`update_one`
        (returning a list of UpdateResult); a single document is passed
        straight through.
        """
        if isinstance(doc, list):
            return [
                self.update_one(query, item, *args, **kwargs)
                for item in doc
            ]
        else:
            return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
    def find_one(self, filter=None):
        """
        Return the first document matching the query, or None.

        :param filter: dict with the mongo-style query (None matches any
            document)
        :return: the matching document dict, or None when nothing matches
        """
        if self.table is None:
            self.build_table()
        allcond = self.parse_query(filter)
        return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
    def delete_many(self, query):
        """
        Remove every document matching the mongo-style query.

        Each matching document is removed individually by its ``_id``.

        :param query: dict with the mongo-style query ({} removes all)
        :return: DeleteResult whose ``raw_result`` is the list of TinyDB
            ``remove()`` return values
        """
        # the cursor has no __iter__ but supports integer __getitem__, so
        # plain iteration works via the legacy sequence protocol
        items = self.find(query)
        result = [
            self.table.remove(where(u'_id') == item[u'_id'])
            for item in items
        ]
        if query == {}:
            # need to reset TinyDB's index for docs order consistency
            # NOTE(review): pokes TinyDB's private _last_id counter; may
            # break with other TinyDB versions -- confirm against the
            # pinned TinyDB release
            self.table._last_id = 0
        return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCollection.delete_many | python | def delete_many(self, query):
items = self.find(query)
result = [
self.table.remove(where(u'_id') == item[u'_id'])
for item in items
]
if query == {}:
# need to reset TinyDB's index for docs order consistency
self.table._last_id = 0
return DeleteResult(raw_result=result) | Removes all items matching the mongo query
:param query: dictionary representing the mongo query
:return: DeleteResult | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L504-L521 | [
"def find(self, filter=None, sort=None, skip=None, limit=None,\n *args, **kwargs):\n \"\"\"\n Finds all matching results\n\n :param query: dictionary representing the mongo query\n :return: cursor containing the search results\n \"\"\"\n if self.table is None:\n self.build_table()\n\n if filter is None:\n result = self.table.all()\n else:\n allcond = self.parse_query(filter)\n\n try:\n result = self.table.search(allcond)\n except (AttributeError, TypeError):\n result = []\n\n result = TinyMongoCursor(\n result,\n sort=sort,\n skip=skip,\n limit=limit\n )\n\n return result\n"
] | class TinyMongoCollection(object):
"""
This class represents a collection and all of the operations that are
commonly performed on a collection
"""
def __init__(self, table, parent=None):
"""
Initilialize the collection
:param table: the table name
:param parent: the parent db name
"""
self.tablename = table
self.table = None
self.parent = parent
def __repr__(self):
"""Return collection name"""
return self.tablename
def __getattr__(self, name):
"""
If attr is not found return self
:param name:
:return:
"""
# if self.table is None:
# self.tablename += u"." + name
if self.table is None:
self.build_table()
return self
def build_table(self):
"""
Builds a new tinydb table at the parent database
:return:
"""
self.table = self.parent.tinydb.table(self.tablename)
def count(self):
"""
Counts the documents in the collection.
:return: Integer representing the number of documents in the collection.
"""
return self.find().count()
def drop(self, **kwargs):
"""
Removes a collection from the database.
**kwargs only because of the optional "writeConcern" field, but does nothing in the TinyDB database.
:return: Returns True when successfully drops a collection. Returns False when collection to drop does not
exist.
"""
if self.table:
self.parent.tinydb.purge_table(self.tablename)
return True
else:
return False
def insert(self, docs, *args, **kwargs):
"""Backwards compatibility with insert"""
if isinstance(docs, list):
return self.insert_many(docs, *args, **kwargs)
else:
return self.insert_one(docs, *args, **kwargs)
def insert_one(self, doc, *args, **kwargs):
"""
Inserts one document into the collection
If contains '_id' key it is used, else it is generated.
:param doc: the document
:return: InsertOneResult
"""
if self.table is None:
self.build_table()
if not isinstance(doc, dict):
raise ValueError(u'"doc" must be a dict')
_id = doc[u'_id'] = doc.get('_id') or generate_id()
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is True:
# insert doc without validation of duplicated `_id`
eid = self.table.insert(doc)
else:
existing = self.find_one({'_id': _id})
if existing is None:
eid = self.table.insert(doc)
else:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
return InsertOneResult(eid=eid, inserted_id=_id)
def insert_many(self, docs, *args, **kwargs):
"""
Inserts several documents into the collection
:param docs: a list of documents
:return: InsertManyResult
"""
if self.table is None:
self.build_table()
if not isinstance(docs, list):
raise ValueError(u'"insert_many" requires a list input')
bypass_document_validation = kwargs.get('bypass_document_validation')
if bypass_document_validation is not True:
# get all _id in once, to reduce I/O. (without projection)
existing = [doc['_id'] for doc in self.find({})]
_ids = list()
for doc in docs:
_id = doc[u'_id'] = doc.get('_id') or generate_id()
if bypass_document_validation is not True:
if _id in existing:
raise DuplicateKeyError(
u'_id:{0} already exists in collection:{1}'.format(
_id, self.tablename
)
)
existing.append(_id)
_ids.append(_id)
results = self.table.insert_multiple(docs)
return InsertManyResult(
eids=[eid for eid in results],
inserted_ids=[inserted_id for inserted_id in _ids]
)
def parse_query(self, query):
"""
Creates a tinydb Query() object from the query dict
:param query: object containing the dictionary representation of the
query
:return: composite Query()
"""
logger.debug(u'query to parse2: {}'.format(query))
# this should find all records
if query == {} or query is None:
return Query()._id != u'-1' # noqa
q = None
# find the final result of the generator
for c in self.parse_condition(query):
if q is None:
q = c
else:
q = q & c
logger.debug(u'new query item2: {}'.format(q))
return q
def parse_condition(self, query, prev_key=None, last_prev_key=None):
"""
Creates a recursive generator for parsing some types of Query()
conditions
:param query: Query object
:param prev_key: The key at the next-higher level
:return: generator object, the last of which will be the complete
Query() object containing all conditions
"""
# use this to determine gt/lt/eq on prev_query
logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
q = Query()
conditions = None
# deal with the {'name': value} case by injecting a previous key
if not prev_key:
temp_query = copy.deepcopy(query)
k, v = temp_query.popitem()
prev_key = k
# deal with the conditions
for key, value in query.items():
logger.debug(u'conditions: {} {}'.format(key, value))
if key == u'$gte':
conditions = (
Q(q, prev_key) >= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" \
else (q[last_prev_key] < value)
elif key == u'$gt':
conditions = (
Q(q, prev_key) > value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" \
else (q[last_prev_key] <= value)
elif key == u'$lte':
conditions = (
Q(q, prev_key) <= value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" \
else (q[last_prev_key] > value)
elif key == u'$lt':
conditions = (
Q(q, prev_key) < value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" \
else (q[last_prev_key] >= value)
elif key == u'$ne':
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value))if prev_key != "$not" \
else (q[last_prev_key] == value)
elif key == u'$not':
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
Q(q, prev_key) != value
) if not conditions and prev_key != "$not" \
else (conditions & (Q(q, prev_key) != value)) \
if prev_key != "$not" else (q[last_prev_key] >= value)
else:
# let the value's condition be parsed below
pass
elif key == u'$regex':
value = value.replace('\\\\\\', '|||')
value = value.replace('\\\\', '|||')
regex = value.replace('\\', '')
regex = regex.replace('|||', '\\')
currCond = (where(prev_key).matches(regex))
conditions = currCond if not conditions else (conditions & currCond)
elif key in ['$and', '$or', '$in', '$all']:
pass
else:
# don't want to use the previous key if this is a secondary key
# (fixes multiple item query that includes $ codes)
if not isinstance(value, dict) and not isinstance(value, list):
conditions = (
(Q(q, key) == value) | (Q(q, key).any([value]))
) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
prev_key = key
logger.debug(u'c: {}'.format(conditions))
if isinstance(value, dict):
# yield from self.parse_condition(value, key)
for parse_condition in self.parse_condition(value, key, prev_key):
yield parse_condition
elif isinstance(value, list):
if key == '$and':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions & parse_condition
)
yield grouped_conditions
elif key == '$or':
grouped_conditions = None
for spec in value:
for parse_condition in self.parse_condition(spec):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$in':
# use `any` to find with list, before comparing to single string
grouped_conditions = Q(q, prev_key).any(value)
for val in value:
for parse_condition in self.parse_condition({prev_key : val}):
grouped_conditions = (
parse_condition
if not grouped_conditions
else grouped_conditions | parse_condition
)
yield grouped_conditions
elif key == '$all':
yield Q(q, prev_key).all(value)
else:
yield Q(q, prev_key).any([value])
else:
yield conditions
def update(self, query, doc, *args, **kwargs):
"""BAckwards compatibility with update"""
if isinstance(doc, list):
return [
self.update_one(query, item, *args, **kwargs)
for item in doc
]
else:
return self.update_one(query, doc, *args, **kwargs)
def update_one(self, query, doc):
"""
Updates one element of the collection
:param query: dictionary representing the mongo query
:param doc: dictionary representing the item to be updated
:return: UpdateResult
"""
if self.table is None:
self.build_table()
if u"$set" in doc:
doc = doc[u"$set"]
allcond = self.parse_query(query)
try:
result = self.table.update(doc, allcond)
except:
# TODO: check table.update result
# check what pymongo does in that case
result = None
return UpdateResult(raw_result=result)
def find(self, filter=None, sort=None, skip=None, limit=None,
*args, **kwargs):
"""
Finds all matching results
:param query: dictionary representing the mongo query
:return: cursor containing the search results
"""
if self.table is None:
self.build_table()
if filter is None:
result = self.table.all()
else:
allcond = self.parse_query(filter)
try:
result = self.table.search(allcond)
except (AttributeError, TypeError):
result = []
result = TinyMongoCursor(
result,
sort=sort,
skip=skip,
limit=limit
)
return result
def find_one(self, filter=None):
"""
Finds one matching query element
:param query: dictionary representing the mongo query
:return: the resulting document (if found)
"""
if self.table is None:
self.build_table()
allcond = self.parse_query(filter)
return self.table.get(allcond)
def remove(self, spec_or_id, multi=True, *args, **kwargs):
"""Backwards compatibility with remove"""
if multi:
return self.delete_many(spec_or_id)
return self.delete_one(spec_or_id)
def delete_one(self, query):
"""
Deletes one document from the collection
:param query: dictionary representing the mongo query
:return: DeleteResult
"""
item = self.find_one(query)
result = self.table.remove(where(u'_id') == item[u'_id'])
return DeleteResult(raw_result=result)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCursor.paginate | python | def paginate(self, skip, limit):
if not self.count() or not limit:
return
skip = skip or 0
pages = int(ceil(self.count() / float(limit)))
limits = {}
last = 0
for i in range(pages):
current = limit * i
limits[last] = current
last = current
# example with count == 62
# {0: 20, 20: 40, 40: 60, 60: 62}
if limit and limit < self.count():
limit = limits.get(skip, self.count())
self.cursordat = self.cursordat[skip: limit] | Paginate list of records | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L548-L564 | [
"def count(self, with_limit_and_skip=False):\n \"\"\"\n Returns the number of records in the current cursor\n\n :return: number of records\n \"\"\"\n return len(self.cursordat)\n"
] | class TinyMongoCursor(object):
"""Mongo iterable cursor"""
    def __init__(self, cursordat, sort=None, skip=None, limit=None):
        """Initialize the mongo-like iterable cursor with result data.

        :param cursordat: list of matching documents
        :param sort: optional sort specification, applied immediately
        :param skip: number of leading documents to drop
        :param limit: maximum number of documents to keep
        """
        self.cursordat = cursordat
        # position used by next()/hasNext(); -1 means "before the first"
        self.cursorpos = -1
        if len(self.cursordat) == 0:
            self.currentrec = None
        else:
            # index -1: the *last* record is exposed as the current one
            # until next() is called
            self.currentrec = self.cursordat[self.cursorpos]
        if sort:
            self.sort(sort)
        self.paginate(skip, limit)
def __getitem__(self, key):
"""Gets record by index or value by key"""
if isinstance(key, int):
return self.cursordat[key]
return self.currentrec[key]
    def _order(self, value, is_reverse=None):
        """Convert *value* into a form comparable across mixed types.

        Each supported type gets a numeric rank and the value is wrapped
        into a ``(rank, comparable)`` tuple so heterogeneous values sort
        deterministically: unsupported/None (0) < numbers (1) < strings (2)
        < dicts (3) < lists (4) < bools (5).

        :param value: the raw field value to make sortable
        :param is_reverse: for lists only -- when not None, the list is
            represented by its largest (descending) or smallest (ascending)
            member instead of by the whole tuple
        :return: a sortable tuple
        """
        def _dict_parser(dict_doc):
            """Order a dict as a tuple of (value-rank, key, value) triples."""
            result = list()
            for key in dict_doc:
                data = self._order(dict_doc[key])
                res = (data[0], key, data[1])
                result.append(res)
            return tuple(result)
        def _list_parser(list_doc):
            """Order a list by ordering each of its members."""
            result = list()
            for member in list_doc:
                result.append(self._order(member))
            return result
        # (TODO) include more data types
        if value is None or not isinstance(value, (dict,
                                                   list,
                                                   basestring,
                                                   bool,
                                                   float,
                                                   int)):
            # unsupported / unsortable value type sorts first
            value = (0, None)
        elif isinstance(value, bool):
            # bool must be tested before int (bool is an int subclass)
            value = (5, value)
        elif isinstance(value, (int, float)):
            value = (1, value)
        elif isinstance(value, basestring):
            value = (2, value)
        elif isinstance(value, dict):
            value = (3, _dict_parser(value))
        elif isinstance(value, list):
            if len(value) == 0:
                # [] sorts below None
                value = [(-1, [])]
            else:
                value = _list_parser(value)
            if is_reverse is not None:
                # a list compares with other docs by its smallest
                # (ascending) or largest (descending) member
                value = max(value) if is_reverse else min(value)
            else:
                # if the smallest or largest member is itself a list,
                # compare member by member in index order
                value = (4, tuple(value))
        return value
    def sort(self, key_or_list, direction=None):
        """
        Sort the cursor's documents in place, possibly on multiple keys.

        Dotted field names descend into nested documents; documents missing
        a field sort as None.  Keys are applied one after another while
        "sections" of equal values are tracked, so earlier keys keep
        precedence over later ones (a stable multi-key sort).

        :param key_or_list: either a single field name (combined with
            ``direction``) or a list of ``(field, direction)`` pairs
        :param direction: 1 (ascending) or -1 (descending); only valid when
            ``key_or_list`` is a single field name
        :return: self, so calls can be chained
        :raises TypeError: on malformed pairs or directions
        :raises ValueError: on conflicting or unrecognised arguments
        """
        # checking input format
        sort_specifier = list()
        if isinstance(key_or_list, list):
            if direction is not None:
                raise ValueError('direction can not be set separately '
                                 'if sorting by multiple fields.')
            for pair in key_or_list:
                if not (isinstance(pair, list) or isinstance(pair, tuple)):
                    raise TypeError('key pair should be a list or tuple.')
                if not len(pair) == 2:
                    raise ValueError('Need to be (key, direction) pair')
                if not isinstance(pair[0], basestring):
                    raise TypeError('first item in each key pair must '
                                    'be a string')
                if not isinstance(pair[1], int) or not abs(pair[1]) == 1:
                    raise TypeError('bad sort specification.')
            sort_specifier = key_or_list
        elif isinstance(key_or_list, basestring):
            if direction is not None:
                if not isinstance(direction, int) or not abs(direction) == 1:
                    raise TypeError('bad sort specification.')
            else:
                # default ASCENDING
                direction = 1
            sort_specifier = [(key_or_list, direction)]
        else:
            raise ValueError('Wrong input, pass a field name and a direction,'
                             ' or pass a list of (key, direction) pairs.')
        # sorting
        _cursordat = self.cursordat
        total = len(_cursordat)
        pre_sect_stack = list()
        for pair in sort_specifier:
            # direction -1 -> reverse mode
            is_reverse = bool(1-pair[1])
            value_stack = list()
            for index, data in enumerate(_cursordat):
                # get field value, following dotted names into sub-documents
                not_found = None
                for key in pair[0].split('.'):
                    not_found = True
                    if isinstance(data, dict) and key in data:
                        data = copy.deepcopy(data[key])
                        not_found = False
                    elif isinstance(data, list):
                        if not is_reverse and len(data) == 1:
                            # MongoDB treat [{data}] as {data}
                            # when finding fields
                            if isinstance(data[0], dict) and key in data[0]:
                                data = copy.deepcopy(data[0][key])
                                not_found = False
                        elif is_reverse:
                            # MongoDB will keep finding field in reverse mode
                            for _d in data:
                                if isinstance(_d, dict) and key in _d:
                                    data = copy.deepcopy(_d[key])
                                    not_found = False
                                    break
                    if not_found:
                        break
                # parsing data for sorting
                if not_found:
                    # treat no match as None
                    data = None
                value = self._order(data, is_reverse)
                # read previous section id (0 on the first key)
                pre_sect = pre_sect_stack[index] if pre_sect_stack else 0
                # inverse if in reverse mode
                # for keeping order as ASCENDING after sort
                pre_sect = (total - pre_sect) if is_reverse else pre_sect
                _ind = (total - index) if is_reverse else index
                value_stack.append((pre_sect, value, _ind))
            # sorting cursor data
            value_stack.sort(reverse=is_reverse)
            ordereddat = list()
            sect_stack = list()
            sect_id = -1
            last_dat = None
            for dat in value_stack:
                # restore if in reverse mode
                _ind = (total - dat[-1]) if is_reverse else dat[-1]
                ordereddat.append(_cursordat[_ind])
                # define section
                # maintain the sorting result in next level sorting
                if not dat[1] == last_dat:
                    sect_id += 1
                sect_stack.append(sect_id)
                last_dat = dat[1]
            # save result for next level sorting
            _cursordat = ordereddat
            pre_sect_stack = sect_stack
        # done
        self.cursordat = _cursordat
        return self
def hasNext(self):
"""
Returns True if the cursor has a next position, False if not
:return:
"""
cursor_pos = self.cursorpos + 1
try:
self.cursordat[cursor_pos]
return True
except IndexError:
return False
def next(self):
"""
Returns the next record
:return:
"""
self.cursorpos += 1
return self.cursordat[self.cursorpos]
def count(self, with_limit_and_skip=False):
"""
Returns the number of records in the current cursor
:return: number of records
"""
return len(self.cursordat)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCursor._order | python | def _order(self, value, is_reverse=None):
def _dict_parser(dict_doc):
""" dict ordered by:
valueType_N -> key_N -> value_N
"""
result = list()
for key in dict_doc:
data = self._order(dict_doc[key])
res = (data[0], key, data[1])
result.append(res)
return tuple(result)
def _list_parser(list_doc):
"""list will iter members to compare
"""
result = list()
for member in list_doc:
result.append(self._order(member))
return result
# (TODO) include more data type
if value is None or not isinstance(value, (dict,
list,
basestring,
bool,
float,
int)):
# not support/sortable value type
value = (0, None)
elif isinstance(value, bool):
value = (5, value)
elif isinstance(value, (int, float)):
value = (1, value)
elif isinstance(value, basestring):
value = (2, value)
elif isinstance(value, dict):
value = (3, _dict_parser(value))
elif isinstance(value, list):
if len(value) == 0:
# [] less then None
value = [(-1, [])]
else:
value = _list_parser(value)
if is_reverse is not None:
# list will firstly compare with other doc by it's smallest
# or largest member
value = max(value) if is_reverse else min(value)
else:
# if the smallest or largest member is a list
# then compaer with it's sub-member in list index order
value = (4, tuple(value))
return value | Parsing data to a sortable form
By giving each data type an ID(int), and assemble with the value
into a sortable tuple. | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L566-L629 | null | class TinyMongoCursor(object):
"""Mongo iterable cursor"""
def __init__(self, cursordat, sort=None, skip=None, limit=None):
"""Initialize the mongo iterable cursor with data"""
self.cursordat = cursordat
self.cursorpos = -1
if len(self.cursordat) == 0:
self.currentrec = None
else:
self.currentrec = self.cursordat[self.cursorpos]
if sort:
self.sort(sort)
self.paginate(skip, limit)
def __getitem__(self, key):
"""Gets record by index or value by key"""
if isinstance(key, int):
return self.cursordat[key]
return self.currentrec[key]
def paginate(self, skip, limit):
"""Paginate list of records"""
if not self.count() or not limit:
return
skip = skip or 0
pages = int(ceil(self.count() / float(limit)))
limits = {}
last = 0
for i in range(pages):
current = limit * i
limits[last] = current
last = current
# example with count == 62
# {0: 20, 20: 40, 40: 60, 60: 62}
if limit and limit < self.count():
limit = limits.get(skip, self.count())
self.cursordat = self.cursordat[skip: limit]
def sort(self, key_or_list, direction=None):
"""
Sorts a cursor object based on the input
:param key_or_list: a list/tuple containing the sort specification,
i.e. ('user_number': -1), or a basestring
:param direction: sorting direction, 1 or -1, needed if key_or_list
is a basestring
:return:
"""
# checking input format
sort_specifier = list()
if isinstance(key_or_list, list):
if direction is not None:
raise ValueError('direction can not be set separately '
'if sorting by multiple fields.')
for pair in key_or_list:
if not (isinstance(pair, list) or isinstance(pair, tuple)):
raise TypeError('key pair should be a list or tuple.')
if not len(pair) == 2:
raise ValueError('Need to be (key, direction) pair')
if not isinstance(pair[0], basestring):
raise TypeError('first item in each key pair must '
'be a string')
if not isinstance(pair[1], int) or not abs(pair[1]) == 1:
raise TypeError('bad sort specification.')
sort_specifier = key_or_list
elif isinstance(key_or_list, basestring):
if direction is not None:
if not isinstance(direction, int) or not abs(direction) == 1:
raise TypeError('bad sort specification.')
else:
# default ASCENDING
direction = 1
sort_specifier = [(key_or_list, direction)]
else:
raise ValueError('Wrong input, pass a field name and a direction,'
' or pass a list of (key, direction) pairs.')
# sorting
_cursordat = self.cursordat
total = len(_cursordat)
pre_sect_stack = list()
for pair in sort_specifier:
is_reverse = bool(1-pair[1])
value_stack = list()
for index, data in enumerate(_cursordat):
# get field value
not_found = None
for key in pair[0].split('.'):
not_found = True
if isinstance(data, dict) and key in data:
data = copy.deepcopy(data[key])
not_found = False
elif isinstance(data, list):
if not is_reverse and len(data) == 1:
# MongoDB treat [{data}] as {data}
# when finding fields
if isinstance(data[0], dict) and key in data[0]:
data = copy.deepcopy(data[0][key])
not_found = False
elif is_reverse:
# MongoDB will keep finding field in reverse mode
for _d in data:
if isinstance(_d, dict) and key in _d:
data = copy.deepcopy(_d[key])
not_found = False
break
if not_found:
break
# parsing data for sorting
if not_found:
# treat no match as None
data = None
value = self._order(data, is_reverse)
# read previous section
pre_sect = pre_sect_stack[index] if pre_sect_stack else 0
# inverse if in reverse mode
# for keeping order as ASCENDING after sort
pre_sect = (total - pre_sect) if is_reverse else pre_sect
_ind = (total - index) if is_reverse else index
value_stack.append((pre_sect, value, _ind))
# sorting cursor data
value_stack.sort(reverse=is_reverse)
ordereddat = list()
sect_stack = list()
sect_id = -1
last_dat = None
for dat in value_stack:
# restore if in reverse mode
_ind = (total - dat[-1]) if is_reverse else dat[-1]
ordereddat.append(_cursordat[_ind])
# define section
# maintain the sorting result in next level sorting
if not dat[1] == last_dat:
sect_id += 1
sect_stack.append(sect_id)
last_dat = dat[1]
# save result for next level sorting
_cursordat = ordereddat
pre_sect_stack = sect_stack
# done
self.cursordat = _cursordat
return self
def hasNext(self):
"""
Returns True if the cursor has a next position, False if not
:return:
"""
cursor_pos = self.cursorpos + 1
try:
self.cursordat[cursor_pos]
return True
except IndexError:
return False
def next(self):
"""
Returns the next record
:return:
"""
self.cursorpos += 1
return self.cursordat[self.cursorpos]
def count(self, with_limit_and_skip=False):
"""
Returns the number of records in the current cursor
:return: number of records
"""
return len(self.cursordat)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCursor.sort | python | def sort(self, key_or_list, direction=None):
# checking input format
sort_specifier = list()
if isinstance(key_or_list, list):
if direction is not None:
raise ValueError('direction can not be set separately '
'if sorting by multiple fields.')
for pair in key_or_list:
if not (isinstance(pair, list) or isinstance(pair, tuple)):
raise TypeError('key pair should be a list or tuple.')
if not len(pair) == 2:
raise ValueError('Need to be (key, direction) pair')
if not isinstance(pair[0], basestring):
raise TypeError('first item in each key pair must '
'be a string')
if not isinstance(pair[1], int) or not abs(pair[1]) == 1:
raise TypeError('bad sort specification.')
sort_specifier = key_or_list
elif isinstance(key_or_list, basestring):
if direction is not None:
if not isinstance(direction, int) or not abs(direction) == 1:
raise TypeError('bad sort specification.')
else:
# default ASCENDING
direction = 1
sort_specifier = [(key_or_list, direction)]
else:
raise ValueError('Wrong input, pass a field name and a direction,'
' or pass a list of (key, direction) pairs.')
# sorting
_cursordat = self.cursordat
total = len(_cursordat)
pre_sect_stack = list()
for pair in sort_specifier:
is_reverse = bool(1-pair[1])
value_stack = list()
for index, data in enumerate(_cursordat):
# get field value
not_found = None
for key in pair[0].split('.'):
not_found = True
if isinstance(data, dict) and key in data:
data = copy.deepcopy(data[key])
not_found = False
elif isinstance(data, list):
if not is_reverse and len(data) == 1:
# MongoDB treat [{data}] as {data}
# when finding fields
if isinstance(data[0], dict) and key in data[0]:
data = copy.deepcopy(data[0][key])
not_found = False
elif is_reverse:
# MongoDB will keep finding field in reverse mode
for _d in data:
if isinstance(_d, dict) and key in _d:
data = copy.deepcopy(_d[key])
not_found = False
break
if not_found:
break
# parsing data for sorting
if not_found:
# treat no match as None
data = None
value = self._order(data, is_reverse)
# read previous section
pre_sect = pre_sect_stack[index] if pre_sect_stack else 0
# inverse if in reverse mode
# for keeping order as ASCENDING after sort
pre_sect = (total - pre_sect) if is_reverse else pre_sect
_ind = (total - index) if is_reverse else index
value_stack.append((pre_sect, value, _ind))
# sorting cursor data
value_stack.sort(reverse=is_reverse)
ordereddat = list()
sect_stack = list()
sect_id = -1
last_dat = None
for dat in value_stack:
# restore if in reverse mode
_ind = (total - dat[-1]) if is_reverse else dat[-1]
ordereddat.append(_cursordat[_ind])
# define section
# maintain the sorting result in next level sorting
if not dat[1] == last_dat:
sect_id += 1
sect_stack.append(sect_id)
last_dat = dat[1]
# save result for next level sorting
_cursordat = ordereddat
pre_sect_stack = sect_stack
# done
self.cursordat = _cursordat
return self | Sorts a cursor object based on the input
:param key_or_list: a list/tuple containing the sort specification,
i.e. ('user_number': -1), or a basestring
:param direction: sorting direction, 1 or -1, needed if key_or_list
is a basestring
:return: | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L631-L762 | [
"def _order(self, value, is_reverse=None):\n \"\"\"Parsing data to a sortable form\n By giving each data type an ID(int), and assemble with the value\n into a sortable tuple.\n \"\"\"\n\n def _dict_parser(dict_doc):\n \"\"\" dict ordered by:\n valueType_N -> key_N -> value_N\n \"\"\"\n result = list()\n for key in dict_doc:\n data = self._order(dict_doc[key])\n res = (data[0], key, data[1])\n result.append(res)\n return tuple(result)\n\n def _list_parser(list_doc):\n \"\"\"list will iter members to compare\n \"\"\"\n result = list()\n for member in list_doc:\n result.append(self._order(member))\n return result\n\n # (TODO) include more data type\n if value is None or not isinstance(value, (dict,\n list,\n basestring,\n bool,\n float,\n int)):\n # not support/sortable value type\n value = (0, None)\n\n elif isinstance(value, bool):\n value = (5, value)\n\n elif isinstance(value, (int, float)):\n value = (1, value)\n\n elif isinstance(value, basestring):\n value = (2, value)\n\n elif isinstance(value, dict):\n value = (3, _dict_parser(value))\n\n elif isinstance(value, list):\n if len(value) == 0:\n # [] less then None\n value = [(-1, [])]\n else:\n value = _list_parser(value)\n\n if is_reverse is not None:\n # list will firstly compare with other doc by it's smallest\n # or largest member\n value = max(value) if is_reverse else min(value)\n else:\n # if the smallest or largest member is a list\n # then compaer with it's sub-member in list index order\n value = (4, tuple(value))\n\n return value\n"
] | class TinyMongoCursor(object):
"""Mongo iterable cursor"""
def __init__(self, cursordat, sort=None, skip=None, limit=None):
"""Initialize the mongo iterable cursor with data"""
self.cursordat = cursordat
self.cursorpos = -1
if len(self.cursordat) == 0:
self.currentrec = None
else:
self.currentrec = self.cursordat[self.cursorpos]
if sort:
self.sort(sort)
self.paginate(skip, limit)
def __getitem__(self, key):
"""Gets record by index or value by key"""
if isinstance(key, int):
return self.cursordat[key]
return self.currentrec[key]
def paginate(self, skip, limit):
"""Paginate list of records"""
if not self.count() or not limit:
return
skip = skip or 0
pages = int(ceil(self.count() / float(limit)))
limits = {}
last = 0
for i in range(pages):
current = limit * i
limits[last] = current
last = current
# example with count == 62
# {0: 20, 20: 40, 40: 60, 60: 62}
if limit and limit < self.count():
limit = limits.get(skip, self.count())
self.cursordat = self.cursordat[skip: limit]
def _order(self, value, is_reverse=None):
"""Parsing data to a sortable form
By giving each data type an ID(int), and assemble with the value
into a sortable tuple.
"""
def _dict_parser(dict_doc):
""" dict ordered by:
valueType_N -> key_N -> value_N
"""
result = list()
for key in dict_doc:
data = self._order(dict_doc[key])
res = (data[0], key, data[1])
result.append(res)
return tuple(result)
def _list_parser(list_doc):
"""list will iter members to compare
"""
result = list()
for member in list_doc:
result.append(self._order(member))
return result
# (TODO) include more data type
if value is None or not isinstance(value, (dict,
list,
basestring,
bool,
float,
int)):
# not support/sortable value type
value = (0, None)
elif isinstance(value, bool):
value = (5, value)
elif isinstance(value, (int, float)):
value = (1, value)
elif isinstance(value, basestring):
value = (2, value)
elif isinstance(value, dict):
value = (3, _dict_parser(value))
elif isinstance(value, list):
if len(value) == 0:
# [] less then None
value = [(-1, [])]
else:
value = _list_parser(value)
if is_reverse is not None:
# list will firstly compare with other doc by it's smallest
# or largest member
value = max(value) if is_reverse else min(value)
else:
# if the smallest or largest member is a list
# then compaer with it's sub-member in list index order
value = (4, tuple(value))
return value
def hasNext(self):
"""
Returns True if the cursor has a next position, False if not
:return:
"""
cursor_pos = self.cursorpos + 1
try:
self.cursordat[cursor_pos]
return True
except IndexError:
return False
def next(self):
"""
Returns the next record
:return:
"""
self.cursorpos += 1
return self.cursordat[self.cursorpos]
def count(self, with_limit_and_skip=False):
"""
Returns the number of records in the current cursor
:return: number of records
"""
return len(self.cursordat)
|
schapman1974/tinymongo | tinymongo/tinymongo.py | TinyMongoCursor.hasNext | python | def hasNext(self):
cursor_pos = self.cursorpos + 1
try:
self.cursordat[cursor_pos]
return True
except IndexError:
return False | Returns True if the cursor has a next position, False if not
:return: | train | https://github.com/schapman1974/tinymongo/blob/993048059dc0aa789d879b69feb79a0f237a60b3/tinymongo/tinymongo.py#L764-L775 | null | class TinyMongoCursor(object):
"""Mongo iterable cursor"""
def __init__(self, cursordat, sort=None, skip=None, limit=None):
"""Initialize the mongo iterable cursor with data"""
self.cursordat = cursordat
self.cursorpos = -1
if len(self.cursordat) == 0:
self.currentrec = None
else:
self.currentrec = self.cursordat[self.cursorpos]
if sort:
self.sort(sort)
self.paginate(skip, limit)
def __getitem__(self, key):
"""Gets record by index or value by key"""
if isinstance(key, int):
return self.cursordat[key]
return self.currentrec[key]
def paginate(self, skip, limit):
"""Paginate list of records"""
if not self.count() or not limit:
return
skip = skip or 0
pages = int(ceil(self.count() / float(limit)))
limits = {}
last = 0
for i in range(pages):
current = limit * i
limits[last] = current
last = current
# example with count == 62
# {0: 20, 20: 40, 40: 60, 60: 62}
if limit and limit < self.count():
limit = limits.get(skip, self.count())
self.cursordat = self.cursordat[skip: limit]
def _order(self, value, is_reverse=None):
"""Parsing data to a sortable form
By giving each data type an ID(int), and assemble with the value
into a sortable tuple.
"""
def _dict_parser(dict_doc):
""" dict ordered by:
valueType_N -> key_N -> value_N
"""
result = list()
for key in dict_doc:
data = self._order(dict_doc[key])
res = (data[0], key, data[1])
result.append(res)
return tuple(result)
def _list_parser(list_doc):
"""list will iter members to compare
"""
result = list()
for member in list_doc:
result.append(self._order(member))
return result
# (TODO) include more data type
if value is None or not isinstance(value, (dict,
list,
basestring,
bool,
float,
int)):
# not support/sortable value type
value = (0, None)
elif isinstance(value, bool):
value = (5, value)
elif isinstance(value, (int, float)):
value = (1, value)
elif isinstance(value, basestring):
value = (2, value)
elif isinstance(value, dict):
value = (3, _dict_parser(value))
elif isinstance(value, list):
if len(value) == 0:
# [] less then None
value = [(-1, [])]
else:
value = _list_parser(value)
if is_reverse is not None:
# list will firstly compare with other doc by it's smallest
# or largest member
value = max(value) if is_reverse else min(value)
else:
# if the smallest or largest member is a list
# then compaer with it's sub-member in list index order
value = (4, tuple(value))
return value
def sort(self, key_or_list, direction=None):
"""
Sorts a cursor object based on the input
:param key_or_list: a list/tuple containing the sort specification,
i.e. ('user_number': -1), or a basestring
:param direction: sorting direction, 1 or -1, needed if key_or_list
is a basestring
:return:
"""
# checking input format
sort_specifier = list()
if isinstance(key_or_list, list):
if direction is not None:
raise ValueError('direction can not be set separately '
'if sorting by multiple fields.')
for pair in key_or_list:
if not (isinstance(pair, list) or isinstance(pair, tuple)):
raise TypeError('key pair should be a list or tuple.')
if not len(pair) == 2:
raise ValueError('Need to be (key, direction) pair')
if not isinstance(pair[0], basestring):
raise TypeError('first item in each key pair must '
'be a string')
if not isinstance(pair[1], int) or not abs(pair[1]) == 1:
raise TypeError('bad sort specification.')
sort_specifier = key_or_list
elif isinstance(key_or_list, basestring):
if direction is not None:
if not isinstance(direction, int) or not abs(direction) == 1:
raise TypeError('bad sort specification.')
else:
# default ASCENDING
direction = 1
sort_specifier = [(key_or_list, direction)]
else:
raise ValueError('Wrong input, pass a field name and a direction,'
' or pass a list of (key, direction) pairs.')
# sorting
_cursordat = self.cursordat
total = len(_cursordat)
pre_sect_stack = list()
for pair in sort_specifier:
is_reverse = bool(1-pair[1])
value_stack = list()
for index, data in enumerate(_cursordat):
# get field value
not_found = None
for key in pair[0].split('.'):
not_found = True
if isinstance(data, dict) and key in data:
data = copy.deepcopy(data[key])
not_found = False
elif isinstance(data, list):
if not is_reverse and len(data) == 1:
# MongoDB treat [{data}] as {data}
# when finding fields
if isinstance(data[0], dict) and key in data[0]:
data = copy.deepcopy(data[0][key])
not_found = False
elif is_reverse:
# MongoDB will keep finding field in reverse mode
for _d in data:
if isinstance(_d, dict) and key in _d:
data = copy.deepcopy(_d[key])
not_found = False
break
if not_found:
break
# parsing data for sorting
if not_found:
# treat no match as None
data = None
value = self._order(data, is_reverse)
# read previous section
pre_sect = pre_sect_stack[index] if pre_sect_stack else 0
# inverse if in reverse mode
# for keeping order as ASCENDING after sort
pre_sect = (total - pre_sect) if is_reverse else pre_sect
_ind = (total - index) if is_reverse else index
value_stack.append((pre_sect, value, _ind))
# sorting cursor data
value_stack.sort(reverse=is_reverse)
ordereddat = list()
sect_stack = list()
sect_id = -1
last_dat = None
for dat in value_stack:
# restore if in reverse mode
_ind = (total - dat[-1]) if is_reverse else dat[-1]
ordereddat.append(_cursordat[_ind])
# define section
# maintain the sorting result in next level sorting
if not dat[1] == last_dat:
sect_id += 1
sect_stack.append(sect_id)
last_dat = dat[1]
# save result for next level sorting
_cursordat = ordereddat
pre_sect_stack = sect_stack
# done
self.cursordat = _cursordat
return self
def next(self):
"""
Returns the next record
:return:
"""
self.cursorpos += 1
return self.cursordat[self.cursorpos]
def count(self, with_limit_and_skip=False):
"""
Returns the number of records in the current cursor
:return: number of records
"""
return len(self.cursordat)
|
ethereum/eth-abi | eth_abi/utils/string.py | abbr | python | def abbr(value: Any, limit: int=20) -> str:
rep = repr(value)
if len(rep) > limit:
if limit < 3:
raise ValueError('Abbreviation limit may not be less than 3')
rep = rep[:limit - 3] + '...'
return rep | Converts a value into its string representation and abbreviates that
representation based on the given length `limit` if necessary. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/utils/string.py#L6-L19 | null | from typing import (
Any,
)
|
ethereum/eth-abi | eth_abi/encoding.py | BaseEncoder.invalidate_value | python | def invalidate_value(
cls,
value: Any,
exc: Type[Exception]=EncodingTypeError,
msg: Optional[str]=None,
) -> None:
raise exc(
"Value `{rep}` of type {typ} cannot be encoded by {cls}{msg}".format(
rep=abbr(value),
typ=type(value),
cls=cls.__name__,
msg="" if msg is None else (": " + msg),
)
) | Throws a standard exception for when a value is not encodable by an
encoder. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/encoding.py#L78-L95 | [
"def abbr(value: Any, limit: int=20) -> str:\n \"\"\"\n Converts a value into its string representation and abbreviates that\n representation based on the given length `limit` if necessary.\n \"\"\"\n rep = repr(value)\n\n if len(rep) > limit:\n if limit < 3:\n raise ValueError('Abbreviation limit may not be less than 3')\n\n rep = rep[:limit - 3] + '...'\n\n return rep\n"
] | class BaseEncoder(BaseCoder, metaclass=abc.ABCMeta):
"""
Base class for all encoder classes. Subclass this if you want to define a
custom encoder class. Subclasses must also implement
:any:`BaseCoder.from_type_str`.
"""
@abc.abstractmethod
def encode(self, value: Any) -> bytes: # pragma: no cover
"""
Encodes the given value as a sequence of bytes. Should raise
:any:`exceptions.EncodingError` if ``value`` cannot be encoded.
"""
pass
@abc.abstractmethod
def validate_value(self, value: Any) -> None: # pragma: no cover
"""
Checks whether or not the given value can be encoded by this encoder.
If the given value cannot be encoded, must raise
:any:`exceptions.EncodingError`.
"""
pass
@classmethod
def __call__(self, value: Any) -> bytes:
return self.encode(value)
|
ethereum/eth-abi | eth_abi/base.py | parse_type_str | python | def parse_type_str(expected_base=None, with_arrlist=False):
def decorator(old_from_type_str):
@functools.wraps(old_from_type_str)
def new_from_type_str(cls, type_str, registry):
normalized_type_str = normalize(type_str)
abi_type = parse(normalized_type_str)
type_str_repr = repr(type_str)
if type_str != normalized_type_str:
type_str_repr = '{} (normalized to {})'.format(
type_str_repr,
repr(normalized_type_str),
)
if expected_base is not None:
if not isinstance(abi_type, BasicType):
raise ValueError(
'Cannot create {} for non-basic type {}'.format(
cls.__name__,
type_str_repr,
)
)
if abi_type.base != expected_base:
raise ValueError(
'Cannot create {} for type {}: expected type with '
"base '{}'".format(
cls.__name__,
type_str_repr,
expected_base,
)
)
if not with_arrlist and abi_type.arrlist is not None:
raise ValueError(
'Cannot create {} for type {}: expected type with '
'no array dimension list'.format(
cls.__name__,
type_str_repr,
)
)
if with_arrlist and abi_type.arrlist is None:
raise ValueError(
'Cannot create {} for type {}: expected type with '
'array dimension list'.format(
cls.__name__,
type_str_repr,
)
)
# Perform general validation of default solidity types
abi_type.validate()
return old_from_type_str(cls, abi_type, registry)
return classmethod(new_from_type_str)
return decorator | Used by BaseCoder subclasses as a convenience for implementing the
``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
then parsing a type string with an (optional) expected base is required in
that method. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/base.py#L15-L77 | null | import functools
from eth_typing.abi import (
TypeStr,
)
from .grammar import (
BasicType,
TupleType,
normalize,
parse,
)
def parse_tuple_type_str(old_from_type_str):
"""
Used by BaseCoder subclasses as a convenience for implementing the
``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
then parsing a tuple type string is required in that method.
"""
@functools.wraps(old_from_type_str)
def new_from_type_str(cls, type_str, registry):
normalized_type_str = normalize(type_str)
abi_type = parse(normalized_type_str)
type_str_repr = repr(type_str)
if type_str != normalized_type_str:
type_str_repr = '{} (normalized to {})'.format(
type_str_repr,
repr(normalized_type_str),
)
if not isinstance(abi_type, TupleType):
raise ValueError(
'Cannot create {} for non-tuple type {}'.format(
cls.__name__,
type_str_repr,
)
)
abi_type.validate()
return old_from_type_str(cls, abi_type, registry)
return classmethod(new_from_type_str)
class BaseCoder:
"""
Base class for all encoder and decoder classes.
"""
is_dynamic = False
def __init__(self, **kwargs):
cls = type(self)
# Ensure no unrecognized kwargs were given
for key, value in kwargs.items():
if not hasattr(cls, key):
raise AttributeError(
'Property {key} not found on {cls_name} class. '
'`{cls_name}.__init__` only accepts keyword arguments which are '
'present on the {cls_name} class.'.format(
key=key,
cls_name=cls.__name__,
)
)
setattr(self, key, value)
# Validate given combination of kwargs
self.validate()
def validate(self):
pass
@classmethod
def from_type_str(cls, type_str: TypeStr, registry) -> 'BaseCoder': # pragma: no cover
"""
Used by :any:`ABIRegistry` to get an appropriate encoder or decoder
instance for the given type string and type registry.
"""
raise NotImplementedError('Must implement `from_type_str`')
|
ethereum/eth-abi | eth_abi/base.py | parse_tuple_type_str | python | def parse_tuple_type_str(old_from_type_str):
@functools.wraps(old_from_type_str)
def new_from_type_str(cls, type_str, registry):
normalized_type_str = normalize(type_str)
abi_type = parse(normalized_type_str)
type_str_repr = repr(type_str)
if type_str != normalized_type_str:
type_str_repr = '{} (normalized to {})'.format(
type_str_repr,
repr(normalized_type_str),
)
if not isinstance(abi_type, TupleType):
raise ValueError(
'Cannot create {} for non-tuple type {}'.format(
cls.__name__,
type_str_repr,
)
)
abi_type.validate()
return old_from_type_str(cls, abi_type, registry)
return classmethod(new_from_type_str) | Used by BaseCoder subclasses as a convenience for implementing the
``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
then parsing a tuple type string is required in that method. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/base.py#L80-L110 | null | import functools
from eth_typing.abi import (
TypeStr,
)
from .grammar import (
BasicType,
TupleType,
normalize,
parse,
)
def parse_type_str(expected_base=None, with_arrlist=False):
"""
Used by BaseCoder subclasses as a convenience for implementing the
``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing
then parsing a type string with an (optional) expected base is required in
that method.
"""
def decorator(old_from_type_str):
@functools.wraps(old_from_type_str)
def new_from_type_str(cls, type_str, registry):
normalized_type_str = normalize(type_str)
abi_type = parse(normalized_type_str)
type_str_repr = repr(type_str)
if type_str != normalized_type_str:
type_str_repr = '{} (normalized to {})'.format(
type_str_repr,
repr(normalized_type_str),
)
if expected_base is not None:
if not isinstance(abi_type, BasicType):
raise ValueError(
'Cannot create {} for non-basic type {}'.format(
cls.__name__,
type_str_repr,
)
)
if abi_type.base != expected_base:
raise ValueError(
'Cannot create {} for type {}: expected type with '
"base '{}'".format(
cls.__name__,
type_str_repr,
expected_base,
)
)
if not with_arrlist and abi_type.arrlist is not None:
raise ValueError(
'Cannot create {} for type {}: expected type with '
'no array dimension list'.format(
cls.__name__,
type_str_repr,
)
)
if with_arrlist and abi_type.arrlist is None:
raise ValueError(
'Cannot create {} for type {}: expected type with '
'array dimension list'.format(
cls.__name__,
type_str_repr,
)
)
# Perform general validation of default solidity types
abi_type.validate()
return old_from_type_str(cls, abi_type, registry)
return classmethod(new_from_type_str)
return decorator
class BaseCoder:
"""
Base class for all encoder and decoder classes.
"""
is_dynamic = False
def __init__(self, **kwargs):
cls = type(self)
# Ensure no unrecognized kwargs were given
for key, value in kwargs.items():
if not hasattr(cls, key):
raise AttributeError(
'Property {key} not found on {cls_name} class. '
'`{cls_name}.__init__` only accepts keyword arguments which are '
'present on the {cls_name} class.'.format(
key=key,
cls_name=cls.__name__,
)
)
setattr(self, key, value)
# Validate given combination of kwargs
self.validate()
def validate(self):
pass
@classmethod
def from_type_str(cls, type_str: TypeStr, registry) -> 'BaseCoder': # pragma: no cover
"""
Used by :any:`ABIRegistry` to get an appropriate encoder or decoder
instance for the given type string and type registry.
"""
raise NotImplementedError('Must implement `from_type_str`')
|
ethereum/eth-abi | eth_abi/decoding.py | ContextFramesBytesIO.seek_in_frame | python | def seek_in_frame(self, pos, *args, **kwargs):
super().seek(self._total_offset + pos, *args, **kwargs) | Seeks relative to the total offset of the current contextual frames. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/decoding.py#L80-L84 | null | class ContextFramesBytesIO(io.BytesIO):
"""
A byte stream which can track a series of contextual frames in a stack. This
data structure is necessary to perform nested decodings using the
:py:class:``HeadTailDecoder`` since offsets present in head sections are
relative only to a particular encoded object. These offsets can only be
used to locate a position in a decoding stream if they are paired with a
contextual offset that establishes the position of the object in which they
are found.
For example, consider the encoding of a value for the following type::
type: (int,(int,int[]))
value: (1,(2,[3,3]))
There are two tuples in this type: one inner and one outer. The inner tuple
type contains a dynamic type ``int[]`` and, therefore, is itself dynamic.
This means that its value encoding will be placed in the tail section of the
outer tuple's encoding. Furthermore, the inner tuple's encoding will,
itself, contain a tail section with the encoding for ``[3,3]``. All
together, the encoded value of ``(1,(2,[3,3]))`` would look like this (the
data values are normally 32 bytes wide but have been truncated to remove the
redundant zeros at the beginnings of their encodings)::
offset data
--------------------------
^ 0 0x01
| 32 0x40 <-- Offset of object A in global frame (64)
-----|--------------------
Global frame ^ 64 0x02 <-- Beginning of object A (64 w/offset 0 = 64)
| | 96 0x40 <-- Offset of object B in frame of object A (64)
-----|-Object A's frame---
| | 128 0x02 <-- Beginning of object B (64 w/offset 64 = 128)
| | 160 0x03
v v 192 0x03
--------------------------
Note that the offset of object B is encoded as 64 which only specifies the
beginning of its encoded value relative to the beginning of object A's
encoding. Globally, object B is located at offset 128. In order to make
sense out of object B's offset, it needs to be positioned in the context of
its enclosing object's frame (object A).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._frames = []
self._total_offset = 0
def push_frame(self, offset):
"""
Pushes a new contextual frame onto the stack with the given offset and a
return position at the current cursor position then seeks to the new
total offset.
"""
self._frames.append((offset, self.tell()))
self._total_offset += offset
self.seek_in_frame(0)
def pop_frame(self):
"""
Pops the current contextual frame off of the stack and returns the
cursor to the frame's return position.
"""
try:
offset, return_pos = self._frames.pop()
except IndexError:
raise IndexError('no frames to pop')
self._total_offset -= offset
self.seek(return_pos)
|
ethereum/eth-abi | eth_abi/decoding.py | ContextFramesBytesIO.push_frame | python | def push_frame(self, offset):
self._frames.append((offset, self.tell()))
self._total_offset += offset
self.seek_in_frame(0) | Pushes a new contextual frame onto the stack with the given offset and a
return position at the current cursor position then seeks to the new
total offset. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/decoding.py#L86-L95 | [
"def seek_in_frame(self, pos, *args, **kwargs):\n \"\"\"\n Seeks relative to the total offset of the current contextual frames.\n \"\"\"\n super().seek(self._total_offset + pos, *args, **kwargs)\n"
] | class ContextFramesBytesIO(io.BytesIO):
"""
A byte stream which can track a series of contextual frames in a stack. This
data structure is necessary to perform nested decodings using the
:py:class:``HeadTailDecoder`` since offsets present in head sections are
relative only to a particular encoded object. These offsets can only be
used to locate a position in a decoding stream if they are paired with a
contextual offset that establishes the position of the object in which they
are found.
For example, consider the encoding of a value for the following type::
type: (int,(int,int[]))
value: (1,(2,[3,3]))
There are two tuples in this type: one inner and one outer. The inner tuple
type contains a dynamic type ``int[]`` and, therefore, is itself dynamic.
This means that its value encoding will be placed in the tail section of the
outer tuple's encoding. Furthermore, the inner tuple's encoding will,
itself, contain a tail section with the encoding for ``[3,3]``. All
together, the encoded value of ``(1,(2,[3,3]))`` would look like this (the
data values are normally 32 bytes wide but have been truncated to remove the
redundant zeros at the beginnings of their encodings)::
offset data
--------------------------
^ 0 0x01
| 32 0x40 <-- Offset of object A in global frame (64)
-----|--------------------
Global frame ^ 64 0x02 <-- Beginning of object A (64 w/offset 0 = 64)
| | 96 0x40 <-- Offset of object B in frame of object A (64)
-----|-Object A's frame---
| | 128 0x02 <-- Beginning of object B (64 w/offset 64 = 128)
| | 160 0x03
v v 192 0x03
--------------------------
Note that the offset of object B is encoded as 64 which only specifies the
beginning of its encoded value relative to the beginning of object A's
encoding. Globally, object B is located at offset 128. In order to make
sense out of object B's offset, it needs to be positioned in the context of
its enclosing object's frame (object A).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._frames = []
self._total_offset = 0
def seek_in_frame(self, pos, *args, **kwargs):
"""
Seeks relative to the total offset of the current contextual frames.
"""
super().seek(self._total_offset + pos, *args, **kwargs)
def pop_frame(self):
"""
Pops the current contextual frame off of the stack and returns the
cursor to the frame's return position.
"""
try:
offset, return_pos = self._frames.pop()
except IndexError:
raise IndexError('no frames to pop')
self._total_offset -= offset
self.seek(return_pos)
|
ethereum/eth-abi | eth_abi/decoding.py | ContextFramesBytesIO.pop_frame | python | def pop_frame(self):
try:
offset, return_pos = self._frames.pop()
except IndexError:
raise IndexError('no frames to pop')
self._total_offset -= offset
self.seek(return_pos) | Pops the current contextual frame off of the stack and returns the
cursor to the frame's return position. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/decoding.py#L97-L108 | null | class ContextFramesBytesIO(io.BytesIO):
"""
A byte stream which can track a series of contextual frames in a stack. This
data structure is necessary to perform nested decodings using the
:py:class:``HeadTailDecoder`` since offsets present in head sections are
relative only to a particular encoded object. These offsets can only be
used to locate a position in a decoding stream if they are paired with a
contextual offset that establishes the position of the object in which they
are found.
For example, consider the encoding of a value for the following type::
type: (int,(int,int[]))
value: (1,(2,[3,3]))
There are two tuples in this type: one inner and one outer. The inner tuple
type contains a dynamic type ``int[]`` and, therefore, is itself dynamic.
This means that its value encoding will be placed in the tail section of the
outer tuple's encoding. Furthermore, the inner tuple's encoding will,
itself, contain a tail section with the encoding for ``[3,3]``. All
together, the encoded value of ``(1,(2,[3,3]))`` would look like this (the
data values are normally 32 bytes wide but have been truncated to remove the
redundant zeros at the beginnings of their encodings)::
offset data
--------------------------
^ 0 0x01
| 32 0x40 <-- Offset of object A in global frame (64)
-----|--------------------
Global frame ^ 64 0x02 <-- Beginning of object A (64 w/offset 0 = 64)
| | 96 0x40 <-- Offset of object B in frame of object A (64)
-----|-Object A's frame---
| | 128 0x02 <-- Beginning of object B (64 w/offset 64 = 128)
| | 160 0x03
v v 192 0x03
--------------------------
Note that the offset of object B is encoded as 64 which only specifies the
beginning of its encoded value relative to the beginning of object A's
encoding. Globally, object B is located at offset 128. In order to make
sense out of object B's offset, it needs to be positioned in the context of
its enclosing object's frame (object A).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._frames = []
self._total_offset = 0
def seek_in_frame(self, pos, *args, **kwargs):
"""
Seeks relative to the total offset of the current contextual frames.
"""
super().seek(self._total_offset + pos, *args, **kwargs)
def push_frame(self, offset):
"""
Pushes a new contextual frame onto the stack with the given offset and a
return position at the current cursor position then seeks to the new
total offset.
"""
self._frames.append((offset, self.tell()))
self._total_offset += offset
self.seek_in_frame(0)
|
ethereum/eth-abi | eth_abi/registry.py | has_arrlist | python | def has_arrlist(type_str):
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
return abi_type.arrlist is not None | A predicate that matches a type string with an array dimension list. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L258-L267 | null | import abc
import copy
import functools
from typing import (
Any,
Callable,
Type,
Union,
)
from eth_typing import (
abi,
)
from . import (
decoding,
encoding,
exceptions,
grammar,
)
from .base import (
BaseCoder,
)
from .exceptions import (
MultipleEntriesFound,
NoEntriesFound,
)
Lookup = Union[abi.TypeStr, Callable[[abi.TypeStr], bool]]
EncoderCallable = Callable[[Any], bytes]
DecoderCallable = Callable[[decoding.ContextFramesBytesIO], Any]
Encoder = Union[EncoderCallable, Type[encoding.BaseEncoder]]
Decoder = Union[DecoderCallable, Type[decoding.BaseDecoder]]
class Copyable(abc.ABC):
@abc.abstractmethod
def copy(self):
pass
def __copy__(self):
return self.copy()
def __deepcopy__(self, *args):
return self.copy()
class PredicateMapping(Copyable):
"""
Acts as a mapping from predicate functions to values. Values are retrieved
when their corresponding predicate matches a given input. Predicates can
also be labeled to facilitate removal from the mapping.
"""
def __init__(self, name):
self._name = name
self._values = {}
self._labeled_predicates = {}
def add(self, predicate, value, label=None):
if predicate in self._values:
raise ValueError('Matcher {} already exists in {}'.format(
repr(predicate),
self._name,
))
if label is not None:
if label in self._labeled_predicates:
raise ValueError(
"Matcher {} with label '{}' already exists in {}".format(
repr(predicate),
label,
self._name,
),
)
self._labeled_predicates[label] = predicate
self._values[predicate] = value
def find(self, type_str):
results = tuple(
(predicate, value) for predicate, value in self._values.items()
if predicate(type_str)
)
if len(results) == 0:
raise NoEntriesFound("No matching entries for '{}' in {}".format(
type_str,
self._name,
))
predicates, values = tuple(zip(*results))
if len(results) > 1:
predicate_reprs = ', '.join(map(repr, predicates))
raise MultipleEntriesFound(
f"Multiple matching entries for '{type_str}' in {self._name}: "
f"{predicate_reprs}. This occurs when two registrations match the "
"same type string. You may need to delete one of the "
"registrations or modify its matching behavior to ensure it "
"doesn't collide with other registrations. See the \"Registry\" "
"documentation for more information."
)
return values[0]
def remove_by_equality(self, predicate):
# Delete the predicate mapping to the previously stored value
try:
del self._values[predicate]
except KeyError:
raise KeyError('Matcher {} not found in {}'.format(
repr(predicate),
self._name,
))
# Delete any label which refers to this predicate
try:
label = self._label_for_predicate(predicate)
except ValueError:
pass
else:
del self._labeled_predicates[label]
def _label_for_predicate(self, predicate):
# Both keys and values in `_labeled_predicates` are unique since the
# `add` method enforces this
for key, value in self._labeled_predicates.items():
if value is predicate:
return key
raise ValueError('Matcher {} not referred to by any label in {}'.format(
repr(predicate),
self._name,
))
def remove_by_label(self, label):
try:
predicate = self._labeled_predicates[label]
except KeyError:
raise KeyError("Label '{}' not found in {}".format(label, self._name))
del self._labeled_predicates[label]
del self._values[predicate]
def remove(self, predicate_or_label):
if callable(predicate_or_label):
self.remove_by_equality(predicate_or_label)
elif isinstance(predicate_or_label, str):
self.remove_by_label(predicate_or_label)
else:
raise TypeError('Key to be removed must be callable or string: got {}'.format(
type(predicate_or_label),
))
def copy(self):
cpy = type(self)(self._name)
cpy._values = copy.copy(self._values)
cpy._labeled_predicates = copy.copy(self._labeled_predicates)
return cpy
class Predicate:
"""
Represents a predicate function to be used for type matching in
``ABIRegistry``.
"""
__slots__ = tuple()
def __call__(self, *args, **kwargs): # pragma: no cover
raise NotImplementedError('Must implement `__call__`')
def __str__(self): # pragma: no cover
raise NotImplementedError('Must implement `__str__`')
def __repr__(self):
return '<{} {}>'.format(type(self).__name__, self)
def __iter__(self):
for attr in self.__slots__:
yield getattr(self, attr)
def __hash__(self):
return hash(tuple(self))
def __eq__(self, other):
return (
type(self) is type(other) and
tuple(self) == tuple(other)
)
class Equals(Predicate):
"""
A predicate that matches any input equal to `value`.
"""
__slots__ = ('value',)
def __init__(self, value):
self.value = value
def __call__(self, other):
return self.value == other
def __str__(self):
return '(== {})'.format(repr(self.value))
class BaseEquals(Predicate):
"""
A predicate that matches a basic type string with a base component equal to
`value` and no array component. If `with_sub` is `True`, the type string
must have a sub component to match. If `with_sub` is `False`, the type
string must *not* have a sub component to match. If `with_sub` is None,
the type string's sub component is ignored.
"""
__slots__ = ('base', 'with_sub')
def __init__(self, base, *, with_sub=None):
self.base = base
self.with_sub = with_sub
def __call__(self, type_str):
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
if isinstance(abi_type, grammar.BasicType):
if abi_type.arrlist is not None:
return False
if self.with_sub is not None:
if self.with_sub and abi_type.sub is None:
return False
if not self.with_sub and abi_type.sub is not None:
return False
return abi_type.base == self.base
# We'd reach this point if `type_str` did not contain a basic type
# e.g. if it contained a tuple type
return False
def __str__(self):
return '(base == {}{})'.format(
repr(self.base),
'' if self.with_sub is None else (
' and sub is not None' if self.with_sub else ' and sub is None'
),
)
def is_base_tuple(type_str):
"""
A predicate that matches a tuple type with no array dimension list.
"""
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
return isinstance(abi_type, grammar.TupleType) and abi_type.arrlist is None
def _clear_encoder_cache(old_method):
@functools.wraps(old_method)
def new_method(self, *args, **kwargs):
self.get_encoder.cache_clear()
return old_method(self, *args, **kwargs)
return new_method
def _clear_decoder_cache(old_method):
@functools.wraps(old_method)
def new_method(self, *args, **kwargs):
self.get_decoder.cache_clear()
return old_method(self, *args, **kwargs)
return new_method
class ABIRegistry(Copyable):
def __init__(self):
self._encoders = PredicateMapping('encoder registry')
self._decoders = PredicateMapping('decoder registry')
@staticmethod
def _register_coder(mapping, lookup, coder, label=None):
if callable(lookup):
mapping.add(lookup, coder, label)
return
if isinstance(lookup, str):
mapping.add(Equals(lookup), coder, lookup)
return
raise TypeError(
'Lookup must be a callable or a value of type `str`: got {}'.format(
repr(lookup),
)
)
@staticmethod
def _unregister_coder(mapping, lookup_or_label):
if callable(lookup_or_label):
mapping.remove_by_equality(lookup_or_label)
return
if isinstance(lookup_or_label, str):
mapping.remove_by_label(lookup_or_label)
return
raise TypeError(
'Lookup/label must be a callable or a value of type `str`: got {}'.format(
repr(lookup_or_label),
)
)
def _get_coder(self, mapping, type_str):
try:
coder = mapping.find(type_str)
except ValueError as e:
if 'No matching' in e.args[0]:
# If no matches found, attempt to parse in case lack of matches
# was due to unparsability
grammar.parse(type_str)
raise
if isinstance(coder, type) and issubclass(coder, BaseCoder):
return coder.from_type_str(type_str, self)
return coder
@_clear_encoder_cache
def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._encoders, lookup, encoder, label=label)
@_clear_encoder_cache
def unregister_encoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters an encoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the encoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
encoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._encoders, lookup_or_label)
@_clear_decoder_cache
def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._decoders, lookup, decoder, label=label)
@_clear_decoder_cache
def unregister_decoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters a decoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the decoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
decoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._decoders, lookup_or_label)
def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants.
"""
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label)
def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label)
@functools.lru_cache(maxsize=None)
def get_encoder(self, type_str):
return self._get_coder(self._encoders, type_str)
def has_encoder(self, type_str: abi.TypeStr) -> bool:
"""
Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found.
"""
try:
self.get_encoder(type_str)
except NoEntriesFound:
return False
else:
return True
@functools.lru_cache(maxsize=None)
def get_decoder(self, type_str):
return self._get_coder(self._decoders, type_str)
def copy(self):
"""
Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry.
"""
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy
registry = ABIRegistry()
registry.register(
BaseEquals('uint'),
encoding.UnsignedIntegerEncoder, decoding.UnsignedIntegerDecoder,
label='uint',
)
registry.register(
BaseEquals('int'),
encoding.SignedIntegerEncoder, decoding.SignedIntegerDecoder,
label='int',
)
registry.register(
BaseEquals('address'),
encoding.AddressEncoder, decoding.AddressDecoder,
label='address',
)
registry.register(
BaseEquals('bool'),
encoding.BooleanEncoder, decoding.BooleanDecoder,
label='bool',
)
registry.register(
BaseEquals('ufixed'),
encoding.UnsignedFixedEncoder, decoding.UnsignedFixedDecoder,
label='ufixed',
)
registry.register(
BaseEquals('fixed'),
encoding.SignedFixedEncoder, decoding.SignedFixedDecoder,
label='fixed',
)
registry.register(
BaseEquals('bytes', with_sub=True),
encoding.BytesEncoder, decoding.BytesDecoder,
label='bytes<M>',
)
registry.register(
BaseEquals('bytes', with_sub=False),
encoding.ByteStringEncoder, decoding.ByteStringDecoder,
label='bytes',
)
registry.register(
BaseEquals('function'),
encoding.BytesEncoder, decoding.BytesDecoder,
label='function',
)
registry.register(
BaseEquals('string'),
encoding.TextStringEncoder, decoding.StringDecoder,
label='string',
)
registry.register(
has_arrlist,
encoding.BaseArrayEncoder, decoding.BaseArrayDecoder,
label='has_arrlist',
)
registry.register(
is_base_tuple,
encoding.TupleEncoder, decoding.TupleDecoder,
label='is_base_tuple',
)
registry_packed = ABIRegistry()
registry_packed.register_encoder(
BaseEquals('uint'),
encoding.PackedUnsignedIntegerEncoder,
label='uint',
)
registry_packed.register_encoder(
BaseEquals('int'),
encoding.PackedSignedIntegerEncoder,
label='int',
)
registry_packed.register_encoder(
BaseEquals('address'),
encoding.PackedAddressEncoder,
label='address',
)
registry_packed.register_encoder(
BaseEquals('bool'),
encoding.PackedBooleanEncoder,
label='bool',
)
registry_packed.register_encoder(
BaseEquals('ufixed'),
encoding.PackedUnsignedFixedEncoder,
label='ufixed',
)
registry_packed.register_encoder(
BaseEquals('fixed'),
encoding.PackedSignedFixedEncoder,
label='fixed',
)
registry_packed.register_encoder(
BaseEquals('bytes', with_sub=True),
encoding.PackedBytesEncoder,
label='bytes<M>',
)
registry_packed.register_encoder(
BaseEquals('bytes', with_sub=False),
encoding.PackedByteStringEncoder,
label='bytes',
)
registry_packed.register_encoder(
BaseEquals('function'),
encoding.PackedBytesEncoder,
label='function',
)
registry_packed.register_encoder(
BaseEquals('string'),
encoding.PackedTextStringEncoder,
label='string',
)
registry_packed.register_encoder(
has_arrlist,
encoding.PackedArrayEncoder,
label='has_arrlist',
)
registry_packed.register_encoder(
is_base_tuple,
encoding.TupleEncoder,
label='is_base_tuple',
)
|
ethereum/eth-abi | eth_abi/registry.py | is_base_tuple | python | def is_base_tuple(type_str):
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
return isinstance(abi_type, grammar.TupleType) and abi_type.arrlist is None | A predicate that matches a tuple type with no array dimension list. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L270-L279 | null | import abc
import copy
import functools
from typing import (
Any,
Callable,
Type,
Union,
)
from eth_typing import (
abi,
)
from . import (
decoding,
encoding,
exceptions,
grammar,
)
from .base import (
BaseCoder,
)
from .exceptions import (
MultipleEntriesFound,
NoEntriesFound,
)
Lookup = Union[abi.TypeStr, Callable[[abi.TypeStr], bool]]
EncoderCallable = Callable[[Any], bytes]
DecoderCallable = Callable[[decoding.ContextFramesBytesIO], Any]
Encoder = Union[EncoderCallable, Type[encoding.BaseEncoder]]
Decoder = Union[DecoderCallable, Type[decoding.BaseDecoder]]
class Copyable(abc.ABC):
@abc.abstractmethod
def copy(self):
pass
def __copy__(self):
return self.copy()
def __deepcopy__(self, *args):
return self.copy()
class PredicateMapping(Copyable):
"""
Acts as a mapping from predicate functions to values. Values are retrieved
when their corresponding predicate matches a given input. Predicates can
also be labeled to facilitate removal from the mapping.
"""
def __init__(self, name):
self._name = name
self._values = {}
self._labeled_predicates = {}
def add(self, predicate, value, label=None):
if predicate in self._values:
raise ValueError('Matcher {} already exists in {}'.format(
repr(predicate),
self._name,
))
if label is not None:
if label in self._labeled_predicates:
raise ValueError(
"Matcher {} with label '{}' already exists in {}".format(
repr(predicate),
label,
self._name,
),
)
self._labeled_predicates[label] = predicate
self._values[predicate] = value
def find(self, type_str):
results = tuple(
(predicate, value) for predicate, value in self._values.items()
if predicate(type_str)
)
if len(results) == 0:
raise NoEntriesFound("No matching entries for '{}' in {}".format(
type_str,
self._name,
))
predicates, values = tuple(zip(*results))
if len(results) > 1:
predicate_reprs = ', '.join(map(repr, predicates))
raise MultipleEntriesFound(
f"Multiple matching entries for '{type_str}' in {self._name}: "
f"{predicate_reprs}. This occurs when two registrations match the "
"same type string. You may need to delete one of the "
"registrations or modify its matching behavior to ensure it "
"doesn't collide with other registrations. See the \"Registry\" "
"documentation for more information."
)
return values[0]
def remove_by_equality(self, predicate):
# Delete the predicate mapping to the previously stored value
try:
del self._values[predicate]
except KeyError:
raise KeyError('Matcher {} not found in {}'.format(
repr(predicate),
self._name,
))
# Delete any label which refers to this predicate
try:
label = self._label_for_predicate(predicate)
except ValueError:
pass
else:
del self._labeled_predicates[label]
def _label_for_predicate(self, predicate):
# Both keys and values in `_labeled_predicates` are unique since the
# `add` method enforces this
for key, value in self._labeled_predicates.items():
if value is predicate:
return key
raise ValueError('Matcher {} not referred to by any label in {}'.format(
repr(predicate),
self._name,
))
def remove_by_label(self, label):
try:
predicate = self._labeled_predicates[label]
except KeyError:
raise KeyError("Label '{}' not found in {}".format(label, self._name))
del self._labeled_predicates[label]
del self._values[predicate]
def remove(self, predicate_or_label):
if callable(predicate_or_label):
self.remove_by_equality(predicate_or_label)
elif isinstance(predicate_or_label, str):
self.remove_by_label(predicate_or_label)
else:
raise TypeError('Key to be removed must be callable or string: got {}'.format(
type(predicate_or_label),
))
def copy(self):
cpy = type(self)(self._name)
cpy._values = copy.copy(self._values)
cpy._labeled_predicates = copy.copy(self._labeled_predicates)
return cpy
class Predicate:
"""
Represents a predicate function to be used for type matching in
``ABIRegistry``.
"""
__slots__ = tuple()
def __call__(self, *args, **kwargs): # pragma: no cover
raise NotImplementedError('Must implement `__call__`')
def __str__(self): # pragma: no cover
raise NotImplementedError('Must implement `__str__`')
def __repr__(self):
return '<{} {}>'.format(type(self).__name__, self)
def __iter__(self):
for attr in self.__slots__:
yield getattr(self, attr)
def __hash__(self):
return hash(tuple(self))
def __eq__(self, other):
return (
type(self) is type(other) and
tuple(self) == tuple(other)
)
class Equals(Predicate):
"""
A predicate that matches any input equal to `value`.
"""
__slots__ = ('value',)
def __init__(self, value):
self.value = value
def __call__(self, other):
return self.value == other
def __str__(self):
return '(== {})'.format(repr(self.value))
class BaseEquals(Predicate):
"""
A predicate that matches a basic type string with a base component equal to
`value` and no array component. If `with_sub` is `True`, the type string
must have a sub component to match. If `with_sub` is `False`, the type
string must *not* have a sub component to match. If `with_sub` is None,
the type string's sub component is ignored.
"""
__slots__ = ('base', 'with_sub')
def __init__(self, base, *, with_sub=None):
self.base = base
self.with_sub = with_sub
def __call__(self, type_str):
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
if isinstance(abi_type, grammar.BasicType):
if abi_type.arrlist is not None:
return False
if self.with_sub is not None:
if self.with_sub and abi_type.sub is None:
return False
if not self.with_sub and abi_type.sub is not None:
return False
return abi_type.base == self.base
# We'd reach this point if `type_str` did not contain a basic type
# e.g. if it contained a tuple type
return False
def __str__(self):
return '(base == {}{})'.format(
repr(self.base),
'' if self.with_sub is None else (
' and sub is not None' if self.with_sub else ' and sub is None'
),
)
def has_arrlist(type_str):
"""
A predicate that matches a type string with an array dimension list.
"""
try:
abi_type = grammar.parse(type_str)
except exceptions.ParseError:
return False
return abi_type.arrlist is not None
def _clear_encoder_cache(old_method):
@functools.wraps(old_method)
def new_method(self, *args, **kwargs):
self.get_encoder.cache_clear()
return old_method(self, *args, **kwargs)
return new_method
def _clear_decoder_cache(old_method):
@functools.wraps(old_method)
def new_method(self, *args, **kwargs):
self.get_decoder.cache_clear()
return old_method(self, *args, **kwargs)
return new_method
class ABIRegistry(Copyable):
def __init__(self):
self._encoders = PredicateMapping('encoder registry')
self._decoders = PredicateMapping('decoder registry')
@staticmethod
def _register_coder(mapping, lookup, coder, label=None):
if callable(lookup):
mapping.add(lookup, coder, label)
return
if isinstance(lookup, str):
mapping.add(Equals(lookup), coder, lookup)
return
raise TypeError(
'Lookup must be a callable or a value of type `str`: got {}'.format(
repr(lookup),
)
)
@staticmethod
def _unregister_coder(mapping, lookup_or_label):
if callable(lookup_or_label):
mapping.remove_by_equality(lookup_or_label)
return
if isinstance(lookup_or_label, str):
mapping.remove_by_label(lookup_or_label)
return
raise TypeError(
'Lookup/label must be a callable or a value of type `str`: got {}'.format(
repr(lookup_or_label),
)
)
def _get_coder(self, mapping, type_str):
try:
coder = mapping.find(type_str)
except ValueError as e:
if 'No matching' in e.args[0]:
# If no matches found, attempt to parse in case lack of matches
# was due to unparsability
grammar.parse(type_str)
raise
if isinstance(coder, type) and issubclass(coder, BaseCoder):
return coder.from_type_str(type_str, self)
return coder
@_clear_encoder_cache
def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._encoders, lookup, encoder, label=label)
@_clear_encoder_cache
def unregister_encoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters an encoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the encoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
encoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._encoders, lookup_or_label)
@_clear_decoder_cache
def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._decoders, lookup, decoder, label=label)
@_clear_decoder_cache
def unregister_decoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters a decoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the decoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
decoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._decoders, lookup_or_label)
def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants.
"""
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label)
def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label)
@functools.lru_cache(maxsize=None)
def get_encoder(self, type_str):
return self._get_coder(self._encoders, type_str)
def has_encoder(self, type_str: abi.TypeStr) -> bool:
"""
Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found.
"""
try:
self.get_encoder(type_str)
except NoEntriesFound:
return False
else:
return True
@functools.lru_cache(maxsize=None)
def get_decoder(self, type_str):
return self._get_coder(self._decoders, type_str)
def copy(self):
"""
Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry.
"""
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy
registry = ABIRegistry()
registry.register(
BaseEquals('uint'),
encoding.UnsignedIntegerEncoder, decoding.UnsignedIntegerDecoder,
label='uint',
)
registry.register(
BaseEquals('int'),
encoding.SignedIntegerEncoder, decoding.SignedIntegerDecoder,
label='int',
)
registry.register(
BaseEquals('address'),
encoding.AddressEncoder, decoding.AddressDecoder,
label='address',
)
registry.register(
BaseEquals('bool'),
encoding.BooleanEncoder, decoding.BooleanDecoder,
label='bool',
)
registry.register(
BaseEquals('ufixed'),
encoding.UnsignedFixedEncoder, decoding.UnsignedFixedDecoder,
label='ufixed',
)
registry.register(
BaseEquals('fixed'),
encoding.SignedFixedEncoder, decoding.SignedFixedDecoder,
label='fixed',
)
registry.register(
BaseEquals('bytes', with_sub=True),
encoding.BytesEncoder, decoding.BytesDecoder,
label='bytes<M>',
)
registry.register(
BaseEquals('bytes', with_sub=False),
encoding.ByteStringEncoder, decoding.ByteStringDecoder,
label='bytes',
)
registry.register(
BaseEquals('function'),
encoding.BytesEncoder, decoding.BytesDecoder,
label='function',
)
registry.register(
BaseEquals('string'),
encoding.TextStringEncoder, decoding.StringDecoder,
label='string',
)
registry.register(
has_arrlist,
encoding.BaseArrayEncoder, decoding.BaseArrayDecoder,
label='has_arrlist',
)
registry.register(
is_base_tuple,
encoding.TupleEncoder, decoding.TupleDecoder,
label='is_base_tuple',
)
registry_packed = ABIRegistry()
registry_packed.register_encoder(
BaseEquals('uint'),
encoding.PackedUnsignedIntegerEncoder,
label='uint',
)
registry_packed.register_encoder(
BaseEquals('int'),
encoding.PackedSignedIntegerEncoder,
label='int',
)
registry_packed.register_encoder(
BaseEquals('address'),
encoding.PackedAddressEncoder,
label='address',
)
registry_packed.register_encoder(
BaseEquals('bool'),
encoding.PackedBooleanEncoder,
label='bool',
)
registry_packed.register_encoder(
BaseEquals('ufixed'),
encoding.PackedUnsignedFixedEncoder,
label='ufixed',
)
registry_packed.register_encoder(
BaseEquals('fixed'),
encoding.PackedSignedFixedEncoder,
label='fixed',
)
registry_packed.register_encoder(
BaseEquals('bytes', with_sub=True),
encoding.PackedBytesEncoder,
label='bytes<M>',
)
registry_packed.register_encoder(
BaseEquals('bytes', with_sub=False),
encoding.PackedByteStringEncoder,
label='bytes',
)
registry_packed.register_encoder(
BaseEquals('function'),
encoding.PackedBytesEncoder,
label='function',
)
registry_packed.register_encoder(
BaseEquals('string'),
encoding.PackedTextStringEncoder,
label='string',
)
registry_packed.register_encoder(
has_arrlist,
encoding.PackedArrayEncoder,
label='has_arrlist',
)
registry_packed.register_encoder(
is_base_tuple,
encoding.TupleEncoder,
label='is_base_tuple',
)
|
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.register_encoder | python | def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
self._register_coder(self._encoders, lookup, encoder, label=label) | Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L354-L361 | [
"def _register_coder(mapping, lookup, coder, label=None):\n if callable(lookup):\n mapping.add(lookup, coder, label)\n return\n\n if isinstance(lookup, str):\n mapping.add(Equals(lookup), coder, lookup)\n return\n\n raise TypeError(\n 'Lookup must be a callable or a value of type `str`: got {}'.format(\n repr(lookup),\n )\n )\n"
] | class ABIRegistry(Copyable):
def __init__(self):
self._encoders = PredicateMapping('encoder registry')
self._decoders = PredicateMapping('decoder registry')
@staticmethod
def _register_coder(mapping, lookup, coder, label=None):
if callable(lookup):
mapping.add(lookup, coder, label)
return
if isinstance(lookup, str):
mapping.add(Equals(lookup), coder, lookup)
return
raise TypeError(
'Lookup must be a callable or a value of type `str`: got {}'.format(
repr(lookup),
)
)
@staticmethod
def _unregister_coder(mapping, lookup_or_label):
if callable(lookup_or_label):
mapping.remove_by_equality(lookup_or_label)
return
if isinstance(lookup_or_label, str):
mapping.remove_by_label(lookup_or_label)
return
raise TypeError(
'Lookup/label must be a callable or a value of type `str`: got {}'.format(
repr(lookup_or_label),
)
)
def _get_coder(self, mapping, type_str):
try:
coder = mapping.find(type_str)
except ValueError as e:
if 'No matching' in e.args[0]:
# If no matches found, attempt to parse in case lack of matches
# was due to unparsability
grammar.parse(type_str)
raise
if isinstance(coder, type) and issubclass(coder, BaseCoder):
return coder.from_type_str(type_str, self)
return coder
@_clear_encoder_cache
@_clear_encoder_cache
def unregister_encoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters an encoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the encoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
encoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._encoders, lookup_or_label)
@_clear_decoder_cache
def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._decoders, lookup, decoder, label=label)
@_clear_decoder_cache
def unregister_decoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters a decoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the decoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
decoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._decoders, lookup_or_label)
def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants.
"""
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label)
def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label)
@functools.lru_cache(maxsize=None)
def get_encoder(self, type_str):
return self._get_coder(self._encoders, type_str)
def has_encoder(self, type_str: abi.TypeStr) -> bool:
"""
Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found.
"""
try:
self.get_encoder(type_str)
except NoEntriesFound:
return False
else:
return True
@functools.lru_cache(maxsize=None)
def get_decoder(self, type_str):
return self._get_coder(self._decoders, type_str)
def copy(self):
"""
Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry.
"""
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy
|
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.register_decoder | python | def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
self._register_coder(self._decoders, lookup, decoder, label=label) | Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L375-L382 | [
"def _register_coder(mapping, lookup, coder, label=None):\n if callable(lookup):\n mapping.add(lookup, coder, label)\n return\n\n if isinstance(lookup, str):\n mapping.add(Equals(lookup), coder, lookup)\n return\n\n raise TypeError(\n 'Lookup must be a callable or a value of type `str`: got {}'.format(\n repr(lookup),\n )\n )\n"
] | class ABIRegistry(Copyable):
def __init__(self):
self._encoders = PredicateMapping('encoder registry')
self._decoders = PredicateMapping('decoder registry')
@staticmethod
def _register_coder(mapping, lookup, coder, label=None):
if callable(lookup):
mapping.add(lookup, coder, label)
return
if isinstance(lookup, str):
mapping.add(Equals(lookup), coder, lookup)
return
raise TypeError(
'Lookup must be a callable or a value of type `str`: got {}'.format(
repr(lookup),
)
)
@staticmethod
def _unregister_coder(mapping, lookup_or_label):
if callable(lookup_or_label):
mapping.remove_by_equality(lookup_or_label)
return
if isinstance(lookup_or_label, str):
mapping.remove_by_label(lookup_or_label)
return
raise TypeError(
'Lookup/label must be a callable or a value of type `str`: got {}'.format(
repr(lookup_or_label),
)
)
def _get_coder(self, mapping, type_str):
try:
coder = mapping.find(type_str)
except ValueError as e:
if 'No matching' in e.args[0]:
# If no matches found, attempt to parse in case lack of matches
# was due to unparsability
grammar.parse(type_str)
raise
if isinstance(coder, type) and issubclass(coder, BaseCoder):
return coder.from_type_str(type_str, self)
return coder
@_clear_encoder_cache
def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._encoders, lookup, encoder, label=label)
@_clear_encoder_cache
def unregister_encoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters an encoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the encoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
encoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._encoders, lookup_or_label)
@_clear_decoder_cache
@_clear_decoder_cache
def unregister_decoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters a decoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the decoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
decoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._decoders, lookup_or_label)
def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants.
"""
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label)
def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label)
@functools.lru_cache(maxsize=None)
def get_encoder(self, type_str):
return self._get_coder(self._encoders, type_str)
def has_encoder(self, type_str: abi.TypeStr) -> bool:
"""
Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found.
"""
try:
self.get_encoder(type_str)
except NoEntriesFound:
return False
else:
return True
@functools.lru_cache(maxsize=None)
def get_decoder(self, type_str):
return self._get_coder(self._decoders, type_str)
def copy(self):
"""
Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry.
"""
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy
|
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.register | python | def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label) | Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L395-L431 | null | class ABIRegistry(Copyable):
def __init__(self):
self._encoders = PredicateMapping('encoder registry')
self._decoders = PredicateMapping('decoder registry')
@staticmethod
def _register_coder(mapping, lookup, coder, label=None):
if callable(lookup):
mapping.add(lookup, coder, label)
return
if isinstance(lookup, str):
mapping.add(Equals(lookup), coder, lookup)
return
raise TypeError(
'Lookup must be a callable or a value of type `str`: got {}'.format(
repr(lookup),
)
)
@staticmethod
def _unregister_coder(mapping, lookup_or_label):
if callable(lookup_or_label):
mapping.remove_by_equality(lookup_or_label)
return
if isinstance(lookup_or_label, str):
mapping.remove_by_label(lookup_or_label)
return
raise TypeError(
'Lookup/label must be a callable or a value of type `str`: got {}'.format(
repr(lookup_or_label),
)
)
def _get_coder(self, mapping, type_str):
try:
coder = mapping.find(type_str)
except ValueError as e:
if 'No matching' in e.args[0]:
# If no matches found, attempt to parse in case lack of matches
# was due to unparsability
grammar.parse(type_str)
raise
if isinstance(coder, type) and issubclass(coder, BaseCoder):
return coder.from_type_str(type_str, self)
return coder
@_clear_encoder_cache
def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._encoders, lookup, encoder, label=label)
@_clear_encoder_cache
def unregister_encoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters an encoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the encoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
encoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._encoders, lookup_or_label)
@_clear_decoder_cache
def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._decoders, lookup, decoder, label=label)
@_clear_decoder_cache
def unregister_decoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters a decoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the decoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
decoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._decoders, lookup_or_label)
def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label)
@functools.lru_cache(maxsize=None)
def get_encoder(self, type_str):
return self._get_coder(self._encoders, type_str)
def has_encoder(self, type_str: abi.TypeStr) -> bool:
"""
Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found.
"""
try:
self.get_encoder(type_str)
except NoEntriesFound:
return False
else:
return True
@functools.lru_cache(maxsize=None)
def get_decoder(self, type_str):
return self._get_coder(self._decoders, type_str)
def copy(self):
"""
Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry.
"""
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy
|
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.unregister | python | def unregister(self, label: str) -> None:
self.unregister_encoder(label)
self.unregister_decoder(label) | Unregisters the entries in the encoder and decoder registries which
have the label ``label``. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L433-L439 | null | class ABIRegistry(Copyable):
def __init__(self):
self._encoders = PredicateMapping('encoder registry')
self._decoders = PredicateMapping('decoder registry')
@staticmethod
def _register_coder(mapping, lookup, coder, label=None):
if callable(lookup):
mapping.add(lookup, coder, label)
return
if isinstance(lookup, str):
mapping.add(Equals(lookup), coder, lookup)
return
raise TypeError(
'Lookup must be a callable or a value of type `str`: got {}'.format(
repr(lookup),
)
)
@staticmethod
def _unregister_coder(mapping, lookup_or_label):
if callable(lookup_or_label):
mapping.remove_by_equality(lookup_or_label)
return
if isinstance(lookup_or_label, str):
mapping.remove_by_label(lookup_or_label)
return
raise TypeError(
'Lookup/label must be a callable or a value of type `str`: got {}'.format(
repr(lookup_or_label),
)
)
def _get_coder(self, mapping, type_str):
try:
coder = mapping.find(type_str)
except ValueError as e:
if 'No matching' in e.args[0]:
# If no matches found, attempt to parse in case lack of matches
# was due to unparsability
grammar.parse(type_str)
raise
if isinstance(coder, type) and issubclass(coder, BaseCoder):
return coder.from_type_str(type_str, self)
return coder
@_clear_encoder_cache
def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._encoders, lookup, encoder, label=label)
@_clear_encoder_cache
def unregister_encoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters an encoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the encoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
encoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._encoders, lookup_or_label)
@_clear_decoder_cache
def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._decoders, lookup, decoder, label=label)
@_clear_decoder_cache
def unregister_decoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters a decoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the decoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
decoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._decoders, lookup_or_label)
def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants.
"""
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label)
@functools.lru_cache(maxsize=None)
def get_encoder(self, type_str):
return self._get_coder(self._encoders, type_str)
def has_encoder(self, type_str: abi.TypeStr) -> bool:
"""
Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found.
"""
try:
self.get_encoder(type_str)
except NoEntriesFound:
return False
else:
return True
@functools.lru_cache(maxsize=None)
def get_decoder(self, type_str):
return self._get_coder(self._decoders, type_str)
def copy(self):
"""
Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry.
"""
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy
|
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.has_encoder | python | def has_encoder(self, type_str: abi.TypeStr) -> bool:
try:
self.get_encoder(type_str)
except NoEntriesFound:
return False
else:
return True | Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L445-L457 | null | class ABIRegistry(Copyable):
def __init__(self):
self._encoders = PredicateMapping('encoder registry')
self._decoders = PredicateMapping('decoder registry')
@staticmethod
def _register_coder(mapping, lookup, coder, label=None):
if callable(lookup):
mapping.add(lookup, coder, label)
return
if isinstance(lookup, str):
mapping.add(Equals(lookup), coder, lookup)
return
raise TypeError(
'Lookup must be a callable or a value of type `str`: got {}'.format(
repr(lookup),
)
)
@staticmethod
def _unregister_coder(mapping, lookup_or_label):
if callable(lookup_or_label):
mapping.remove_by_equality(lookup_or_label)
return
if isinstance(lookup_or_label, str):
mapping.remove_by_label(lookup_or_label)
return
raise TypeError(
'Lookup/label must be a callable or a value of type `str`: got {}'.format(
repr(lookup_or_label),
)
)
def _get_coder(self, mapping, type_str):
try:
coder = mapping.find(type_str)
except ValueError as e:
if 'No matching' in e.args[0]:
# If no matches found, attempt to parse in case lack of matches
# was due to unparsability
grammar.parse(type_str)
raise
if isinstance(coder, type) and issubclass(coder, BaseCoder):
return coder.from_type_str(type_str, self)
return coder
@_clear_encoder_cache
def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._encoders, lookup, encoder, label=label)
@_clear_encoder_cache
def unregister_encoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters an encoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the encoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
encoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._encoders, lookup_or_label)
@_clear_decoder_cache
def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._decoders, lookup, decoder, label=label)
@_clear_decoder_cache
def unregister_decoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters a decoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the decoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
decoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._decoders, lookup_or_label)
def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants.
"""
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label)
def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label)
@functools.lru_cache(maxsize=None)
def get_encoder(self, type_str):
return self._get_coder(self._encoders, type_str)
@functools.lru_cache(maxsize=None)
def get_decoder(self, type_str):
return self._get_coder(self._decoders, type_str)
def copy(self):
"""
Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry.
"""
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy
|
ethereum/eth-abi | eth_abi/registry.py | ABIRegistry.copy | python | def copy(self):
cpy = type(self)()
cpy._encoders = copy.copy(self._encoders)
cpy._decoders = copy.copy(self._decoders)
return cpy | Copies a registry such that new registrations can be made or existing
registrations can be unregistered without affecting any instance from
which a copy was obtained. This is useful if an existing registry
fulfills most of a user's needs but requires one or two modifications.
In that case, a copy of that registry can be obtained and the necessary
changes made without affecting the original registry. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/registry.py#L463-L477 | null | class ABIRegistry(Copyable):
def __init__(self):
self._encoders = PredicateMapping('encoder registry')
self._decoders = PredicateMapping('decoder registry')
@staticmethod
def _register_coder(mapping, lookup, coder, label=None):
if callable(lookup):
mapping.add(lookup, coder, label)
return
if isinstance(lookup, str):
mapping.add(Equals(lookup), coder, lookup)
return
raise TypeError(
'Lookup must be a callable or a value of type `str`: got {}'.format(
repr(lookup),
)
)
@staticmethod
def _unregister_coder(mapping, lookup_or_label):
if callable(lookup_or_label):
mapping.remove_by_equality(lookup_or_label)
return
if isinstance(lookup_or_label, str):
mapping.remove_by_label(lookup_or_label)
return
raise TypeError(
'Lookup/label must be a callable or a value of type `str`: got {}'.format(
repr(lookup_or_label),
)
)
def _get_coder(self, mapping, type_str):
try:
coder = mapping.find(type_str)
except ValueError as e:
if 'No matching' in e.args[0]:
# If no matches found, attempt to parse in case lack of matches
# was due to unparsability
grammar.parse(type_str)
raise
if isinstance(coder, type) and issubclass(coder, BaseCoder):
return coder.from_type_str(type_str, self)
return coder
@_clear_encoder_cache
def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._encoders, lookup, encoder, label=label)
@_clear_encoder_cache
def unregister_encoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters an encoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the encoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
encoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._encoders, lookup_or_label)
@_clear_decoder_cache
def register_decoder(self, lookup: Lookup, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``decoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._decoders, lookup, decoder, label=label)
@_clear_decoder_cache
def unregister_decoder(self, lookup_or_label: Lookup) -> None:
"""
Unregisters a decoder in the registry with the given lookup or label.
If ``lookup_or_label`` is a string, the decoder with the label
``lookup_or_label`` will be unregistered. If it is an function, the
decoder with the lookup function ``lookup_or_label`` will be
unregistered.
"""
self._unregister_coder(self._decoders, lookup_or_label)
def register(self, lookup: Lookup, encoder: Encoder, decoder: Decoder, label: str=None) -> None:
"""
Registers the given ``encoder`` and ``decoder`` under the given
``lookup``. A unique string label may be optionally provided that can
be used to refer to the registration by name.
:param lookup: A type string or type string matcher function
(predicate). When the registry is queried with a type string
``query`` to determine which encoder or decoder to use, ``query``
will be checked against every registration in the registry. If a
registration was created with a type string for ``lookup``, it will
be considered a match if ``lookup == query``. If a registration
was created with a matcher function for ``lookup``, it will be
considered a match if ``lookup(query) is True``. If more than one
registration is found to be a match, then an exception is raised.
:param encoder: An encoder callable or class to use if ``lookup``
matches a query. If ``encoder`` is a callable, it must accept a
python value and return a ``bytes`` value. If ``encoder`` is a
class, it must be a valid subclass of :any:`encoding.BaseEncoder`
and must also implement the :any:`from_type_str` method on
:any:`base.BaseCoder`.
:param decoder: A decoder callable or class to use if ``lookup``
matches a query. If ``decoder`` is a callable, it must accept a
stream-like object of bytes and return a python value. If
``decoder`` is a class, it must be a valid subclass of
:any:`decoding.BaseDecoder` and must also implement the
:any:`from_type_str` method on :any:`base.BaseCoder`.
:param label: An optional label that can be used to refer to this
registration by name. This label can be used to unregister an
entry in the registry via the :any:`unregister` method and its
variants.
"""
self.register_encoder(lookup, encoder, label=label)
self.register_decoder(lookup, decoder, label=label)
def unregister(self, label: str) -> None:
"""
Unregisters the entries in the encoder and decoder registries which
have the label ``label``.
"""
self.unregister_encoder(label)
self.unregister_decoder(label)
@functools.lru_cache(maxsize=None)
def get_encoder(self, type_str):
return self._get_coder(self._encoders, type_str)
def has_encoder(self, type_str: abi.TypeStr) -> bool:
"""
Returns ``True`` if an encoder is found for the given type string
``type_str``. Otherwise, returns ``False``. Raises
:class:`~eth_abi.exceptions.MultipleEntriesFound` if multiple encoders
are found.
"""
try:
self.get_encoder(type_str)
except NoEntriesFound:
return False
else:
return True
@functools.lru_cache(maxsize=None)
def get_decoder(self, type_str):
return self._get_coder(self._decoders, type_str)
|
ethereum/eth-abi | eth_abi/codec.py | ABIEncoder.encode_single | python | def encode_single(self, typ: TypeStr, arg: Any) -> bytes:
encoder = self._registry.get_encoder(typ)
return encoder(arg) | Encodes the python value ``arg`` as a binary value of the ABI type
``typ``.
:param typ: The string representation of the ABI type that will be used
for encoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``,
etc.
:param arg: The python value to be encoded.
:returns: The binary representation of the python value ``arg`` as a
value of the ABI type ``typ``. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L50-L65 | null | class ABIEncoder(BaseABICoder):
"""
Wraps a registry to provide last-mile encoding functionality.
"""
def encode_abi(self, types: Iterable[TypeStr], args: Iterable[Any]) -> bytes:
"""
Encodes the python values in ``args`` as a sequence of binary values of
the ABI types in ``types`` via the head-tail mechanism.
:param types: An iterable of string representations of the ABI types
that will be used for encoding e.g. ``('uint256', 'bytes[]',
'(int,int)')``
:param args: An iterable of python values to be encoded.
:returns: The head-tail encoded binary representation of the python
values in ``args`` as values of the ABI types in ``types``.
"""
encoders = [
self._registry.get_encoder(type_str)
for type_str in types
]
encoder = TupleEncoder(encoders=encoders)
return encoder(args)
def is_encodable(self, typ: TypeStr, arg: Any) -> bool:
"""
Determines if the python value ``arg`` is encodable as a value of the
ABI type ``typ``.
:param typ: A string representation for the ABI type against which the
python value ``arg`` will be checked e.g. ``'uint256'``,
``'bytes[]'``, ``'(int,int)'``, etc.
:param arg: The python value whose encodability should be checked.
:returns: ``True`` if ``arg`` is encodable as a value of the ABI type
``typ``. Otherwise, ``False``.
"""
encoder = self._registry.get_encoder(typ)
try:
encoder.validate_value(arg)
except EncodingError:
return False
except AttributeError:
try:
encoder(arg)
except EncodingError:
return False
return True
def is_encodable_type(self, typ: TypeStr) -> bool:
"""
Returns ``True`` if values for the ABI type ``typ`` can be encoded by
this codec.
:param typ: A string representation for the ABI type that will be
checked for encodability e.g. ``'uint256'``, ``'bytes[]'``,
``'(int,int)'``, etc.
:returns: ``True`` if values for ``typ`` can be encoded by this codec.
Otherwise, ``False``.
"""
return self._registry.has_encoder(typ)
|
ethereum/eth-abi | eth_abi/codec.py | ABIEncoder.encode_abi | python | def encode_abi(self, types: Iterable[TypeStr], args: Iterable[Any]) -> bytes:
encoders = [
self._registry.get_encoder(type_str)
for type_str in types
]
encoder = TupleEncoder(encoders=encoders)
return encoder(args) | Encodes the python values in ``args`` as a sequence of binary values of
the ABI types in ``types`` via the head-tail mechanism.
:param types: An iterable of string representations of the ABI types
that will be used for encoding e.g. ``('uint256', 'bytes[]',
'(int,int)')``
:param args: An iterable of python values to be encoded.
:returns: The head-tail encoded binary representation of the python
values in ``args`` as values of the ABI types in ``types``. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L67-L87 | null | class ABIEncoder(BaseABICoder):
"""
Wraps a registry to provide last-mile encoding functionality.
"""
def encode_single(self, typ: TypeStr, arg: Any) -> bytes:
"""
Encodes the python value ``arg`` as a binary value of the ABI type
``typ``.
:param typ: The string representation of the ABI type that will be used
for encoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``,
etc.
:param arg: The python value to be encoded.
:returns: The binary representation of the python value ``arg`` as a
value of the ABI type ``typ``.
"""
encoder = self._registry.get_encoder(typ)
return encoder(arg)
def is_encodable(self, typ: TypeStr, arg: Any) -> bool:
"""
Determines if the python value ``arg`` is encodable as a value of the
ABI type ``typ``.
:param typ: A string representation for the ABI type against which the
python value ``arg`` will be checked e.g. ``'uint256'``,
``'bytes[]'``, ``'(int,int)'``, etc.
:param arg: The python value whose encodability should be checked.
:returns: ``True`` if ``arg`` is encodable as a value of the ABI type
``typ``. Otherwise, ``False``.
"""
encoder = self._registry.get_encoder(typ)
try:
encoder.validate_value(arg)
except EncodingError:
return False
except AttributeError:
try:
encoder(arg)
except EncodingError:
return False
return True
def is_encodable_type(self, typ: TypeStr) -> bool:
"""
Returns ``True`` if values for the ABI type ``typ`` can be encoded by
this codec.
:param typ: A string representation for the ABI type that will be
checked for encodability e.g. ``'uint256'``, ``'bytes[]'``,
``'(int,int)'``, etc.
:returns: ``True`` if values for ``typ`` can be encoded by this codec.
Otherwise, ``False``.
"""
return self._registry.has_encoder(typ)
|
ethereum/eth-abi | eth_abi/codec.py | ABIEncoder.is_encodable | python | def is_encodable(self, typ: TypeStr, arg: Any) -> bool:
encoder = self._registry.get_encoder(typ)
try:
encoder.validate_value(arg)
except EncodingError:
return False
except AttributeError:
try:
encoder(arg)
except EncodingError:
return False
return True | Determines if the python value ``arg`` is encodable as a value of the
ABI type ``typ``.
:param typ: A string representation for the ABI type against which the
python value ``arg`` will be checked e.g. ``'uint256'``,
``'bytes[]'``, ``'(int,int)'``, etc.
:param arg: The python value whose encodability should be checked.
:returns: ``True`` if ``arg`` is encodable as a value of the ABI type
``typ``. Otherwise, ``False``. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L89-L114 | null | class ABIEncoder(BaseABICoder):
"""
Wraps a registry to provide last-mile encoding functionality.
"""
def encode_single(self, typ: TypeStr, arg: Any) -> bytes:
"""
Encodes the python value ``arg`` as a binary value of the ABI type
``typ``.
:param typ: The string representation of the ABI type that will be used
for encoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``,
etc.
:param arg: The python value to be encoded.
:returns: The binary representation of the python value ``arg`` as a
value of the ABI type ``typ``.
"""
encoder = self._registry.get_encoder(typ)
return encoder(arg)
def encode_abi(self, types: Iterable[TypeStr], args: Iterable[Any]) -> bytes:
"""
Encodes the python values in ``args`` as a sequence of binary values of
the ABI types in ``types`` via the head-tail mechanism.
:param types: An iterable of string representations of the ABI types
that will be used for encoding e.g. ``('uint256', 'bytes[]',
'(int,int)')``
:param args: An iterable of python values to be encoded.
:returns: The head-tail encoded binary representation of the python
values in ``args`` as values of the ABI types in ``types``.
"""
encoders = [
self._registry.get_encoder(type_str)
for type_str in types
]
encoder = TupleEncoder(encoders=encoders)
return encoder(args)
def is_encodable_type(self, typ: TypeStr) -> bool:
"""
Returns ``True`` if values for the ABI type ``typ`` can be encoded by
this codec.
:param typ: A string representation for the ABI type that will be
checked for encodability e.g. ``'uint256'``, ``'bytes[]'``,
``'(int,int)'``, etc.
:returns: ``True`` if values for ``typ`` can be encoded by this codec.
Otherwise, ``False``.
"""
return self._registry.has_encoder(typ)
|
ethereum/eth-abi | eth_abi/codec.py | ABIDecoder.decode_single | python | def decode_single(self, typ: TypeStr, data: Decodable) -> Any:
if not is_bytes(data):
raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data)))
decoder = self._registry.get_decoder(typ)
stream = ContextFramesBytesIO(data)
return decoder(stream) | Decodes the binary value ``data`` of the ABI type ``typ`` into its
equivalent python value.
:param typ: The string representation of the ABI type that will be used for
decoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``, etc.
:param data: The binary value to be decoded.
:returns: The equivalent python value of the ABI value represented in
``data``. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L135-L153 | null | class ABIDecoder(BaseABICoder):
"""
Wraps a registry to provide last-mile decoding functionality.
"""
def decode_abi(self, types: Iterable[TypeStr], data: Decodable) -> Tuple[Any, ...]:
"""
Decodes the binary value ``data`` as a sequence of values of the ABI types
in ``types`` via the head-tail mechanism into a tuple of equivalent python
values.
:param types: An iterable of string representations of the ABI types that
will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
:param data: The binary value to be decoded.
:returns: A tuple of equivalent python values for the ABI values
represented in ``data``.
"""
if not is_bytes(data):
raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data)))
decoders = [
self._registry.get_decoder(type_str)
for type_str in types
]
decoder = TupleDecoder(decoders=decoders)
stream = ContextFramesBytesIO(data)
return decoder(stream)
|
ethereum/eth-abi | eth_abi/codec.py | ABIDecoder.decode_abi | python | def decode_abi(self, types: Iterable[TypeStr], data: Decodable) -> Tuple[Any, ...]:
if not is_bytes(data):
raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data)))
decoders = [
self._registry.get_decoder(type_str)
for type_str in types
]
decoder = TupleDecoder(decoders=decoders)
stream = ContextFramesBytesIO(data)
return decoder(stream) | Decodes the binary value ``data`` as a sequence of values of the ABI types
in ``types`` via the head-tail mechanism into a tuple of equivalent python
values.
:param types: An iterable of string representations of the ABI types that
will be used for decoding e.g. ``('uint256', 'bytes[]', '(int,int)')``
:param data: The binary value to be decoded.
:returns: A tuple of equivalent python values for the ABI values
represented in ``data``. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/codec.py#L155-L179 | null | class ABIDecoder(BaseABICoder):
"""
Wraps a registry to provide last-mile decoding functionality.
"""
def decode_single(self, typ: TypeStr, data: Decodable) -> Any:
"""
Decodes the binary value ``data`` of the ABI type ``typ`` into its
equivalent python value.
:param typ: The string representation of the ABI type that will be used for
decoding e.g. ``'uint256'``, ``'bytes[]'``, ``'(int,int)'``, etc.
:param data: The binary value to be decoded.
:returns: The equivalent python value of the ABI value represented in
``data``.
"""
if not is_bytes(data):
raise TypeError("The `data` value must be of bytes type. Got {0}".format(type(data)))
decoder = self._registry.get_decoder(typ)
stream = ContextFramesBytesIO(data)
return decoder(stream)
|
ethereum/eth-abi | eth_abi/grammar.py | NodeVisitor.parse | python | def parse(self, type_str):
if not isinstance(type_str, str):
raise TypeError('Can only parse string values: got {}'.format(type(type_str)))
try:
return super().parse(type_str)
except parsimonious.ParseError as e:
raise ParseError(e.text, e.pos, e.expr) | Parses a type string into an appropriate instance of
:class:`~eth_abi.grammar.ABIType`. If a type string cannot be parsed,
throws :class:`~eth_abi.exceptions.ParseError`.
:param type_str: The type string to be parsed.
:returns: An instance of :class:`~eth_abi.grammar.ABIType` containing
information about the parsed type string. | train | https://github.com/ethereum/eth-abi/blob/0a5cab0bdeae30b77efa667379427581784f1707/eth_abi/grammar.py#L109-L125 | null | class NodeVisitor(parsimonious.NodeVisitor):
"""
Parsimonious node visitor which performs both parsing of type strings and
post-processing of parse trees. Parsing operations are cached.
"""
grammar = grammar
def visit_non_zero_tuple(self, node, visited_children):
# Ignore left and right parens
_, first, rest, _ = visited_children
return (first,) + rest
def visit_tuple_type(self, node, visited_children):
components, arrlist = visited_children
return TupleType(components, arrlist, node=node)
def visit_next_type(self, node, visited_children):
# Ignore comma
_, abi_type = visited_children
return abi_type
def visit_zero_tuple(self, node, visited_children):
return tuple()
def visit_basic_type(self, node, visited_children):
base, sub, arrlist = visited_children
return BasicType(base, sub, arrlist, node=node)
def visit_two_size(self, node, visited_children):
# Ignore "x"
first, _, second = visited_children
return first, second
def visit_const_arr(self, node, visited_children):
# Ignore left and right brackets
_, int_value, _ = visited_children
return (int_value,)
def visit_dynam_arr(self, node, visited_children):
return tuple()
def visit_alphas(self, node, visited_children):
return node.text
def visit_digits(self, node, visited_children):
return int(node.text)
def generic_visit(self, node, visited_children):
if isinstance(node.expr, expressions.OneOf):
# Unwrap value chosen from alternatives
return visited_children[0]
if isinstance(node.expr, expressions.Optional):
# Unwrap optional value or return `None`
if len(visited_children) != 0:
return visited_children[0]
return None
return tuple(visited_children)
@functools.lru_cache(maxsize=None)
|
barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.distance | python | def distance(self, val):
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp | set the distance parameter | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L79-L88 | null | class SpellChecker(object):
""" The SpellChecker class encapsulates the basics needed to accomplish a
simple spell checking algorithm. It is based on the work by
Peter Norvig (https://norvig.com/spell-correct.html)
Args:
language (str): The language of the dictionary to load or None \
for no dictionary. Supported languages are `en`, `es`, `de`, fr` \
and `pt`. Defaults to `en`
local_dictionary (str): The path to a locally stored word \
frequency dictionary; if provided, no language will be loaded
distance (int): The edit distance to use. Defaults to 2 """
__slots__ = ["_distance", "_word_frequency", "_tokenizer"]
def __init__(
self, language="en", local_dictionary=None, distance=2, tokenizer=None
):
self._distance = None
self.distance = distance # use the setter value check
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
self._word_frequency = WordFrequency(self._tokenizer)
if local_dictionary:
self._word_frequency.load_dictionary(local_dictionary)
elif language:
filename = "{}.json.gz".format(language.lower())
here = os.path.dirname(__file__)
full_filename = os.path.join(here, "resources", filename)
if not os.path.exists(full_filename):
msg = (
"The provided dictionary language ({}) does not " "exist!"
).format(language.lower())
raise ValueError(msg)
self._word_frequency.load_dictionary(full_filename)
def __contains__(self, key):
""" setup easier known checks """
return key in self._word_frequency
def __getitem__(self, key):
""" setup easier frequency checks """
return self._word_frequency[key]
@property
def word_frequency(self):
""" WordFrequency: An encapsulation of the word frequency `dictionary`
Note:
Not settable """
return self._word_frequency
@property
def distance(self):
""" int: The maximum edit distance to calculate
Note:
Valid values are 1 or 2; if an invalid value is passed, \
defaults to 2 """
return self._distance
@distance.setter
def split_words(self, text):
""" Split text into individual `words` using either a simple whitespace
regex or the passed in tokenizer
Args:
text (str): The text to split into individual words
Returns:
list(str): A listing of all words in the provided text """
return self._tokenizer(text)
def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data)
def word_probability(self, word, total_words=None):
""" Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word """
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words
def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability)
def candidates(self, word):
""" Generate possible spelling corrections for the provided word up to
an edit distance of two, if and only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates """
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word}
def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
)
def unknown(self, words):
""" The subset of `words` that do not appear in the dictionary
Args:
words (list): List of words to determine which are not in the \
corpus
Returns:
set: The set of those words from the input that are not in \
the corpus """
tmp = [w.lower() for w in words if self._check_if_should_check(w)]
return set(w for w in tmp if w not in self._word_frequency.dictionary)
def edit_distance_1(self, word):
""" Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word """
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edit_distance_2(self, word):
""" Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word """
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
]
def __edit_distance_alt(self, words):
""" Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words """
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
@staticmethod
def _check_if_should_check(word):
if len(word) == 1 and word in string.punctuation:
return False
try: # check if it is a number (int, float, etc)
float(word)
return False
except ValueError:
pass
return True
|
barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.export | python | def export(self, filepath, encoding="utf-8", gzipped=True):
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data) | Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L100-L108 | [
"def write_file(filepath, encoding, gzipped, data):\n \"\"\" Write the data to file either as a gzip file or text based on the\n gzipped parameter\n\n Args:\n filepath (str): The filename to open\n encoding (str): The file encoding to use\n gzipped (bool): Whether the file should be gzipped or not\n data (str): The data to be written out\n \"\"\"\n if gzipped:\n with gzip.open(filepath, \"wt\") as fobj:\n fobj.write(data)\n else:\n with OPEN(filepath, \"w\", encoding=encoding) as fobj:\n if sys.version_info < (3, 0):\n data = data.decode(encoding)\n fobj.write(data)\n"
] | class SpellChecker(object):
""" The SpellChecker class encapsulates the basics needed to accomplish a
simple spell checking algorithm. It is based on the work by
Peter Norvig (https://norvig.com/spell-correct.html)
Args:
language (str): The language of the dictionary to load or None \
for no dictionary. Supported languages are `en`, `es`, `de`, fr` \
and `pt`. Defaults to `en`
local_dictionary (str): The path to a locally stored word \
frequency dictionary; if provided, no language will be loaded
distance (int): The edit distance to use. Defaults to 2 """
__slots__ = ["_distance", "_word_frequency", "_tokenizer"]
def __init__(
self, language="en", local_dictionary=None, distance=2, tokenizer=None
):
self._distance = None
self.distance = distance # use the setter value check
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
self._word_frequency = WordFrequency(self._tokenizer)
if local_dictionary:
self._word_frequency.load_dictionary(local_dictionary)
elif language:
filename = "{}.json.gz".format(language.lower())
here = os.path.dirname(__file__)
full_filename = os.path.join(here, "resources", filename)
if not os.path.exists(full_filename):
msg = (
"The provided dictionary language ({}) does not " "exist!"
).format(language.lower())
raise ValueError(msg)
self._word_frequency.load_dictionary(full_filename)
def __contains__(self, key):
""" setup easier known checks """
return key in self._word_frequency
def __getitem__(self, key):
""" setup easier frequency checks """
return self._word_frequency[key]
@property
def word_frequency(self):
""" WordFrequency: An encapsulation of the word frequency `dictionary`
Note:
Not settable """
return self._word_frequency
@property
def distance(self):
""" int: The maximum edit distance to calculate
Note:
Valid values are 1 or 2; if an invalid value is passed, \
defaults to 2 """
return self._distance
@distance.setter
def distance(self, val):
""" set the distance parameter """
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp
def split_words(self, text):
""" Split text into individual `words` using either a simple whitespace
regex or the passed in tokenizer
Args:
text (str): The text to split into individual words
Returns:
list(str): A listing of all words in the provided text """
return self._tokenizer(text)
def word_probability(self, word, total_words=None):
""" Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word """
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words
def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability)
def candidates(self, word):
""" Generate possible spelling corrections for the provided word up to
an edit distance of two, if and only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates """
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word}
def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
)
def unknown(self, words):
""" The subset of `words` that do not appear in the dictionary
Args:
words (list): List of words to determine which are not in the \
corpus
Returns:
set: The set of those words from the input that are not in \
the corpus """
tmp = [w.lower() for w in words if self._check_if_should_check(w)]
return set(w for w in tmp if w not in self._word_frequency.dictionary)
def edit_distance_1(self, word):
""" Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word """
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edit_distance_2(self, word):
""" Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word """
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
]
def __edit_distance_alt(self, words):
""" Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words """
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
@staticmethod
def _check_if_should_check(word):
if len(word) == 1 and word in string.punctuation:
return False
try: # check if it is a number (int, float, etc)
float(word)
return False
except ValueError:
pass
return True
|
barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.word_probability | python | def word_probability(self, word, total_words=None):
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words | Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L110-L124 | null | class SpellChecker(object):
""" The SpellChecker class encapsulates the basics needed to accomplish a
simple spell checking algorithm. It is based on the work by
Peter Norvig (https://norvig.com/spell-correct.html)
Args:
language (str): The language of the dictionary to load or None \
for no dictionary. Supported languages are `en`, `es`, `de`, fr` \
and `pt`. Defaults to `en`
local_dictionary (str): The path to a locally stored word \
frequency dictionary; if provided, no language will be loaded
distance (int): The edit distance to use. Defaults to 2 """
__slots__ = ["_distance", "_word_frequency", "_tokenizer"]
def __init__(
self, language="en", local_dictionary=None, distance=2, tokenizer=None
):
self._distance = None
self.distance = distance # use the setter value check
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
self._word_frequency = WordFrequency(self._tokenizer)
if local_dictionary:
self._word_frequency.load_dictionary(local_dictionary)
elif language:
filename = "{}.json.gz".format(language.lower())
here = os.path.dirname(__file__)
full_filename = os.path.join(here, "resources", filename)
if not os.path.exists(full_filename):
msg = (
"The provided dictionary language ({}) does not " "exist!"
).format(language.lower())
raise ValueError(msg)
self._word_frequency.load_dictionary(full_filename)
def __contains__(self, key):
""" setup easier known checks """
return key in self._word_frequency
def __getitem__(self, key):
""" setup easier frequency checks """
return self._word_frequency[key]
@property
def word_frequency(self):
""" WordFrequency: An encapsulation of the word frequency `dictionary`
Note:
Not settable """
return self._word_frequency
@property
def distance(self):
""" int: The maximum edit distance to calculate
Note:
Valid values are 1 or 2; if an invalid value is passed, \
defaults to 2 """
return self._distance
@distance.setter
def distance(self, val):
""" set the distance parameter """
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp
def split_words(self, text):
""" Split text into individual `words` using either a simple whitespace
regex or the passed in tokenizer
Args:
text (str): The text to split into individual words
Returns:
list(str): A listing of all words in the provided text """
return self._tokenizer(text)
def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data)
def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability)
def candidates(self, word):
""" Generate possible spelling corrections for the provided word up to
an edit distance of two, if and only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates """
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word}
def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
)
def unknown(self, words):
""" The subset of `words` that do not appear in the dictionary
Args:
words (list): List of words to determine which are not in the \
corpus
Returns:
set: The set of those words from the input that are not in \
the corpus """
tmp = [w.lower() for w in words if self._check_if_should_check(w)]
return set(w for w in tmp if w not in self._word_frequency.dictionary)
def edit_distance_1(self, word):
""" Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word """
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edit_distance_2(self, word):
""" Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word """
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
]
def __edit_distance_alt(self, words):
""" Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words """
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
@staticmethod
def _check_if_should_check(word):
if len(word) == 1 and word in string.punctuation:
return False
try: # check if it is a number (int, float, etc)
float(word)
return False
except ValueError:
pass
return True
|
barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.correction | python | def correction(self, word):
return max(self.candidates(word), key=self.word_probability) | The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L126-L133 | [
"def candidates(self, word):\n \"\"\" Generate possible spelling corrections for the provided word up to\n an edit distance of two, if and only when needed\n\n Args:\n word (str): The word for which to calculate candidate spellings\n Returns:\n set: The set of words that are possible candidates \"\"\"\n if self.known([word]): # short-cut if word is correct already\n return {word}\n # get edit distance 1...\n res = [x for x in self.edit_distance_1(word)]\n tmp = self.known(res)\n if tmp:\n return tmp\n # if still not found, use the edit distance 1 to calc edit distance 2\n if self._distance == 2:\n tmp = self.known([x for x in self.__edit_distance_alt(res)])\n if tmp:\n return tmp\n return {word}\n"
] | class SpellChecker(object):
""" The SpellChecker class encapsulates the basics needed to accomplish a
simple spell checking algorithm. It is based on the work by
Peter Norvig (https://norvig.com/spell-correct.html)
Args:
language (str): The language of the dictionary to load or None \
for no dictionary. Supported languages are `en`, `es`, `de`, fr` \
and `pt`. Defaults to `en`
local_dictionary (str): The path to a locally stored word \
frequency dictionary; if provided, no language will be loaded
distance (int): The edit distance to use. Defaults to 2 """
__slots__ = ["_distance", "_word_frequency", "_tokenizer"]
def __init__(
self, language="en", local_dictionary=None, distance=2, tokenizer=None
):
self._distance = None
self.distance = distance # use the setter value check
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
self._word_frequency = WordFrequency(self._tokenizer)
if local_dictionary:
self._word_frequency.load_dictionary(local_dictionary)
elif language:
filename = "{}.json.gz".format(language.lower())
here = os.path.dirname(__file__)
full_filename = os.path.join(here, "resources", filename)
if not os.path.exists(full_filename):
msg = (
"The provided dictionary language ({}) does not " "exist!"
).format(language.lower())
raise ValueError(msg)
self._word_frequency.load_dictionary(full_filename)
def __contains__(self, key):
""" setup easier known checks """
return key in self._word_frequency
def __getitem__(self, key):
""" setup easier frequency checks """
return self._word_frequency[key]
@property
def word_frequency(self):
""" WordFrequency: An encapsulation of the word frequency `dictionary`
Note:
Not settable """
return self._word_frequency
@property
def distance(self):
""" int: The maximum edit distance to calculate
Note:
Valid values are 1 or 2; if an invalid value is passed, \
defaults to 2 """
return self._distance
@distance.setter
def distance(self, val):
""" set the distance parameter """
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp
def split_words(self, text):
""" Split text into individual `words` using either a simple whitespace
regex or the passed in tokenizer
Args:
text (str): The text to split into individual words
Returns:
list(str): A listing of all words in the provided text """
return self._tokenizer(text)
def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data)
def word_probability(self, word, total_words=None):
""" Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word """
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words
def candidates(self, word):
""" Generate possible spelling corrections for the provided word up to
an edit distance of two, if and only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates """
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word}
def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
)
def unknown(self, words):
""" The subset of `words` that do not appear in the dictionary
Args:
words (list): List of words to determine which are not in the \
corpus
Returns:
set: The set of those words from the input that are not in \
the corpus """
tmp = [w.lower() for w in words if self._check_if_should_check(w)]
return set(w for w in tmp if w not in self._word_frequency.dictionary)
def edit_distance_1(self, word):
""" Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word """
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edit_distance_2(self, word):
""" Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word """
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
]
def __edit_distance_alt(self, words):
""" Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words """
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
@staticmethod
def _check_if_should_check(word):
if len(word) == 1 and word in string.punctuation:
return False
try: # check if it is a number (int, float, etc)
float(word)
return False
except ValueError:
pass
return True
|
barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.candidates | python | def candidates(self, word):
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word} | Generate possible spelling corrections for the provided word up to
an edit distance of two, if and only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L135-L155 | [
"def known(self, words):\n \"\"\" The subset of `words` that appear in the dictionary of words\n\n Args:\n words (list): List of words to determine which are in the \\\n corpus\n Returns:\n set: The set of those words from the input that are in the \\\n corpus \"\"\"\n tmp = [w.lower() for w in words]\n return set(\n w\n for w in tmp\n if w in self._word_frequency.dictionary\n or not self._check_if_should_check(w)\n )\n"
] | class SpellChecker(object):
""" The SpellChecker class encapsulates the basics needed to accomplish a
simple spell checking algorithm. It is based on the work by
Peter Norvig (https://norvig.com/spell-correct.html)
Args:
language (str): The language of the dictionary to load or None \
for no dictionary. Supported languages are `en`, `es`, `de`, fr` \
and `pt`. Defaults to `en`
local_dictionary (str): The path to a locally stored word \
frequency dictionary; if provided, no language will be loaded
distance (int): The edit distance to use. Defaults to 2 """
__slots__ = ["_distance", "_word_frequency", "_tokenizer"]
def __init__(
self, language="en", local_dictionary=None, distance=2, tokenizer=None
):
self._distance = None
self.distance = distance # use the setter value check
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
self._word_frequency = WordFrequency(self._tokenizer)
if local_dictionary:
self._word_frequency.load_dictionary(local_dictionary)
elif language:
filename = "{}.json.gz".format(language.lower())
here = os.path.dirname(__file__)
full_filename = os.path.join(here, "resources", filename)
if not os.path.exists(full_filename):
msg = (
"The provided dictionary language ({}) does not " "exist!"
).format(language.lower())
raise ValueError(msg)
self._word_frequency.load_dictionary(full_filename)
def __contains__(self, key):
""" setup easier known checks """
return key in self._word_frequency
def __getitem__(self, key):
""" setup easier frequency checks """
return self._word_frequency[key]
@property
def word_frequency(self):
""" WordFrequency: An encapsulation of the word frequency `dictionary`
Note:
Not settable """
return self._word_frequency
@property
def distance(self):
""" int: The maximum edit distance to calculate
Note:
Valid values are 1 or 2; if an invalid value is passed, \
defaults to 2 """
return self._distance
@distance.setter
def distance(self, val):
""" set the distance parameter """
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp
def split_words(self, text):
""" Split text into individual `words` using either a simple whitespace
regex or the passed in tokenizer
Args:
text (str): The text to split into individual words
Returns:
list(str): A listing of all words in the provided text """
return self._tokenizer(text)
def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data)
def word_probability(self, word, total_words=None):
""" Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word """
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words
def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability)
def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
)
def unknown(self, words):
""" The subset of `words` that do not appear in the dictionary
Args:
words (list): List of words to determine which are not in the \
corpus
Returns:
set: The set of those words from the input that are not in \
the corpus """
tmp = [w.lower() for w in words if self._check_if_should_check(w)]
return set(w for w in tmp if w not in self._word_frequency.dictionary)
def edit_distance_1(self, word):
""" Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word """
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edit_distance_2(self, word):
""" Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word """
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
]
def __edit_distance_alt(self, words):
""" Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words """
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
@staticmethod
def _check_if_should_check(word):
if len(word) == 1 and word in string.punctuation:
return False
try: # check if it is a number (int, float, etc)
float(word)
return False
except ValueError:
pass
return True
|
barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.known | python | def known(self, words):
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
) | The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L157-L172 | null | class SpellChecker(object):
""" The SpellChecker class encapsulates the basics needed to accomplish a
simple spell checking algorithm. It is based on the work by
Peter Norvig (https://norvig.com/spell-correct.html)
Args:
language (str): The language of the dictionary to load or None \
for no dictionary. Supported languages are `en`, `es`, `de`, fr` \
and `pt`. Defaults to `en`
local_dictionary (str): The path to a locally stored word \
frequency dictionary; if provided, no language will be loaded
distance (int): The edit distance to use. Defaults to 2 """
__slots__ = ["_distance", "_word_frequency", "_tokenizer"]
def __init__(
self, language="en", local_dictionary=None, distance=2, tokenizer=None
):
self._distance = None
self.distance = distance # use the setter value check
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
self._word_frequency = WordFrequency(self._tokenizer)
if local_dictionary:
self._word_frequency.load_dictionary(local_dictionary)
elif language:
filename = "{}.json.gz".format(language.lower())
here = os.path.dirname(__file__)
full_filename = os.path.join(here, "resources", filename)
if not os.path.exists(full_filename):
msg = (
"The provided dictionary language ({}) does not " "exist!"
).format(language.lower())
raise ValueError(msg)
self._word_frequency.load_dictionary(full_filename)
def __contains__(self, key):
""" setup easier known checks """
return key in self._word_frequency
def __getitem__(self, key):
""" setup easier frequency checks """
return self._word_frequency[key]
@property
def word_frequency(self):
""" WordFrequency: An encapsulation of the word frequency `dictionary`
Note:
Not settable """
return self._word_frequency
@property
def distance(self):
""" int: The maximum edit distance to calculate
Note:
Valid values are 1 or 2; if an invalid value is passed, \
defaults to 2 """
return self._distance
@distance.setter
def distance(self, val):
""" set the distance parameter """
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp
def split_words(self, text):
""" Split text into individual `words` using either a simple whitespace
regex or the passed in tokenizer
Args:
text (str): The text to split into individual words
Returns:
list(str): A listing of all words in the provided text """
return self._tokenizer(text)
def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data)
def word_probability(self, word, total_words=None):
""" Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word """
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words
def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability)
def candidates(self, word):
""" Generate possible spelling corrections for the provided word up to
an edit distance of two, if and only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates """
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word}
def unknown(self, words):
""" The subset of `words` that do not appear in the dictionary
Args:
words (list): List of words to determine which are not in the \
corpus
Returns:
set: The set of those words from the input that are not in \
the corpus """
tmp = [w.lower() for w in words if self._check_if_should_check(w)]
return set(w for w in tmp if w not in self._word_frequency.dictionary)
def edit_distance_1(self, word):
""" Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word """
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edit_distance_2(self, word):
""" Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word """
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
]
def __edit_distance_alt(self, words):
""" Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words """
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
@staticmethod
def _check_if_should_check(word):
if len(word) == 1 and word in string.punctuation:
return False
try: # check if it is a number (int, float, etc)
float(word)
return False
except ValueError:
pass
return True
|
barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.edit_distance_1 | python | def edit_distance_1(self, word):
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts) | Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L186-L204 | [
"def _check_if_should_check(word):\n if len(word) == 1 and word in string.punctuation:\n return False\n try: # check if it is a number (int, float, etc)\n float(word)\n return False\n except ValueError:\n pass\n\n return True\n"
] | class SpellChecker(object):
""" The SpellChecker class encapsulates the basics needed to accomplish a
simple spell checking algorithm. It is based on the work by
Peter Norvig (https://norvig.com/spell-correct.html)
Args:
language (str): The language of the dictionary to load or None \
for no dictionary. Supported languages are `en`, `es`, `de`, fr` \
and `pt`. Defaults to `en`
local_dictionary (str): The path to a locally stored word \
frequency dictionary; if provided, no language will be loaded
distance (int): The edit distance to use. Defaults to 2 """
__slots__ = ["_distance", "_word_frequency", "_tokenizer"]
def __init__(
self, language="en", local_dictionary=None, distance=2, tokenizer=None
):
self._distance = None
self.distance = distance # use the setter value check
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
self._word_frequency = WordFrequency(self._tokenizer)
if local_dictionary:
self._word_frequency.load_dictionary(local_dictionary)
elif language:
filename = "{}.json.gz".format(language.lower())
here = os.path.dirname(__file__)
full_filename = os.path.join(here, "resources", filename)
if not os.path.exists(full_filename):
msg = (
"The provided dictionary language ({}) does not " "exist!"
).format(language.lower())
raise ValueError(msg)
self._word_frequency.load_dictionary(full_filename)
def __contains__(self, key):
""" setup easier known checks """
return key in self._word_frequency
def __getitem__(self, key):
""" setup easier frequency checks """
return self._word_frequency[key]
@property
def word_frequency(self):
""" WordFrequency: An encapsulation of the word frequency `dictionary`
Note:
Not settable """
return self._word_frequency
@property
def distance(self):
""" int: The maximum edit distance to calculate
Note:
Valid values are 1 or 2; if an invalid value is passed, \
defaults to 2 """
return self._distance
@distance.setter
def distance(self, val):
""" set the distance parameter """
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp
def split_words(self, text):
""" Split text into individual `words` using either a simple whitespace
regex or the passed in tokenizer
Args:
text (str): The text to split into individual words
Returns:
list(str): A listing of all words in the provided text """
return self._tokenizer(text)
def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data)
def word_probability(self, word, total_words=None):
""" Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word """
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words
def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability)
def candidates(self, word):
""" Generate possible spelling corrections for the provided word up to
an edit distance of two, if and only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates """
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word}
def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
)
def unknown(self, words):
""" The subset of `words` that do not appear in the dictionary
Args:
words (list): List of words to determine which are not in the \
corpus
Returns:
set: The set of those words from the input that are not in \
the corpus """
tmp = [w.lower() for w in words if self._check_if_should_check(w)]
return set(w for w in tmp if w not in self._word_frequency.dictionary)
def edit_distance_2(self, word):
""" Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word """
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
]
def __edit_distance_alt(self, words):
""" Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words """
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
@staticmethod
def _check_if_should_check(word):
if len(word) == 1 and word in string.punctuation:
return False
try: # check if it is a number (int, float, etc)
float(word)
return False
except ValueError:
pass
return True
|
barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.edit_distance_2 | python | def edit_distance_2(self, word):
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
] | Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L206-L218 | [
"def edit_distance_1(self, word):\n \"\"\" Compute all strings that are one edit away from `word` using only\n the letters in the corpus\n\n Args:\n word (str): The word for which to calculate the edit distance\n Returns:\n set: The set of strings that are edit distance one from the \\\n provided word \"\"\"\n word = word.lower()\n if self._check_if_should_check(word) is False:\n return {word}\n letters = self._word_frequency.letters\n splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\n deletes = [L + R[1:] for L, R in splits if R]\n transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]\n replaces = [L + c + R[1:] for L, R in splits if R for c in letters]\n inserts = [L + c + R for L, R in splits for c in letters]\n return set(deletes + transposes + replaces + inserts)\n"
] | class SpellChecker(object):
""" The SpellChecker class encapsulates the basics needed to accomplish a
simple spell checking algorithm. It is based on the work by
Peter Norvig (https://norvig.com/spell-correct.html)
Args:
language (str): The language of the dictionary to load or None \
for no dictionary. Supported languages are `en`, `es`, `de`, fr` \
and `pt`. Defaults to `en`
local_dictionary (str): The path to a locally stored word \
frequency dictionary; if provided, no language will be loaded
distance (int): The edit distance to use. Defaults to 2 """
__slots__ = ["_distance", "_word_frequency", "_tokenizer"]
def __init__(
self, language="en", local_dictionary=None, distance=2, tokenizer=None
):
self._distance = None
self.distance = distance # use the setter value check
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
self._word_frequency = WordFrequency(self._tokenizer)
if local_dictionary:
self._word_frequency.load_dictionary(local_dictionary)
elif language:
filename = "{}.json.gz".format(language.lower())
here = os.path.dirname(__file__)
full_filename = os.path.join(here, "resources", filename)
if not os.path.exists(full_filename):
msg = (
"The provided dictionary language ({}) does not " "exist!"
).format(language.lower())
raise ValueError(msg)
self._word_frequency.load_dictionary(full_filename)
def __contains__(self, key):
""" setup easier known checks """
return key in self._word_frequency
def __getitem__(self, key):
""" setup easier frequency checks """
return self._word_frequency[key]
@property
def word_frequency(self):
""" WordFrequency: An encapsulation of the word frequency `dictionary`
Note:
Not settable """
return self._word_frequency
@property
def distance(self):
""" int: The maximum edit distance to calculate
Note:
Valid values are 1 or 2; if an invalid value is passed, \
defaults to 2 """
return self._distance
@distance.setter
def distance(self, val):
""" set the distance parameter """
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp
def split_words(self, text):
""" Split text into individual `words` using either a simple whitespace
regex or the passed in tokenizer
Args:
text (str): The text to split into individual words
Returns:
list(str): A listing of all words in the provided text """
return self._tokenizer(text)
def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data)
def word_probability(self, word, total_words=None):
""" Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word """
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words
def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability)
def candidates(self, word):
""" Generate possible spelling corrections for the provided word up to
an edit distance of two, if and only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates """
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word}
def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
)
def unknown(self, words):
""" The subset of `words` that do not appear in the dictionary
Args:
words (list): List of words to determine which are not in the \
corpus
Returns:
set: The set of those words from the input that are not in \
the corpus """
tmp = [w.lower() for w in words if self._check_if_should_check(w)]
return set(w for w in tmp if w not in self._word_frequency.dictionary)
def edit_distance_1(self, word):
""" Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word """
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def __edit_distance_alt(self, words):
""" Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words """
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)]
@staticmethod
def _check_if_should_check(word):
if len(word) == 1 and word in string.punctuation:
return False
try: # check if it is a number (int, float, etc)
float(word)
return False
except ValueError:
pass
return True
|
barrust/pyspellchecker | spellchecker/spellchecker.py | SpellChecker.__edit_distance_alt | python | def __edit_distance_alt(self, words):
words = [x.lower() for x in words]
return [e2 for e1 in words for e2 in self.edit_distance_1(e1)] | Compute all strings that are 1 edits away from all the words using
only the letters in the corpus
Args:
words (list): The words for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided words | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L220-L230 | null | class SpellChecker(object):
""" The SpellChecker class encapsulates the basics needed to accomplish a
simple spell checking algorithm. It is based on the work by
Peter Norvig (https://norvig.com/spell-correct.html)
Args:
language (str): The language of the dictionary to load or None \
for no dictionary. Supported languages are `en`, `es`, `de`, fr` \
and `pt`. Defaults to `en`
local_dictionary (str): The path to a locally stored word \
frequency dictionary; if provided, no language will be loaded
distance (int): The edit distance to use. Defaults to 2 """
__slots__ = ["_distance", "_word_frequency", "_tokenizer"]
def __init__(
self, language="en", local_dictionary=None, distance=2, tokenizer=None
):
self._distance = None
self.distance = distance # use the setter value check
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
self._word_frequency = WordFrequency(self._tokenizer)
if local_dictionary:
self._word_frequency.load_dictionary(local_dictionary)
elif language:
filename = "{}.json.gz".format(language.lower())
here = os.path.dirname(__file__)
full_filename = os.path.join(here, "resources", filename)
if not os.path.exists(full_filename):
msg = (
"The provided dictionary language ({}) does not " "exist!"
).format(language.lower())
raise ValueError(msg)
self._word_frequency.load_dictionary(full_filename)
def __contains__(self, key):
""" setup easier known checks """
return key in self._word_frequency
def __getitem__(self, key):
""" setup easier frequency checks """
return self._word_frequency[key]
@property
def word_frequency(self):
""" WordFrequency: An encapsulation of the word frequency `dictionary`
Note:
Not settable """
return self._word_frequency
@property
def distance(self):
""" int: The maximum edit distance to calculate
Note:
Valid values are 1 or 2; if an invalid value is passed, \
defaults to 2 """
return self._distance
@distance.setter
def distance(self, val):
""" set the distance parameter """
tmp = 2
try:
int(val)
if val > 0 and val <= 2:
tmp = val
except (ValueError, TypeError):
pass
self._distance = tmp
def split_words(self, text):
""" Split text into individual `words` using either a simple whitespace
regex or the passed in tokenizer
Args:
text (str): The text to split into individual words
Returns:
list(str): A listing of all words in the provided text """
return self._tokenizer(text)
def export(self, filepath, encoding="utf-8", gzipped=True):
""" Export the word frequency list for import in the future
Args:
filepath (str): The filepath to the exported dictionary
encoding (str): The encoding of the resulting output
gzipped (bool): Whether to gzip the dictionary or not """
data = json.dumps(self.word_frequency.dictionary, sort_keys=True)
write_file(filepath, encoding, gzipped, data)
def word_probability(self, word, total_words=None):
""" Calculate the probability of the `word` being the desired, correct
word
Args:
word (str): The word for which the word probability is \
calculated
total_words (int): The total number of words to use in the \
calculation; use the default for using the whole word \
frequency
Returns:
float: The probability that the word is the correct word """
if total_words is None:
total_words = self._word_frequency.total_words
return self._word_frequency.dictionary[word] / total_words
def correction(self, word):
""" The most probable correct spelling for the word
Args:
word (str): The word to correct
Returns:
str: The most likely candidate """
return max(self.candidates(word), key=self.word_probability)
def candidates(self, word):
""" Generate possible spelling corrections for the provided word up to
an edit distance of two, if and only when needed
Args:
word (str): The word for which to calculate candidate spellings
Returns:
set: The set of words that are possible candidates """
if self.known([word]): # short-cut if word is correct already
return {word}
# get edit distance 1...
res = [x for x in self.edit_distance_1(word)]
tmp = self.known(res)
if tmp:
return tmp
# if still not found, use the edit distance 1 to calc edit distance 2
if self._distance == 2:
tmp = self.known([x for x in self.__edit_distance_alt(res)])
if tmp:
return tmp
return {word}
def known(self, words):
""" The subset of `words` that appear in the dictionary of words
Args:
words (list): List of words to determine which are in the \
corpus
Returns:
set: The set of those words from the input that are in the \
corpus """
tmp = [w.lower() for w in words]
return set(
w
for w in tmp
if w in self._word_frequency.dictionary
or not self._check_if_should_check(w)
)
def unknown(self, words):
""" The subset of `words` that do not appear in the dictionary
Args:
words (list): List of words to determine which are not in the \
corpus
Returns:
set: The set of those words from the input that are not in \
the corpus """
tmp = [w.lower() for w in words if self._check_if_should_check(w)]
return set(w for w in tmp if w not in self._word_frequency.dictionary)
def edit_distance_1(self, word):
""" Compute all strings that are one edit away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance one from the \
provided word """
word = word.lower()
if self._check_if_should_check(word) is False:
return {word}
letters = self._word_frequency.letters
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edit_distance_2(self, word):
""" Compute all strings that are two edits away from `word` using only
the letters in the corpus
Args:
word (str): The word for which to calculate the edit distance
Returns:
set: The set of strings that are edit distance two from the \
provided word """
word = word.lower()
return [
e2 for e1 in self.edit_distance_1(word) for e2 in self.edit_distance_1(e1)
]
@staticmethod
def _check_if_should_check(word):
if len(word) == 1 and word in string.punctuation:
return False
try: # check if it is a number (int, float, etc)
float(word)
return False
except ValueError:
pass
return True
|
barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.pop | python | def pop(self, key, default=None):
return self._dictionary.pop(key.lower(), default) | Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L275-L282 | null | class WordFrequency(object):
""" Store the `dictionary` as a word frequency list while allowing for
different methods to load the data and update over time """
__slots__ = [
"_dictionary",
"_total_words",
"_unique_words",
"_letters",
"_tokenizer",
]
def __init__(self, tokenizer=None):
self._dictionary = Counter()
self._total_words = 0
self._unique_words = 0
self._letters = set()
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
def __contains__(self, key):
""" turn on contains """
return key.lower() in self._dictionary
def __getitem__(self, key):
""" turn on getitem """
return self._dictionary[key.lower()]
@property
def dictionary(self):
""" Counter: A counting dictionary of all words in the corpus and the \
number of times each has been seen
Note:
Not settable """
return self._dictionary
@property
def total_words(self):
""" int: The sum of all word occurances in the word frequency \
dictionary
Note:
Not settable """
return self._total_words
@property
def unique_words(self):
""" int: The total number of unique words in the word frequency list
Note:
Not settable """
return self._unique_words
@property
def letters(self):
""" str: The listing of all letters found within the corpus
Note:
Not settable """
return self._letters
def tokenize(self, text):
""" Tokenize the provided string object into individual words
Args:
text (str): The string object to tokenize
Yields:
str: The next `word` in the tokenized string
Note:
This is the same as the `spellchecker.split_words()` """
for x in self._tokenizer(text):
yield x.lower()
def keys(self):
""" Iterator over the key of the dictionary
Yields:
str: The next key in the dictionary
Note:
This is the same as `spellchecker.words()` """
for key in self._dictionary.keys():
yield key
def words(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
Note:
This is the same as `spellchecker.keys()` """
for word in self._dictionary.keys():
yield word
def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word]
def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary()
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
""" Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
"""
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer)
def load_text(self, text, tokenizer=None):
""" Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string
"""
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary()
def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary()
def add(self, word):
""" Add a word to the word frequency list
Args:
word (str): The word to add """
self.load_words([word])
def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove(self, word):
""" Remove a word from the word frequency list
Args:
word (str): The word to remove """
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary()
def _update_dictionary(self):
""" Update the word frequency object """
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key)
|
barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.items | python | def items(self):
for word in self._dictionary.keys():
yield word, self._dictionary[word] | Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L350-L359 | null | class WordFrequency(object):
""" Store the `dictionary` as a word frequency list while allowing for
different methods to load the data and update over time """
__slots__ = [
"_dictionary",
"_total_words",
"_unique_words",
"_letters",
"_tokenizer",
]
def __init__(self, tokenizer=None):
self._dictionary = Counter()
self._total_words = 0
self._unique_words = 0
self._letters = set()
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
def __contains__(self, key):
""" turn on contains """
return key.lower() in self._dictionary
def __getitem__(self, key):
""" turn on getitem """
return self._dictionary[key.lower()]
def pop(self, key, default=None):
""" Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present """
return self._dictionary.pop(key.lower(), default)
@property
def dictionary(self):
""" Counter: A counting dictionary of all words in the corpus and the \
number of times each has been seen
Note:
Not settable """
return self._dictionary
@property
def total_words(self):
""" int: The sum of all word occurances in the word frequency \
dictionary
Note:
Not settable """
return self._total_words
@property
def unique_words(self):
""" int: The total number of unique words in the word frequency list
Note:
Not settable """
return self._unique_words
@property
def letters(self):
""" str: The listing of all letters found within the corpus
Note:
Not settable """
return self._letters
def tokenize(self, text):
""" Tokenize the provided string object into individual words
Args:
text (str): The string object to tokenize
Yields:
str: The next `word` in the tokenized string
Note:
This is the same as the `spellchecker.split_words()` """
for x in self._tokenizer(text):
yield x.lower()
def keys(self):
""" Iterator over the key of the dictionary
Yields:
str: The next key in the dictionary
Note:
This is the same as `spellchecker.words()` """
for key in self._dictionary.keys():
yield key
def words(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
Note:
This is the same as `spellchecker.keys()` """
for word in self._dictionary.keys():
yield word
def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary()
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
""" Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
"""
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer)
def load_text(self, text, tokenizer=None):
""" Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string
"""
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary()
def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary()
def add(self, word):
""" Add a word to the word frequency list
Args:
word (str): The word to add """
self.load_words([word])
def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove(self, word):
""" Remove a word from the word frequency list
Args:
word (str): The word to remove """
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary()
def _update_dictionary(self):
""" Update the word frequency object """
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key)
|
barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.load_dictionary | python | def load_dictionary(self, filename, encoding="utf-8"):
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary() | Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L361-L370 | [
"def _update_dictionary(self):\n \"\"\" Update the word frequency object \"\"\"\n self._total_words = sum(self._dictionary.values())\n self._unique_words = len(self._dictionary.keys())\n self._letters = set()\n for key in self._dictionary:\n self._letters.update(key)\n"
] | class WordFrequency(object):
""" Store the `dictionary` as a word frequency list while allowing for
different methods to load the data and update over time """
__slots__ = [
"_dictionary",
"_total_words",
"_unique_words",
"_letters",
"_tokenizer",
]
def __init__(self, tokenizer=None):
self._dictionary = Counter()
self._total_words = 0
self._unique_words = 0
self._letters = set()
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
def __contains__(self, key):
    """ Case-insensitive membership test against the frequency list """
    return key.lower() in self._dictionary

def __getitem__(self, key):
    """ Case-insensitive frequency lookup for a word """
    return self._dictionary[key.lower()]
def pop(self, key, default=None):
    """ Remove the key and return the associated value or default if not
        found

        Args:
            key (str): The key to remove
            default (obj): The value to return if key is not present """
    # the dictionary stores lower-cased words only
    lowered = key.lower()
    return self._dictionary.pop(lowered, default)
@property
def dictionary(self):
    """ Counter: word -> number of occurrences seen in the corpus \
        (read-only) """
    return self._dictionary

@property
def total_words(self):
    """ int: total number of word occurrences in the corpus (read-only) """
    return self._total_words

@property
def unique_words(self):
    """ int: number of distinct words in the frequency list (read-only) """
    return self._unique_words

@property
def letters(self):
    """ set: every letter observed within the corpus words (read-only) """
    return self._letters
def tokenize(self, text):
    """ Tokenize the provided string object into individual words

        Args:
            text (str): The string object to tokenize
        Yields:
            str: The next `word` in the tokenized string
        Note:
            This is the same as the `spellchecker.split_words()` """
    yield from (token.lower() for token in self._tokenizer(text))
def keys(self):
    """ Iterator over the keys of the dictionary

        Yields:
            str: The next key in the dictionary
        Note:
            This is the same as `spellchecker.words()` """
    yield from self._dictionary.keys()

def words(self):
    """ Iterator over the words in the dictionary

        Yields:
            str: The next word in the dictionary
        Note:
            This is the same as `spellchecker.keys()` """
    yield from self._dictionary.keys()

def items(self):
    """ Iterator over (word, count) pairs in the dictionary

        Yields:
            str: The next word in the dictionary
            int: The number of instances in the dictionary
        Note:
            This is the same as `dict.items()` """
    for word, count in self._dictionary.items():
        yield word, count
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
    """ Load in a text file from which to generate a word frequency list

        Args:
            filename (str): The filepath to the text file to be loaded
            encoding (str): The encoding of the text file
            tokenizer (function): The function to use to tokenize a string
    """
    # load_file handles plain and gzipped files; delegate parsing to load_text
    with load_file(filename, encoding=encoding) as contents:
        self.load_text(contents, tokenizer)
def load_text(self, text, tokenizer=None):
    """ Load text from which to generate a word frequency list

        Args:
            text (str): The text to be loaded
            tokenizer (function): The function to use to tokenize a string
    """
    # an explicit tokenizer wins; otherwise use the object's own tokenize()
    if tokenizer:
        tokens = [piece.lower() for piece in tokenizer(text)]
    else:
        tokens = self.tokenize(text)
    self._dictionary.update(tokens)
    self._update_dictionary()
def load_words(self, words):
    """ Load a list of words from which to generate a word frequency list

        Args:
            words (list): The list of words to be loaded """
    # normalize to lower case before counting
    lowered = [entry.lower() for entry in words]
    self._dictionary.update(lowered)
    self._update_dictionary()

def add(self, word):
    """ Add a word to the word frequency list

        Args:
            word (str): The word to add """
    self.load_words([word])
def remove_words(self, words):
    """ Remove a list of words from the word frequency list

        Args:
            words (list): The list of words to remove """
    # NOTE: raises KeyError if a word is not present (matches remove())
    for entry in words:
        self._dictionary.pop(entry.lower())
    self._update_dictionary()

def remove(self, word):
    """ Remove a word from the word frequency list

        Args:
            word (str): The word to remove """
    self._dictionary.pop(word.lower())
    self._update_dictionary()
def remove_by_threshold(self, threshold=5):
    """ Remove all words at, or below, the provided threshold

        Args:
            threshold (int): The threshold at which a word is to be \
                removed """
    # iterate over a materialized snapshot so we may mutate the dict safely
    snapshot = list(self._dictionary.items())
    for word, count in snapshot:
        if count <= threshold:
            self._dictionary.pop(word)
    self._update_dictionary()
def _update_dictionary(self):
    """ Update the word frequency object """
    # refresh the cached aggregates from the backing counter
    self._total_words = sum(self._dictionary.values())
    self._unique_words = len(self._dictionary)
    letters = set()
    for word in self._dictionary:
        letters |= set(word)
    self._letters = letters
|
barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.load_text_file | python | def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer) | Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L372-L381 | [
"def load_text(self, text, tokenizer=None):\n \"\"\" Load text from which to generate a word frequency list\n\n Args:\n text (str): The text to be loaded\n tokenizer (function): The function to use to tokenize a string\n \"\"\"\n if tokenizer:\n words = [x.lower() for x in tokenizer(text)]\n else:\n words = self.tokenize(text)\n\n self._dictionary.update(words)\n self._update_dictionary()\n"
] | class WordFrequency(object):
""" Store the `dictionary` as a word frequency list while allowing for
different methods to load the data and update over time """
__slots__ = [
"_dictionary",
"_total_words",
"_unique_words",
"_letters",
"_tokenizer",
]
def __init__(self, tokenizer=None):
self._dictionary = Counter()
self._total_words = 0
self._unique_words = 0
self._letters = set()
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
def __contains__(self, key):
""" turn on contains """
return key.lower() in self._dictionary
def __getitem__(self, key):
""" turn on getitem """
return self._dictionary[key.lower()]
def pop(self, key, default=None):
""" Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present """
return self._dictionary.pop(key.lower(), default)
@property
def dictionary(self):
""" Counter: A counting dictionary of all words in the corpus and the \
number of times each has been seen
Note:
Not settable """
return self._dictionary
@property
def total_words(self):
""" int: The sum of all word occurances in the word frequency \
dictionary
Note:
Not settable """
return self._total_words
@property
def unique_words(self):
""" int: The total number of unique words in the word frequency list
Note:
Not settable """
return self._unique_words
@property
def letters(self):
""" str: The listing of all letters found within the corpus
Note:
Not settable """
return self._letters
def tokenize(self, text):
""" Tokenize the provided string object into individual words
Args:
text (str): The string object to tokenize
Yields:
str: The next `word` in the tokenized string
Note:
This is the same as the `spellchecker.split_words()` """
for x in self._tokenizer(text):
yield x.lower()
def keys(self):
""" Iterator over the key of the dictionary
Yields:
str: The next key in the dictionary
Note:
This is the same as `spellchecker.words()` """
for key in self._dictionary.keys():
yield key
def words(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
Note:
This is the same as `spellchecker.keys()` """
for word in self._dictionary.keys():
yield word
def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word]
def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary()
def load_text(self, text, tokenizer=None):
""" Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string
"""
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary()
def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary()
def add(self, word):
""" Add a word to the word frequency list
Args:
word (str): The word to add """
self.load_words([word])
def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove(self, word):
""" Remove a word from the word frequency list
Args:
word (str): The word to remove """
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary()
def _update_dictionary(self):
""" Update the word frequency object """
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key)
|
barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.load_text | python | def load_text(self, text, tokenizer=None):
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary() | Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L383-L396 | [
"def tokenize(self, text):\n \"\"\" Tokenize the provided string object into individual words\n\n Args:\n text (str): The string object to tokenize\n Yields:\n str: The next `word` in the tokenized string\n Note:\n This is the same as the `spellchecker.split_words()` \"\"\"\n for x in self._tokenizer(text):\n yield x.lower()\n",
"def _update_dictionary(self):\n \"\"\" Update the word frequency object \"\"\"\n self._total_words = sum(self._dictionary.values())\n self._unique_words = len(self._dictionary.keys())\n self._letters = set()\n for key in self._dictionary:\n self._letters.update(key)\n"
] | class WordFrequency(object):
""" Store the `dictionary` as a word frequency list while allowing for
different methods to load the data and update over time """
__slots__ = [
"_dictionary",
"_total_words",
"_unique_words",
"_letters",
"_tokenizer",
]
def __init__(self, tokenizer=None):
self._dictionary = Counter()
self._total_words = 0
self._unique_words = 0
self._letters = set()
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
def __contains__(self, key):
""" turn on contains """
return key.lower() in self._dictionary
def __getitem__(self, key):
""" turn on getitem """
return self._dictionary[key.lower()]
def pop(self, key, default=None):
""" Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present """
return self._dictionary.pop(key.lower(), default)
@property
def dictionary(self):
""" Counter: A counting dictionary of all words in the corpus and the \
number of times each has been seen
Note:
Not settable """
return self._dictionary
@property
def total_words(self):
""" int: The sum of all word occurances in the word frequency \
dictionary
Note:
Not settable """
return self._total_words
@property
def unique_words(self):
""" int: The total number of unique words in the word frequency list
Note:
Not settable """
return self._unique_words
@property
def letters(self):
""" str: The listing of all letters found within the corpus
Note:
Not settable """
return self._letters
def tokenize(self, text):
""" Tokenize the provided string object into individual words
Args:
text (str): The string object to tokenize
Yields:
str: The next `word` in the tokenized string
Note:
This is the same as the `spellchecker.split_words()` """
for x in self._tokenizer(text):
yield x.lower()
def keys(self):
""" Iterator over the key of the dictionary
Yields:
str: The next key in the dictionary
Note:
This is the same as `spellchecker.words()` """
for key in self._dictionary.keys():
yield key
def words(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
Note:
This is the same as `spellchecker.keys()` """
for word in self._dictionary.keys():
yield word
def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word]
def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary()
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
""" Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
"""
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer)
def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary()
def add(self, word):
""" Add a word to the word frequency list
Args:
word (str): The word to add """
self.load_words([word])
def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove(self, word):
""" Remove a word from the word frequency list
Args:
word (str): The word to remove """
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary()
def _update_dictionary(self):
""" Update the word frequency object """
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key)
|
barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.load_words | python | def load_words(self, words):
self._dictionary.update([word.lower() for word in words])
self._update_dictionary() | Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L398-L404 | [
"def _update_dictionary(self):\n \"\"\" Update the word frequency object \"\"\"\n self._total_words = sum(self._dictionary.values())\n self._unique_words = len(self._dictionary.keys())\n self._letters = set()\n for key in self._dictionary:\n self._letters.update(key)\n"
] | class WordFrequency(object):
""" Store the `dictionary` as a word frequency list while allowing for
different methods to load the data and update over time """
__slots__ = [
"_dictionary",
"_total_words",
"_unique_words",
"_letters",
"_tokenizer",
]
def __init__(self, tokenizer=None):
self._dictionary = Counter()
self._total_words = 0
self._unique_words = 0
self._letters = set()
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
def __contains__(self, key):
""" turn on contains """
return key.lower() in self._dictionary
def __getitem__(self, key):
""" turn on getitem """
return self._dictionary[key.lower()]
def pop(self, key, default=None):
""" Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present """
return self._dictionary.pop(key.lower(), default)
@property
def dictionary(self):
""" Counter: A counting dictionary of all words in the corpus and the \
number of times each has been seen
Note:
Not settable """
return self._dictionary
@property
def total_words(self):
""" int: The sum of all word occurances in the word frequency \
dictionary
Note:
Not settable """
return self._total_words
@property
def unique_words(self):
""" int: The total number of unique words in the word frequency list
Note:
Not settable """
return self._unique_words
@property
def letters(self):
""" str: The listing of all letters found within the corpus
Note:
Not settable """
return self._letters
def tokenize(self, text):
""" Tokenize the provided string object into individual words
Args:
text (str): The string object to tokenize
Yields:
str: The next `word` in the tokenized string
Note:
This is the same as the `spellchecker.split_words()` """
for x in self._tokenizer(text):
yield x.lower()
def keys(self):
""" Iterator over the key of the dictionary
Yields:
str: The next key in the dictionary
Note:
This is the same as `spellchecker.words()` """
for key in self._dictionary.keys():
yield key
def words(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
Note:
This is the same as `spellchecker.keys()` """
for word in self._dictionary.keys():
yield word
def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word]
def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary()
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
""" Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
"""
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer)
def load_text(self, text, tokenizer=None):
""" Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string
"""
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary()
def add(self, word):
""" Add a word to the word frequency list
Args:
word (str): The word to add """
self.load_words([word])
def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove(self, word):
""" Remove a word from the word frequency list
Args:
word (str): The word to remove """
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary()
def _update_dictionary(self):
""" Update the word frequency object """
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key)
|
barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.remove_words | python | def remove_words(self, words):
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary() | Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L413-L420 | [
"def _update_dictionary(self):\n \"\"\" Update the word frequency object \"\"\"\n self._total_words = sum(self._dictionary.values())\n self._unique_words = len(self._dictionary.keys())\n self._letters = set()\n for key in self._dictionary:\n self._letters.update(key)\n"
] | class WordFrequency(object):
""" Store the `dictionary` as a word frequency list while allowing for
different methods to load the data and update over time """
__slots__ = [
"_dictionary",
"_total_words",
"_unique_words",
"_letters",
"_tokenizer",
]
def __init__(self, tokenizer=None):
self._dictionary = Counter()
self._total_words = 0
self._unique_words = 0
self._letters = set()
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
def __contains__(self, key):
""" turn on contains """
return key.lower() in self._dictionary
def __getitem__(self, key):
""" turn on getitem """
return self._dictionary[key.lower()]
def pop(self, key, default=None):
""" Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present """
return self._dictionary.pop(key.lower(), default)
@property
def dictionary(self):
""" Counter: A counting dictionary of all words in the corpus and the \
number of times each has been seen
Note:
Not settable """
return self._dictionary
@property
def total_words(self):
""" int: The sum of all word occurances in the word frequency \
dictionary
Note:
Not settable """
return self._total_words
@property
def unique_words(self):
""" int: The total number of unique words in the word frequency list
Note:
Not settable """
return self._unique_words
@property
def letters(self):
""" str: The listing of all letters found within the corpus
Note:
Not settable """
return self._letters
def tokenize(self, text):
""" Tokenize the provided string object into individual words
Args:
text (str): The string object to tokenize
Yields:
str: The next `word` in the tokenized string
Note:
This is the same as the `spellchecker.split_words()` """
for x in self._tokenizer(text):
yield x.lower()
def keys(self):
""" Iterator over the key of the dictionary
Yields:
str: The next key in the dictionary
Note:
This is the same as `spellchecker.words()` """
for key in self._dictionary.keys():
yield key
def words(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
Note:
This is the same as `spellchecker.keys()` """
for word in self._dictionary.keys():
yield word
def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word]
def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary()
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
""" Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
"""
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer)
def load_text(self, text, tokenizer=None):
""" Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string
"""
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary()
def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary()
def add(self, word):
""" Add a word to the word frequency list
Args:
word (str): The word to add """
self.load_words([word])
def remove(self, word):
""" Remove a word from the word frequency list
Args:
word (str): The word to remove """
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary()
def _update_dictionary(self):
""" Update the word frequency object """
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key)
|
barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.remove | python | def remove(self, word):
self._dictionary.pop(word.lower())
self._update_dictionary() | Remove a word from the word frequency list
Args:
word (str): The word to remove | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L422-L428 | [
"def _update_dictionary(self):\n \"\"\" Update the word frequency object \"\"\"\n self._total_words = sum(self._dictionary.values())\n self._unique_words = len(self._dictionary.keys())\n self._letters = set()\n for key in self._dictionary:\n self._letters.update(key)\n"
] | class WordFrequency(object):
""" Store the `dictionary` as a word frequency list while allowing for
different methods to load the data and update over time """
__slots__ = [
"_dictionary",
"_total_words",
"_unique_words",
"_letters",
"_tokenizer",
]
def __init__(self, tokenizer=None):
self._dictionary = Counter()
self._total_words = 0
self._unique_words = 0
self._letters = set()
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
def __contains__(self, key):
""" turn on contains """
return key.lower() in self._dictionary
def __getitem__(self, key):
""" turn on getitem """
return self._dictionary[key.lower()]
def pop(self, key, default=None):
""" Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present """
return self._dictionary.pop(key.lower(), default)
@property
def dictionary(self):
""" Counter: A counting dictionary of all words in the corpus and the \
number of times each has been seen
Note:
Not settable """
return self._dictionary
@property
def total_words(self):
""" int: The sum of all word occurances in the word frequency \
dictionary
Note:
Not settable """
return self._total_words
@property
def unique_words(self):
""" int: The total number of unique words in the word frequency list
Note:
Not settable """
return self._unique_words
@property
def letters(self):
""" str: The listing of all letters found within the corpus
Note:
Not settable """
return self._letters
def tokenize(self, text):
""" Tokenize the provided string object into individual words
Args:
text (str): The string object to tokenize
Yields:
str: The next `word` in the tokenized string
Note:
This is the same as the `spellchecker.split_words()` """
for x in self._tokenizer(text):
yield x.lower()
def keys(self):
""" Iterator over the key of the dictionary
Yields:
str: The next key in the dictionary
Note:
This is the same as `spellchecker.words()` """
for key in self._dictionary.keys():
yield key
def words(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
Note:
This is the same as `spellchecker.keys()` """
for word in self._dictionary.keys():
yield word
def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word]
def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary()
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
""" Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
"""
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer)
def load_text(self, text, tokenizer=None):
""" Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string
"""
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary()
def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary()
def add(self, word):
""" Add a word to the word frequency list
Args:
word (str): The word to add """
self.load_words([word])
def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary()
def _update_dictionary(self):
""" Update the word frequency object """
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key)
|
barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency.remove_by_threshold | python | def remove_by_threshold(self, threshold=5):
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary() | Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L430-L440 | [
"def _update_dictionary(self):\n \"\"\" Update the word frequency object \"\"\"\n self._total_words = sum(self._dictionary.values())\n self._unique_words = len(self._dictionary.keys())\n self._letters = set()\n for key in self._dictionary:\n self._letters.update(key)\n"
] | class WordFrequency(object):
""" Store the `dictionary` as a word frequency list while allowing for
different methods to load the data and update over time """
__slots__ = [
"_dictionary",
"_total_words",
"_unique_words",
"_letters",
"_tokenizer",
]
def __init__(self, tokenizer=None):
self._dictionary = Counter()
self._total_words = 0
self._unique_words = 0
self._letters = set()
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
def __contains__(self, key):
""" turn on contains """
return key.lower() in self._dictionary
def __getitem__(self, key):
""" turn on getitem """
return self._dictionary[key.lower()]
def pop(self, key, default=None):
""" Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present """
return self._dictionary.pop(key.lower(), default)
@property
def dictionary(self):
""" Counter: A counting dictionary of all words in the corpus and the \
number of times each has been seen
Note:
Not settable """
return self._dictionary
@property
def total_words(self):
""" int: The sum of all word occurances in the word frequency \
dictionary
Note:
Not settable """
return self._total_words
@property
def unique_words(self):
""" int: The total number of unique words in the word frequency list
Note:
Not settable """
return self._unique_words
@property
def letters(self):
""" str: The listing of all letters found within the corpus
Note:
Not settable """
return self._letters
def tokenize(self, text):
""" Tokenize the provided string object into individual words
Args:
text (str): The string object to tokenize
Yields:
str: The next `word` in the tokenized string
Note:
This is the same as the `spellchecker.split_words()` """
for x in self._tokenizer(text):
yield x.lower()
def keys(self):
""" Iterator over the key of the dictionary
Yields:
str: The next key in the dictionary
Note:
This is the same as `spellchecker.words()` """
for key in self._dictionary.keys():
yield key
def words(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
Note:
This is the same as `spellchecker.keys()` """
for word in self._dictionary.keys():
yield word
def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word]
def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary()
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
""" Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
"""
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer)
def load_text(self, text, tokenizer=None):
""" Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string
"""
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary()
def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary()
def add(self, word):
""" Add a word to the word frequency list
Args:
word (str): The word to add """
self.load_words([word])
def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove(self, word):
""" Remove a word from the word frequency list
Args:
word (str): The word to remove """
self._dictionary.pop(word.lower())
self._update_dictionary()
def _update_dictionary(self):
""" Update the word frequency object """
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key)
|
barrust/pyspellchecker | spellchecker/spellchecker.py | WordFrequency._update_dictionary | python | def _update_dictionary(self):
self._total_words = sum(self._dictionary.values())
self._unique_words = len(self._dictionary.keys())
self._letters = set()
for key in self._dictionary:
self._letters.update(key) | Update the word frequency object | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/spellchecker.py#L442-L448 | null | class WordFrequency(object):
""" Store the `dictionary` as a word frequency list while allowing for
different methods to load the data and update over time """
__slots__ = [
"_dictionary",
"_total_words",
"_unique_words",
"_letters",
"_tokenizer",
]
def __init__(self, tokenizer=None):
self._dictionary = Counter()
self._total_words = 0
self._unique_words = 0
self._letters = set()
self._tokenizer = _parse_into_words
if tokenizer is not None:
self._tokenizer = tokenizer
def __contains__(self, key):
""" turn on contains """
return key.lower() in self._dictionary
def __getitem__(self, key):
""" turn on getitem """
return self._dictionary[key.lower()]
def pop(self, key, default=None):
""" Remove the key and return the associated value or default if not
found
Args:
key (str): The key to remove
default (obj): The value to return if key is not present """
return self._dictionary.pop(key.lower(), default)
@property
def dictionary(self):
""" Counter: A counting dictionary of all words in the corpus and the \
number of times each has been seen
Note:
Not settable """
return self._dictionary
@property
def total_words(self):
""" int: The sum of all word occurances in the word frequency \
dictionary
Note:
Not settable """
return self._total_words
@property
def unique_words(self):
""" int: The total number of unique words in the word frequency list
Note:
Not settable """
return self._unique_words
@property
def letters(self):
""" str: The listing of all letters found within the corpus
Note:
Not settable """
return self._letters
def tokenize(self, text):
""" Tokenize the provided string object into individual words
Args:
text (str): The string object to tokenize
Yields:
str: The next `word` in the tokenized string
Note:
This is the same as the `spellchecker.split_words()` """
for x in self._tokenizer(text):
yield x.lower()
def keys(self):
""" Iterator over the key of the dictionary
Yields:
str: The next key in the dictionary
Note:
This is the same as `spellchecker.words()` """
for key in self._dictionary.keys():
yield key
def words(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
Note:
This is the same as `spellchecker.keys()` """
for word in self._dictionary.keys():
yield word
def items(self):
""" Iterator over the words in the dictionary
Yields:
str: The next word in the dictionary
int: The number of instances in the dictionary
Note:
This is the same as `dict.items()` """
for word in self._dictionary.keys():
yield word, self._dictionary[word]
def load_dictionary(self, filename, encoding="utf-8"):
""" Load in a pre-built word frequency list
Args:
filename (str): The filepath to the json (optionally gzipped) \
file to be loaded
encoding (str): The encoding of the dictionary """
with load_file(filename, encoding) as data:
self._dictionary.update(json.loads(data.lower(), encoding=encoding))
self._update_dictionary()
def load_text_file(self, filename, encoding="utf-8", tokenizer=None):
""" Load in a text file from which to generate a word frequency list
Args:
filename (str): The filepath to the text file to be loaded
encoding (str): The encoding of the text file
tokenizer (function): The function to use to tokenize a string
"""
with load_file(filename, encoding=encoding) as data:
self.load_text(data, tokenizer)
def load_text(self, text, tokenizer=None):
""" Load text from which to generate a word frequency list
Args:
text (str): The text to be loaded
tokenizer (function): The function to use to tokenize a string
"""
if tokenizer:
words = [x.lower() for x in tokenizer(text)]
else:
words = self.tokenize(text)
self._dictionary.update(words)
self._update_dictionary()
def load_words(self, words):
""" Load a list of words from which to generate a word frequency list
Args:
words (list): The list of words to be loaded """
self._dictionary.update([word.lower() for word in words])
self._update_dictionary()
def add(self, word):
""" Add a word to the word frequency list
Args:
word (str): The word to add """
self.load_words([word])
def remove_words(self, words):
""" Remove a list of words from the word frequency list
Args:
words (list): The list of words to remove """
for word in words:
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove(self, word):
""" Remove a word from the word frequency list
Args:
word (str): The word to remove """
self._dictionary.pop(word.lower())
self._update_dictionary()
def remove_by_threshold(self, threshold=5):
""" Remove all words at, or below, the provided threshold
Args:
threshold (int): The threshold at which a word is to be \
removed """
keys = [x for x in self._dictionary.keys()]
for key in keys:
if self._dictionary[key] <= threshold:
self._dictionary.pop(key)
self._update_dictionary()
|
barrust/pyspellchecker | spellchecker/utils.py | load_file | python | def load_file(filename, encoding):
try:
with gzip.open(filename, mode="rt") as fobj:
yield fobj.read()
except (OSError, IOError):
with OPEN(filename, mode="r", encoding=encoding) as fobj:
yield fobj.read() | Context manager to handle opening a gzip or text file correctly and
reading all the data
Args:
filename (str): The filename to open
encoding (str): The file encoding to use
Yields:
str: The string data from the file read | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/utils.py#L16-L31 | null | """ Additional utility functions """
import sys
import re
import gzip
import contextlib
if sys.version_info < (3, 0):
import io # python 2 text file encoding support
OPEN = io.open # hijack this
else:
OPEN = open
@contextlib.contextmanager
def write_file(filepath, encoding, gzipped, data):
""" Write the data to file either as a gzip file or text based on the
gzipped parameter
Args:
filepath (str): The filename to open
encoding (str): The file encoding to use
gzipped (bool): Whether the file should be gzipped or not
data (str): The data to be written out
"""
if gzipped:
with gzip.open(filepath, "wt") as fobj:
fobj.write(data)
else:
with OPEN(filepath, "w", encoding=encoding) as fobj:
if sys.version_info < (3, 0):
data = data.decode(encoding)
fobj.write(data)
def _parse_into_words(text):
""" Parse the text into words; currently removes punctuation
Args:
text (str): The text to split into words
"""
return re.findall(r"\w+", text.lower())
|
barrust/pyspellchecker | spellchecker/utils.py | write_file | python | def write_file(filepath, encoding, gzipped, data):
if gzipped:
with gzip.open(filepath, "wt") as fobj:
fobj.write(data)
else:
with OPEN(filepath, "w", encoding=encoding) as fobj:
if sys.version_info < (3, 0):
data = data.decode(encoding)
fobj.write(data) | Write the data to file either as a gzip file or text based on the
gzipped parameter
Args:
filepath (str): The filename to open
encoding (str): The file encoding to use
gzipped (bool): Whether the file should be gzipped or not
data (str): The data to be written out | train | https://github.com/barrust/pyspellchecker/blob/fa96024c0cdeba99e10e11060d5fd7aba796b271/spellchecker/utils.py#L34-L51 | null | """ Additional utility functions """
import sys
import re
import gzip
import contextlib
if sys.version_info < (3, 0):
import io # python 2 text file encoding support
OPEN = io.open # hijack this
else:
OPEN = open
@contextlib.contextmanager
def load_file(filename, encoding):
""" Context manager to handle opening a gzip or text file correctly and
reading all the data
Args:
filename (str): The filename to open
encoding (str): The file encoding to use
Yields:
str: The string data from the file read
"""
try:
with gzip.open(filename, mode="rt") as fobj:
yield fobj.read()
except (OSError, IOError):
with OPEN(filename, mode="r", encoding=encoding) as fobj:
yield fobj.read()
def _parse_into_words(text):
""" Parse the text into words; currently removes punctuation
Args:
text (str): The text to split into words
"""
return re.findall(r"\w+", text.lower())
|
Kautenja/nes-py | nes_py/_image_viewer.py | ImageViewer.open | python | def open(self):
self._window = Window(
caption=self.caption,
height=self.height,
width=self.width,
vsync=False,
resizable=True,
) | Open the window. | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/_image_viewer.py#L40-L48 | null | class ImageViewer(object):
"""A simple class for viewing images using pyglet."""
def __init__(self, caption, height, width):
"""
Initialize a new image viewer.
Args:
caption (str): the caption/title for the window
height (int): the height of the window
width (int): the width of the window
Returns:
None
"""
self.caption = caption
self.height = height
self.width = width
self._window = None
def __repr__(self):
"""Return an executable string representing this object."""
template = '{}(caption={}, height={}, width={})'
return template.format(self.caption, self.height, self.width)
def __del__(self):
"""Close any open windows and delete this object."""
self.close()
@property
def is_open(self):
"""Return a boolean determining if this window is open."""
return self._window is not None
def show(self, frame):
"""
Show an array of pixels on the window.
Args:
frame (numpy.ndarray): the frame to show on the window
Returns:
None
"""
# check that the frame has the correct dimensions
if len(frame.shape) != 3:
raise ValueError('frame should have shape with only 3 dimensions')
# open the window if it isn't open already
if not self.is_open:
self.open()
# prepare the window for the next frame
self._window.clear()
self._window.switch_to()
self._window.dispatch_events()
# create an image data object
image = ImageData(
frame.shape[1],
frame.shape[0],
'RGB',
frame.tobytes(),
pitch=frame.shape[1]*-3
)
# send the image to the window
image.blit(0, 0, width=self._window.width, height=self._window.height)
self._window.flip()
def close(self):
"""Close the window."""
if self.is_open:
self._window.close()
self._window = None
|
Kautenja/nes-py | nes_py/_image_viewer.py | ImageViewer.show | python | def show(self, frame):
# check that the frame has the correct dimensions
if len(frame.shape) != 3:
raise ValueError('frame should have shape with only 3 dimensions')
# open the window if it isn't open already
if not self.is_open:
self.open()
# prepare the window for the next frame
self._window.clear()
self._window.switch_to()
self._window.dispatch_events()
# create an image data object
image = ImageData(
frame.shape[1],
frame.shape[0],
'RGB',
frame.tobytes(),
pitch=frame.shape[1]*-3
)
# send the image to the window
image.blit(0, 0, width=self._window.width, height=self._window.height)
self._window.flip() | Show an array of pixels on the window.
Args:
frame (numpy.ndarray): the frame to show on the window
Returns:
None | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/_image_viewer.py#L50-L80 | [
"def open(self):\n \"\"\"Open the window.\"\"\"\n self._window = Window(\n caption=self.caption,\n height=self.height,\n width=self.width,\n vsync=False,\n resizable=True,\n )\n"
] | class ImageViewer(object):
"""A simple class for viewing images using pyglet."""
def __init__(self, caption, height, width):
"""
Initialize a new image viewer.
Args:
caption (str): the caption/title for the window
height (int): the height of the window
width (int): the width of the window
Returns:
None
"""
self.caption = caption
self.height = height
self.width = width
self._window = None
def __repr__(self):
"""Return an executable string representing this object."""
template = '{}(caption={}, height={}, width={})'
return template.format(self.caption, self.height, self.width)
def __del__(self):
"""Close any open windows and delete this object."""
self.close()
@property
def is_open(self):
"""Return a boolean determining if this window is open."""
return self._window is not None
def open(self):
"""Open the window."""
self._window = Window(
caption=self.caption,
height=self.height,
width=self.width,
vsync=False,
resizable=True,
)
def close(self):
"""Close the window."""
if self.is_open:
self._window.close()
self._window = None
|
Kautenja/nes-py | nes_py/app/play_human.py | display_arr | python | def display_arr(screen, arr, video_size, transpose):
# take the transpose if necessary
if transpose:
pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1))
else:
pyg_img = arr
# resize the image according to given image size
pyg_img = pygame.transform.scale(pyg_img, video_size)
# blit the image to the surface
screen.blit(pyg_img, (0, 0)) | Display an image to the pygame screen.
Args:
screen (pygame.Surface): the pygame surface to write frames to
arr (np.ndarray): numpy array representing a single frame of gameplay
video_size (tuple): the size to render the frame as
transpose (bool): whether to transpose the frame before displaying
Returns:
None | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/app/play_human.py#L6-L28 | null | """A method to play gym environments using human IO inputs."""
import gym
import pygame
def play(env, transpose=True, fps=30, nop_=0):
"""Play the game using the keyboard as a human.
Args:
env (gym.Env): the environment to use for playing
transpose (bool): whether to transpose frame before viewing them
fps (int): number of steps of the environment to execute every second
nop_ (any): the object to use as a null op action for the environment
Returns:
None
"""
# ensure the observation space is a box of pixels
assert isinstance(env.observation_space, gym.spaces.box.Box)
# ensure the observation space is either B&W pixels or RGB Pixels
obs_s = env.observation_space
is_bw = len(obs_s.shape) == 2
is_rgb = len(obs_s.shape) == 3 and obs_s.shape[2] in [1, 3]
assert is_bw or is_rgb
# get the mapping of keyboard keys to actions in the environment
if hasattr(env, 'get_keys_to_action'):
keys_to_action = env.get_keys_to_action()
# get the mapping of keyboard keys to actions in the unwrapped environment
elif hasattr(env.unwrapped, 'get_keys_to_action'):
keys_to_action = env.unwrapped.get_keys_to_action()
else:
raise ValueError('env has no get_keys_to_action method')
relevant_keys = set(sum(map(list, keys_to_action.keys()), []))
# determine the size of the video in pixels
video_size = env.observation_space.shape[0], env.observation_space.shape[1]
if transpose:
video_size = tuple(reversed(video_size))
# generate variables to determine the running state of the game
pressed_keys = []
running = True
env_done = True
# setup the screen using pygame
flags = pygame.RESIZABLE | pygame.HWSURFACE | pygame.DOUBLEBUF
screen = pygame.display.set_mode(video_size, flags)
pygame.event.set_blocked(pygame.MOUSEMOTION)
# set the caption for the pygame window. if the env has a spec use its id
if env.spec is not None:
pygame.display.set_caption(env.spec.id)
# otherwise just use the default nes-py caption
else:
pygame.display.set_caption('nes-py')
# start a clock for limiting the frame rate to the given FPS
clock = pygame.time.Clock()
# start the main game loop
while running:
# reset if the environment is done
if env_done:
env_done = False
obs = env.reset()
# otherwise take a normal step
else:
# unwrap the action based on pressed relevant keys
action = keys_to_action.get(tuple(sorted(pressed_keys)), nop_)
obs, rew, env_done, info = env.step(action)
# make sure the observation exists
if obs is not None:
# if the observation is just height and width (B&W)
if len(obs.shape) == 2:
# add a dummy channel for pygame display
obs = obs[:, :, None]
# if the observation is single channel (B&W)
if obs.shape[2] == 1:
# repeat the single channel 3 times for RGB encoding of B&W
obs = obs.repeat(3, axis=2)
# display the observation on the pygame screen
display_arr(screen, obs, video_size, transpose)
# process keyboard events
for event in pygame.event.get():
# handle a key being pressed
if event.type == pygame.KEYDOWN:
# make sure the key is in the relevant key list
if event.key in relevant_keys:
# add the key to pressed keys
pressed_keys.append(event.key)
# ASCII code 27 is the "escape" key
elif event.key == 27:
running = False
# handle the backup and reset functions
elif event.key == ord('e'):
env.unwrapped._backup()
elif event.key == ord('r'):
env.unwrapped._restore()
# handle a key being released
elif event.type == pygame.KEYUP:
# make sure the key is in the relevant key list
if event.key in relevant_keys:
# remove the key from the pressed keys
pressed_keys.remove(event.key)
# if the event is quit, set running to False
elif event.type == pygame.QUIT:
running = False
# flip the pygame screen
pygame.display.flip()
# throttle to maintain the framerate
clock.tick(fps)
# quite the pygame setup
pygame.quit()
def play_human(env):
"""
Play the environment using keyboard as a human.
Args:
env (gym.Env): the initialized gym environment to play
Returns:
None
"""
# play the game and catch a potential keyboard interrupt
try:
play(env, fps=env.metadata['video.frames_per_second'])
except KeyboardInterrupt:
pass
# reset and close the environment
env.close()
# explicitly define the outward facing API of the module
__all__ = [play_human.__name__]
|
Kautenja/nes-py | nes_py/app/play_human.py | play | python | def play(env, transpose=True, fps=30, nop_=0):
# ensure the observation space is a box of pixels
assert isinstance(env.observation_space, gym.spaces.box.Box)
# ensure the observation space is either B&W pixels or RGB Pixels
obs_s = env.observation_space
is_bw = len(obs_s.shape) == 2
is_rgb = len(obs_s.shape) == 3 and obs_s.shape[2] in [1, 3]
assert is_bw or is_rgb
# get the mapping of keyboard keys to actions in the environment
if hasattr(env, 'get_keys_to_action'):
keys_to_action = env.get_keys_to_action()
# get the mapping of keyboard keys to actions in the unwrapped environment
elif hasattr(env.unwrapped, 'get_keys_to_action'):
keys_to_action = env.unwrapped.get_keys_to_action()
else:
raise ValueError('env has no get_keys_to_action method')
relevant_keys = set(sum(map(list, keys_to_action.keys()), []))
# determine the size of the video in pixels
video_size = env.observation_space.shape[0], env.observation_space.shape[1]
if transpose:
video_size = tuple(reversed(video_size))
# generate variables to determine the running state of the game
pressed_keys = []
running = True
env_done = True
# setup the screen using pygame
flags = pygame.RESIZABLE | pygame.HWSURFACE | pygame.DOUBLEBUF
screen = pygame.display.set_mode(video_size, flags)
pygame.event.set_blocked(pygame.MOUSEMOTION)
# set the caption for the pygame window. if the env has a spec use its id
if env.spec is not None:
pygame.display.set_caption(env.spec.id)
# otherwise just use the default nes-py caption
else:
pygame.display.set_caption('nes-py')
# start a clock for limiting the frame rate to the given FPS
clock = pygame.time.Clock()
# start the main game loop
while running:
# reset if the environment is done
if env_done:
env_done = False
obs = env.reset()
# otherwise take a normal step
else:
# unwrap the action based on pressed relevant keys
action = keys_to_action.get(tuple(sorted(pressed_keys)), nop_)
obs, rew, env_done, info = env.step(action)
# make sure the observation exists
if obs is not None:
# if the observation is just height and width (B&W)
if len(obs.shape) == 2:
# add a dummy channel for pygame display
obs = obs[:, :, None]
# if the observation is single channel (B&W)
if obs.shape[2] == 1:
# repeat the single channel 3 times for RGB encoding of B&W
obs = obs.repeat(3, axis=2)
# display the observation on the pygame screen
display_arr(screen, obs, video_size, transpose)
# process keyboard events
for event in pygame.event.get():
# handle a key being pressed
if event.type == pygame.KEYDOWN:
# make sure the key is in the relevant key list
if event.key in relevant_keys:
# add the key to pressed keys
pressed_keys.append(event.key)
# ASCII code 27 is the "escape" key
elif event.key == 27:
running = False
# handle the backup and reset functions
elif event.key == ord('e'):
env.unwrapped._backup()
elif event.key == ord('r'):
env.unwrapped._restore()
# handle a key being released
elif event.type == pygame.KEYUP:
# make sure the key is in the relevant key list
if event.key in relevant_keys:
# remove the key from the pressed keys
pressed_keys.remove(event.key)
# if the event is quit, set running to False
elif event.type == pygame.QUIT:
running = False
# flip the pygame screen
pygame.display.flip()
# throttle to maintain the framerate
clock.tick(fps)
# quite the pygame setup
pygame.quit() | Play the game using the keyboard as a human.
Args:
env (gym.Env): the environment to use for playing
transpose (bool): whether to transpose frame before viewing them
fps (int): number of steps of the environment to execute every second
nop_ (any): the object to use as a null op action for the environment
Returns:
None | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/app/play_human.py#L31-L135 | [
"def display_arr(screen, arr, video_size, transpose):\n \"\"\"\n Display an image to the pygame screen.\n\n Args:\n screen (pygame.Surface): the pygame surface to write frames to\n arr (np.ndarray): numpy array representing a single frame of gameplay\n video_size (tuple): the size to render the frame as\n transpose (bool): whether to transpose the frame before displaying\n\n Returns:\n None\n\n \"\"\"\n # take the transpose if necessary\n if transpose:\n pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1))\n else:\n pyg_img = arr\n # resize the image according to given image size\n pyg_img = pygame.transform.scale(pyg_img, video_size)\n # blit the image to the surface\n screen.blit(pyg_img, (0, 0))\n"
] | """A method to play gym environments using human IO inputs."""
import gym
import pygame
def display_arr(screen, arr, video_size, transpose):
"""
Display an image to the pygame screen.
Args:
screen (pygame.Surface): the pygame surface to write frames to
arr (np.ndarray): numpy array representing a single frame of gameplay
video_size (tuple): the size to render the frame as
transpose (bool): whether to transpose the frame before displaying
Returns:
None
"""
# take the transpose if necessary
if transpose:
pyg_img = pygame.surfarray.make_surface(arr.swapaxes(0, 1))
else:
pyg_img = arr
# resize the image according to given image size
pyg_img = pygame.transform.scale(pyg_img, video_size)
# blit the image to the surface
screen.blit(pyg_img, (0, 0))
def play_human(env):
"""
Play the environment using keyboard as a human.
Args:
env (gym.Env): the initialized gym environment to play
Returns:
None
"""
# play the game and catch a potential keyboard interrupt
try:
play(env, fps=env.metadata['video.frames_per_second'])
except KeyboardInterrupt:
pass
# reset and close the environment
env.close()
# explicitly define the outward facing API of the module
__all__ = [play_human.__name__]
|
Kautenja/nes-py | nes_py/app/play_human.py | play_human | python | def play_human(env):
# play the game and catch a potential keyboard interrupt
try:
play(env, fps=env.metadata['video.frames_per_second'])
except KeyboardInterrupt:
pass
# reset and close the environment
env.close() | Play the environment using keyboard as a human.
Args:
env (gym.Env): the initialized gym environment to play
Returns:
None | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/app/play_human.py#L138-L155 | [
"def play(env, transpose=True, fps=30, nop_=0):\n \"\"\"Play the game using the keyboard as a human.\n\n Args:\n env (gym.Env): the environment to use for playing\n transpose (bool): whether to transpose frame before viewing them\n fps (int): number of steps of the environment to execute every second\n nop_ (any): the object to use as a null op action for the environment\n\n Returns:\n None\n\n \"\"\"\n # ensure the observation space is a box of pixels\n assert isinstance(env.observation_space, gym.spaces.box.Box)\n # ensure the observation space is either B&W pixels or RGB Pixels\n obs_s = env.observation_space\n is_bw = len(obs_s.shape) == 2\n is_rgb = len(obs_s.shape) == 3 and obs_s.shape[2] in [1, 3]\n assert is_bw or is_rgb\n # get the mapping of keyboard keys to actions in the environment\n if hasattr(env, 'get_keys_to_action'):\n keys_to_action = env.get_keys_to_action()\n # get the mapping of keyboard keys to actions in the unwrapped environment\n elif hasattr(env.unwrapped, 'get_keys_to_action'):\n keys_to_action = env.unwrapped.get_keys_to_action()\n else:\n raise ValueError('env has no get_keys_to_action method')\n relevant_keys = set(sum(map(list, keys_to_action.keys()), []))\n # determine the size of the video in pixels\n video_size = env.observation_space.shape[0], env.observation_space.shape[1]\n if transpose:\n video_size = tuple(reversed(video_size))\n # generate variables to determine the running state of the game\n pressed_keys = []\n running = True\n env_done = True\n # setup the screen using pygame\n flags = pygame.RESIZABLE | pygame.HWSURFACE | pygame.DOUBLEBUF\n screen = pygame.display.set_mode(video_size, flags)\n pygame.event.set_blocked(pygame.MOUSEMOTION)\n # set the caption for the pygame window. 
if the env has a spec use its id\n if env.spec is not None:\n pygame.display.set_caption(env.spec.id)\n # otherwise just use the default nes-py caption\n else:\n pygame.display.set_caption('nes-py')\n # start a clock for limiting the frame rate to the given FPS\n clock = pygame.time.Clock()\n # start the main game loop\n while running:\n # reset if the environment is done\n if env_done:\n env_done = False\n obs = env.reset()\n # otherwise take a normal step\n else:\n # unwrap the action based on pressed relevant keys\n action = keys_to_action.get(tuple(sorted(pressed_keys)), nop_)\n obs, rew, env_done, info = env.step(action)\n # make sure the observation exists\n if obs is not None:\n # if the observation is just height and width (B&W)\n if len(obs.shape) == 2:\n # add a dummy channel for pygame display\n obs = obs[:, :, None]\n # if the observation is single channel (B&W)\n if obs.shape[2] == 1:\n # repeat the single channel 3 times for RGB encoding of B&W\n obs = obs.repeat(3, axis=2)\n # display the observation on the pygame screen\n display_arr(screen, obs, video_size, transpose)\n\n # process keyboard events\n for event in pygame.event.get():\n # handle a key being pressed\n if event.type == pygame.KEYDOWN:\n # make sure the key is in the relevant key list\n if event.key in relevant_keys:\n # add the key to pressed keys\n pressed_keys.append(event.key)\n # ASCII code 27 is the \"escape\" key\n elif event.key == 27:\n running = False\n # handle the backup and reset functions\n elif event.key == ord('e'):\n env.unwrapped._backup()\n elif event.key == ord('r'):\n env.unwrapped._restore()\n # handle a key being released\n elif event.type == pygame.KEYUP:\n # make sure the key is in the relevant key list\n if event.key in relevant_keys:\n # remove the key from the pressed keys\n pressed_keys.remove(event.key)\n # if the event is quit, set running to False\n elif event.type == pygame.QUIT:\n running = False\n\n # flip the pygame screen\n pygame.display.flip()\n # 
throttle to maintain the framerate\n clock.tick(fps)\n # quite the pygame setup\n pygame.quit()\n"
] | """A method to play gym environments using human IO inputs."""
import gym
import pygame
def display_arr(screen, arr, video_size, transpose):
    """
    Display an image to the pygame screen.

    Args:
        screen (pygame.Surface): the pygame surface to write frames to
        arr (np.ndarray): numpy array representing a single frame of gameplay
        video_size (tuple): the size to render the frame as
        transpose (bool): whether to transpose the frame before displaying

    Returns:
        None

    """
    # pygame surfaces are column-major, so swap axes when transposing
    if transpose:
        frame = pygame.surfarray.make_surface(arr.swapaxes(0, 1))
    else:
        frame = arr
    # scale the frame up/down to the requested output resolution
    frame = pygame.transform.scale(frame, video_size)
    # draw the frame at the top-left corner of the destination surface
    screen.blit(frame, (0, 0))
def play(env, transpose=True, fps=30, nop_=0):
    """
    Play the game using the keyboard as a human.

    Args:
        env (gym.Env): the environment to use for playing
        transpose (bool): whether to transpose each frame before viewing it
        fps (int): number of steps of the environment to execute every second
        nop_ (any): the object to use as a null op action for the environment

    Returns:
        None

    Raises:
        ValueError: if neither the environment nor its unwrapped form
            exposes a `get_keys_to_action` method

    """
    # ensure the observation space is a box of pixels
    assert isinstance(env.observation_space, gym.spaces.box.Box)
    # ensure the observation space is either B&W pixels or RGB Pixels
    obs_s = env.observation_space
    is_bw = len(obs_s.shape) == 2
    is_rgb = len(obs_s.shape) == 3 and obs_s.shape[2] in [1, 3]
    assert is_bw or is_rgb
    # get the mapping of keyboard keys to actions in the environment
    if hasattr(env, 'get_keys_to_action'):
        keys_to_action = env.get_keys_to_action()
    # get the mapping of keyboard keys to actions in the unwrapped environment
    elif hasattr(env.unwrapped, 'get_keys_to_action'):
        keys_to_action = env.unwrapped.get_keys_to_action()
    else:
        raise ValueError('env has no get_keys_to_action method')
    # flatten the key combinations into the set of individual relevant keys
    relevant_keys = set(sum(map(list, keys_to_action.keys()), []))
    # determine the size of the video in pixels (height, width of the frames)
    video_size = env.observation_space.shape[0], env.observation_space.shape[1]
    if transpose:
        video_size = tuple(reversed(video_size))
    # generate variables to determine the running state of the game
    pressed_keys = []
    running = True
    env_done = True
    # setup the screen using pygame
    flags = pygame.RESIZABLE | pygame.HWSURFACE | pygame.DOUBLEBUF
    screen = pygame.display.set_mode(video_size, flags)
    pygame.event.set_blocked(pygame.MOUSEMOTION)
    # set the caption for the pygame window. if the env has a spec use its id
    if env.spec is not None:
        pygame.display.set_caption(env.spec.id)
    # otherwise just use the default nes-py caption
    else:
        pygame.display.set_caption('nes-py')
    # start a clock for limiting the frame rate to the given FPS
    clock = pygame.time.Clock()
    # start the main game loop
    while running:
        # reset if the environment is done
        if env_done:
            env_done = False
            obs = env.reset()
        # otherwise take a normal step
        else:
            # look up the action for the pressed key combo; NOP if unmapped
            action = keys_to_action.get(tuple(sorted(pressed_keys)), nop_)
            obs, rew, env_done, info = env.step(action)
        # make sure the observation exists
        if obs is not None:
            # if the observation is just height and width (B&W)
            if len(obs.shape) == 2:
                # add a dummy channel for pygame display
                obs = obs[:, :, None]
            # if the observation is single channel (B&W)
            if obs.shape[2] == 1:
                # repeat the single channel 3 times for RGB encoding of B&W
                obs = obs.repeat(3, axis=2)
            # display the observation on the pygame screen
            display_arr(screen, obs, video_size, transpose)
        # process keyboard events
        for event in pygame.event.get():
            # handle a key being pressed
            if event.type == pygame.KEYDOWN:
                # make sure the key is in the relevant key list
                if event.key in relevant_keys:
                    # add the key to pressed keys
                    pressed_keys.append(event.key)
                # ASCII code 27 is the "escape" key
                elif event.key == 27:
                    running = False
                # handle the backup and reset functions
                elif event.key == ord('e'):
                    env.unwrapped._backup()
                elif event.key == ord('r'):
                    env.unwrapped._restore()
            # handle a key being released
            elif event.type == pygame.KEYUP:
                # make sure the key is in the relevant key list
                if event.key in relevant_keys:
                    # remove the key from the pressed keys
                    pressed_keys.remove(event.key)
            # if the event is quit, set running to False
            elif event.type == pygame.QUIT:
                running = False
        # flip the pygame screen
        pygame.display.flip()
        # throttle to maintain the framerate
        clock.tick(fps)
    # quit the pygame setup
    pygame.quit()
# explicitly define the outward facing API of the module.
# NOTE: the function defined in this module is `play`; referencing the
# undefined name `play_human` here would raise NameError at import time.
__all__ = [play.__name__]
|
def get_keys_to_action(self):
    """Return the dictionary of keyboard keys to actions."""
    # invert the wrapped env's {key-combo: byte} mapping so that the key
    # combination producing a given controller byte can be looked up
    byte_to_keys = {
        byte: keys
        for keys, byte in self.env.unwrapped.get_keys_to_action().items()
    }
    # map each key combination to the discrete action whose byte it triggers
    return {
        byte_to_keys[byte]: action
        for action, byte in self._action_map.items()
    }
"""An environment wrapper to convert binary to discrete action space."""
# a mapping of buttons to binary values
_button_map = {
'right': 0b10000000,
'left': 0b01000000,
'down': 0b00100000,
'up': 0b00010000,
'start': 0b00001000,
'select': 0b00000100,
'B': 0b00000010,
'A': 0b00000001,
'NOOP': 0b00000000,
}
def __init__(self, env, actions):
"""
Initialize a new binary to discrete action space wrapper.
Args:
env (gym.Env): the environment to wrap
actions (list): an ordered list of actions (as lists of buttons).
The index of each button list is its discrete coded value
Returns:
None
"""
super(BinarySpaceToDiscreteSpaceEnv, self).__init__(env)
# create the new action space
self.action_space = gym.spaces.Discrete(len(actions))
# create the action map from the list of discrete actions
self._action_map = {}
self._action_meanings = {}
# iterate over all the actions (as button lists)
for action, button_list in enumerate(actions):
# the value of this action's bitmap
byte_action = 0
# iterate over the buttons in this button list
for button in button_list:
byte_action |= self._button_map[button]
# set this action maps value to the byte action value
self._action_map[action] = byte_action
self._action_meanings[action] = ' '.join(button_list)
def step(self, action):
"""
Take a step using the given action.
Args:
action (int): the discrete action to perform
Returns:
a tuple of:
- (numpy.ndarray) the state as a result of the action
- (float) the reward achieved by taking the action
- (bool) a flag denoting whether the episode has ended
- (dict) a dictionary of extra information
"""
# take the step and record the output
return self.env.step(self._action_map[action])
def reset(self):
"""Reset the environment and return the initial observation."""
return self.env.reset()
def get_action_meanings(self):
"""Return a list of actions meanings."""
actions = sorted(self._action_meanings.keys())
return [self._action_meanings[action] for action in actions]
|
def get_action_meanings(self):
    """Return a list of actions meanings."""
    # list the meanings in ascending order of their discrete action codes
    return [self._action_meanings[code] for code in sorted(self._action_meanings)]
"""An environment wrapper to convert binary to discrete action space."""
# a mapping of buttons to binary values
_button_map = {
'right': 0b10000000,
'left': 0b01000000,
'down': 0b00100000,
'up': 0b00010000,
'start': 0b00001000,
'select': 0b00000100,
'B': 0b00000010,
'A': 0b00000001,
'NOOP': 0b00000000,
}
def __init__(self, env, actions):
"""
Initialize a new binary to discrete action space wrapper.
Args:
env (gym.Env): the environment to wrap
actions (list): an ordered list of actions (as lists of buttons).
The index of each button list is its discrete coded value
Returns:
None
"""
super(BinarySpaceToDiscreteSpaceEnv, self).__init__(env)
# create the new action space
self.action_space = gym.spaces.Discrete(len(actions))
# create the action map from the list of discrete actions
self._action_map = {}
self._action_meanings = {}
# iterate over all the actions (as button lists)
for action, button_list in enumerate(actions):
# the value of this action's bitmap
byte_action = 0
# iterate over the buttons in this button list
for button in button_list:
byte_action |= self._button_map[button]
# set this action maps value to the byte action value
self._action_map[action] = byte_action
self._action_meanings[action] = ' '.join(button_list)
def step(self, action):
"""
Take a step using the given action.
Args:
action (int): the discrete action to perform
Returns:
a tuple of:
- (numpy.ndarray) the state as a result of the action
- (float) the reward achieved by taking the action
- (bool) a flag denoting whether the episode has ended
- (dict) a dictionary of extra information
"""
# take the step and record the output
return self.env.step(self._action_map[action])
def reset(self):
"""Reset the environment and return the initial observation."""
return self.env.reset()
def get_keys_to_action(self):
"""Return the dictionary of keyboard keys to actions."""
# get the old mapping of keys to actions
old_keys_to_action = self.env.unwrapped.get_keys_to_action()
# invert the keys to action mapping to lookup key combos by action
action_to_keys = {v: k for k, v in old_keys_to_action.items()}
# create a new mapping of keys to actions
keys_to_action = {}
# iterate over the actions and their byte values in this mapper
for action, byte in self._action_map.items():
# get the keys to press for the action
keys = action_to_keys[byte]
# set the keys value in the dictionary to the current discrete act
keys_to_action[keys] = action
return keys_to_action
|
def prg_rom(self):
    """Return the PRG ROM of the ROM file."""
    # slice the program ROM bytes out of the raw cartridge data
    start, stop = self.prg_rom_start, self.prg_rom_stop
    try:
        return self.raw_data[start:stop]
    except IndexError:
        raise ValueError('failed to read PRG-ROM on ROM.')
"""An abstraction of the NES Read-Only Memory (ROM)."""
# the magic bytes expected at the first four bytes of the header.
# It spells "NES<END>"
_MAGIC = np.array([0x4E, 0x45, 0x53, 0x1A])
def __init__(self, rom_path):
"""
Initialize a new ROM.
Args:
rom_path (str): the path to the ROM file
Returns:
None
"""
# make sure the rom path is a string
if not isinstance(rom_path, str):
raise TypeError('rom_path must be of type: str.')
# make sure the rom path exists
if not os.path.exists(rom_path):
msg = 'rom_path points to non-existent file: {}.'.format(rom_path)
raise ValueError(msg)
# read the binary data in the .nes ROM file
self.raw_data = np.fromfile(rom_path, dtype='uint8')
# ensure the first 4 bytes are 0x4E45531A (NES<EOF>)
if not np.array_equal(self._magic, self._MAGIC):
raise ValueError('ROM missing magic number in header.')
if self._zero_fill != 0:
raise ValueError("ROM header zero fill bytes are not zero.")
#
# MARK: Header
#
@property
def header(self):
"""Return the header of the ROM file as bytes."""
return self.raw_data[:16]
@property
def _magic(self):
"""Return the magic bytes in the first 4 bytes."""
return self.header[:4]
@property
def prg_rom_size(self):
"""Return the size of the PRG ROM in KB."""
return 16 * self.header[4]
@property
def chr_rom_size(self):
"""Return the size of the CHR ROM in KB."""
return 8 * self.header[5]
@property
def flags_6(self):
"""Return the flags at the 6th byte of the header."""
return '{:08b}'.format(self.header[6])
@property
def flags_7(self):
"""Return the flags at the 7th byte of the header."""
return '{:08b}'.format(self.header[7])
@property
def prg_ram_size(self):
"""Return the size of the PRG RAM in KB."""
size = self.header[8]
# size becomes 8 when it's zero for compatibility
if size == 0:
size = 1
return 8 * size
@property
def flags_9(self):
"""Return the flags at the 9th byte of the header."""
return '{:08b}'.format(self.header[9])
@property
def flags_10(self):
"""
Return the flags at the 10th byte of the header.
Notes:
- these flags are not part of official specification.
- ignored in this emulator
"""
return '{:08b}'.format(self.header[10])
@property
def _zero_fill(self):
"""Return the zero fill bytes at the end of the header."""
return self.header[11:].sum()
#
# MARK: Header Flags
#
@property
def mapper(self):
"""Return the mapper number this ROM uses."""
# the high nibble is in flags 7, the low nibble is in flags 6
return int(self.flags_7[:4] + self.flags_6[:4], 2)
@property
def is_ignore_mirroring(self):
"""Return a boolean determining if the ROM ignores mirroring."""
return bool(int(self.flags_6[4]))
@property
def has_trainer(self):
"""Return a boolean determining if the ROM has a trainer block."""
return bool(int(self.flags_6[5]))
@property
def has_battery_backed_ram(self):
"""Return a boolean determining if the ROM has a battery-backed RAM."""
return bool(int(self.flags_6[6]))
@property
def is_vertical_mirroring(self):
"""Return the mirroring mode this ROM uses."""
return bool(int(self.flags_6[7]))
@property
def has_play_choice_10(self):
"""
Return whether this cartridge uses PlayChoice-10.
Note:
- Play-Choice 10 uses different color palettes for a different PPU
- ignored in this emulator
"""
return bool(int(self.flags_7[6]))
@property
def has_vs_unisystem(self):
"""
Return whether this cartridge has VS Uni-system.
Note:
VS Uni-system is for ROMs that have a coin slot (Arcades).
- ignored in this emulator
"""
return bool(int(self.flags_7[7]))
@property
def is_pal(self):
"""Return the TV system this ROM supports."""
return bool(int(self.flags_9[7]))
#
# MARK: ROM
#
@property
def trainer_rom_start(self):
"""The inclusive starting index of the trainer ROM."""
return 16
@property
def trainer_rom_stop(self):
"""The exclusive stopping index of the trainer ROM."""
if self.has_trainer:
return 16 + 512
else:
return 16
@property
def trainer_rom(self):
"""Return the trainer ROM of the ROM file."""
return self.raw_data[self.trainer_rom_start:self.trainer_rom_stop]
@property
def prg_rom_start(self):
"""The inclusive starting index of the PRG ROM."""
return self.trainer_rom_stop
@property
def prg_rom_stop(self):
"""The exclusive stopping index of the PRG ROM."""
return self.prg_rom_start + self.prg_rom_size * 2**10
@property
@property
def chr_rom_start(self):
"""The inclusive starting index of the CHR ROM."""
return self.prg_rom_stop
@property
def chr_rom_stop(self):
"""The exclusive stopping index of the CHR ROM."""
return self.chr_rom_start + self.chr_rom_size * 2**10
@property
def chr_rom(self):
"""Return the CHR ROM of the ROM file."""
try:
return self.raw_data[self.chr_rom_start:self.chr_rom_stop]
except IndexError:
raise ValueError('failed to read CHR-ROM on ROM.')
|
def chr_rom(self):
    """Return the CHR ROM of the ROM file."""
    # slice the character ROM bytes out of the raw cartridge data
    start, stop = self.chr_rom_start, self.chr_rom_stop
    try:
        return self.raw_data[start:stop]
    except IndexError:
        raise ValueError('failed to read CHR-ROM on ROM.')
"""An abstraction of the NES Read-Only Memory (ROM)."""
# the magic bytes expected at the first four bytes of the header.
# It spells "NES<END>"
_MAGIC = np.array([0x4E, 0x45, 0x53, 0x1A])
def __init__(self, rom_path):
"""
Initialize a new ROM.
Args:
rom_path (str): the path to the ROM file
Returns:
None
"""
# make sure the rom path is a string
if not isinstance(rom_path, str):
raise TypeError('rom_path must be of type: str.')
# make sure the rom path exists
if not os.path.exists(rom_path):
msg = 'rom_path points to non-existent file: {}.'.format(rom_path)
raise ValueError(msg)
# read the binary data in the .nes ROM file
self.raw_data = np.fromfile(rom_path, dtype='uint8')
# ensure the first 4 bytes are 0x4E45531A (NES<EOF>)
if not np.array_equal(self._magic, self._MAGIC):
raise ValueError('ROM missing magic number in header.')
if self._zero_fill != 0:
raise ValueError("ROM header zero fill bytes are not zero.")
#
# MARK: Header
#
@property
def header(self):
"""Return the header of the ROM file as bytes."""
return self.raw_data[:16]
@property
def _magic(self):
"""Return the magic bytes in the first 4 bytes."""
return self.header[:4]
@property
def prg_rom_size(self):
"""Return the size of the PRG ROM in KB."""
return 16 * self.header[4]
@property
def chr_rom_size(self):
"""Return the size of the CHR ROM in KB."""
return 8 * self.header[5]
@property
def flags_6(self):
"""Return the flags at the 6th byte of the header."""
return '{:08b}'.format(self.header[6])
@property
def flags_7(self):
"""Return the flags at the 7th byte of the header."""
return '{:08b}'.format(self.header[7])
@property
def prg_ram_size(self):
"""Return the size of the PRG RAM in KB."""
size = self.header[8]
# size becomes 8 when it's zero for compatibility
if size == 0:
size = 1
return 8 * size
@property
def flags_9(self):
"""Return the flags at the 9th byte of the header."""
return '{:08b}'.format(self.header[9])
@property
def flags_10(self):
"""
Return the flags at the 10th byte of the header.
Notes:
- these flags are not part of official specification.
- ignored in this emulator
"""
return '{:08b}'.format(self.header[10])
@property
def _zero_fill(self):
"""Return the zero fill bytes at the end of the header."""
return self.header[11:].sum()
#
# MARK: Header Flags
#
@property
def mapper(self):
"""Return the mapper number this ROM uses."""
# the high nibble is in flags 7, the low nibble is in flags 6
return int(self.flags_7[:4] + self.flags_6[:4], 2)
@property
def is_ignore_mirroring(self):
"""Return a boolean determining if the ROM ignores mirroring."""
return bool(int(self.flags_6[4]))
@property
def has_trainer(self):
"""Return a boolean determining if the ROM has a trainer block."""
return bool(int(self.flags_6[5]))
@property
def has_battery_backed_ram(self):
"""Return a boolean determining if the ROM has a battery-backed RAM."""
return bool(int(self.flags_6[6]))
@property
def is_vertical_mirroring(self):
"""Return the mirroring mode this ROM uses."""
return bool(int(self.flags_6[7]))
@property
def has_play_choice_10(self):
"""
Return whether this cartridge uses PlayChoice-10.
Note:
- Play-Choice 10 uses different color palettes for a different PPU
- ignored in this emulator
"""
return bool(int(self.flags_7[6]))
@property
def has_vs_unisystem(self):
"""
Return whether this cartridge has VS Uni-system.
Note:
VS Uni-system is for ROMs that have a coin slot (Arcades).
- ignored in this emulator
"""
return bool(int(self.flags_7[7]))
@property
def is_pal(self):
"""Return the TV system this ROM supports."""
return bool(int(self.flags_9[7]))
#
# MARK: ROM
#
@property
def trainer_rom_start(self):
"""The inclusive starting index of the trainer ROM."""
return 16
@property
def trainer_rom_stop(self):
"""The exclusive stopping index of the trainer ROM."""
if self.has_trainer:
return 16 + 512
else:
return 16
@property
def trainer_rom(self):
"""Return the trainer ROM of the ROM file."""
return self.raw_data[self.trainer_rom_start:self.trainer_rom_stop]
@property
def prg_rom_start(self):
"""The inclusive starting index of the PRG ROM."""
return self.trainer_rom_stop
@property
def prg_rom_stop(self):
"""The exclusive stopping index of the PRG ROM."""
return self.prg_rom_start + self.prg_rom_size * 2**10
@property
def prg_rom(self):
"""Return the PRG ROM of the ROM file."""
try:
return self.raw_data[self.prg_rom_start:self.prg_rom_stop]
except IndexError:
raise ValueError('failed to read PRG-ROM on ROM.')
@property
def chr_rom_start(self):
"""The inclusive starting index of the CHR ROM."""
return self.prg_rom_stop
@property
def chr_rom_stop(self):
"""The exclusive stopping index of the CHR ROM."""
return self.chr_rom_start + self.chr_rom_size * 2**10
@property
|
def _screen_buffer(self):
    """Setup the screen buffer from the C++ code."""
    # locate the emulator's screen memory and wrap it in a ctypes buffer
    contents = ctypes.cast(
        _LIB.Screen(self._env),
        ctypes.POINTER(SCREEN_TENSOR),
    ).contents
    # view the raw bytes as a tensor of 32-bit pixels
    pixels = np.frombuffer(contents, dtype='uint8').reshape(SCREEN_SHAPE_32_BIT)
    # on little-endian machines the channels are stored BGRx; reverse the
    # channel axis to recover big-endian xRGB ordering
    if sys.byteorder == 'little':
        pixels = pixels[:, :, ::-1]
    # drop the padding channel (from 32-bit color storage) to get 24-bit RGB
    return pixels[:, :, 1:]
"""An NES environment based on the LaiNES emulator."""
# relevant meta-data about the environment
metadata = {
'render.modes': ['rgb_array', 'human'],
'video.frames_per_second': 60
}
# the legal range for rewards for this environment
reward_range = (-float('inf'), float('inf'))
# observation space for the environment is static across all instances
observation_space = Box(
low=0,
high=255,
shape=SCREEN_SHAPE_24_BIT,
dtype=np.uint8
)
# action space is a bitmap of button press values for the 8 NES buttons
action_space = Discrete(256)
def __init__(self, rom_path):
"""
Create a new NES environment.
Args:
rom_path (str): the path to the ROM for the environment
Returns:
None
"""
# create a ROM file from the ROM path
rom = ROM(rom_path)
# check that there is PRG ROM
if rom.prg_rom_size == 0:
raise ValueError('ROM has no PRG-ROM banks.')
# ensure that there is no trainer
if rom.has_trainer:
raise ValueError('ROM has trainer. trainer is not supported.')
# try to read the PRG ROM and raise a value error if it fails
_ = rom.prg_rom
# try to read the CHR ROM and raise a value error if it fails
_ = rom.chr_rom
# check the TV system
if rom.is_pal:
raise ValueError('ROM is PAL. PAL is not supported.')
# check that the mapper is implemented
elif rom.mapper not in {0, 1, 2, 3}:
msg = 'ROM has an unsupported mapper number {}.'
raise ValueError(msg.format(rom.mapper))
# create a dedicated random number generator for the environment
self.np_random = np.random.RandomState()
# store the ROM path
self._rom_path = rom_path
# initialize the C++ object for running the environment
self._env = _LIB.Initialize(self._rom_path)
# setup a placeholder for a 'human' render mode viewer
self.viewer = None
# setup a placeholder for a pointer to a backup state
self._has_backup = False
# setup a done flag
self.done = True
# setup the controllers, screen, and RAM buffers
self.controllers = [self._controller_buffer(port) for port in range(2)]
self.screen = self._screen_buffer()
self.ram = self._ram_buffer()
def _ram_buffer(self):
"""Setup the RAM buffer from the C++ code."""
# get the address of the RAM
address = _LIB.Memory(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(RAM_VECTOR)).contents
# create a NumPy array from the buffer
return np.frombuffer(buffer_, dtype='uint8')
def _controller_buffer(self, port):
"""
Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data
"""
# get the address of the controller
address = _LIB.Controller(self._env, port)
# create a memory buffer using the ctypes pointer for this vector
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
# create a NumPy buffer from the binary data and return it
return np.frombuffer(buffer_, dtype='uint8')
def _frame_advance(self, action):
"""
Advance a frame in the emulator with an action.
Args:
action (byte): the action to press on the joy-pad
Returns:
None
"""
# set the action on the controller
self.controllers[0][:] = action
# perform a step on the emulator
_LIB.Step(self._env)
def _backup(self):
"""Backup the NES state in the emulator."""
_LIB.Backup(self._env)
self._has_backup = True
def _restore(self):
"""Restore the backup state into the NES emulator."""
_LIB.Restore(self._env)
def _will_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def seed(self, seed=None):
"""
Set the seed for this environment's random number generator.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
# if there is no seed, return an empty list
if seed is None:
return []
# set the random number seed for the NumPy random number generator
self.np_random.seed(seed)
# return the list of seeds used by RNG(s) in the environment
return [seed]
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns:
state (np.ndarray): next frame as a result of the given action
"""
# call the before reset callback
self._will_reset()
# reset the emulator
if self._has_backup:
self._restore()
else:
_LIB.Reset(self._env)
# call the after reset callback
self._did_reset()
# set the done flag to false
self.done = False
# return the screen from the emulator
return self.screen
def _did_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def step(self, action):
"""
Run one frame of the NES and return the relevant observation data.
Args:
action (byte): the bitmap determining which buttons to press
Returns:
a tuple of:
- state (np.ndarray): next frame as a result of the given action
- reward (float) : amount of reward returned after given action
- done (boolean): whether the episode has ended
- info (dict): contains auxiliary diagnostic information
"""
# if the environment is done, raise an error
if self.done:
raise ValueError('cannot step in a done environment! call `reset`')
# set the action on the controller
self.controllers[0][:] = action
# pass the action to the emulator as an unsigned byte
_LIB.Step(self._env)
# get the reward for this step
reward = self._get_reward()
# get the done flag for this step
self.done = self._get_done()
# get the info for this step
info = self._get_info()
# call the after step callback
self._did_step(self.done)
# bound the reward in [min, max]
if reward < self.reward_range[0]:
reward = self.reward_range[0]
elif reward > self.reward_range[1]:
reward = self.reward_range[1]
# return the screen from the emulator and other relevant data
return self.screen, reward, self.done, info
def _get_reward(self):
"""Return the reward after a step occurs."""
return 0
def _get_done(self):
"""Return True if the episode is over, False otherwise."""
return False
def _get_info(self):
"""Return the info after a step occurs."""
return {}
def _did_step(self, done):
"""
Handle any RAM hacking after a step occurs.
Args:
done (bool): whether the done flag is set to true
Returns:
None
"""
pass
def close(self):
"""Close the environment."""
# make sure the environment hasn't already been closed
if self._env is None:
raise ValueError('env has already been closed.')
# purge the environment from C++ memory
_LIB.Close(self._env)
# deallocate the object locally
self._env = None
# if there is an image viewer open, delete it
if self.viewer is not None:
self.viewer.close()
def render(self, mode='human'):
"""
Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise
"""
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg)
def get_keys_to_action(self):
"""Return the dictionary of keyboard keys to actions."""
# keyboard keys in an array ordered by their byte order in the bitmap
# i.e. right = 7, left = 6, ..., B = 1, A = 0
buttons = np.array([
ord('d'), # right
ord('a'), # left
ord('s'), # down
ord('w'), # up
ord('\r'), # start
ord(' '), # select
ord('p'), # B
ord('o'), # A
])
# the dictionary of key presses to controller codes
keys_to_action = {}
# the combination map of values for the controller
values = 8 * [[0, 1]]
# iterate over all the combinations
for combination in itertools.product(*values):
# unpack the tuple of bits into an integer
byte = int(''.join(map(str, combination)), 2)
# unwrap the pressed buttons based on the bitmap
pressed = buttons[list(map(bool, combination))]
# assign the pressed buttons to the output byte
keys_to_action[tuple(sorted(pressed))] = byte
return keys_to_action
def get_action_meanings(self):
"""Return a list of actions meanings."""
return ['NOOP']
|
def _ram_buffer(self):
    """Setup the RAM buffer from the C++ code."""
    # locate the emulator's RAM and wrap the memory in a ctypes buffer
    contents = ctypes.cast(
        _LIB.Memory(self._env),
        ctypes.POINTER(RAM_VECTOR),
    ).contents
    # interpret the buffer as a NumPy array of bytes
    return np.frombuffer(contents, dtype='uint8')
"""An NES environment based on the LaiNES emulator."""
# relevant meta-data about the environment
metadata = {
'render.modes': ['rgb_array', 'human'],
'video.frames_per_second': 60
}
# the legal range for rewards for this environment
reward_range = (-float('inf'), float('inf'))
# observation space for the environment is static across all instances
observation_space = Box(
low=0,
high=255,
shape=SCREEN_SHAPE_24_BIT,
dtype=np.uint8
)
# action space is a bitmap of button press values for the 8 NES buttons
action_space = Discrete(256)
def __init__(self, rom_path):
"""
Create a new NES environment.
Args:
rom_path (str): the path to the ROM for the environment
Returns:
None
"""
# create a ROM file from the ROM path
rom = ROM(rom_path)
# check that there is PRG ROM
if rom.prg_rom_size == 0:
raise ValueError('ROM has no PRG-ROM banks.')
# ensure that there is no trainer
if rom.has_trainer:
raise ValueError('ROM has trainer. trainer is not supported.')
# try to read the PRG ROM and raise a value error if it fails
_ = rom.prg_rom
# try to read the CHR ROM and raise a value error if it fails
_ = rom.chr_rom
# check the TV system
if rom.is_pal:
raise ValueError('ROM is PAL. PAL is not supported.')
# check that the mapper is implemented
elif rom.mapper not in {0, 1, 2, 3}:
msg = 'ROM has an unsupported mapper number {}.'
raise ValueError(msg.format(rom.mapper))
# create a dedicated random number generator for the environment
self.np_random = np.random.RandomState()
# store the ROM path
self._rom_path = rom_path
# initialize the C++ object for running the environment
self._env = _LIB.Initialize(self._rom_path)
# setup a placeholder for a 'human' render mode viewer
self.viewer = None
# setup a placeholder for a pointer to a backup state
self._has_backup = False
# setup a done flag
self.done = True
# setup the controllers, screen, and RAM buffers
self.controllers = [self._controller_buffer(port) for port in range(2)]
self.screen = self._screen_buffer()
self.ram = self._ram_buffer()
def _screen_buffer(self):
"""Setup the screen buffer from the C++ code."""
# get the address of the screen
address = _LIB.Screen(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(SCREEN_TENSOR)).contents
# create a NumPy array from the buffer
screen = np.frombuffer(buffer_, dtype='uint8')
# reshape the screen from a column vector to a tensor
screen = screen.reshape(SCREEN_SHAPE_32_BIT)
# flip the bytes if the machine is little-endian (which it likely is)
if sys.byteorder == 'little':
# invert the little-endian BGRx channels to big-endian xRGB
screen = screen[:, :, ::-1]
# remove the 0th axis (padding from storing colors in 32 bit)
return screen[:, :, 1:]
def _controller_buffer(self, port):
"""
Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data
"""
# get the address of the controller
address = _LIB.Controller(self._env, port)
# create a memory buffer using the ctypes pointer for this vector
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
# create a NumPy buffer from the binary data and return it
return np.frombuffer(buffer_, dtype='uint8')
def _frame_advance(self, action):
"""
Advance a frame in the emulator with an action.
Args:
action (byte): the action to press on the joy-pad
Returns:
None
"""
# set the action on the controller
self.controllers[0][:] = action
# perform a step on the emulator
_LIB.Step(self._env)
def _backup(self):
"""Backup the NES state in the emulator."""
_LIB.Backup(self._env)
self._has_backup = True
def _restore(self):
"""Restore the backup state into the NES emulator."""
_LIB.Restore(self._env)
def _will_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def seed(self, seed=None):
"""
Set the seed for this environment's random number generator.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
# if there is no seed, return an empty list
if seed is None:
return []
# set the random number seed for the NumPy random number generator
self.np_random.seed(seed)
# return the list of seeds used by RNG(s) in the environment
return [seed]
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns:
state (np.ndarray): next frame as a result of the given action
"""
# call the before reset callback
self._will_reset()
# reset the emulator
if self._has_backup:
self._restore()
else:
_LIB.Reset(self._env)
# call the after reset callback
self._did_reset()
# set the done flag to false
self.done = False
# return the screen from the emulator
return self.screen
def _did_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def step(self, action):
"""
Run one frame of the NES and return the relevant observation data.
Args:
action (byte): the bitmap determining which buttons to press
Returns:
a tuple of:
- state (np.ndarray): next frame as a result of the given action
- reward (float) : amount of reward returned after given action
- done (boolean): whether the episode has ended
- info (dict): contains auxiliary diagnostic information
"""
# if the environment is done, raise an error
if self.done:
raise ValueError('cannot step in a done environment! call `reset`')
# set the action on the controller
self.controllers[0][:] = action
# pass the action to the emulator as an unsigned byte
_LIB.Step(self._env)
# get the reward for this step
reward = self._get_reward()
# get the done flag for this step
self.done = self._get_done()
# get the info for this step
info = self._get_info()
# call the after step callback
self._did_step(self.done)
# bound the reward in [min, max]
if reward < self.reward_range[0]:
reward = self.reward_range[0]
elif reward > self.reward_range[1]:
reward = self.reward_range[1]
# return the screen from the emulator and other relevant data
return self.screen, reward, self.done, info
def _get_reward(self):
"""Return the reward after a step occurs."""
return 0
def _get_done(self):
"""Return True if the episode is over, False otherwise."""
return False
def _get_info(self):
"""Return the info after a step occurs."""
return {}
def _did_step(self, done):
"""
Handle any RAM hacking after a step occurs.
Args:
done (bool): whether the done flag is set to true
Returns:
None
"""
pass
def close(self):
"""Close the environment."""
# make sure the environment hasn't already been closed
if self._env is None:
raise ValueError('env has already been closed.')
# purge the environment from C++ memory
_LIB.Close(self._env)
# deallocate the object locally
self._env = None
# if there is an image viewer open, delete it
if self.viewer is not None:
self.viewer.close()
def render(self, mode='human'):
"""
Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise
"""
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg)
def get_keys_to_action(self):
"""Return the dictionary of keyboard keys to actions."""
# keyboard keys in an array ordered by their byte order in the bitmap
# i.e. right = 7, left = 6, ..., B = 1, A = 0
buttons = np.array([
ord('d'), # right
ord('a'), # left
ord('s'), # down
ord('w'), # up
ord('\r'), # start
ord(' '), # select
ord('p'), # B
ord('o'), # A
])
# the dictionary of key presses to controller codes
keys_to_action = {}
# the combination map of values for the controller
values = 8 * [[0, 1]]
# iterate over all the combinations
for combination in itertools.product(*values):
# unpack the tuple of bits into an integer
byte = int(''.join(map(str, combination)), 2)
# unwrap the pressed buttons based on the bitmap
pressed = buttons[list(map(bool, combination))]
# assign the pressed buttons to the output byte
keys_to_action[tuple(sorted(pressed))] = byte
return keys_to_action
def get_action_meanings(self):
"""Return a list of actions meanings."""
return ['NOOP']
|
Kautenja/nes-py | nes_py/nes_env.py | NESEnv._controller_buffer | python | def _controller_buffer(self, port):
# get the address of the controller
address = _LIB.Controller(self._env, port)
# create a memory buffer using the ctypes pointer for this vector
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
# create a NumPy buffer from the binary data and return it
return np.frombuffer(buffer_, dtype='uint8') | Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/nes_env.py#L178-L194 | null | class NESEnv(gym.Env):
"""An NES environment based on the LaiNES emulator."""
# relevant meta-data about the environment
metadata = {
'render.modes': ['rgb_array', 'human'],
'video.frames_per_second': 60
}
# the legal range for rewards for this environment
reward_range = (-float('inf'), float('inf'))
# observation space for the environment is static across all instances
observation_space = Box(
low=0,
high=255,
shape=SCREEN_SHAPE_24_BIT,
dtype=np.uint8
)
# action space is a bitmap of button press values for the 8 NES buttons
action_space = Discrete(256)
def __init__(self, rom_path):
"""
Create a new NES environment.
Args:
rom_path (str): the path to the ROM for the environment
Returns:
None
"""
# create a ROM file from the ROM path
rom = ROM(rom_path)
# check that there is PRG ROM
if rom.prg_rom_size == 0:
raise ValueError('ROM has no PRG-ROM banks.')
# ensure that there is no trainer
if rom.has_trainer:
raise ValueError('ROM has trainer. trainer is not supported.')
# try to read the PRG ROM and raise a value error if it fails
_ = rom.prg_rom
# try to read the CHR ROM and raise a value error if it fails
_ = rom.chr_rom
# check the TV system
if rom.is_pal:
raise ValueError('ROM is PAL. PAL is not supported.')
# check that the mapper is implemented
elif rom.mapper not in {0, 1, 2, 3}:
msg = 'ROM has an unsupported mapper number {}.'
raise ValueError(msg.format(rom.mapper))
# create a dedicated random number generator for the environment
self.np_random = np.random.RandomState()
# store the ROM path
self._rom_path = rom_path
# initialize the C++ object for running the environment
self._env = _LIB.Initialize(self._rom_path)
# setup a placeholder for a 'human' render mode viewer
self.viewer = None
# setup a placeholder for a pointer to a backup state
self._has_backup = False
# setup a done flag
self.done = True
# setup the controllers, screen, and RAM buffers
self.controllers = [self._controller_buffer(port) for port in range(2)]
self.screen = self._screen_buffer()
self.ram = self._ram_buffer()
def _screen_buffer(self):
"""Setup the screen buffer from the C++ code."""
# get the address of the screen
address = _LIB.Screen(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(SCREEN_TENSOR)).contents
# create a NumPy array from the buffer
screen = np.frombuffer(buffer_, dtype='uint8')
# reshape the screen from a column vector to a tensor
screen = screen.reshape(SCREEN_SHAPE_32_BIT)
# flip the bytes if the machine is little-endian (which it likely is)
if sys.byteorder == 'little':
# invert the little-endian BGRx channels to big-endian xRGB
screen = screen[:, :, ::-1]
# remove the 0th axis (padding from storing colors in 32 bit)
return screen[:, :, 1:]
def _ram_buffer(self):
"""Setup the RAM buffer from the C++ code."""
# get the address of the RAM
address = _LIB.Memory(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(RAM_VECTOR)).contents
# create a NumPy array from the buffer
return np.frombuffer(buffer_, dtype='uint8')
def _frame_advance(self, action):
"""
Advance a frame in the emulator with an action.
Args:
action (byte): the action to press on the joy-pad
Returns:
None
"""
# set the action on the controller
self.controllers[0][:] = action
# perform a step on the emulator
_LIB.Step(self._env)
def _backup(self):
"""Backup the NES state in the emulator."""
_LIB.Backup(self._env)
self._has_backup = True
def _restore(self):
"""Restore the backup state into the NES emulator."""
_LIB.Restore(self._env)
def _will_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def seed(self, seed=None):
"""
Set the seed for this environment's random number generator.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
# if there is no seed, return an empty list
if seed is None:
return []
# set the random number seed for the NumPy random number generator
self.np_random.seed(seed)
# return the list of seeds used by RNG(s) in the environment
return [seed]
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns:
state (np.ndarray): next frame as a result of the given action
"""
# call the before reset callback
self._will_reset()
# reset the emulator
if self._has_backup:
self._restore()
else:
_LIB.Reset(self._env)
# call the after reset callback
self._did_reset()
# set the done flag to false
self.done = False
# return the screen from the emulator
return self.screen
def _did_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def step(self, action):
"""
Run one frame of the NES and return the relevant observation data.
Args:
action (byte): the bitmap determining which buttons to press
Returns:
a tuple of:
- state (np.ndarray): next frame as a result of the given action
- reward (float) : amount of reward returned after given action
- done (boolean): whether the episode has ended
- info (dict): contains auxiliary diagnostic information
"""
# if the environment is done, raise an error
if self.done:
raise ValueError('cannot step in a done environment! call `reset`')
# set the action on the controller
self.controllers[0][:] = action
# pass the action to the emulator as an unsigned byte
_LIB.Step(self._env)
# get the reward for this step
reward = self._get_reward()
# get the done flag for this step
self.done = self._get_done()
# get the info for this step
info = self._get_info()
# call the after step callback
self._did_step(self.done)
# bound the reward in [min, max]
if reward < self.reward_range[0]:
reward = self.reward_range[0]
elif reward > self.reward_range[1]:
reward = self.reward_range[1]
# return the screen from the emulator and other relevant data
return self.screen, reward, self.done, info
def _get_reward(self):
"""Return the reward after a step occurs."""
return 0
def _get_done(self):
"""Return True if the episode is over, False otherwise."""
return False
def _get_info(self):
"""Return the info after a step occurs."""
return {}
def _did_step(self, done):
"""
Handle any RAM hacking after a step occurs.
Args:
done (bool): whether the done flag is set to true
Returns:
None
"""
pass
def close(self):
"""Close the environment."""
# make sure the environment hasn't already been closed
if self._env is None:
raise ValueError('env has already been closed.')
# purge the environment from C++ memory
_LIB.Close(self._env)
# deallocate the object locally
self._env = None
# if there is an image viewer open, delete it
if self.viewer is not None:
self.viewer.close()
def render(self, mode='human'):
"""
Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise
"""
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg)
def get_keys_to_action(self):
"""Return the dictionary of keyboard keys to actions."""
# keyboard keys in an array ordered by their byte order in the bitmap
# i.e. right = 7, left = 6, ..., B = 1, A = 0
buttons = np.array([
ord('d'), # right
ord('a'), # left
ord('s'), # down
ord('w'), # up
ord('\r'), # start
ord(' '), # select
ord('p'), # B
ord('o'), # A
])
# the dictionary of key presses to controller codes
keys_to_action = {}
# the combination map of values for the controller
values = 8 * [[0, 1]]
# iterate over all the combinations
for combination in itertools.product(*values):
# unpack the tuple of bits into an integer
byte = int(''.join(map(str, combination)), 2)
# unwrap the pressed buttons based on the bitmap
pressed = buttons[list(map(bool, combination))]
# assign the pressed buttons to the output byte
keys_to_action[tuple(sorted(pressed))] = byte
return keys_to_action
def get_action_meanings(self):
"""Return a list of actions meanings."""
return ['NOOP']
|
Kautenja/nes-py | nes_py/nes_env.py | NESEnv._frame_advance | python | def _frame_advance(self, action):
# set the action on the controller
self.controllers[0][:] = action
# perform a step on the emulator
_LIB.Step(self._env) | Advance a frame in the emulator with an action.
Args:
action (byte): the action to press on the joy-pad
Returns:
None | train | https://github.com/Kautenja/nes-py/blob/a113885198d418f38fcf24b8f79ac508975788c2/nes_py/nes_env.py#L196-L210 | null | class NESEnv(gym.Env):
"""An NES environment based on the LaiNES emulator."""
# relevant meta-data about the environment
metadata = {
'render.modes': ['rgb_array', 'human'],
'video.frames_per_second': 60
}
# the legal range for rewards for this environment
reward_range = (-float('inf'), float('inf'))
# observation space for the environment is static across all instances
observation_space = Box(
low=0,
high=255,
shape=SCREEN_SHAPE_24_BIT,
dtype=np.uint8
)
# action space is a bitmap of button press values for the 8 NES buttons
action_space = Discrete(256)
def __init__(self, rom_path):
"""
Create a new NES environment.
Args:
rom_path (str): the path to the ROM for the environment
Returns:
None
"""
# create a ROM file from the ROM path
rom = ROM(rom_path)
# check that there is PRG ROM
if rom.prg_rom_size == 0:
raise ValueError('ROM has no PRG-ROM banks.')
# ensure that there is no trainer
if rom.has_trainer:
raise ValueError('ROM has trainer. trainer is not supported.')
# try to read the PRG ROM and raise a value error if it fails
_ = rom.prg_rom
# try to read the CHR ROM and raise a value error if it fails
_ = rom.chr_rom
# check the TV system
if rom.is_pal:
raise ValueError('ROM is PAL. PAL is not supported.')
# check that the mapper is implemented
elif rom.mapper not in {0, 1, 2, 3}:
msg = 'ROM has an unsupported mapper number {}.'
raise ValueError(msg.format(rom.mapper))
# create a dedicated random number generator for the environment
self.np_random = np.random.RandomState()
# store the ROM path
self._rom_path = rom_path
# initialize the C++ object for running the environment
self._env = _LIB.Initialize(self._rom_path)
# setup a placeholder for a 'human' render mode viewer
self.viewer = None
# setup a placeholder for a pointer to a backup state
self._has_backup = False
# setup a done flag
self.done = True
# setup the controllers, screen, and RAM buffers
self.controllers = [self._controller_buffer(port) for port in range(2)]
self.screen = self._screen_buffer()
self.ram = self._ram_buffer()
def _screen_buffer(self):
"""Setup the screen buffer from the C++ code."""
# get the address of the screen
address = _LIB.Screen(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(SCREEN_TENSOR)).contents
# create a NumPy array from the buffer
screen = np.frombuffer(buffer_, dtype='uint8')
# reshape the screen from a column vector to a tensor
screen = screen.reshape(SCREEN_SHAPE_32_BIT)
# flip the bytes if the machine is little-endian (which it likely is)
if sys.byteorder == 'little':
# invert the little-endian BGRx channels to big-endian xRGB
screen = screen[:, :, ::-1]
# remove the 0th axis (padding from storing colors in 32 bit)
return screen[:, :, 1:]
def _ram_buffer(self):
"""Setup the RAM buffer from the C++ code."""
# get the address of the RAM
address = _LIB.Memory(self._env)
# create a buffer from the contents of the address location
buffer_ = ctypes.cast(address, ctypes.POINTER(RAM_VECTOR)).contents
# create a NumPy array from the buffer
return np.frombuffer(buffer_, dtype='uint8')
def _controller_buffer(self, port):
"""
Find the pointer to a controller and setup a NumPy buffer.
Args:
port: the port of the controller to setup
Returns:
a NumPy buffer with the controller's binary data
"""
# get the address of the controller
address = _LIB.Controller(self._env, port)
# create a memory buffer using the ctypes pointer for this vector
buffer_ = ctypes.cast(address, ctypes.POINTER(CONTROLLER_VECTOR)).contents
# create a NumPy buffer from the binary data and return it
return np.frombuffer(buffer_, dtype='uint8')
def _backup(self):
"""Backup the NES state in the emulator."""
_LIB.Backup(self._env)
self._has_backup = True
def _restore(self):
"""Restore the backup state into the NES emulator."""
_LIB.Restore(self._env)
def _will_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def seed(self, seed=None):
"""
Set the seed for this environment's random number generator.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
# if there is no seed, return an empty list
if seed is None:
return []
# set the random number seed for the NumPy random number generator
self.np_random.seed(seed)
# return the list of seeds used by RNG(s) in the environment
return [seed]
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns:
state (np.ndarray): next frame as a result of the given action
"""
# call the before reset callback
self._will_reset()
# reset the emulator
if self._has_backup:
self._restore()
else:
_LIB.Reset(self._env)
# call the after reset callback
self._did_reset()
# set the done flag to false
self.done = False
# return the screen from the emulator
return self.screen
def _did_reset(self):
"""Handle any RAM hacking after a reset occurs."""
pass
def step(self, action):
"""
Run one frame of the NES and return the relevant observation data.
Args:
action (byte): the bitmap determining which buttons to press
Returns:
a tuple of:
- state (np.ndarray): next frame as a result of the given action
- reward (float) : amount of reward returned after given action
- done (boolean): whether the episode has ended
- info (dict): contains auxiliary diagnostic information
"""
# if the environment is done, raise an error
if self.done:
raise ValueError('cannot step in a done environment! call `reset`')
# set the action on the controller
self.controllers[0][:] = action
# pass the action to the emulator as an unsigned byte
_LIB.Step(self._env)
# get the reward for this step
reward = self._get_reward()
# get the done flag for this step
self.done = self._get_done()
# get the info for this step
info = self._get_info()
# call the after step callback
self._did_step(self.done)
# bound the reward in [min, max]
if reward < self.reward_range[0]:
reward = self.reward_range[0]
elif reward > self.reward_range[1]:
reward = self.reward_range[1]
# return the screen from the emulator and other relevant data
return self.screen, reward, self.done, info
def _get_reward(self):
"""Return the reward after a step occurs."""
return 0
def _get_done(self):
"""Return True if the episode is over, False otherwise."""
return False
def _get_info(self):
"""Return the info after a step occurs."""
return {}
def _did_step(self, done):
"""
Handle any RAM hacking after a step occurs.
Args:
done (bool): whether the done flag is set to true
Returns:
None
"""
pass
def close(self):
"""Close the environment."""
# make sure the environment hasn't already been closed
if self._env is None:
raise ValueError('env has already been closed.')
# purge the environment from C++ memory
_LIB.Close(self._env)
# deallocate the object locally
self._env = None
# if there is an image viewer open, delete it
if self.viewer is not None:
self.viewer.close()
def render(self, mode='human'):
"""
Render the environment.
Args:
mode (str): the mode to render with:
- human: render to the current display
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image
Returns:
a numpy array if mode is 'rgb_array', None otherwise
"""
if mode == 'human':
# if the viewer isn't setup, import it and create one
if self.viewer is None:
from ._image_viewer import ImageViewer
# get the caption for the ImageViewer
if self.spec is None:
# if there is no spec, just use the .nes filename
caption = self._rom_path.split('/')[-1]
else:
# set the caption to the OpenAI Gym id
caption = self.spec.id
# create the ImageViewer to display frames
self.viewer = ImageViewer(
caption=caption,
height=SCREEN_HEIGHT,
width=SCREEN_WIDTH,
)
# show the screen on the image viewer
self.viewer.show(self.screen)
elif mode == 'rgb_array':
return self.screen
else:
# unpack the modes as comma delineated strings ('a', 'b', ...)
render_modes = [repr(x) for x in self.metadata['render.modes']]
msg = 'valid render modes are: {}'.format(', '.join(render_modes))
raise NotImplementedError(msg)
def get_keys_to_action(self):
"""Return the dictionary of keyboard keys to actions."""
# keyboard keys in an array ordered by their byte order in the bitmap
# i.e. right = 7, left = 6, ..., B = 1, A = 0
buttons = np.array([
ord('d'), # right
ord('a'), # left
ord('s'), # down
ord('w'), # up
ord('\r'), # start
ord(' '), # select
ord('p'), # B
ord('o'), # A
])
# the dictionary of key presses to controller codes
keys_to_action = {}
# the combination map of values for the controller
values = 8 * [[0, 1]]
# iterate over all the combinations
for combination in itertools.product(*values):
# unpack the tuple of bits into an integer
byte = int(''.join(map(str, combination)), 2)
# unwrap the pressed buttons based on the bitmap
pressed = buttons[list(map(bool, combination))]
# assign the pressed buttons to the output byte
keys_to_action[tuple(sorted(pressed))] = byte
return keys_to_action
def get_action_meanings(self):
"""Return a list of actions meanings."""
return ['NOOP']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.