after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
code = e.args[0]
if code in {
"08S01",
"01000",
"01002",
"08003",
"08007",
"08S02",
"08001",
"HYT00",
"HY010",
"10054",
}:
return True
return super(MSDialect_pyodbc, self).is_disconnect(e, connection, cursor)
|
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
code = e.args[0]
if code in (
"08S01",
"01002",
"08003",
"08007",
"08S02",
"08001",
"HYT00",
"HY010",
"10054",
):
return True
return super(MSDialect_pyodbc, self).is_disconnect(e, connection, cursor)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5646
|
restart done?
Traceback (most recent call last):
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\engine\base.py", line 1276, in _execute_context
self.dialect.do_execute(
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\engine\default.py", line 593, in do_execute
cursor.execute(statement, parameters)
pyodbc.Error: ('01000', '[01000] [Microsoft][ODBC SQL Server Driver][DBNETLIB]ConnectionWrite (WrapperWrite()). (233) (SQLExecDirectW); [01000] [Microsoft][ODBC SQL Server Driver][DBNETLIB]Allgemeiner Netzwerkfehler. Weitere Informationen finden Sie in der Dokumentation über Netzwerke. (11)')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:/Users/xxx/PycharmProjects/sqlalchemy_bug/main.py", line 12, in <module>
session.execute('SELECT 1')
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\orm\session.py", line 1291, in execute
return self._connection_for_bind(bind, close_with_result=True).execute(
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\engine\base.py", line 1011, in execute
return meth(self, multiparams, params)
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\sql\elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\engine\base.py", line 1124, in _execute_clauseelement
ret = self._execute_context(
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\engine\base.py", line 1316, in _execute_context
self._handle_dbapi_exception(
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\engine\base.py", line 1510, in _handle_dbapi_exception
util.raise_(
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\util\compat.py", line 182, in raise_
raise exception
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\engine\base.py", line 1276, in _execute_context
self.dialect.do_execute(
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\engine\default.py", line 593, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.DBAPIError: (pyodbc.Error) ('01000', '[01000] [Microsoft][ODBC SQL Server Driver][DBNETLIB]ConnectionWrite (WrapperWrite()). (233) (SQLExecDirectW); [01000] [Microsoft][ODBC SQL Server Driver][DBNETLIB]Allgemeiner Netzwerkfehler. Weitere Informationen finden Sie in der Dokumentation über Netzwerke. (11)')
[SQL: SELECT 1]
(Background on this error at: http://sqlalche.me/e/13/dbapi)
Exception during reset or similar
Traceback (most recent call last):
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\pool\base.py", line 697, in _finalize_fairy
fairy._reset(pool)
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\pool\base.py", line 893, in _reset
pool._dialect.do_rollback(self)
File "C:\Users\xxx\PycharmProjects\sqlalchemy_bug\.venv\lib\site-packages\sqlalchemy\engine\default.py", line 543, in do_rollback
dbapi_connection.rollback()
pyodbc.OperationalError: ('08S01', '[08S01] [Microsoft][ODBC SQL Server Driver]Kommunikationsverbindungsfehler (0) (SQLEndTran)')
|
pyodbc.Error
|
def fetchone(self, result, dbapi_cursor, hard_close=False):
if not self._rowbuffer:
self._buffer_rows(result, dbapi_cursor)
if not self._rowbuffer:
try:
result._soft_close(hard=hard_close)
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
return None
return self._rowbuffer.popleft()
|
def fetchone(self, result, dbapi_cursor, hard_close=False):
if not self._rowbuffer:
self._buffer_rows(result, dbapi_cursor)
if not self._rowbuffer:
try:
result._soft_close(hard=hard_close)
except BaseException as e:
self.handle_exception(result, e)
return None
return self._rowbuffer.popleft()
|
https://github.com/sqlalchemy/sqlalchemy/issues/5642
|
Traceback (most recent call last):
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/cursor.py", line 1093, in fetchmany
buf.extend(dbapi_cursor.fetchmany(size - lb))
KeyboardInterrupt
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
...
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/result.py", line 379, in iterrows
for row in self._fetchiter_impl():
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/orm/loading.py", line 105, in chunks
fetch = cursor.fetchmany(yield_per)
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/result.py", line 1011, in fetchmany
return self._manyrow_getter(self, size)
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/result.py", line 536, in manyrows
rows = self._fetchmany_impl(num)
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/cursor.py", line 1740, in _fetchmany_impl
return self.cursor_strategy.fetchmany(self, self.cursor, size)
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/cursor.py", line 1095, in fetchmany
self.handle_exception(result, e)
TypeError: handle_exception() missing 1 required positional argument: 'err'
|
TypeError
|
def fetchmany(self, result, dbapi_cursor, size=None):
if size is None:
return self.fetchall(result, dbapi_cursor)
buf = list(self._rowbuffer)
lb = len(buf)
if size > lb:
try:
buf.extend(dbapi_cursor.fetchmany(size - lb))
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
result = buf[0:size]
self._rowbuffer = collections.deque(buf[size:])
return result
|
def fetchmany(self, result, dbapi_cursor, size=None):
if size is None:
return self.fetchall(result, dbapi_cursor)
buf = list(self._rowbuffer)
lb = len(buf)
if size > lb:
try:
buf.extend(dbapi_cursor.fetchmany(size - lb))
except BaseException as e:
self.handle_exception(result, e)
result = buf[0:size]
self._rowbuffer = collections.deque(buf[size:])
return result
|
https://github.com/sqlalchemy/sqlalchemy/issues/5642
|
Traceback (most recent call last):
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/cursor.py", line 1093, in fetchmany
buf.extend(dbapi_cursor.fetchmany(size - lb))
KeyboardInterrupt
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
...
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/result.py", line 379, in iterrows
for row in self._fetchiter_impl():
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/orm/loading.py", line 105, in chunks
fetch = cursor.fetchmany(yield_per)
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/result.py", line 1011, in fetchmany
return self._manyrow_getter(self, size)
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/result.py", line 536, in manyrows
rows = self._fetchmany_impl(num)
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/cursor.py", line 1740, in _fetchmany_impl
return self.cursor_strategy.fetchmany(self, self.cursor, size)
File "<private>/venv/lib/python3.8/site-packages/sqlalchemy/engine/cursor.py", line 1095, in fetchmany
self.handle_exception(result, e)
TypeError: handle_exception() missing 1 required positional argument: 'err'
|
TypeError
|
def _sql_message(self, as_unicode):
util = _preloaded.preloaded.sql_util
details = [self._message(as_unicode=as_unicode)]
if self.statement:
if not as_unicode and not compat.py3k:
stmt_detail = "[SQL: %s]" % compat.safe_bytestring(self.statement)
else:
stmt_detail = "[SQL: %s]" % self.statement
details.append(stmt_detail)
if self.params:
if self.hide_parameters:
details.append("[SQL parameters hidden due to hide_parameters=True]")
else:
params_repr = util._repr_params(self.params, 10, ismulti=self.ismulti)
details.append("[parameters: %r]" % params_repr)
code_str = self._code_str()
if code_str:
details.append(code_str)
return "\n".join(["(%s)" % det for det in self.detail] + details)
|
def _sql_message(self, as_unicode):
from sqlalchemy.sql import util
details = [self._message(as_unicode=as_unicode)]
if self.statement:
if not as_unicode and not compat.py3k:
stmt_detail = "[SQL: %s]" % compat.safe_bytestring(self.statement)
else:
stmt_detail = "[SQL: %s]" % self.statement
details.append(stmt_detail)
if self.params:
if self.hide_parameters:
details.append("[SQL parameters hidden due to hide_parameters=True]")
else:
params_repr = util._repr_params(self.params, 10, ismulti=self.ismulti)
details.append("[parameters: %r]" % params_repr)
code_str = self._code_str()
if code_str:
details.append(code_str)
return "\n".join(["(%s)" % det for det in self.detail] + details)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5632
|
Exception ignored in: <generator object it at 0x7f6aefc9a4a0>
Traceback (most recent call last):
File "run.py", line 36, in it
File ".../python3.8/site-packages/sqlalchemy/engine/base.py", line 1781, in __exit__
File ".../python3.8/site-packages/sqlalchemy/engine/base.py", line 1753, in rollback
File ".../python3.8/site-packages/sqlalchemy/engine/base.py", line 1791, in _do_rollback
File ".../python3.8/site-packages/sqlalchemy/engine/base.py", line 751, in _rollback_impl
File ".../python3.8/site-packages/sqlalchemy/engine/base.py", line 1510, in _handle_dbapi_exception
File ".../python3.8/site-packages/sqlalchemy/util/compat.py", line 182, in raise_
File ".../python3.8/site-packages/sqlalchemy/engine/base.py", line 749, in _rollback_impl
File ".../python3.8/site-packages/sqlalchemy/engine/default.py", line 543, in do_rollback
sqlalchemy.exc.ProgrammingError: <exception str() failed>
|
sqlalchemy.exc.ProgrammingError
|
def _message(self, as_unicode=compat.py3k):
# rules:
#
# 1. under py2k, for __str__ return single string arg as it was
# given without converting to unicode. for __unicode__
# do a conversion but check that it's not unicode already just in
# case
#
# 2. under py3k, single arg string will usually be a unicode
# object, but since __str__() must return unicode, check for
# bytestring just in case
#
# 3. for multiple self.args, this is not a case in current
# SQLAlchemy though this is happening in at least one known external
# library, call str() which does a repr().
#
if len(self.args) == 1:
text = self.args[0]
if as_unicode and isinstance(text, compat.binary_types):
text = compat.decode_backslashreplace(text, "utf-8")
# This is for when the argument is not a string of any sort.
# Otherwise, converting this exception to string would fail for
# non-string arguments.
elif compat.py3k or not as_unicode:
text = str(text)
else:
text = compat.text_type(text)
return text
else:
# this is not a normal case within SQLAlchemy but is here for
# compatibility with Exception.args - the str() comes out as
# a repr() of the tuple
return str(self.args)
|
def _message(self, as_unicode=compat.py3k):
# rules:
#
# 1. under py2k, for __str__ return single string arg as it was
# given without converting to unicode. for __unicode__
# do a conversion but check that it's not unicode already just in
# case
#
# 2. under py3k, single arg string will usually be a unicode
# object, but since __str__() must return unicode, check for
# bytestring just in case
#
# 3. for multiple self.args, this is not a case in current
# SQLAlchemy though this is happening in at least one known external
# library, call str() which does a repr().
#
if len(self.args) == 1:
text = self.args[0]
if as_unicode and isinstance(text, compat.binary_types):
return compat.decode_backslashreplace(text, "utf-8")
else:
return self.args[0]
else:
# this is not a normal case within SQLAlchemy but is here for
# compatibility with Exception.args - the str() comes out as
# a repr() of the tuple
return str(self.args)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5599
|
from sqlalchemy.exc import SQLAlchemyError
class Foo(object):
... pass
str(SQLAlchemyError(Foo()))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __str__ returned non-string (type Foo)
|
TypeError
|
def _adapt_to_context(self, context):
"""When using a cached Compiled construct that has a _result_map,
for a new statement that used the cached Compiled, we need to ensure
the keymap has the Column objects from our new statement as keys.
So here we rewrite keymap with new entries for the new columns
as matched to those of the cached statement.
"""
if not context.compiled._result_columns:
return self
compiled_statement = context.compiled.statement
invoked_statement = context.invoked_statement
if compiled_statement is invoked_statement:
return self
# make a copy and add the columns from the invoked statement
# to the result map.
md = self.__class__.__new__(self.__class__)
md._keymap = dict(self._keymap)
keymap_by_position = self._keymap_by_result_column_idx
for idx, new in enumerate(invoked_statement._exported_columns_iterator()):
try:
rec = keymap_by_position[idx]
except KeyError:
# this can happen when there are bogus column entries
# in a TextualSelect
pass
else:
md._keymap[new] = rec
md.case_sensitive = self.case_sensitive
md._processors = self._processors
assert not self._tuplefilter
md._tuplefilter = None
md._translated_indexes = None
md._keys = self._keys
md._keymap_by_result_column_idx = self._keymap_by_result_column_idx
md._safe_for_cache = self._safe_for_cache
return md
|
def _adapt_to_context(self, context):
"""When using a cached Compiled construct that has a _result_map,
for a new statement that used the cached Compiled, we need to ensure
the keymap has the Column objects from our new statement as keys.
So here we rewrite keymap with new entries for the new columns
as matched to those of the cached statement.
"""
if not context.compiled._result_columns:
return self
compiled_statement = context.compiled.statement
invoked_statement = context.invoked_statement
if compiled_statement is invoked_statement:
return self
# make a copy and add the columns from the invoked statement
# to the result map.
md = self.__class__.__new__(self.__class__)
md._keymap = dict(self._keymap)
# match up new columns positionally to the result columns
for existing, new in zip(
context.compiled._result_columns,
invoked_statement._exported_columns_iterator(),
):
if existing[RM_NAME] in md._keymap:
md._keymap[new] = md._keymap[existing[RM_NAME]]
md.case_sensitive = self.case_sensitive
md._processors = self._processors
assert not self._tuplefilter
md._tuplefilter = None
md._translated_indexes = None
md._keys = self._keys
return md
|
https://github.com/sqlalchemy/sqlalchemy/issues/5559
|
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine [generated in 0.00025s] {}
1
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine [cached since 0.001864s ago] {}
Traceback (most recent call last):
File "test_cache.py", line 28, in <module>
print(e.execute(build_query()).first()[users.c.id])
File "sqlalchemy/lib/sqlalchemy/engine/cursor.py", line 587, in _raise_for_ambiguous_column_name
raise exc.InvalidRequestError(
sqlalchemy.exc.InvalidRequestError: Ambiguous column name 'id' in result set column descriptions
|
sqlalchemy.exc.InvalidRequestError
|
def __init__(self, parent, cursor_description):
context = parent.context
dialect = context.dialect
self._tuplefilter = None
self._translated_indexes = None
self.case_sensitive = dialect.case_sensitive
self._safe_for_cache = False
if context.result_column_struct:
(
result_columns,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
) = context.result_column_struct
num_ctx_cols = len(result_columns)
else:
result_columns = cols_are_ordered = num_ctx_cols = (
loose_column_name_matching
) = textual_ordered = False
# merge cursor.description with the column info
# present in the compiled structure, if any
raw = self._merge_cursor_description(
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
)
self._keymap = {}
# processors in key order for certain per-row
# views like __iter__ and slices
self._processors = [metadata_entry[MD_PROCESSOR] for metadata_entry in raw]
if context.compiled:
self._keymap_by_result_column_idx = {
metadata_entry[MD_RESULT_MAP_INDEX]: metadata_entry
for metadata_entry in raw
}
# keymap by primary string...
by_key = dict(
[(metadata_entry[MD_LOOKUP_KEY], metadata_entry) for metadata_entry in raw]
)
# for compiled SQL constructs, copy additional lookup keys into
# the key lookup map, such as Column objects, labels,
# column keys and other names
if num_ctx_cols:
# if by-primary-string dictionary smaller (or bigger?!) than
# number of columns, assume we have dupes, rewrite
# dupe records with "None" for index which results in
# ambiguous column exception when accessed.
if len(by_key) != num_ctx_cols:
# new in 1.4: get the complete set of all possible keys,
# strings, objects, whatever, that are dupes across two
# different records, first.
index_by_key = {}
dupes = set()
for metadata_entry in raw:
for key in (metadata_entry[MD_RENDERED_NAME],) + (
metadata_entry[MD_OBJECTS] or ()
):
if not self.case_sensitive and isinstance(key, util.string_types):
key = key.lower()
idx = metadata_entry[MD_INDEX]
# if this key has been associated with more than one
# positional index, it's a dupe
if index_by_key.setdefault(key, idx) != idx:
dupes.add(key)
# then put everything we have into the keymap excluding only
# those keys that are dupes.
self._keymap.update(
[
(obj_elem, metadata_entry)
for metadata_entry in raw
if metadata_entry[MD_OBJECTS]
for obj_elem in metadata_entry[MD_OBJECTS]
if obj_elem not in dupes
]
)
# then for the dupe keys, put the "ambiguous column"
# record into by_key.
by_key.update({key: (None, None, (), key) for key in dupes})
else:
# no dupes - copy secondary elements from compiled
# columns into self._keymap
self._keymap.update(
[
(obj_elem, metadata_entry)
for metadata_entry in raw
if metadata_entry[MD_OBJECTS]
for obj_elem in metadata_entry[MD_OBJECTS]
]
)
# update keymap with primary string names taking
# precedence
self._keymap.update(by_key)
# update keymap with "translated" names (sqlite-only thing)
if not num_ctx_cols and context._translate_colname:
self._keymap.update(
[
(
metadata_entry[MD_UNTRANSLATED],
self._keymap[metadata_entry[MD_LOOKUP_KEY]],
)
for metadata_entry in raw
if metadata_entry[MD_UNTRANSLATED]
]
)
|
def __init__(self, parent, cursor_description):
context = parent.context
dialect = context.dialect
self._tuplefilter = None
self._translated_indexes = None
self.case_sensitive = dialect.case_sensitive
self._safe_for_cache = False
if context.result_column_struct:
(
result_columns,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
) = context.result_column_struct
num_ctx_cols = len(result_columns)
else:
result_columns = cols_are_ordered = num_ctx_cols = (
loose_column_name_matching
) = textual_ordered = False
# merge cursor.description with the column info
# present in the compiled structure, if any
raw = self._merge_cursor_description(
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
)
self._keymap = {}
# processors in key order for certain per-row
# views like __iter__ and slices
self._processors = [metadata_entry[MD_PROCESSOR] for metadata_entry in raw]
# keymap by primary string...
by_key = dict(
[(metadata_entry[MD_LOOKUP_KEY], metadata_entry) for metadata_entry in raw]
)
# for compiled SQL constructs, copy additional lookup keys into
# the key lookup map, such as Column objects, labels,
# column keys and other names
if num_ctx_cols:
# if by-primary-string dictionary smaller (or bigger?!) than
# number of columns, assume we have dupes, rewrite
# dupe records with "None" for index which results in
# ambiguous column exception when accessed.
if len(by_key) != num_ctx_cols:
# new in 1.4: get the complete set of all possible keys,
# strings, objects, whatever, that are dupes across two
# different records, first.
index_by_key = {}
dupes = set()
for metadata_entry in raw:
for key in (metadata_entry[MD_RENDERED_NAME],) + (
metadata_entry[MD_OBJECTS] or ()
):
if not self.case_sensitive and isinstance(key, util.string_types):
key = key.lower()
idx = metadata_entry[MD_INDEX]
# if this key has been associated with more than one
# positional index, it's a dupe
if index_by_key.setdefault(key, idx) != idx:
dupes.add(key)
# then put everything we have into the keymap excluding only
# those keys that are dupes.
self._keymap.update(
[
(obj_elem, metadata_entry)
for metadata_entry in raw
if metadata_entry[MD_OBJECTS]
for obj_elem in metadata_entry[MD_OBJECTS]
if obj_elem not in dupes
]
)
# then for the dupe keys, put the "ambiguous column"
# record into by_key.
by_key.update({key: (None, (), key) for key in dupes})
else:
# no dupes - copy secondary elements from compiled
# columns into self._keymap
self._keymap.update(
[
(obj_elem, metadata_entry)
for metadata_entry in raw
if metadata_entry[MD_OBJECTS]
for obj_elem in metadata_entry[MD_OBJECTS]
]
)
# update keymap with primary string names taking
# precedence
self._keymap.update(by_key)
# update keymap with "translated" names (sqlite-only thing)
if not num_ctx_cols and context._translate_colname:
self._keymap.update(
[
(
metadata_entry[MD_UNTRANSLATED],
self._keymap[metadata_entry[MD_LOOKUP_KEY]],
)
for metadata_entry in raw
if metadata_entry[MD_UNTRANSLATED]
]
)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5559
|
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine [generated in 0.00025s] {}
1
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine [cached since 0.001864s ago] {}
Traceback (most recent call last):
File "test_cache.py", line 28, in <module>
print(e.execute(build_query()).first()[users.c.id])
File "sqlalchemy/lib/sqlalchemy/engine/cursor.py", line 587, in _raise_for_ambiguous_column_name
raise exc.InvalidRequestError(
sqlalchemy.exc.InvalidRequestError: Ambiguous column name 'id' in result set column descriptions
|
sqlalchemy.exc.InvalidRequestError
|
def _merge_cursor_description(
self,
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
):
"""Merge a cursor.description with compiled result column information.
There are at least four separate strategies used here, selected
depending on the type of SQL construct used to start with.
The most common case is that of the compiled SQL expression construct,
which generated the column names present in the raw SQL string and
which has the identical number of columns as were reported by
cursor.description. In this case, we assume a 1-1 positional mapping
between the entries in cursor.description and the compiled object.
This is also the most performant case as we disregard extracting /
decoding the column names present in cursor.description since we
already have the desired name we generated in the compiled SQL
construct.
The next common case is that of the completely raw string SQL,
such as passed to connection.execute(). In this case we have no
compiled construct to work with, so we extract and decode the
names from cursor.description and index those as the primary
result row target keys.
The remaining fairly common case is that of the textual SQL
that includes at least partial column information; this is when
we use a :class:`_expression.TextualSelect` construct.
This construct may have
unordered or ordered column information. In the ordered case, we
merge the cursor.description and the compiled construct's information
positionally, and warn if there are additional description names
present, however we still decode the names in cursor.description
as we don't have a guarantee that the names in the columns match
on these. In the unordered case, we match names in cursor.description
to that of the compiled construct based on name matching.
In both of these cases, the cursor.description names and the column
expression objects and names are indexed as result row target keys.
The final case is much less common, where we have a compiled
non-textual SQL expression construct, but the number of columns
in cursor.description doesn't match what's in the compiled
construct. We make the guess here that there might be textual
column expressions in the compiled construct that themselves include
a comma in them causing them to split. We do the same name-matching
as with textual non-ordered columns.
The name-matched system of merging is the same as that used by
SQLAlchemy for all cases up through te 0.9 series. Positional
matching for compiled SQL expressions was introduced in 1.0 as a
major performance feature, and positional matching for textual
:class:`_expression.TextualSelect` objects in 1.1.
As name matching is no longer
a common case, it was acceptable to factor it into smaller generator-
oriented methods that are easier to understand, but incur slightly
more performance overhead.
"""
case_sensitive = context.dialect.case_sensitive
if (
num_ctx_cols
and cols_are_ordered
and not textual_ordered
and num_ctx_cols == len(cursor_description)
):
self._keys = [elem[0] for elem in result_columns]
# pure positional 1-1 case; doesn't need to read
# the names from cursor.description
# this metadata is safe to cache because we are guaranteed
# to have the columns in the same order for new executions
self._safe_for_cache = True
return [
(
idx,
idx,
rmap_entry[RM_OBJECTS],
rmap_entry[RM_NAME].lower()
if not case_sensitive
else rmap_entry[RM_NAME],
rmap_entry[RM_RENDERED_NAME],
context.get_result_processor(
rmap_entry[RM_TYPE],
rmap_entry[RM_RENDERED_NAME],
cursor_description[idx][1],
),
None,
)
for idx, rmap_entry in enumerate(result_columns)
]
else:
# name-based or text-positional cases, where we need
# to read cursor.description names
if textual_ordered:
self._safe_for_cache = True
# textual positional case
raw_iterator = self._merge_textual_cols_by_position(
context, cursor_description, result_columns
)
elif num_ctx_cols:
# compiled SQL with a mismatch of description cols
# vs. compiled cols, or textual w/ unordered columns
# the order of columns can change if the query is
# against a "select *", so not safe to cache
self._safe_for_cache = False
raw_iterator = self._merge_cols_by_name(
context,
cursor_description,
result_columns,
loose_column_name_matching,
)
else:
# no compiled SQL, just a raw string, order of columns
# can change for "select *"
self._safe_for_cache = False
raw_iterator = self._merge_cols_by_none(context, cursor_description)
return [
(
idx,
ridx,
obj,
cursor_colname,
cursor_colname,
context.get_result_processor(mapped_type, cursor_colname, coltype),
untranslated,
)
for (
idx,
ridx,
cursor_colname,
mapped_type,
coltype,
obj,
untranslated,
) in raw_iterator
]
|
def _merge_cursor_description(
self,
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
):
"""Merge a cursor.description with compiled result column information.
There are at least four separate strategies used here, selected
depending on the type of SQL construct used to start with.
The most common case is that of the compiled SQL expression construct,
which generated the column names present in the raw SQL string and
which has the identical number of columns as were reported by
cursor.description. In this case, we assume a 1-1 positional mapping
between the entries in cursor.description and the compiled object.
This is also the most performant case as we disregard extracting /
decoding the column names present in cursor.description since we
already have the desired name we generated in the compiled SQL
construct.
The next common case is that of the completely raw string SQL,
such as passed to connection.execute(). In this case we have no
compiled construct to work with, so we extract and decode the
names from cursor.description and index those as the primary
result row target keys.
The remaining fairly common case is that of the textual SQL
that includes at least partial column information; this is when
we use a :class:`_expression.TextualSelect` construct.
This construct may have
unordered or ordered column information. In the ordered case, we
merge the cursor.description and the compiled construct's information
positionally, and warn if there are additional description names
present, however we still decode the names in cursor.description
as we don't have a guarantee that the names in the columns match
on these. In the unordered case, we match names in cursor.description
to that of the compiled construct based on name matching.
In both of these cases, the cursor.description names and the column
expression objects and names are indexed as result row target keys.
The final case is much less common, where we have a compiled
non-textual SQL expression construct, but the number of columns
in cursor.description doesn't match what's in the compiled
construct. We make the guess here that there might be textual
column expressions in the compiled construct that themselves include
a comma in them causing them to split. We do the same name-matching
as with textual non-ordered columns.
The name-matched system of merging is the same as that used by
SQLAlchemy for all cases up through te 0.9 series. Positional
matching for compiled SQL expressions was introduced in 1.0 as a
major performance feature, and positional matching for textual
:class:`_expression.TextualSelect` objects in 1.1.
As name matching is no longer
a common case, it was acceptable to factor it into smaller generator-
oriented methods that are easier to understand, but incur slightly
more performance overhead.
"""
case_sensitive = context.dialect.case_sensitive
if (
num_ctx_cols
and cols_are_ordered
and not textual_ordered
and num_ctx_cols == len(cursor_description)
):
self._keys = [elem[0] for elem in result_columns]
# pure positional 1-1 case; doesn't need to read
# the names from cursor.description
# this metadata is safe to cache because we are guaranteed
# to have the columns in the same order for new executions
self._safe_for_cache = True
return [
(
idx,
rmap_entry[RM_OBJECTS],
rmap_entry[RM_NAME].lower()
if not case_sensitive
else rmap_entry[RM_NAME],
rmap_entry[RM_RENDERED_NAME],
context.get_result_processor(
rmap_entry[RM_TYPE],
rmap_entry[RM_RENDERED_NAME],
cursor_description[idx][1],
),
None,
)
for idx, rmap_entry in enumerate(result_columns)
]
else:
# name-based or text-positional cases, where we need
# to read cursor.description names
if textual_ordered:
self._safe_for_cache = True
# textual positional case
raw_iterator = self._merge_textual_cols_by_position(
context, cursor_description, result_columns
)
elif num_ctx_cols:
# compiled SQL with a mismatch of description cols
# vs. compiled cols, or textual w/ unordered columns
# the order of columns can change if the query is
# against a "select *", so not safe to cache
self._safe_for_cache = False
raw_iterator = self._merge_cols_by_name(
context,
cursor_description,
result_columns,
loose_column_name_matching,
)
else:
# no compiled SQL, just a raw string, order of columns
# can change for "select *"
self._safe_for_cache = False
raw_iterator = self._merge_cols_by_none(context, cursor_description)
return [
(
idx,
obj,
cursor_colname,
cursor_colname,
context.get_result_processor(mapped_type, cursor_colname, coltype),
untranslated,
)
for (
idx,
cursor_colname,
mapped_type,
coltype,
obj,
untranslated,
) in raw_iterator
]
|
https://github.com/sqlalchemy/sqlalchemy/issues/5559
|
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine [generated in 0.00025s] {}
1
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine [cached since 0.001864s ago] {}
Traceback (most recent call last):
File "test_cache.py", line 28, in <module>
print(e.execute(build_query()).first()[users.c.id])
File "sqlalchemy/lib/sqlalchemy/engine/cursor.py", line 587, in _raise_for_ambiguous_column_name
raise exc.InvalidRequestError(
sqlalchemy.exc.InvalidRequestError: Ambiguous column name 'id' in result set column descriptions
|
sqlalchemy.exc.InvalidRequestError
|
def _merge_textual_cols_by_position(self, context, cursor_description, result_columns):
num_ctx_cols = len(result_columns) if result_columns else None
if num_ctx_cols > len(cursor_description):
util.warn(
"Number of columns in textual SQL (%d) is "
"smaller than number of columns requested (%d)"
% (num_ctx_cols, len(cursor_description))
)
seen = set()
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
if idx < num_ctx_cols:
ctx_rec = result_columns[idx]
obj = ctx_rec[RM_OBJECTS]
ridx = idx
mapped_type = ctx_rec[RM_TYPE]
if obj[0] in seen:
raise exc.InvalidRequestError(
"Duplicate column expression requested in textual SQL: %r" % obj[0]
)
seen.add(obj[0])
else:
mapped_type = sqltypes.NULLTYPE
obj = None
ridx = None
yield idx, ridx, colname, mapped_type, coltype, obj, untranslated
|
def _merge_textual_cols_by_position(self, context, cursor_description, result_columns):
num_ctx_cols = len(result_columns) if result_columns else None
if num_ctx_cols > len(cursor_description):
util.warn(
"Number of columns in textual SQL (%d) is "
"smaller than number of columns requested (%d)"
% (num_ctx_cols, len(cursor_description))
)
seen = set()
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
if idx < num_ctx_cols:
ctx_rec = result_columns[idx]
obj = ctx_rec[RM_OBJECTS]
mapped_type = ctx_rec[RM_TYPE]
if obj[0] in seen:
raise exc.InvalidRequestError(
"Duplicate column expression requested in textual SQL: %r" % obj[0]
)
seen.add(obj[0])
else:
mapped_type = sqltypes.NULLTYPE
obj = None
yield idx, colname, mapped_type, coltype, obj, untranslated
|
https://github.com/sqlalchemy/sqlalchemy/issues/5559
|
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine [generated in 0.00025s] {}
1
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine [cached since 0.001864s ago] {}
Traceback (most recent call last):
File "test_cache.py", line 28, in <module>
print(e.execute(build_query()).first()[users.c.id])
File "sqlalchemy/lib/sqlalchemy/engine/cursor.py", line 587, in _raise_for_ambiguous_column_name
raise exc.InvalidRequestError(
sqlalchemy.exc.InvalidRequestError: Ambiguous column name 'id' in result set column descriptions
|
sqlalchemy.exc.InvalidRequestError
|
def _merge_cols_by_name(
self,
context,
cursor_description,
result_columns,
loose_column_name_matching,
):
dialect = context.dialect
case_sensitive = dialect.case_sensitive
match_map = self._create_description_match_map(
result_columns, case_sensitive, loose_column_name_matching
)
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
try:
ctx_rec = match_map[colname]
except KeyError:
mapped_type = sqltypes.NULLTYPE
obj = None
result_columns_idx = None
else:
obj = ctx_rec[1]
mapped_type = ctx_rec[2]
result_columns_idx = ctx_rec[3]
yield (
idx,
result_columns_idx,
colname,
mapped_type,
coltype,
obj,
untranslated,
)
|
def _merge_cols_by_name(
self,
context,
cursor_description,
result_columns,
loose_column_name_matching,
):
dialect = context.dialect
case_sensitive = dialect.case_sensitive
match_map = self._create_description_match_map(
result_columns, case_sensitive, loose_column_name_matching
)
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
try:
ctx_rec = match_map[colname]
except KeyError:
mapped_type = sqltypes.NULLTYPE
obj = None
else:
obj = ctx_rec[1]
mapped_type = ctx_rec[2]
yield idx, colname, mapped_type, coltype, obj, untranslated
|
https://github.com/sqlalchemy/sqlalchemy/issues/5559
|
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine [generated in 0.00025s] {}
1
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine [cached since 0.001864s ago] {}
Traceback (most recent call last):
File "test_cache.py", line 28, in <module>
print(e.execute(build_query()).first()[users.c.id])
File "sqlalchemy/lib/sqlalchemy/engine/cursor.py", line 587, in _raise_for_ambiguous_column_name
raise exc.InvalidRequestError(
sqlalchemy.exc.InvalidRequestError: Ambiguous column name 'id' in result set column descriptions
|
sqlalchemy.exc.InvalidRequestError
|
def _create_description_match_map(
cls,
result_columns,
case_sensitive=True,
loose_column_name_matching=False,
):
"""when matching cursor.description to a set of names that are present
in a Compiled object, as is the case with TextualSelect, get all the
names we expect might match those in cursor.description.
"""
d = {}
for ridx, elem in enumerate(result_columns):
key = elem[RM_RENDERED_NAME]
if not case_sensitive:
key = key.lower()
if key in d:
# conflicting keyname - just add the column-linked objects
# to the existing record. if there is a duplicate column
# name in the cursor description, this will allow all of those
# objects to raise an ambiguous column error
e_name, e_obj, e_type, e_ridx = d[key]
d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type, ridx
else:
d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx)
if loose_column_name_matching:
# when using a textual statement with an unordered set
# of columns that line up, we are expecting the user
# to be using label names in the SQL that match to the column
# expressions. Enable more liberal matching for this case;
# duplicate keys that are ambiguous will be fixed later.
for r_key in elem[RM_OBJECTS]:
d.setdefault(
r_key,
(elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE], ridx),
)
return d
|
def _create_description_match_map(
cls,
result_columns,
case_sensitive=True,
loose_column_name_matching=False,
):
"""when matching cursor.description to a set of names that are present
in a Compiled object, as is the case with TextualSelect, get all the
names we expect might match those in cursor.description.
"""
d = {}
for elem in result_columns:
key = elem[RM_RENDERED_NAME]
if not case_sensitive:
key = key.lower()
if key in d:
# conflicting keyname - just add the column-linked objects
# to the existing record. if there is a duplicate column
# name in the cursor description, this will allow all of those
# objects to raise an ambiguous column error
e_name, e_obj, e_type = d[key]
d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type
else:
d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE])
if loose_column_name_matching:
# when using a textual statement with an unordered set
# of columns that line up, we are expecting the user
# to be using label names in the SQL that match to the column
# expressions. Enable more liberal matching for this case;
# duplicate keys that are ambiguous will be fixed later.
for r_key in elem[RM_OBJECTS]:
d.setdefault(r_key, (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE]))
return d
|
https://github.com/sqlalchemy/sqlalchemy/issues/5559
|
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine [generated in 0.00025s] {}
1
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine [cached since 0.001864s ago] {}
Traceback (most recent call last):
File "test_cache.py", line 28, in <module>
print(e.execute(build_query()).first()[users.c.id])
File "sqlalchemy/lib/sqlalchemy/engine/cursor.py", line 587, in _raise_for_ambiguous_column_name
raise exc.InvalidRequestError(
sqlalchemy.exc.InvalidRequestError: Ambiguous column name 'id' in result set column descriptions
|
sqlalchemy.exc.InvalidRequestError
|
def _merge_cols_by_none(self, context, cursor_description):
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
yield (
idx,
None,
colname,
sqltypes.NULLTYPE,
coltype,
None,
untranslated,
)
|
def _merge_cols_by_none(self, context, cursor_description):
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
yield idx, colname, sqltypes.NULLTYPE, coltype, None, untranslated
|
https://github.com/sqlalchemy/sqlalchemy/issues/5559
|
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine [generated in 0.00025s] {}
1
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine [cached since 0.001864s ago] {}
Traceback (most recent call last):
File "test_cache.py", line 28, in <module>
print(e.execute(build_query()).first()[users.c.id])
File "sqlalchemy/lib/sqlalchemy/engine/cursor.py", line 587, in _raise_for_ambiguous_column_name
raise exc.InvalidRequestError(
sqlalchemy.exc.InvalidRequestError: Ambiguous column name 'id' in result set column descriptions
|
sqlalchemy.exc.InvalidRequestError
|
def __getstate__(self):
return {
"_keymap": {
key: (rec[MD_INDEX], rec[MD_RESULT_MAP_INDEX], _UNPICKLED, key)
for key, rec in self._keymap.items()
if isinstance(key, util.string_types + util.int_types)
},
"_keys": self._keys,
"case_sensitive": self.case_sensitive,
"_translated_indexes": self._translated_indexes,
"_tuplefilter": self._tuplefilter,
}
|
def __getstate__(self):
return {
"_keymap": {
key: (rec[MD_INDEX], _UNPICKLED, key)
for key, rec in self._keymap.items()
if isinstance(key, util.string_types + util.int_types)
},
"_keys": self._keys,
"case_sensitive": self.case_sensitive,
"_translated_indexes": self._translated_indexes,
"_tuplefilter": self._tuplefilter,
}
|
https://github.com/sqlalchemy/sqlalchemy/issues/5559
|
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine [generated in 0.00025s] {}
1
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine [cached since 0.001864s ago] {}
Traceback (most recent call last):
File "test_cache.py", line 28, in <module>
print(e.execute(build_query()).first()[users.c.id])
File "sqlalchemy/lib/sqlalchemy/engine/cursor.py", line 587, in _raise_for_ambiguous_column_name
raise exc.InvalidRequestError(
sqlalchemy.exc.InvalidRequestError: Ambiguous column name 'id' in result set column descriptions
|
sqlalchemy.exc.InvalidRequestError
|
def __setstate__(self, state):
self._processors = [None for _ in range(len(state["_keys"]))]
self._keymap = state["_keymap"]
self._keymap_by_result_column_idx = {
rec[MD_RESULT_MAP_INDEX]: rec for rec in self._keymap.values()
}
self._keys = state["_keys"]
self.case_sensitive = state["case_sensitive"]
if state["_translated_indexes"]:
self._translated_indexes = state["_translated_indexes"]
self._tuplefilter = tuplegetter(*self._translated_indexes)
else:
self._translated_indexes = self._tuplefilter = None
|
def __setstate__(self, state):
self._processors = [None for _ in range(len(state["_keys"]))]
self._keymap = state["_keymap"]
self._keys = state["_keys"]
self.case_sensitive = state["case_sensitive"]
if state["_translated_indexes"]:
self._translated_indexes = state["_translated_indexes"]
self._tuplefilter = tuplegetter(*self._translated_indexes)
else:
self._translated_indexes = self._tuplefilter = None
|
https://github.com/sqlalchemy/sqlalchemy/issues/5559
|
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,711 INFO sqlalchemy.engine.Engine [generated in 0.00025s] {}
1
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine SELECT users.id, teams.id
FROM users LEFT OUTER JOIN teams ON teams.id = users.team_id
2020-09-05 14:00:42,712 INFO sqlalchemy.engine.Engine [cached since 0.001864s ago] {}
Traceback (most recent call last):
File "test_cache.py", line 28, in <module>
print(e.execute(build_query()).first()[users.c.id])
File "sqlalchemy/lib/sqlalchemy/engine/cursor.py", line 587, in _raise_for_ambiguous_column_name
raise exc.InvalidRequestError(
sqlalchemy.exc.InvalidRequestError: Ambiguous column name 'id' in result set column descriptions
|
sqlalchemy.exc.InvalidRequestError
|
def create_async_engine(*arg, **kw):
"""Create a new async engine instance.
Arguments passed to :func:`_asyncio.create_async_engine` are mostly
identical to those passed to the :func:`_sa.create_engine` function.
The specified dialect must be an asyncio-compatible dialect
such as :ref:`dialect-postgresql-asyncpg`.
.. versionadded:: 1.4
"""
if kw.get("server_side_cursors", False):
raise async_exc.AsyncMethodRequired(
"Can't set server_side_cursors for async engine globally; "
"use the connection.stream() method for an async "
"streaming result set"
)
kw["future"] = True
sync_engine = _create_engine(*arg, **kw)
return AsyncEngine(sync_engine)
|
def create_async_engine(*arg, **kw):
"""Create a new async engine instance.
Arguments passed to :func:`_asyncio.create_async_engine` are mostly
identical to those passed to the :func:`_sa.create_engine` function.
The specified dialect must be an asyncio-compatible dialect
such as :ref:`dialect-postgresql-asyncpg`.
.. versionadded:: 1.4
"""
if kw.get("server_side_cursors", False):
raise exc.AsyncMethodRequired(
"Can't set server_side_cursors for async engine globally; "
"use the connection.stream() method for an async "
"streaming result set"
)
kw["future"] = True
sync_engine = _create_engine(*arg, **kw)
return AsyncEngine(sync_engine)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5529
|
In [1]: from sqlalchemy.ext.asyncio import create_async_engine
In [2]: await create_async_engine(server_side_cursors=True)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-2bcff6ed9bdc> in <module>
----> 1 await create_async_engine(server_side_cursors=True)
lib/sqlalchemy/ext/asyncio/engine.py in create_async_engine(*arg, **kw)
32
33 if kw.get("server_side_cursors", False):
---> 34 raise exc.AsyncMethodRequired(
35 "Can't set server_side_cursors for async engine globally; "
36 "use the connection.stream() method for an async "
AttributeError: module 'sqlalchemy.exc' has no attribute 'AsyncMethodRequired'
|
AttributeError
|
def _generate_path(
self,
path,
attr,
for_strategy,
wildcard_key,
raiseerr=True,
polymorphic_entity_context=None,
):
existing_of_type = self._of_type
self._of_type = None
if raiseerr and not path.has_entity:
if isinstance(path, TokenRegistry):
raise sa_exc.ArgumentError(
"Wildcard token cannot be followed by another entity"
)
else:
raise sa_exc.ArgumentError(
"Mapped attribute '%s' does not refer to a mapped entity" % (path.prop,)
)
if isinstance(attr, util.string_types):
default_token = attr.endswith(_DEFAULT_TOKEN)
attr_str_name = attr
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if default_token:
self.propagate_to_loaders = False
if wildcard_key:
attr = "%s:%s" % (wildcard_key, attr)
# TODO: AliasedInsp inside the path for of_type is not
# working for a with_polymorphic entity because the
# relationship loaders don't render the with_poly into the
# path. See #4469 which will try to improve this
if existing_of_type and not existing_of_type.is_aliased_class:
path = path.parent[existing_of_type]
path = path.token(attr)
self.path = path
return path
if existing_of_type:
ent = inspect(existing_of_type)
else:
ent = path.entity
try:
# use getattr on the class to work around
# synonyms, hybrids, etc.
attr = getattr(ent.class_, attr)
except AttributeError as err:
if raiseerr:
util.raise_(
sa_exc.ArgumentError(
'Can\'t find property named "%s" on '
"%s in this Query." % (attr, ent)
),
replace_context=err,
)
else:
return None
else:
try:
attr = found_property = attr.property
except AttributeError as ae:
if not isinstance(attr, MapperProperty):
util.raise_(
sa_exc.ArgumentError(
'Expected attribute "%s" on %s to be a '
"mapped attribute; "
"instead got %s object." % (attr_str_name, ent, type(attr))
),
replace_context=ae,
)
else:
raise
path = path[attr]
elif _is_mapped_class(attr):
# TODO: this does not appear to be a valid codepath. "attr"
# would never be a mapper. This block is present in 1.2
# as well however does not seem to be accessed in any tests.
if not orm_util._entity_corresponds_to_use_path_impl(attr.parent, path[-1]):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
else:
prop = found_property = attr.property
if not orm_util._entity_corresponds_to_use_path_impl(attr.parent, path[-1]):
if raiseerr:
raise sa_exc.ArgumentError(
'Attribute "%s" does not '
'link from element "%s".%s'
% (
attr,
path.entity,
(
" Did you mean to use "
"%s.of_type(%s)?" % (path[-2], attr.class_.__name__)
if len(path) > 1
and path.entity.is_mapper
and attr.parent.is_aliased_class
else ""
),
)
)
else:
return None
if attr._extra_criteria:
self._extra_criteria = attr._extra_criteria
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = of_type_info = inspect(ac)
if polymorphic_entity_context is None:
polymorphic_entity_context = self.context
existing = path.entity_path[prop].get(
polymorphic_entity_context, "path_with_polymorphic"
)
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper,
aliased=True,
_use_mapper_path=True,
_existing_alias=inspect(existing) if existing is not None else None,
)
ext_info = inspect(ac)
path.entity_path[prop].set(
polymorphic_entity_context, "path_with_polymorphic", ac
)
path = path[prop][ext_info]
self._of_type = of_type_info
else:
path = path[prop]
if for_strategy is not None:
found_property._get_strategy(for_strategy)
if path.has_entity:
path = path.entity_path
self.path = path
return path
|
def _generate_path(
self,
path,
attr,
for_strategy,
wildcard_key,
raiseerr=True,
polymorphic_entity_context=None,
):
existing_of_type = self._of_type
self._of_type = None
if raiseerr and not path.has_entity:
if isinstance(path, TokenRegistry):
raise sa_exc.ArgumentError(
"Wildcard token cannot be followed by another entity"
)
else:
raise sa_exc.ArgumentError(
"Mapped attribute '%s' does not refer to a mapped entity" % (path.prop,)
)
if isinstance(attr, util.string_types):
default_token = attr.endswith(_DEFAULT_TOKEN)
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if default_token:
self.propagate_to_loaders = False
if wildcard_key:
attr = "%s:%s" % (wildcard_key, attr)
# TODO: AliasedInsp inside the path for of_type is not
# working for a with_polymorphic entity because the
# relationship loaders don't render the with_poly into the
# path. See #4469 which will try to improve this
if existing_of_type and not existing_of_type.is_aliased_class:
path = path.parent[existing_of_type]
path = path.token(attr)
self.path = path
return path
if existing_of_type:
ent = inspect(existing_of_type)
else:
ent = path.entity
try:
# use getattr on the class to work around
# synonyms, hybrids, etc.
attr = getattr(ent.class_, attr)
except AttributeError as err:
if raiseerr:
util.raise_(
sa_exc.ArgumentError(
'Can\'t find property named "%s" on '
"%s in this Query." % (attr, ent)
),
replace_context=err,
)
else:
return None
else:
attr = found_property = attr.property
path = path[attr]
elif _is_mapped_class(attr):
# TODO: this does not appear to be a valid codepath. "attr"
# would never be a mapper. This block is present in 1.2
# as well however does not seem to be accessed in any tests.
if not orm_util._entity_corresponds_to_use_path_impl(attr.parent, path[-1]):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
else:
prop = found_property = attr.property
if not orm_util._entity_corresponds_to_use_path_impl(attr.parent, path[-1]):
if raiseerr:
raise sa_exc.ArgumentError(
'Attribute "%s" does not '
'link from element "%s".%s'
% (
attr,
path.entity,
(
" Did you mean to use "
"%s.of_type(%s)?" % (path[-2], attr.class_.__name__)
if len(path) > 1
and path.entity.is_mapper
and attr.parent.is_aliased_class
else ""
),
)
)
else:
return None
if attr._extra_criteria:
self._extra_criteria = attr._extra_criteria
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = of_type_info = inspect(ac)
if polymorphic_entity_context is None:
polymorphic_entity_context = self.context
existing = path.entity_path[prop].get(
polymorphic_entity_context, "path_with_polymorphic"
)
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper,
aliased=True,
_use_mapper_path=True,
_existing_alias=inspect(existing) if existing is not None else None,
)
ext_info = inspect(ac)
path.entity_path[prop].set(
polymorphic_entity_context, "path_with_polymorphic", ac
)
path = path[prop][ext_info]
self._of_type = of_type_info
else:
path = path[prop]
if for_strategy is not None:
found_property._get_strategy(for_strategy)
if path.has_entity:
path = path.entity_path
self.path = path
return path
|
https://github.com/sqlalchemy/sqlalchemy/issues/4589
|
Traceback (most recent call last):
File "/bugreport/test_orm_changed.py", line 73, in <module>
print s.query(Foo).options(joinedload('bar').load_only('biz')).first()
File "/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 1358, in options
return self._options(False, *args)
File "<string>", line 2, in _options
File "/lib/python2.7/site-packages/sqlalchemy/orm/base.py", line 201, in generate
fn(self, *args[1:], **kw)
File "/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 1375, in _options
opt.process_query(self)
File "/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py", line 146, in process_query
self._process(query, True)
File "/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py", line 468, in _process
query._current_path, query._attributes, raiseerr)
File "/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py", line 639, in _bind_loader
loader.path, token, None, raiseerr):
File "/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py", line 200, in _generate_path
attr = attr.property
AttributeError: 'property' object has no attribute 'property'
|
AttributeError
|
def _generate_path(self, path, attr, for_strategy, wildcard_key, raiseerr=True):
existing_of_type = self._of_type
self._of_type = None
if raiseerr and not path.has_entity:
if isinstance(path, TokenRegistry):
raise sa_exc.ArgumentError(
"Wildcard token cannot be followed by another entity"
)
else:
raise sa_exc.ArgumentError(
"Mapped attribute '%s' does not refer to a mapped entity" % (path.prop,)
)
if isinstance(attr, util.string_types):
default_token = attr.endswith(_DEFAULT_TOKEN)
attr_str_name = attr
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if default_token:
self.propagate_to_loaders = False
if wildcard_key:
attr = "%s:%s" % (wildcard_key, attr)
# TODO: AliasedInsp inside the path for of_type is not
# working for a with_polymorphic entity because the
# relationship loaders don't render the with_poly into the
# path. See #4469 which will try to improve this
if existing_of_type and not existing_of_type.is_aliased_class:
path = path.parent[existing_of_type]
path = path.token(attr)
self.path = path
return path
if existing_of_type:
ent = inspect(existing_of_type)
else:
ent = path.entity
try:
# use getattr on the class to work around
# synonyms, hybrids, etc.
attr = getattr(ent.class_, attr)
except AttributeError as err:
if raiseerr:
util.raise_(
sa_exc.ArgumentError(
'Can\'t find property named "%s" on '
"%s in this Query." % (attr, ent)
),
replace_context=err,
)
else:
return None
else:
try:
attr = found_property = attr.property
except AttributeError as ae:
if not isinstance(attr, MapperProperty):
util.raise_(
sa_exc.ArgumentError(
'Expected attribute "%s" on %s to be a '
"mapped attribute; "
"instead got %s object." % (attr_str_name, ent, type(attr))
),
replace_context=ae,
)
else:
raise
path = path[attr]
elif _is_mapped_class(attr):
# TODO: this does not appear to be a valid codepath. "attr"
# would never be a mapper. This block is present in 1.2
# as well however does not seem to be accessed in any tests.
if not orm_util._entity_corresponds_to_use_path_impl(attr.parent, path[-1]):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
else:
prop = found_property = attr.property
if not orm_util._entity_corresponds_to_use_path_impl(attr.parent, path[-1]):
if raiseerr:
raise sa_exc.ArgumentError(
'Attribute "%s" does not '
'link from element "%s".%s'
% (
attr,
path.entity,
(
" Did you mean to use "
"%s.of_type(%s)?" % (path[-2], attr.class_.__name__)
if len(path) > 1
and path.entity.is_mapper
and attr.parent.is_aliased_class
else ""
),
)
)
else:
return None
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = of_type_info = inspect(ac)
existing = path.entity_path[prop].get(self.context, "path_with_polymorphic")
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper,
aliased=True,
_use_mapper_path=True,
_existing_alias=inspect(existing) if existing is not None else None,
)
ext_info = inspect(ac)
path.entity_path[prop].set(self.context, "path_with_polymorphic", ac)
path = path[prop][ext_info]
self._of_type = of_type_info
else:
path = path[prop]
if for_strategy is not None:
found_property._get_strategy(for_strategy)
if path.has_entity:
path = path.entity_path
self.path = path
return path
|
def _generate_path(self, path, attr, for_strategy, wildcard_key, raiseerr=True):
existing_of_type = self._of_type
self._of_type = None
if raiseerr and not path.has_entity:
if isinstance(path, TokenRegistry):
raise sa_exc.ArgumentError(
"Wildcard token cannot be followed by another entity"
)
else:
raise sa_exc.ArgumentError(
"Mapped attribute '%s' does not refer to a mapped entity" % (path.prop,)
)
if isinstance(attr, util.string_types):
default_token = attr.endswith(_DEFAULT_TOKEN)
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if default_token:
self.propagate_to_loaders = False
if wildcard_key:
attr = "%s:%s" % (wildcard_key, attr)
# TODO: AliasedInsp inside the path for of_type is not
# working for a with_polymorphic entity because the
# relationship loaders don't render the with_poly into the
# path. See #4469 which will try to improve this
if existing_of_type and not existing_of_type.is_aliased_class:
path = path.parent[existing_of_type]
path = path.token(attr)
self.path = path
return path
if existing_of_type:
ent = inspect(existing_of_type)
else:
ent = path.entity
try:
# use getattr on the class to work around
# synonyms, hybrids, etc.
attr = getattr(ent.class_, attr)
except AttributeError as err:
if raiseerr:
util.raise_(
sa_exc.ArgumentError(
'Can\'t find property named "%s" on '
"%s in this Query." % (attr, ent)
),
replace_context=err,
)
else:
return None
else:
attr = found_property = attr.property
path = path[attr]
elif _is_mapped_class(attr):
# TODO: this does not appear to be a valid codepath. "attr"
# would never be a mapper. This block is present in 1.2
# as well however does not seem to be accessed in any tests.
if not orm_util._entity_corresponds_to_use_path_impl(attr.parent, path[-1]):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
else:
prop = found_property = attr.property
if not orm_util._entity_corresponds_to_use_path_impl(attr.parent, path[-1]):
if raiseerr:
raise sa_exc.ArgumentError(
'Attribute "%s" does not '
'link from element "%s".%s'
% (
attr,
path.entity,
(
" Did you mean to use "
"%s.of_type(%s)?" % (path[-2], attr.class_.__name__)
if len(path) > 1
and path.entity.is_mapper
and attr.parent.is_aliased_class
else ""
),
)
)
else:
return None
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = of_type_info = inspect(ac)
existing = path.entity_path[prop].get(self.context, "path_with_polymorphic")
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper,
aliased=True,
_use_mapper_path=True,
_existing_alias=inspect(existing) if existing is not None else None,
)
ext_info = inspect(ac)
path.entity_path[prop].set(self.context, "path_with_polymorphic", ac)
path = path[prop][ext_info]
self._of_type = of_type_info
else:
path = path[prop]
if for_strategy is not None:
found_property._get_strategy(for_strategy)
if path.has_entity:
path = path.entity_path
self.path = path
return path
|
https://github.com/sqlalchemy/sqlalchemy/issues/4589
|
Traceback (most recent call last):
File "/bugreport/test_orm_changed.py", line 73, in <module>
print s.query(Foo).options(joinedload('bar').load_only('biz')).first()
File "/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 1358, in options
return self._options(False, *args)
File "<string>", line 2, in _options
File "/lib/python2.7/site-packages/sqlalchemy/orm/base.py", line 201, in generate
fn(self, *args[1:], **kw)
File "/lib/python2.7/site-packages/sqlalchemy/orm/query.py", line 1375, in _options
opt.process_query(self)
File "/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py", line 146, in process_query
self._process(query, True)
File "/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py", line 468, in _process
query._current_path, query._attributes, raiseerr)
File "/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py", line 639, in _bind_loader
loader.path, token, None, raiseerr):
File "/lib/python2.7/site-packages/sqlalchemy/orm/strategy_options.py", line 200, in _generate_path
attr = attr.property
AttributeError: 'property' object has no attribute 'property'
|
AttributeError
|
def __init__(self, dialect, connection, checkfirst=False, **kwargs):
super(ENUM.EnumDropper, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
|
def __init__(self, *enums, **kw):
"""Construct an :class:`_postgresql.ENUM`.
Arguments are the same as that of
:class:`_types.Enum`, but also including
the following parameters.
:param create_type: Defaults to True.
Indicates that ``CREATE TYPE`` should be
emitted, after optionally checking for the
presence of the type, when the parent
table is being created; and additionally
that ``DROP TYPE`` is called when the table
is dropped. When ``False``, no check
will be performed and no ``CREATE TYPE``
or ``DROP TYPE`` is emitted, unless
:meth:`~.postgresql.ENUM.create`
or :meth:`~.postgresql.ENUM.drop`
are called directly.
Setting to ``False`` is helpful
when invoking a creation scheme to a SQL file
without access to the actual database -
the :meth:`~.postgresql.ENUM.create` and
:meth:`~.postgresql.ENUM.drop` methods can
be used to emit SQL to a target bind.
"""
self.create_type = kw.pop("create_type", True)
super(ENUM, self).__init__(*enums, **kw)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5520
|
Traceback (most recent call last):
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.DuplicateObject: type "mood" already exists
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 51, in <module>
raise ex
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 48, in <module>
Base.metadata.create_all(bind=conn)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/schema.py", line 4546, in create_all
bind._run_visitor(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1657, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 783, in visit_metadata
self.traverse_single(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 811, in visit_table
table.dispatch.before_create(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/event/attr.py", line 322, in __call__
fn(*args, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 645, in __call__
return getattr(self.target, self.name)(*arg, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1575, in _on_table_create
self.create(bind=bind, checkfirst=checkfirst)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1510, in create
bind.execute(CreateEnumType(self))
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1071, in _execute_ddl
ret = self._execute_context(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (psycopg2.errors.DuplicateObject) type "mood" already exists
[SQL: CREATE TYPE minimal_failing_example.mood AS ENUM ('sad', 'ok', 'happy')]
(Background on this error at: http://sqlalche.me/e/13/f405)
|
sqlalchemy.exc.ProgrammingError
|
def create(self, bind=None, checkfirst=True):
"""Emit ``CREATE TYPE`` for this
:class:`_postgresql.ENUM`.
If the underlying dialect does not support
PostgreSQL CREATE TYPE, no action is taken.
:param bind: a connectable :class:`_engine.Engine`,
:class:`_engine.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type does not exist already before
creating.
"""
if not bind.dialect.supports_native_enum:
return
bind._run_ddl_visitor(self.EnumGenerator, self, checkfirst=checkfirst)
|
def create(self, bind=None, checkfirst=True):
"""Emit ``CREATE TYPE`` for this
:class:`_postgresql.ENUM`.
If the underlying dialect does not support
PostgreSQL CREATE TYPE, no action is taken.
:param bind: a connectable :class:`_engine.Engine`,
:class:`_engine.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type does not exist already before
creating.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or not bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(CreateEnumType(self))
|
https://github.com/sqlalchemy/sqlalchemy/issues/5520
|
Traceback (most recent call last):
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.DuplicateObject: type "mood" already exists
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 51, in <module>
raise ex
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 48, in <module>
Base.metadata.create_all(bind=conn)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/schema.py", line 4546, in create_all
bind._run_visitor(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1657, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 783, in visit_metadata
self.traverse_single(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 811, in visit_table
table.dispatch.before_create(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/event/attr.py", line 322, in __call__
fn(*args, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 645, in __call__
return getattr(self.target, self.name)(*arg, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1575, in _on_table_create
self.create(bind=bind, checkfirst=checkfirst)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1510, in create
bind.execute(CreateEnumType(self))
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1071, in _execute_ddl
ret = self._execute_context(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (psycopg2.errors.DuplicateObject) type "mood" already exists
[SQL: CREATE TYPE minimal_failing_example.mood AS ENUM ('sad', 'ok', 'happy')]
(Background on this error at: http://sqlalche.me/e/13/f405)
|
sqlalchemy.exc.ProgrammingError
|
def drop(self, bind=None, checkfirst=True):
"""Emit ``DROP TYPE`` for this
:class:`_postgresql.ENUM`.
If the underlying dialect does not support
PostgreSQL DROP TYPE, no action is taken.
:param bind: a connectable :class:`_engine.Engine`,
:class:`_engine.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type actually exists before dropping.
"""
if not bind.dialect.supports_native_enum:
return
bind._run_ddl_visitor(self.EnumDropper, self, checkfirst=checkfirst)
|
def drop(self, bind=None, checkfirst=True):
"""Emit ``DROP TYPE`` for this
:class:`_postgresql.ENUM`.
If the underlying dialect does not support
PostgreSQL DROP TYPE, no action is taken.
:param bind: a connectable :class:`_engine.Engine`,
:class:`_engine.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type actually exists before dropping.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(DropEnumType(self))
|
https://github.com/sqlalchemy/sqlalchemy/issues/5520
|
Traceback (most recent call last):
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.DuplicateObject: type "mood" already exists
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 51, in <module>
raise ex
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 48, in <module>
Base.metadata.create_all(bind=conn)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/schema.py", line 4546, in create_all
bind._run_visitor(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1657, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 783, in visit_metadata
self.traverse_single(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 811, in visit_table
table.dispatch.before_create(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/event/attr.py", line 322, in __call__
fn(*args, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 645, in __call__
return getattr(self.target, self.name)(*arg, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1575, in _on_table_create
self.create(bind=bind, checkfirst=checkfirst)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1510, in create
bind.execute(CreateEnumType(self))
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1071, in _execute_ddl
ret = self._execute_context(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (psycopg2.errors.DuplicateObject) type "mood" already exists
[SQL: CREATE TYPE minimal_failing_example.mood AS ENUM ('sad', 'ok', 'happy')]
(Background on this error at: http://sqlalche.me/e/13/f405)
|
sqlalchemy.exc.ProgrammingError
|
def _on_table_create(self, target, bind, checkfirst=False, **kw):
if (
checkfirst
or (not self.metadata and not kw.get("_is_metadata_operation", False))
) and not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=checkfirst)
|
def _on_table_create(self, target, bind, checkfirst=False, **kw):
if (
checkfirst
or (not self.metadata and not kw.get("_is_metadata_operation", False))
and not self._check_for_name_in_memos(checkfirst, kw)
):
self.create(bind=bind, checkfirst=checkfirst)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5520
|
Traceback (most recent call last):
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.DuplicateObject: type "mood" already exists
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 51, in <module>
raise ex
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 48, in <module>
Base.metadata.create_all(bind=conn)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/schema.py", line 4546, in create_all
bind._run_visitor(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1657, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 783, in visit_metadata
self.traverse_single(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 811, in visit_table
table.dispatch.before_create(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/event/attr.py", line 322, in __call__
fn(*args, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 645, in __call__
return getattr(self.target, self.name)(*arg, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1575, in _on_table_create
self.create(bind=bind, checkfirst=checkfirst)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1510, in create
bind.execute(CreateEnumType(self))
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1071, in _execute_ddl
ret = self._execute_context(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (psycopg2.errors.DuplicateObject) type "mood" already exists
[SQL: CREATE TYPE minimal_failing_example.mood AS ENUM ('sad', 'ok', 'happy')]
(Background on this error at: http://sqlalche.me/e/13/f405)
|
sqlalchemy.exc.ProgrammingError
|
def __init__(self, dialect, connection, checkfirst=False, **kwargs):
super(ENUM.EnumDropper, self).__init__(connection, **kwargs)
self.checkfirst = checkfirst
|
def __init__(
self, isolation_level=None, json_serializer=None, json_deserializer=None, **kwargs
):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_deserializer = json_deserializer
self._json_serializer = json_serializer
|
https://github.com/sqlalchemy/sqlalchemy/issues/5520
|
Traceback (most recent call last):
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.DuplicateObject: type "mood" already exists
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 51, in <module>
raise ex
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 48, in <module>
Base.metadata.create_all(bind=conn)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/schema.py", line 4546, in create_all
bind._run_visitor(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1657, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 783, in visit_metadata
self.traverse_single(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 811, in visit_table
table.dispatch.before_create(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/event/attr.py", line 322, in __call__
fn(*args, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 645, in __call__
return getattr(self.target, self.name)(*arg, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1575, in _on_table_create
self.create(bind=bind, checkfirst=checkfirst)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1510, in create
bind.execute(CreateEnumType(self))
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1071, in _execute_ddl
ret = self._execute_context(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (psycopg2.errors.DuplicateObject) type "mood" already exists
[SQL: CREATE TYPE minimal_failing_example.mood AS ENUM ('sad', 'ok', 'happy')]
(Background on this error at: http://sqlalche.me/e/13/f405)
|
sqlalchemy.exc.ProgrammingError
|
def visit_enum(self, enum):
if not self._can_drop_enum(enum):
return
self.connection.execute(DropEnumType(enum))
|
def visit_enum(self, type_, **kw):
if not type_.native_enum or not self.dialect.supports_native_enum:
return super(PGTypeCompiler, self).visit_enum(type_, **kw)
else:
return self.visit_ENUM(type_, **kw)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5520
|
Traceback (most recent call last):
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.DuplicateObject: type "mood" already exists
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 51, in <module>
raise ex
File "/Users/robert_muil/Library/Application Support/JetBrains/PyCharm2020.2/scratches/scratch_9.py", line 48, in <module>
Base.metadata.create_all(bind=conn)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/schema.py", line 4546, in create_all
bind._run_visitor(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1657, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 783, in visit_metadata
self.traverse_single(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py", line 144, in traverse_single
return meth(obj, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 811, in visit_table
table.dispatch.before_create(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/event/attr.py", line 322, in __call__
fn(*args, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 645, in __call__
return getattr(self.target, self.name)(*arg, **kw)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1575, in _on_table_create
self.create(bind=bind, checkfirst=checkfirst)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1510, in create
bind.execute(CreateEnumType(self))
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1014, in execute
return meth(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1071, in _execute_ddl
ret = self._execute_context(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1317, in _execute_context
self._handle_dbapi_exception(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1511, in _handle_dbapi_exception
util.raise_(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1277, in _execute_context
self.dialect.do_execute(
File "/usr/local/anaconda3/envs/datacat/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 593, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (psycopg2.errors.DuplicateObject) type "mood" already exists
[SQL: CREATE TYPE minimal_failing_example.mood AS ENUM ('sad', 'ok', 'happy')]
(Background on this error at: http://sqlalche.me/e/13/f405)
|
sqlalchemy.exc.ProgrammingError
|
def post_create_table(self, table):
"""Build table-level CREATE options like ENGINE and COLLATE."""
table_opts = []
opts = dict(
(k[len(self.dialect.name) + 1 :].upper(), v)
for k, v in table.kwargs.items()
if k.startswith("%s_" % self.dialect.name)
)
if table.comment is not None:
opts["COMMENT"] = table.comment
partition_options = [
"PARTITION_BY",
"PARTITIONS",
"SUBPARTITIONS",
"SUBPARTITION_BY",
]
nonpart_options = set(opts).difference(partition_options)
part_options = set(opts).intersection(partition_options)
for opt in topological.sort(
[
("DEFAULT_CHARSET", "COLLATE"),
("DEFAULT_CHARACTER_SET", "COLLATE"),
("CHARSET", "COLLATE"),
("CHARACTER_SET", "COLLATE"),
],
nonpart_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(arg, sqltypes.String())
if opt in (
"DATA_DIRECTORY",
"INDEX_DIRECTORY",
"DEFAULT_CHARACTER_SET",
"CHARACTER_SET",
"DEFAULT_CHARSET",
"DEFAULT_COLLATE",
):
opt = opt.replace("_", " ")
joiner = "="
if opt in (
"TABLESPACE",
"DEFAULT CHARACTER SET",
"CHARACTER SET",
"COLLATE",
):
joiner = " "
table_opts.append(joiner.join((opt, arg)))
for opt in topological.sort(
[
("PARTITION_BY", "PARTITIONS"),
("PARTITION_BY", "SUBPARTITION_BY"),
("PARTITION_BY", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITION_BY"),
("SUBPARTITION_BY", "SUBPARTITIONS"),
],
part_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(arg, sqltypes.String())
opt = opt.replace("_", " ")
joiner = " "
table_opts.append(joiner.join((opt, arg)))
return " ".join(table_opts)
|
def post_create_table(self, table):
"""Build table-level CREATE options like ENGINE and COLLATE."""
table_opts = []
opts = dict(
(k[len(self.dialect.name) + 1 :].upper(), v)
for k, v in table.kwargs.items()
if k.startswith("%s_" % self.dialect.name)
)
if table.comment is not None:
opts["COMMENT"] = table.comment
partition_options = [
"PARTITION_BY",
"PARTITIONS",
"SUBPARTITIONS",
"SUBPARTITION_BY",
]
nonpart_options = set(opts).difference(partition_options)
part_options = set(opts).intersection(partition_options)
for opt in topological.sort(
[
("DEFAULT_CHARSET", "COLLATE"),
("DEFAULT_CHARACTER_SET", "COLLATE"),
],
nonpart_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(arg, sqltypes.String())
if opt in (
"DATA_DIRECTORY",
"INDEX_DIRECTORY",
"DEFAULT_CHARACTER_SET",
"CHARACTER_SET",
"DEFAULT_CHARSET",
"DEFAULT_COLLATE",
):
opt = opt.replace("_", " ")
joiner = "="
if opt in (
"TABLESPACE",
"DEFAULT CHARACTER SET",
"CHARACTER SET",
"COLLATE",
):
joiner = " "
table_opts.append(joiner.join((opt, arg)))
for opt in topological.sort(
[
("PARTITION_BY", "PARTITIONS"),
("PARTITION_BY", "SUBPARTITION_BY"),
("PARTITION_BY", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITIONS"),
("PARTITIONS", "SUBPARTITION_BY"),
("SUBPARTITION_BY", "SUBPARTITIONS"),
],
part_options,
):
arg = opts[opt]
if opt in _reflection._options_of_type_string:
arg = self.sql_compiler.render_literal_value(arg, sqltypes.String())
opt = opt.replace("_", " ")
joiner = " "
table_opts.append(joiner.join((opt, arg)))
return " ".join(table_opts)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5411
|
/tmp/sqlalchemybug/env/bin/python /tmp/sqlalchemybug/test_bug.py
2020-06-20 11:18:15,850 INFO sqlalchemy.engine.base.Engine SHOW VARIABLES LIKE 'sql_mode'
2020-06-20 11:18:15,850 INFO sqlalchemy.engine.base.Engine ()
2020-06-20 11:18:15,851 INFO sqlalchemy.engine.base.Engine SHOW VARIABLES LIKE 'lower_case_table_names'
2020-06-20 11:18:15,851 INFO sqlalchemy.engine.base.Engine ()
2020-06-20 11:18:15,852 INFO sqlalchemy.engine.base.Engine SELECT DATABASE()
2020-06-20 11:18:15,852 INFO sqlalchemy.engine.base.Engine ()
2020-06-20 11:18:15,852 INFO sqlalchemy.engine.base.Engine show collation where `Charset` = 'utf8mb4' and `Collation` = 'utf8mb4_bin'
2020-06-20 11:18:15,852 INFO sqlalchemy.engine.base.Engine ()
2020-06-20 11:18:15,854 INFO sqlalchemy.engine.base.Engine SELECT CAST('test plain returns' AS CHAR(60)) AS anon_1
2020-06-20 11:18:15,854 INFO sqlalchemy.engine.base.Engine ()
2020-06-20 11:18:15,854 INFO sqlalchemy.engine.base.Engine SELECT CAST('test unicode returns' AS CHAR(60)) AS anon_1
2020-06-20 11:18:15,854 INFO sqlalchemy.engine.base.Engine ()
2020-06-20 11:18:15,855 INFO sqlalchemy.engine.base.Engine SELECT CAST('test collated returns' AS CHAR CHARACTER SET utf8mb4) COLLATE utf8mb4_bin AS anon_1
2020-06-20 11:18:15,855 INFO sqlalchemy.engine.base.Engine ()
2020-06-20 11:18:15,856 INFO sqlalchemy.engine.base.Engine DROP TABLE IF EXISTS test_table
2020-06-20 11:18:15,856 INFO sqlalchemy.engine.base.Engine ()
2020-06-20 11:18:15,859 INFO sqlalchemy.engine.base.Engine COMMIT
2020-06-20 11:18:15,860 INFO sqlalchemy.engine.base.Engine DESCRIBE `test_table`
2020-06-20 11:18:15,860 INFO sqlalchemy.engine.base.Engine ()
2020-06-20 11:18:15,863 INFO sqlalchemy.engine.base.Engine ROLLBACK
2020-06-20 11:18:15,863 INFO sqlalchemy.engine.base.Engine
CREATE TABLE test_table (
id INTEGER NOT NULL AUTO_INCREMENT,
PRIMARY KEY (id)
)COLLATE utf8_unicode_ci ENGINE=InnoDB CHARSET=utf8
2020-06-20 11:18:15,863 INFO sqlalchemy.engine.base.Engine ()
2020-06-20 11:18:15,875 INFO sqlalchemy.engine.base.Engine COMMIT
2020-06-20 11:18:15,876 INFO sqlalchemy.engine.base.Engine select TABLE_COLLATION from information_schema.TABLES where table_name = 'test_table'
2020-06-20 11:18:15,876 INFO sqlalchemy.engine.base.Engine ()
Table collation is utf8_general_ci
Traceback (most recent call last):
File "/tmp/sqlalchemybug/test_bug.py", line 36, in <module>
assert result.TABLE_COLLATION == "utf8_unicode_ci"
AssertionError
|
AssertionError
|
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
opts.update(url.query)
keys = opts
query = url.query
connect_args = {}
for param in ("ansi", "unicode_results", "autocommit"):
if param in keys:
connect_args[param] = util.asbool(keys.pop(param))
if "odbc_connect" in keys:
connectors = [util.unquote_plus(keys.pop("odbc_connect"))]
else:
def check_quote(token):
if ";" in str(token):
token = "{%s}" % token.replace("}", "}}")
return token
keys = dict((k, check_quote(v)) for k, v in keys.items())
dsn_connection = "dsn" in keys or ("host" in keys and "database" not in keys)
if dsn_connection:
connectors = ["dsn=%s" % (keys.pop("host", "") or keys.pop("dsn", ""))]
else:
port = ""
if "port" in keys and "port" not in query:
port = ",%d" % int(keys.pop("port"))
connectors = []
driver = keys.pop("driver", self.pyodbc_driver_name)
if driver is None and keys:
# note if keys is empty, this is a totally blank URL
util.warn(
"No driver name specified; "
"this is expected by PyODBC when using "
"DSN-less connections"
)
else:
connectors.append("DRIVER={%s}" % driver)
connectors.extend(
[
"Server=%s%s" % (keys.pop("host", ""), port),
"Database=%s" % keys.pop("database", ""),
]
)
user = keys.pop("user", None)
if user:
connectors.append("UID=%s" % user)
connectors.append("PWD=%s" % keys.pop("password", ""))
else:
connectors.append("Trusted_Connection=Yes")
# if set to 'Yes', the ODBC layer will try to automagically
# convert textual data from your database encoding to your
# client encoding. This should obviously be set to 'No' if
# you query a cp1253 encoded database from a latin1 client...
if "odbc_autotranslate" in keys:
connectors.append("AutoTranslate=%s" % keys.pop("odbc_autotranslate"))
connectors.extend(["%s=%s" % (k, v) for k, v in keys.items()])
return [[";".join(connectors)], connect_args]
|
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
opts.update(url.query)
keys = opts
query = url.query
connect_args = {}
for param in ("ansi", "unicode_results", "autocommit"):
if param in keys:
connect_args[param] = util.asbool(keys.pop(param))
if "odbc_connect" in keys:
connectors = [util.unquote_plus(keys.pop("odbc_connect"))]
else:
def check_quote(token):
if ";" in str(token):
token = "'%s'" % token
return token
keys = dict((k, check_quote(v)) for k, v in keys.items())
dsn_connection = "dsn" in keys or ("host" in keys and "database" not in keys)
if dsn_connection:
connectors = ["dsn=%s" % (keys.pop("host", "") or keys.pop("dsn", ""))]
else:
port = ""
if "port" in keys and "port" not in query:
port = ",%d" % int(keys.pop("port"))
connectors = []
driver = keys.pop("driver", self.pyodbc_driver_name)
if driver is None and keys:
# note if keys is empty, this is a totally blank URL
util.warn(
"No driver name specified; "
"this is expected by PyODBC when using "
"DSN-less connections"
)
else:
connectors.append("DRIVER={%s}" % driver)
connectors.extend(
[
"Server=%s%s" % (keys.pop("host", ""), port),
"Database=%s" % keys.pop("database", ""),
]
)
user = keys.pop("user", None)
if user:
connectors.append("UID=%s" % user)
connectors.append("PWD=%s" % keys.pop("password", ""))
else:
connectors.append("Trusted_Connection=Yes")
# if set to 'Yes', the ODBC layer will try to automagically
# convert textual data from your database encoding to your
# client encoding. This should obviously be set to 'No' if
# you query a cp1253 encoded database from a latin1 client...
if "odbc_autotranslate" in keys:
connectors.append("AutoTranslate=%s" % keys.pop("odbc_autotranslate"))
connectors.extend(["%s=%s" % (k, v) for k, v in keys.items()])
return [[";".join(connectors)], connect_args]
|
https://github.com/sqlalchemy/sqlalchemy/issues/5373
|
InterfaceError Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
2344 try:
-> 2345 return fn()
2346 except dialect.dbapi.Error as e:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in connect(self)
363 if not self._use_threadlocal:
--> 364 return _ConnectionFairy._checkout(self)
365
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in _checkout(cls, pool, threadconns, fairy)
777 if not fairy:
--> 778 fairy = _ConnectionRecord.checkout(pool)
779
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in checkout(cls, pool)
494 def checkout(cls, pool):
--> 495 rec = pool._do_get()
496 try:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/impl.py in _do_get(self)
139 with util.safe_reraise():
--> 140 self._dec_overflow()
141 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
68 compat.raise_(
---> 69 exc_value, with_traceback=exc_tb,
70 )
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
177 try:
--> 178 raise exception
179 finally:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/impl.py in _do_get(self)
136 try:
--> 137 return self._create_connection()
138 except:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in _create_connection(self)
308
--> 309 return _ConnectionRecord(self)
310
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __init__(self, pool, connect)
439 if connect:
--> 440 self.__connect(first_connect_check=True)
441 self.finalize_callback = deque()
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __connect(self, first_connect_check)
660 with util.safe_reraise():
--> 661 pool.logger.debug("Error on connect(): %s", e)
662 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
68 compat.raise_(
---> 69 exc_value, with_traceback=exc_tb,
70 )
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
177 try:
--> 178 raise exception
179 finally:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __connect(self, first_connect_check)
655 self.starttime = time.time()
--> 656 connection = pool._invoke_creator(self)
657 pool.logger.debug("Created new connection %r", connection)
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/strategies.py in connect(connection_record)
113 return connection
--> 114 return dialect.connect(*cargs, **cparams)
115
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/default.py in connect(self, *cargs, **cparams)
489 # inherits the docstring from interfaces.Dialect.connect
--> 490 return self.dbapi.connect(*cargs, **cparams)
491
InterfaceError: ('28000', "[28000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Login failed for user 'username'. (18456) (SQLDriverConnect)")
The above exception was the direct cause of the following exception:
InterfaceError Traceback (most recent call last)
<ipython-input-7-a96561cd5686> in <module>
----> 1 df.to_sql("Test", connection1, 'username', if_exists='append')
/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py in to_sql(self, name, con, schema, if_exists, index, index_label, chunksize, dtype, method)
2661 chunksize=chunksize,
2662 dtype=dtype,
-> 2663 method=method,
2664 )
2665
/opt/conda/lib/python3.7/site-packages/pandas/io/sql.py in to_sql(frame, name, con, schema, if_exists, index, index_label, chunksize, dtype, method)
519 chunksize=chunksize,
520 dtype=dtype,
--> 521 method=method,
522 )
523
/opt/conda/lib/python3.7/site-packages/pandas/io/sql.py in to_sql(self, frame, name, if_exists, index, index_label, schema, chunksize, dtype, method)
1314 dtype=dtype,
1315 )
-> 1316 table.create()
1317 table.insert(chunksize, method=method)
1318 if not name.isdigit() and not name.islower():
/opt/conda/lib/python3.7/site-packages/pandas/io/sql.py in create(self)
642
643 def create(self):
--> 644 if self.exists():
645 if self.if_exists == "fail":
646 raise ValueError(f"Table '{self.name}' already exists.")
/opt/conda/lib/python3.7/site-packages/pandas/io/sql.py in exists(self)
629
630 def exists(self):
--> 631 return self.pd_sql.has_table(self.name, self.schema)
632
633 def sql_schema(self):
/opt/conda/lib/python3.7/site-packages/pandas/io/sql.py in has_table(self, name, schema)
1339 def has_table(self, name, schema=None):
1340 return self.connectable.run_callable(
-> 1341 self.connectable.dialect.has_table, name, schema or self.meta.schema
1342 )
1343
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in run_callable(self, callable_, *args, **kwargs)
2218
2219 """
-> 2220 with self._contextual_connect() as conn:
2221 return conn.run_callable(callable_, *args, **kwargs)
2222
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in _contextual_connect(self, close_with_result, **kwargs)
2309 return self._connection_cls(
2310 self,
-> 2311 self._wrap_pool_connect(self.pool.connect, None),
2312 close_with_result=close_with_result,
2313 **kwargs
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
2347 if connection is None:
2348 Connection._handle_dbapi_exception_noconnection(
-> 2349 e, dialect, self
2350 )
2351 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in _handle_dbapi_exception_noconnection(cls, e, dialect, engine)
1589 elif should_wrap:
1590 util.raise_(
-> 1591 sqlalchemy_exception, with_traceback=exc_info[2], from_=e
1592 )
1593 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
176
177 try:
--> 178 raise exception
179 finally:
180 # credit to
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
2343 dialect = self.dialect
2344 try:
-> 2345 return fn()
2346 except dialect.dbapi.Error as e:
2347 if connection is None:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in connect(self)
362 """
363 if not self._use_threadlocal:
--> 364 return _ConnectionFairy._checkout(self)
365
366 try:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in _checkout(cls, pool, threadconns, fairy)
776 def _checkout(cls, pool, threadconns=None, fairy=None):
777 if not fairy:
--> 778 fairy = _ConnectionRecord.checkout(pool)
779
780 fairy._pool = pool
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in checkout(cls, pool)
493 @classmethod
494 def checkout(cls, pool):
--> 495 rec = pool._do_get()
496 try:
497 dbapi_connection = rec.get_connection()
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/impl.py in _do_get(self)
138 except:
139 with util.safe_reraise():
--> 140 self._dec_overflow()
141 else:
142 return self._do_get()
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
67 if not self.warn_only:
68 compat.raise_(
---> 69 exc_value, with_traceback=exc_tb,
70 )
71 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
176
177 try:
--> 178 raise exception
179 finally:
180 # credit to
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/impl.py in _do_get(self)
135 if self._inc_overflow():
136 try:
--> 137 return self._create_connection()
138 except:
139 with util.safe_reraise():
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in _create_connection(self)
307 """Called by subclasses to create a new ConnectionRecord."""
308
--> 309 return _ConnectionRecord(self)
310
311 def _invalidate(self, connection, exception=None, _checkin=True):
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __init__(self, pool, connect)
438 self.__pool = pool
439 if connect:
--> 440 self.__connect(first_connect_check=True)
441 self.finalize_callback = deque()
442
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __connect(self, first_connect_check)
659 except Exception as e:
660 with util.safe_reraise():
--> 661 pool.logger.debug("Error on connect(): %s", e)
662 else:
663 if first_connect_check:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
67 if not self.warn_only:
68 compat.raise_(
---> 69 exc_value, with_traceback=exc_tb,
70 )
71 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
176
177 try:
--> 178 raise exception
179 finally:
180 # credit to
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __connect(self, first_connect_check)
654 try:
655 self.starttime = time.time()
--> 656 connection = pool._invoke_creator(self)
657 pool.logger.debug("Created new connection %r", connection)
658 self.connection = connection
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/strategies.py in connect(connection_record)
112 if connection is not None:
113 return connection
--> 114 return dialect.connect(*cargs, **cparams)
115
116 creator = pop_kwarg("creator", connect)
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/default.py in connect(self, *cargs, **cparams)
488 def connect(self, *cargs, **cparams):
489 # inherits the docstring from interfaces.Dialect.connect
--> 490 return self.dbapi.connect(*cargs, **cparams)
491
492 def create_connect_args(self, url):
InterfaceError: (pyodbc.InterfaceError) ('28000', "[28000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Login failed for user 'username'. (18456) (SQLDriverConnect)")
(Background on this error at: http://sqlalche.me/e/rvf5)
|
InterfaceError
|
def check_quote(token):
if ";" in str(token):
token = "{%s}" % token.replace("}", "}}")
return token
|
def check_quote(token):
if ";" in str(token):
token = "'%s'" % token
return token
|
https://github.com/sqlalchemy/sqlalchemy/issues/5373
|
InterfaceError Traceback (most recent call last)
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
2344 try:
-> 2345 return fn()
2346 except dialect.dbapi.Error as e:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in connect(self)
363 if not self._use_threadlocal:
--> 364 return _ConnectionFairy._checkout(self)
365
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in _checkout(cls, pool, threadconns, fairy)
777 if not fairy:
--> 778 fairy = _ConnectionRecord.checkout(pool)
779
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in checkout(cls, pool)
494 def checkout(cls, pool):
--> 495 rec = pool._do_get()
496 try:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/impl.py in _do_get(self)
139 with util.safe_reraise():
--> 140 self._dec_overflow()
141 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
68 compat.raise_(
---> 69 exc_value, with_traceback=exc_tb,
70 )
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
177 try:
--> 178 raise exception
179 finally:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/impl.py in _do_get(self)
136 try:
--> 137 return self._create_connection()
138 except:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in _create_connection(self)
308
--> 309 return _ConnectionRecord(self)
310
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __init__(self, pool, connect)
439 if connect:
--> 440 self.__connect(first_connect_check=True)
441 self.finalize_callback = deque()
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __connect(self, first_connect_check)
660 with util.safe_reraise():
--> 661 pool.logger.debug("Error on connect(): %s", e)
662 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
68 compat.raise_(
---> 69 exc_value, with_traceback=exc_tb,
70 )
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
177 try:
--> 178 raise exception
179 finally:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __connect(self, first_connect_check)
655 self.starttime = time.time()
--> 656 connection = pool._invoke_creator(self)
657 pool.logger.debug("Created new connection %r", connection)
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/strategies.py in connect(connection_record)
113 return connection
--> 114 return dialect.connect(*cargs, **cparams)
115
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/default.py in connect(self, *cargs, **cparams)
489 # inherits the docstring from interfaces.Dialect.connect
--> 490 return self.dbapi.connect(*cargs, **cparams)
491
InterfaceError: ('28000', "[28000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Login failed for user 'username'. (18456) (SQLDriverConnect)")
The above exception was the direct cause of the following exception:
InterfaceError Traceback (most recent call last)
<ipython-input-7-a96561cd5686> in <module>
----> 1 df.to_sql("Test", connection1, 'username', if_exists='append')
/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py in to_sql(self, name, con, schema, if_exists, index, index_label, chunksize, dtype, method)
2661 chunksize=chunksize,
2662 dtype=dtype,
-> 2663 method=method,
2664 )
2665
/opt/conda/lib/python3.7/site-packages/pandas/io/sql.py in to_sql(frame, name, con, schema, if_exists, index, index_label, chunksize, dtype, method)
519 chunksize=chunksize,
520 dtype=dtype,
--> 521 method=method,
522 )
523
/opt/conda/lib/python3.7/site-packages/pandas/io/sql.py in to_sql(self, frame, name, if_exists, index, index_label, schema, chunksize, dtype, method)
1314 dtype=dtype,
1315 )
-> 1316 table.create()
1317 table.insert(chunksize, method=method)
1318 if not name.isdigit() and not name.islower():
/opt/conda/lib/python3.7/site-packages/pandas/io/sql.py in create(self)
642
643 def create(self):
--> 644 if self.exists():
645 if self.if_exists == "fail":
646 raise ValueError(f"Table '{self.name}' already exists.")
/opt/conda/lib/python3.7/site-packages/pandas/io/sql.py in exists(self)
629
630 def exists(self):
--> 631 return self.pd_sql.has_table(self.name, self.schema)
632
633 def sql_schema(self):
/opt/conda/lib/python3.7/site-packages/pandas/io/sql.py in has_table(self, name, schema)
1339 def has_table(self, name, schema=None):
1340 return self.connectable.run_callable(
-> 1341 self.connectable.dialect.has_table, name, schema or self.meta.schema
1342 )
1343
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in run_callable(self, callable_, *args, **kwargs)
2218
2219 """
-> 2220 with self._contextual_connect() as conn:
2221 return conn.run_callable(callable_, *args, **kwargs)
2222
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in _contextual_connect(self, close_with_result, **kwargs)
2309 return self._connection_cls(
2310 self,
-> 2311 self._wrap_pool_connect(self.pool.connect, None),
2312 close_with_result=close_with_result,
2313 **kwargs
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
2347 if connection is None:
2348 Connection._handle_dbapi_exception_noconnection(
-> 2349 e, dialect, self
2350 )
2351 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in _handle_dbapi_exception_noconnection(cls, e, dialect, engine)
1589 elif should_wrap:
1590 util.raise_(
-> 1591 sqlalchemy_exception, with_traceback=exc_info[2], from_=e
1592 )
1593 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
176
177 try:
--> 178 raise exception
179 finally:
180 # credit to
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
2343 dialect = self.dialect
2344 try:
-> 2345 return fn()
2346 except dialect.dbapi.Error as e:
2347 if connection is None:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in connect(self)
362 """
363 if not self._use_threadlocal:
--> 364 return _ConnectionFairy._checkout(self)
365
366 try:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in _checkout(cls, pool, threadconns, fairy)
776 def _checkout(cls, pool, threadconns=None, fairy=None):
777 if not fairy:
--> 778 fairy = _ConnectionRecord.checkout(pool)
779
780 fairy._pool = pool
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in checkout(cls, pool)
493 @classmethod
494 def checkout(cls, pool):
--> 495 rec = pool._do_get()
496 try:
497 dbapi_connection = rec.get_connection()
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/impl.py in _do_get(self)
138 except:
139 with util.safe_reraise():
--> 140 self._dec_overflow()
141 else:
142 return self._do_get()
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
67 if not self.warn_only:
68 compat.raise_(
---> 69 exc_value, with_traceback=exc_tb,
70 )
71 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
176
177 try:
--> 178 raise exception
179 finally:
180 # credit to
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/impl.py in _do_get(self)
135 if self._inc_overflow():
136 try:
--> 137 return self._create_connection()
138 except:
139 with util.safe_reraise():
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in _create_connection(self)
307 """Called by subclasses to create a new ConnectionRecord."""
308
--> 309 return _ConnectionRecord(self)
310
311 def _invalidate(self, connection, exception=None, _checkin=True):
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __init__(self, pool, connect)
438 self.__pool = pool
439 if connect:
--> 440 self.__connect(first_connect_check=True)
441 self.finalize_callback = deque()
442
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __connect(self, first_connect_check)
659 except Exception as e:
660 with util.safe_reraise():
--> 661 pool.logger.debug("Error on connect(): %s", e)
662 else:
663 if first_connect_check:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
67 if not self.warn_only:
68 compat.raise_(
---> 69 exc_value, with_traceback=exc_tb,
70 )
71 else:
/opt/conda/lib/python3.7/site-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
176
177 try:
--> 178 raise exception
179 finally:
180 # credit to
/opt/conda/lib/python3.7/site-packages/sqlalchemy/pool/base.py in __connect(self, first_connect_check)
654 try:
655 self.starttime = time.time()
--> 656 connection = pool._invoke_creator(self)
657 pool.logger.debug("Created new connection %r", connection)
658 self.connection = connection
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/strategies.py in connect(connection_record)
112 if connection is not None:
113 return connection
--> 114 return dialect.connect(*cargs, **cparams)
115
116 creator = pop_kwarg("creator", connect)
/opt/conda/lib/python3.7/site-packages/sqlalchemy/engine/default.py in connect(self, *cargs, **cparams)
488 def connect(self, *cargs, **cparams):
489 # inherits the docstring from interfaces.Dialect.connect
--> 490 return self.dbapi.connect(*cargs, **cparams)
491
492 def create_connect_args(self, url):
InterfaceError: (pyodbc.InterfaceError) ('28000', "[28000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Login failed for user 'username'. (18456) (SQLDriverConnect)")
(Background on this error at: http://sqlalche.me/e/rvf5)
|
InterfaceError
|
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
code = e.args[0]
if code in (
"08S01",
"01002",
"08003",
"08007",
"08S02",
"08001",
"HYT00",
"HY010",
"10054",
):
return True
return super(MSDialect_pyodbc, self).is_disconnect(e, connection, cursor)
|
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
for code in (
"08S01",
"01002",
"08003",
"08007",
"08S02",
"08001",
"HYT00",
"HY010",
"10054",
):
if code in str(e):
return True
return super(MSDialect_pyodbc, self).is_disconnect(e, connection, cursor)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5359
|
Traceback (most recent call last):
File "/home/knutj/python_venvs/notifier/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1209, in _execute_context
conn = self._revalidate_connection()
File "/home/knutj/python_venvs/notifier/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 473, in _revalidate_connection
"Can't reconnect until invalid "
sqlalchemy.exc.InvalidRequestError: Can't reconnect until invalid transaction is rolled back
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/knutj/.config/JetBrains/PyCharmCE2020.1/scratches/scratch_3.py", line 45, in <module>
r = conn.execute(sqlalchemy.select([test_table.c.id]).where(test_table.c.id == test_id))
File "/home/knutj/python_venvs/notifier/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1020, in execute
return meth(self, multiparams, params)
File "/home/knutj/python_venvs/notifier/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 298, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/home/knutj/python_venvs/notifier/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1139, in _execute_clauseelement
distilled_params,
File "/home/knutj/python_venvs/notifier/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1214, in _execute_context
e, util.text_type(statement), parameters, None, None
File "/home/knutj/python_venvs/notifier/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1518, in _handle_dbapi_exception
sqlalchemy_exception, with_traceback=exc_info[2], from_=e
File "/home/knutj/python_venvs/notifier/lib/python3.6/site-packages/sqlalchemy/util/compat.py", line 178, in raise_
raise exception
File "/home/knutj/python_venvs/notifier/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1209, in _execute_context
conn = self._revalidate_connection()
File "/home/knutj/python_venvs/notifier/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 473, in _revalidate_connection
"Can't reconnect until invalid "
sqlalchemy.exc.StatementError: (sqlalchemy.exc.InvalidRequestError) Can't reconnect until invalid transaction is rolled back
[SQL: SELECT test_table.id
FROM test_table
WHERE test_table.id = ?]
|
sqlalchemy.exc.InvalidRequestError
|
def __init__(self, iterable=None):
self._members = dict()
if iterable:
self.update(iterable)
|
def __init__(self, iterable=None):
self._members = dict()
if iterable:
for o in iterable:
self.add(o)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5304
|
Traceback (most recent call last):
File "sandbox/sqlalchemy_collections.py", line 29, in <module>
parent.children = [
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 271, in __set__
self.impl.set(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 1335, in set
collections.bulk_replace(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/collections.py", line 790, in bulk_replace
constants = existing_idset.intersection(values or ())
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/util/_collections.py", line 615, in intersection
result._members.update(self._working_set(members).intersection(other))
File "sandbox/sqlalchemy_collections.py", line 23, in __hash__
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def issubset(self, iterable):
other = self.__class__(iterable)
if len(self) > len(other):
return False
for m in itertools_filterfalse(
other._members.__contains__, iter(self._members.keys())
):
return False
return True
|
def issubset(self, iterable):
other = type(self)(iterable)
if len(self) > len(other):
return False
for m in itertools_filterfalse(
other._members.__contains__, iter(self._members.keys())
):
return False
return True
|
https://github.com/sqlalchemy/sqlalchemy/issues/5304
|
Traceback (most recent call last):
File "sandbox/sqlalchemy_collections.py", line 29, in <module>
parent.children = [
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 271, in __set__
self.impl.set(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 1335, in set
collections.bulk_replace(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/collections.py", line 790, in bulk_replace
constants = existing_idset.intersection(values or ())
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/util/_collections.py", line 615, in intersection
result._members.update(self._working_set(members).intersection(other))
File "sandbox/sqlalchemy_collections.py", line 23, in __hash__
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def issuperset(self, iterable):
other = self.__class__(iterable)
if len(self) < len(other):
return False
for m in itertools_filterfalse(
self._members.__contains__, iter(other._members.keys())
):
return False
return True
|
def issuperset(self, iterable):
other = type(self)(iterable)
if len(self) < len(other):
return False
for m in itertools_filterfalse(
self._members.__contains__, iter(other._members.keys())
):
return False
return True
|
https://github.com/sqlalchemy/sqlalchemy/issues/5304
|
Traceback (most recent call last):
File "sandbox/sqlalchemy_collections.py", line 29, in <module>
parent.children = [
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 271, in __set__
self.impl.set(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 1335, in set
collections.bulk_replace(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/collections.py", line 790, in bulk_replace
constants = existing_idset.intersection(values or ())
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/util/_collections.py", line 615, in intersection
result._members.update(self._working_set(members).intersection(other))
File "sandbox/sqlalchemy_collections.py", line 23, in __hash__
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def union(self, iterable):
result = self.__class__()
members = self._members
result._members.update(members)
result._members.update((id(obj), obj) for obj in iterable)
return result
|
def union(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).union(other))
return result
|
https://github.com/sqlalchemy/sqlalchemy/issues/5304
|
Traceback (most recent call last):
File "sandbox/sqlalchemy_collections.py", line 29, in <module>
parent.children = [
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 271, in __set__
self.impl.set(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 1335, in set
collections.bulk_replace(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/collections.py", line 790, in bulk_replace
constants = existing_idset.intersection(values or ())
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/util/_collections.py", line 615, in intersection
result._members.update(self._working_set(members).intersection(other))
File "sandbox/sqlalchemy_collections.py", line 23, in __hash__
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def update(self, iterable):
self._members.update((id(obj), obj) for obj in iterable)
|
def update(self, iterable):
self._members = self.union(iterable)._members
|
https://github.com/sqlalchemy/sqlalchemy/issues/5304
|
Traceback (most recent call last):
File "sandbox/sqlalchemy_collections.py", line 29, in <module>
parent.children = [
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 271, in __set__
self.impl.set(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 1335, in set
collections.bulk_replace(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/collections.py", line 790, in bulk_replace
constants = existing_idset.intersection(values or ())
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/util/_collections.py", line 615, in intersection
result._members.update(self._working_set(members).intersection(other))
File "sandbox/sqlalchemy_collections.py", line 23, in __hash__
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def difference(self, iterable):
result = self.__class__()
members = self._members
other = {id(obj) for obj in iterable}
result._members.update(((k, v) for k, v in members.items() if k not in other))
return result
|
def difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).difference(other))
return result
|
https://github.com/sqlalchemy/sqlalchemy/issues/5304
|
Traceback (most recent call last):
File "sandbox/sqlalchemy_collections.py", line 29, in <module>
parent.children = [
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 271, in __set__
self.impl.set(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 1335, in set
collections.bulk_replace(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/collections.py", line 790, in bulk_replace
constants = existing_idset.intersection(values or ())
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/util/_collections.py", line 615, in intersection
result._members.update(self._working_set(members).intersection(other))
File "sandbox/sqlalchemy_collections.py", line 23, in __hash__
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def intersection(self, iterable):
result = self.__class__()
members = self._members
other = {id(obj) for obj in iterable}
result._members.update((k, v) for k, v in members.items() if k in other)
return result
|
def intersection(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).intersection(other))
return result
|
https://github.com/sqlalchemy/sqlalchemy/issues/5304
|
Traceback (most recent call last):
File "sandbox/sqlalchemy_collections.py", line 29, in <module>
parent.children = [
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 271, in __set__
self.impl.set(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 1335, in set
collections.bulk_replace(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/collections.py", line 790, in bulk_replace
constants = existing_idset.intersection(values or ())
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/util/_collections.py", line 615, in intersection
result._members.update(self._working_set(members).intersection(other))
File "sandbox/sqlalchemy_collections.py", line 23, in __hash__
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def symmetric_difference(self, iterable):
result = self.__class__()
members = self._members
other = {id(obj): obj for obj in iterable}
result._members.update(((k, v) for k, v in members.items() if k not in other))
result._members.update(((k, v) for k, v in other.items() if k not in members))
return result
|
def symmetric_difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).symmetric_difference(other))
return result
|
https://github.com/sqlalchemy/sqlalchemy/issues/5304
|
Traceback (most recent call last):
File "sandbox/sqlalchemy_collections.py", line 29, in <module>
parent.children = [
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 271, in __set__
self.impl.set(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 1335, in set
collections.bulk_replace(
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/orm/collections.py", line 790, in bulk_replace
constants = existing_idset.intersection(values or ())
File "/Users/james/Projects/new-realtimerail/.pyenv/lib/python3.8/site-packages/sqlalchemy/util/_collections.py", line 615, in intersection
result._members.update(self._working_set(members).intersection(other))
File "sandbox/sqlalchemy_collections.py", line 23, in __hash__
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def _join_determine_implicit_left_side(self, left, right, onclause):
"""When join conditions don't express the left side explicitly,
determine if an existing FROM or entity in this query
can serve as the left hand side.
"""
# when we are here, it means join() was called without an ORM-
# specific way of telling us what the "left" side is, e.g.:
#
# join(RightEntity)
#
# or
#
# join(RightEntity, RightEntity.foo == LeftEntity.bar)
#
r_info = inspect(right)
replace_from_obj_index = use_entity_index = None
if self._from_obj:
# we have a list of FROMs already. So by definition this
# join has to connect to one of those FROMs.
indexes = sql_util.find_left_clause_to_join_from(
self._from_obj, r_info.selectable, onclause
)
if len(indexes) == 1:
replace_from_obj_index = indexes[0]
left = self._from_obj[replace_from_obj_index]
elif len(indexes) > 1:
raise sa_exc.InvalidRequestError(
"Can't determine which FROM clause to join "
"from, there are multiple FROMS which can "
"join to this entity. Please use the .select_from() "
"method to establish an explicit left side, as well as "
"providing an explcit ON clause if not present already to "
"help resolve the ambiguity."
)
else:
raise sa_exc.InvalidRequestError(
"Don't know how to join to %r. "
"Please use the .select_from() "
"method to establish an explicit left side, as well as "
"providing an explcit ON clause if not present already to "
"help resolve the ambiguity." % (right,)
)
elif self._entities:
# we have no explicit FROMs, so the implicit left has to
# come from our list of entities.
potential = {}
for entity_index, ent in enumerate(self._entities):
entity = ent.entity_zero_or_selectable
if entity is None:
continue
ent_info = inspect(entity)
if ent_info is r_info: # left and right are the same, skip
continue
# by using a dictionary with the selectables as keys this
# de-duplicates those selectables as occurs when the query is
# against a series of columns from the same selectable
if isinstance(ent, _MapperEntity):
potential[ent.selectable] = (entity_index, entity)
else:
potential[ent_info.selectable] = (None, entity)
all_clauses = list(potential.keys())
indexes = sql_util.find_left_clause_to_join_from(
all_clauses, r_info.selectable, onclause
)
if len(indexes) == 1:
use_entity_index, left = potential[all_clauses[indexes[0]]]
elif len(indexes) > 1:
raise sa_exc.InvalidRequestError(
"Can't determine which FROM clause to join "
"from, there are multiple FROMS which can "
"join to this entity. Please use the .select_from() "
"method to establish an explicit left side, as well as "
"providing an explcit ON clause if not present already to "
"help resolve the ambiguity."
)
else:
raise sa_exc.InvalidRequestError(
"Don't know how to join to %r. "
"Please use the .select_from() "
"method to establish an explicit left side, as well as "
"providing an explcit ON clause if not present already to "
"help resolve the ambiguity." % (right,)
)
else:
raise sa_exc.InvalidRequestError(
"No entities to join from; please use "
"select_from() to establish the left "
"entity/selectable of this join"
)
return left, replace_from_obj_index, use_entity_index
|
def _join_determine_implicit_left_side(self, left, right, onclause):
"""When join conditions don't express the left side explicitly,
determine if an existing FROM or entity in this query
can serve as the left hand side.
"""
# when we are here, it means join() was called without an ORM-
# specific way of telling us what the "left" side is, e.g.:
#
# join(RightEntity)
#
# or
#
# join(RightEntity, RightEntity.foo == LeftEntity.bar)
#
r_info = inspect(right)
replace_from_obj_index = use_entity_index = None
if self._from_obj:
# we have a list of FROMs already. So by definition this
# join has to connect to one of those FROMs.
indexes = sql_util.find_left_clause_to_join_from(
self._from_obj, r_info.selectable, onclause
)
if len(indexes) == 1:
replace_from_obj_index = indexes[0]
left = self._from_obj[replace_from_obj_index]
elif len(indexes) > 1:
raise sa_exc.InvalidRequestError(
"Can't determine which FROM clause to join "
"from, there are multiple FROMS which can "
"join to this entity. Try adding an explicit ON clause "
"to help resolve the ambiguity."
)
else:
raise sa_exc.InvalidRequestError(
"Don't know how to join to %s; please use "
"an ON clause to more clearly establish the left "
"side of this join" % (right,)
)
elif self._entities:
# we have no explicit FROMs, so the implicit left has to
# come from our list of entities.
potential = {}
for entity_index, ent in enumerate(self._entities):
entity = ent.entity_zero_or_selectable
if entity is None:
continue
ent_info = inspect(entity)
if ent_info is r_info: # left and right are the same, skip
continue
# by using a dictionary with the selectables as keys this
# de-duplicates those selectables as occurs when the query is
# against a series of columns from the same selectable
if isinstance(ent, _MapperEntity):
potential[ent.selectable] = (entity_index, entity)
else:
potential[ent_info.selectable] = (None, entity)
all_clauses = list(potential.keys())
indexes = sql_util.find_left_clause_to_join_from(
all_clauses, r_info.selectable, onclause
)
if len(indexes) == 1:
use_entity_index, left = potential[all_clauses[indexes[0]]]
elif len(indexes) > 1:
raise sa_exc.InvalidRequestError(
"Can't determine which FROM clause to join "
"from, there are multiple FROMS which can "
"join to this entity. Try adding an explicit ON clause "
"to help resolve the ambiguity."
)
else:
raise sa_exc.InvalidRequestError(
"Don't know how to join to %s; please use "
"an ON clause to more clearly establish the left "
"side of this join" % (right,)
)
else:
raise sa_exc.InvalidRequestError(
"No entities to join from; please use "
"select_from() to establish the left "
"entity/selectable of this join"
)
return left, replace_from_obj_index, use_entity_index
|
https://github.com/sqlalchemy/sqlalchemy/issues/5194
|
Traceback (most recent call last):
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 283, in <module>
main()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 279, in main
run_cte_query()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 269, in run_cte_query
query = create_cte_query()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 258, in create_cte_query
full=True,
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2246, in join
from_joinpoint=from_joinpoint,
File "<string>", line 2, in _join
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/base.py", line 220, in generate
fn(self, *args[1:], **kw)
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2425, in _join
left, right, onclause, prop, create_aliases, outerjoin, full
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2448, in _join_left_to_right
) = self._join_determine_implicit_left_side(left, right, onclause)
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2590, in _join_determine_implicit_left_side
"side of this join" % (right,)
sqlalchemy.exc.InvalidRequestError: Don't know how to join to ; please use an ON clause to more clearly establish the left side of this join
|
sqlalchemy.exc.InvalidRequestError
|
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
search_entities = True
check_column = False
if isinstance(column, util.string_types):
util.warn_deprecated(
"Plain string expression passed to Query() should be "
"explicitly declared using literal_column(); "
"automatic coercion of this value will be removed in "
"SQLAlchemy 1.4"
)
column = sql.literal_column(column)
self._label_name = column.name
search_entities = False
check_column = True
_entity = None
elif isinstance(column, (attributes.QueryableAttribute, interfaces.PropComparator)):
_entity = getattr(column, "_parententity", None)
if _entity is not None:
search_entities = False
self._label_name = column.key
column = column._query_clause_element()
check_column = True
if isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
if hasattr(column, "_select_iterable"):
# break out an object like Table into
# individual columns
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity expected - got '%r'" % (column,)
)
elif not check_column:
self._label_name = getattr(column, "key", None)
search_entities = True
self.type = type_ = column.type
self.use_id_for_hash = not type_.hashable
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, "is_literal", False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = list(column._from_objects)
actual_froms = set(self.actual_froms)
if not search_entities:
self.entity_zero = _entity
if _entity:
self.entities = [_entity]
self.mapper = _entity.mapper
else:
self.entities = []
self.mapper = None
self._from_entities = set(self.entities)
else:
all_elements = [
elem
for elem in sql_util.surface_column_elements(
column, include_scalar_selects=False
)
if "parententity" in elem._annotations
]
self.entities = util.unique_list(
[
elem._annotations["parententity"]
for elem in all_elements
if "parententity" in elem._annotations
]
)
self._from_entities = set(
[
elem._annotations["parententity"]
for elem in all_elements
if "parententity" in elem._annotations
and actual_froms.intersection(elem._from_objects)
]
)
if self.entities:
self.entity_zero = self.entities[0]
self.mapper = self.entity_zero.mapper
elif self.namespace is not None:
self.entity_zero = self.namespace
self.mapper = None
else:
self.entity_zero = None
self.mapper = None
|
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
search_entities = True
check_column = False
if isinstance(column, util.string_types):
util.warn_deprecated(
"Plain string expression passed to Query() should be "
"explicitly declared using literal_column(); "
"automatic coercion of this value will be removed in "
"SQLAlchemy 1.4"
)
column = sql.literal_column(column)
self._label_name = column.name
search_entities = False
check_column = True
_entity = None
elif isinstance(column, (attributes.QueryableAttribute, interfaces.PropComparator)):
_entity = getattr(column, "_parententity", None)
if _entity is not None:
search_entities = False
self._label_name = column.key
column = column._query_clause_element()
check_column = True
if isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
if hasattr(column, "_select_iterable"):
# break out an object like Table into
# individual columns
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity expected - got '%r'" % (column,)
)
elif not check_column:
self._label_name = getattr(column, "key", None)
search_entities = True
self.type = type_ = column.type
self.use_id_for_hash = not type_.hashable
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, "is_literal", False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
if not search_entities:
self.entity_zero = _entity
if _entity:
self.entities = [_entity]
self.mapper = _entity.mapper
else:
self.entities = []
self.mapper = None
self._from_entities = set(self.entities)
else:
all_elements = [
elem
for elem in sql_util.surface_column_elements(
column, include_scalar_selects=False
)
if "parententity" in elem._annotations
]
self.entities = util.unique_list(
[
elem._annotations["parententity"]
for elem in all_elements
if "parententity" in elem._annotations
]
)
self._from_entities = set(
[
elem._annotations["parententity"]
for elem in all_elements
if "parententity" in elem._annotations
and actual_froms.intersection(elem._from_objects)
]
)
if self.entities:
self.entity_zero = self.entities[0]
self.mapper = self.entity_zero.mapper
elif self.namespace is not None:
self.entity_zero = self.namespace
self.mapper = None
else:
self.entity_zero = None
self.mapper = None
|
https://github.com/sqlalchemy/sqlalchemy/issues/5194
|
Traceback (most recent call last):
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 283, in <module>
main()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 279, in main
run_cte_query()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 269, in run_cte_query
query = create_cte_query()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 258, in create_cte_query
full=True,
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2246, in join
from_joinpoint=from_joinpoint,
File "<string>", line 2, in _join
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/base.py", line 220, in generate
fn(self, *args[1:], **kw)
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2425, in _join
left, right, onclause, prop, create_aliases, outerjoin, full
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2448, in _join_left_to_right
) = self._join_determine_implicit_left_side(left, right, onclause)
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2590, in _join_determine_implicit_left_side
"side of this join" % (right,)
sqlalchemy.exc.InvalidRequestError: Don't know how to join to ; please use an ON clause to more clearly establish the left side of this join
|
sqlalchemy.exc.InvalidRequestError
|
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return self.actual_froms[0]
else:
return None
|
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
|
https://github.com/sqlalchemy/sqlalchemy/issues/5194
|
Traceback (most recent call last):
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 283, in <module>
main()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 279, in main
run_cte_query()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 269, in run_cte_query
query = create_cte_query()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 258, in create_cte_query
full=True,
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2246, in join
from_joinpoint=from_joinpoint,
File "<string>", line 2, in _join
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/base.py", line 220, in generate
fn(self, *args[1:], **kw)
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2425, in _join
left, right, onclause, prop, create_aliases, outerjoin, full
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2448, in _join_left_to_right
) = self._join_determine_implicit_left_side(left, right, onclause)
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2590, in _join_determine_implicit_left_side
"side of this join" % (right,)
sqlalchemy.exc.InvalidRequestError: Don't know how to join to ; please use an ON clause to more clearly establish the left side of this join
|
sqlalchemy.exc.InvalidRequestError
|
def setup_entity(self, ext_info, aliased_adapter):
if "selectable" not in self.__dict__:
self.selectable = ext_info.selectable
if set(self.actual_froms).intersection(ext_info.selectable._from_objects):
self.froms.add(ext_info.selectable)
|
def setup_entity(self, ext_info, aliased_adapter):
if "selectable" not in self.__dict__:
self.selectable = ext_info.selectable
if self.actual_froms.intersection(ext_info.selectable._from_objects):
self.froms.add(ext_info.selectable)
|
https://github.com/sqlalchemy/sqlalchemy/issues/5194
|
Traceback (most recent call last):
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 283, in <module>
main()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 279, in main
run_cte_query()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 269, in run_cte_query
query = create_cte_query()
File "/home/mahenzon/.PyCharm2019.2/config/scratches/scratch_32.py", line 258, in create_cte_query
full=True,
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2246, in join
from_joinpoint=from_joinpoint,
File "<string>", line 2, in _join
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/base.py", line 220, in generate
fn(self, *args[1:], **kw)
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2425, in _join
left, right, onclause, prop, create_aliases, outerjoin, full
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2448, in _join_left_to_right
) = self._join_determine_implicit_left_side(left, right, onclause)
File "/home/mahenzon/.local/share/virtualenvs/core-C3GrfXXb/src/sqlalchemy/lib/sqlalchemy/orm/query.py", line 2590, in _join_determine_implicit_left_side
"side of this join" % (right,)
sqlalchemy.exc.InvalidRequestError: Don't know how to join to ; please use an ON clause to more clearly establish the left side of this join
|
sqlalchemy.exc.InvalidRequestError
|
def _setup_crud_params(compiler, stmt, local_stmt_type, **kw):
restore_isinsert = compiler.isinsert
restore_isupdate = compiler.isupdate
restore_isdelete = compiler.isdelete
should_restore = (
(restore_isinsert or restore_isupdate or restore_isdelete)
or len(compiler.stack) > 1
or "visiting_cte" in kw
)
if local_stmt_type is ISINSERT:
compiler.isupdate = False
compiler.isinsert = True
elif local_stmt_type is ISUPDATE:
compiler.isupdate = True
compiler.isinsert = False
elif local_stmt_type is ISDELETE:
if not should_restore:
compiler.isdelete = True
else:
assert False, "ISINSERT, ISUPDATE, or ISDELETE expected"
try:
if local_stmt_type in (ISINSERT, ISUPDATE):
return _get_crud_params(compiler, stmt, **kw)
finally:
if should_restore:
compiler.isinsert = restore_isinsert
compiler.isupdate = restore_isupdate
compiler.isdelete = restore_isdelete
|
def _setup_crud_params(compiler, stmt, local_stmt_type, **kw):
restore_isinsert = compiler.isinsert
restore_isupdate = compiler.isupdate
restore_isdelete = compiler.isdelete
should_restore = (restore_isinsert or restore_isupdate or restore_isdelete) or len(
compiler.stack
) > 1
if local_stmt_type is ISINSERT:
compiler.isupdate = False
compiler.isinsert = True
elif local_stmt_type is ISUPDATE:
compiler.isupdate = True
compiler.isinsert = False
elif local_stmt_type is ISDELETE:
if not should_restore:
compiler.isdelete = True
else:
assert False, "ISINSERT, ISUPDATE, or ISDELETE expected"
try:
if local_stmt_type in (ISINSERT, ISUPDATE):
return _get_crud_params(compiler, stmt, **kw)
finally:
if should_restore:
compiler.isinsert = restore_isinsert
compiler.isupdate = restore_isupdate
compiler.isdelete = restore_isdelete
|
https://github.com/sqlalchemy/sqlalchemy/issues/5181
|
python ref_cte.py
Traceback (most recent call last):
File "ref_cte.py", line 28, in <module>
test_pg_example_one()
File "ref_cte.py", line 23, in test_pg_example_one
c = stmt.compile(dialect=dialect)
File "<string>", line 1, in <lambda>
File "/Users/xtof/Documents/Dev/Python/bug-sa/venv/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 468, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/Users/xtof/Documents/Dev/Python/bug-sa/venv/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 474, in _compiler
return dialect.statement_compiler(dialect, self, **kw)
File "/Users/xtof/Documents/Dev/Python/bug-sa/venv/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 593, in __init__
) and statement._returning:
AttributeError: 'Select' object has no attribute '_returning'
|
AttributeError
|
def __init__(
self,
key,
value=NO_ARG,
type_=None,
unique=False,
required=NO_ARG,
quote=None,
callable_=None,
expanding=False,
isoutparam=False,
literal_execute=False,
_compared_to_operator=None,
_compared_to_type=None,
):
r"""Produce a "bound expression".
The return value is an instance of :class:`.BindParameter`; this
is a :class:`.ColumnElement` subclass which represents a so-called
"placeholder" value in a SQL expression, the value of which is
supplied at the point at which the statement in executed against a
database connection.
In SQLAlchemy, the :func:`.bindparam` construct has
the ability to carry along the actual value that will be ultimately
used at expression time. In this way, it serves not just as
a "placeholder" for eventual population, but also as a means of
representing so-called "unsafe" values which should not be rendered
directly in a SQL statement, but rather should be passed along
to the :term:`DBAPI` as values which need to be correctly escaped
and potentially handled for type-safety.
When using :func:`.bindparam` explicitly, the use case is typically
one of traditional deferment of parameters; the :func:`.bindparam`
construct accepts a name which can then be referred to at execution
time::
from sqlalchemy import bindparam
stmt = select([users_table]).\
where(users_table.c.name == bindparam('username'))
The above statement, when rendered, will produce SQL similar to::
SELECT id, name FROM user WHERE name = :username
In order to populate the value of ``:username`` above, the value
would typically be applied at execution time to a method
like :meth:`.Connection.execute`::
result = connection.execute(stmt, username='wendy')
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = (users_table.update().
where(user_table.c.name == bindparam('username')).
values(fullname=bindparam('fullname'))
)
connection.execute(
stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
]
)
SQLAlchemy's Core expression system makes wide use of
:func:`.bindparam` in an implicit sense. It is typical that Python
literal values passed to virtually all SQL expression functions are
coerced into fixed :func:`.bindparam` constructs. For example, given
a comparison operation such as::
expr = users_table.c.name == 'Wendy'
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`.Column` object
representing the ``name`` column, and the right side is a
:class:`.BindParameter` representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
The expression above will render SQL such as::
user.name = :name_1
Where the ``:name_1`` parameter name is an anonymous name. The
actual string ``Wendy`` is not in the rendered string, but is carried
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select([users_table]).where(users_table.c.name == 'Wendy')
result = connection.execute(stmt)
We would see SQL logging output as::
SELECT "user".id, "user".name
FROM "user"
WHERE "user".name = %(name_1)s
{'name_1': 'Wendy'}
Above, we see that ``Wendy`` is passed as a parameter to the database,
while the placeholder ``:name_1`` is rendered in the appropriate form
for the target database, in this case the PostgreSQL database.
Similarly, :func:`.bindparam` is invoked automatically
when working with :term:`CRUD` statements as far as the "VALUES"
portion is concerned. The :func:`.insert` construct produces an
``INSERT`` expression which will, at statement execution time,
generate bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
The above will produce SQL output as::
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
The :class:`.Insert` construct, at compilation/execution time,
rendered a single :func:`.bindparam` mirroring the column
name ``name`` as a result of the single ``name`` parameter
we passed to the :meth:`.Connection.execute` method.
:param key:
the key (e.g. the name) for this bind param.
Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. Will be used at statement
execution time as the value for this parameter passed to the
DBAPI, if no other value is indicated to the statement execution
method for this particular parameter name. Defaults to ``None``.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A :class:`.TypeEngine` class or instance representing an optional
datatype for this :func:`.bindparam`. If not passed, a type
may be determined automatically for the bind, based on the given
value; for example, trivial Python types such as ``str``,
``int``, ``bool``
may result in the :class:`.String`, :class:`.Integer` or
:class:`.Boolean` types being automatically selected.
The type of a :func:`.bindparam` is significant especially in that
the type will apply pre-processing to the value before it is
passed to the database. For example, a :func:`.bindparam` which
refers to a datetime value, and is specified as holding the
:class:`.DateTime` type, may apply conversion needed to the
value (such as stringification on SQLite) before passing the value
to the database.
:param unique:
if True, the key name of this :class:`.BindParameter` will be
modified if another :class:`.BindParameter` of the same name
already has been located within the containing
expression. This flag is used generally by the internals
when producing so-called "anonymous" bound expressions, it
isn't generally applicable to explicitly-named :func:`.bindparam`
constructs.
:param required:
If ``True``, a value is required at execution time. If not passed,
it defaults to ``True`` if neither :paramref:`.bindparam.value`
or :paramref:`.bindparam.callable` were passed. If either of these
parameters are present, then :paramref:`.bindparam.required`
defaults to ``False``.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
support OUT parameters.
:param expanding:
if True, this parameter will be treated as an "expanding" parameter
at execution time; the parameter value is expected to be a sequence,
rather than a scalar value, and the string SQL statement will
be transformed on a per-execution basis to accommodate the sequence
with a variable number of parameter slots passed to the DBAPI.
This is to allow statement caching to be used in conjunction with
an IN clause.
.. seealso::
:meth:`.ColumnOperators.in_`
:ref:`baked_in` - with baked queries
.. note:: The "expanding" feature does not support "executemany"-
style parameter sets.
.. versionadded:: 1.2
.. versionchanged:: 1.3 the "expanding" bound parameter feature now
supports empty lists.
.. seealso::
:ref:`coretutorial_bind_param`
:ref:`coretutorial_insert_expressions`
:func:`.outparam`
:param literal_execute:
if True, the bound parameter will be rendered in the compile phase
with a special "POSTCOMPILE" token, and the SQLAlchemy compiler will
render the final value of the parameter into the SQL statement at
statement execution time, omitting the value from the parameter
dictionary / list passed to DBAPI ``cursor.execute()``. This
produces a similar effect as that of using the ``literal_binds``,
compilation flag, however takes place as the statement is sent to
the DBAPI ``cursor.execute()`` method, rather than when the statement
is compiled. The primary use of this
capability is for rendering LIMIT / OFFSET clauses for database
drivers that can't accommodate for bound parameters in these
contexts, while allowing SQL constructs to be cacheable at the
compilation level.
.. versionadded:: 1.4 Added "post compile" bound parameters
.. seealso::
:ref:`change_4808`.
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.key
if required is NO_ARG:
required = value is NO_ARG and callable_ is None
if value is NO_ARG:
self._value_required_for_cache = False
value = None
else:
self._value_required_for_cache = True
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label(
"%%(%d %s)s"
% (
id(self),
re.sub(r"[%\(\) \$]+", "_", key).strip("_")
if key is not None
else "param",
)
)
else:
self.key = key or _anonymous_label("%%(%d param)s" % id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or "param"
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
self.expanding = expanding
self.literal_execute = literal_execute
if type_ is None:
if _compared_to_type is not None:
self.type = _compared_to_type.coerce_compared_value(
_compared_to_operator, value
)
else:
self.type = type_api._resolve_value_to_type(value)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
|
def __init__(
self,
key,
value=NO_ARG,
type_=None,
unique=False,
required=NO_ARG,
quote=None,
callable_=None,
expanding=False,
isoutparam=False,
literal_execute=False,
_compared_to_operator=None,
_compared_to_type=None,
):
r"""Produce a "bound expression".
The return value is an instance of :class:`.BindParameter`; this
is a :class:`.ColumnElement` subclass which represents a so-called
"placeholder" value in a SQL expression, the value of which is
supplied at the point at which the statement in executed against a
database connection.
In SQLAlchemy, the :func:`.bindparam` construct has
the ability to carry along the actual value that will be ultimately
used at expression time. In this way, it serves not just as
a "placeholder" for eventual population, but also as a means of
representing so-called "unsafe" values which should not be rendered
directly in a SQL statement, but rather should be passed along
to the :term:`DBAPI` as values which need to be correctly escaped
and potentially handled for type-safety.
When using :func:`.bindparam` explicitly, the use case is typically
one of traditional deferment of parameters; the :func:`.bindparam`
construct accepts a name which can then be referred to at execution
time::
from sqlalchemy import bindparam
stmt = select([users_table]).\
where(users_table.c.name == bindparam('username'))
The above statement, when rendered, will produce SQL similar to::
SELECT id, name FROM user WHERE name = :username
In order to populate the value of ``:username`` above, the value
would typically be applied at execution time to a method
like :meth:`.Connection.execute`::
result = connection.execute(stmt, username='wendy')
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = (users_table.update().
where(user_table.c.name == bindparam('username')).
values(fullname=bindparam('fullname'))
)
connection.execute(
stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
]
)
SQLAlchemy's Core expression system makes wide use of
:func:`.bindparam` in an implicit sense. It is typical that Python
literal values passed to virtually all SQL expression functions are
coerced into fixed :func:`.bindparam` constructs. For example, given
a comparison operation such as::
expr = users_table.c.name == 'Wendy'
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`.Column` object
representing the ``name`` column, and the right side is a
:class:`.BindParameter` representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
The expression above will render SQL such as::
user.name = :name_1
Where the ``:name_1`` parameter name is an anonymous name. The
actual string ``Wendy`` is not in the rendered string, but is carried
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select([users_table]).where(users_table.c.name == 'Wendy')
result = connection.execute(stmt)
We would see SQL logging output as::
SELECT "user".id, "user".name
FROM "user"
WHERE "user".name = %(name_1)s
{'name_1': 'Wendy'}
Above, we see that ``Wendy`` is passed as a parameter to the database,
while the placeholder ``:name_1`` is rendered in the appropriate form
for the target database, in this case the PostgreSQL database.
Similarly, :func:`.bindparam` is invoked automatically
when working with :term:`CRUD` statements as far as the "VALUES"
portion is concerned. The :func:`.insert` construct produces an
``INSERT`` expression which will, at statement execution time,
generate bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
The above will produce SQL output as::
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
The :class:`.Insert` construct, at compilation/execution time,
rendered a single :func:`.bindparam` mirroring the column
name ``name`` as a result of the single ``name`` parameter
we passed to the :meth:`.Connection.execute` method.
:param key:
the key (e.g. the name) for this bind param.
Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. Will be used at statement
execution time as the value for this parameter passed to the
DBAPI, if no other value is indicated to the statement execution
method for this particular parameter name. Defaults to ``None``.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A :class:`.TypeEngine` class or instance representing an optional
datatype for this :func:`.bindparam`. If not passed, a type
may be determined automatically for the bind, based on the given
value; for example, trivial Python types such as ``str``,
``int``, ``bool``
may result in the :class:`.String`, :class:`.Integer` or
:class:`.Boolean` types being automatically selected.
The type of a :func:`.bindparam` is significant especially in that
the type will apply pre-processing to the value before it is
passed to the database. For example, a :func:`.bindparam` which
refers to a datetime value, and is specified as holding the
:class:`.DateTime` type, may apply conversion needed to the
value (such as stringification on SQLite) before passing the value
to the database.
:param unique:
if True, the key name of this :class:`.BindParameter` will be
modified if another :class:`.BindParameter` of the same name
already has been located within the containing
expression. This flag is used generally by the internals
when producing so-called "anonymous" bound expressions, it
isn't generally applicable to explicitly-named :func:`.bindparam`
constructs.
:param required:
If ``True``, a value is required at execution time. If not passed,
it defaults to ``True`` if neither :paramref:`.bindparam.value`
or :paramref:`.bindparam.callable` were passed. If either of these
parameters are present, then :paramref:`.bindparam.required`
defaults to ``False``.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
support OUT parameters.
:param expanding:
if True, this parameter will be treated as an "expanding" parameter
at execution time; the parameter value is expected to be a sequence,
rather than a scalar value, and the string SQL statement will
be transformed on a per-execution basis to accommodate the sequence
with a variable number of parameter slots passed to the DBAPI.
This is to allow statement caching to be used in conjunction with
an IN clause.
.. seealso::
:meth:`.ColumnOperators.in_`
:ref:`baked_in` - with baked queries
.. note:: The "expanding" feature does not support "executemany"-
style parameter sets.
.. versionadded:: 1.2
.. versionchanged:: 1.3 the "expanding" bound parameter feature now
supports empty lists.
.. seealso::
:ref:`coretutorial_bind_param`
:ref:`coretutorial_insert_expressions`
:func:`.outparam`
:param literal_execute:
if True, the bound parameter will be rendered in the compile phase
with a special "POSTCOMPILE" token, and the SQLAlchemy compiler will
render the final value of the parameter into the SQL statement at
statement execution time, omitting the value from the parameter
dictionary / list passed to DBAPI ``cursor.execute()``. This
produces a similar effect as that of using the ``literal_binds``,
compilation flag, however takes place as the statement is sent to
the DBAPI ``cursor.execute()`` method, rather than when the statement
is compiled. The primary use of this
capability is for rendering LIMIT / OFFSET clauses for database
drivers that can't accommodate for bound parameters in these
contexts, while allowing SQL constructs to be cacheable at the
compilation level.
.. versionadded:: 1.4 Added "post compile" bound parameters
.. seealso::
:ref:`change_4808`.
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.key
if required is NO_ARG:
required = value is NO_ARG and callable_ is None
if value is NO_ARG:
self._value_required_for_cache = False
value = None
else:
self._value_required_for_cache = True
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label("%%(%d %s)s" % (id(self), key or "param"))
else:
self.key = key or _anonymous_label("%%(%d param)s" % id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or "param"
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
self.expanding = expanding
self.literal_execute = literal_execute
if type_ is None:
if _compared_to_type is not None:
self.type = _compared_to_type.coerce_compared_value(
_compared_to_operator, value
)
else:
self.type = type_api._resolve_value_to_type(value)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
|
https://github.com/sqlalchemy/sqlalchemy/issues/4837
|
Traceback (most recent call last):
File "/Users/yuany/tmp/special_chars.py", line 14, in <module>
engine.execute(select(['*']).where(table.c[colname] > 1))
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 2144, in execute
return connection.execute(statement, *multiparams, **params)
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 980, in execute
return meth(self, multiparams, params)
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 273, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1099, in _execute_clauseelement
distilled_params,
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1240, in _execute_context
e, statement, parameters, cursor, context
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1460, in _handle_dbapi_exception
util.reraise(*exc_info)
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 277, in reraise
raise value
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1236, in _execute_context
cursor, statement, parameters, context
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 536, in do_execute
cursor.execute(statement, parameters)
KeyError: 'size(kb'
|
KeyError
|
def __init__(
self,
key,
value=NO_ARG,
type_=None,
unique=False,
required=NO_ARG,
quote=None,
callable_=None,
expanding=False,
isoutparam=False,
_compared_to_operator=None,
_compared_to_type=None,
):
r"""Produce a "bound expression".
The return value is an instance of :class:`.BindParameter`; this
is a :class:`.ColumnElement` subclass which represents a so-called
"placeholder" value in a SQL expression, the value of which is
supplied at the point at which the statement in executed against a
database connection.
In SQLAlchemy, the :func:`.bindparam` construct has
the ability to carry along the actual value that will be ultimately
used at expression time. In this way, it serves not just as
a "placeholder" for eventual population, but also as a means of
representing so-called "unsafe" values which should not be rendered
directly in a SQL statement, but rather should be passed along
to the :term:`DBAPI` as values which need to be correctly escaped
and potentially handled for type-safety.
When using :func:`.bindparam` explicitly, the use case is typically
one of traditional deferment of parameters; the :func:`.bindparam`
construct accepts a name which can then be referred to at execution
time::
from sqlalchemy import bindparam
stmt = select([users_table]).\
where(users_table.c.name == bindparam('username'))
The above statement, when rendered, will produce SQL similar to::
SELECT id, name FROM user WHERE name = :username
In order to populate the value of ``:username`` above, the value
would typically be applied at execution time to a method
like :meth:`.Connection.execute`::
result = connection.execute(stmt, username='wendy')
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = (users_table.update().
where(user_table.c.name == bindparam('username')).
values(fullname=bindparam('fullname'))
)
connection.execute(
stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
]
)
SQLAlchemy's Core expression system makes wide use of
:func:`.bindparam` in an implicit sense. It is typical that Python
literal values passed to virtually all SQL expression functions are
coerced into fixed :func:`.bindparam` constructs. For example, given
a comparison operation such as::
expr = users_table.c.name == 'Wendy'
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`.Column` object
representing the ``name`` column, and the right side is a
:class:`.BindParameter` representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
The expression above will render SQL such as::
user.name = :name_1
Where the ``:name_1`` parameter name is an anonymous name. The
actual string ``Wendy`` is not in the rendered string, but is carried
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select([users_table]).where(users_table.c.name == 'Wendy')
result = connection.execute(stmt)
We would see SQL logging output as::
SELECT "user".id, "user".name
FROM "user"
WHERE "user".name = %(name_1)s
{'name_1': 'Wendy'}
Above, we see that ``Wendy`` is passed as a parameter to the database,
while the placeholder ``:name_1`` is rendered in the appropriate form
for the target database, in this case the PostgreSQL database.
Similarly, :func:`.bindparam` is invoked automatically
when working with :term:`CRUD` statements as far as the "VALUES"
portion is concerned. The :func:`.insert` construct produces an
``INSERT`` expression which will, at statement execution time,
generate bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
The above will produce SQL output as::
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
The :class:`.Insert` construct, at compilation/execution time,
rendered a single :func:`.bindparam` mirroring the column
name ``name`` as a result of the single ``name`` parameter
we passed to the :meth:`.Connection.execute` method.
:param key:
the key (e.g. the name) for this bind param.
Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. Will be used at statement
execution time as the value for this parameter passed to the
DBAPI, if no other value is indicated to the statement execution
method for this particular parameter name. Defaults to ``None``.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A :class:`.TypeEngine` class or instance representing an optional
datatype for this :func:`.bindparam`. If not passed, a type
may be determined automatically for the bind, based on the given
value; for example, trivial Python types such as ``str``,
``int``, ``bool``
may result in the :class:`.String`, :class:`.Integer` or
:class:`.Boolean` types being automatically selected.
The type of a :func:`.bindparam` is significant especially in that
the type will apply pre-processing to the value before it is
passed to the database. For example, a :func:`.bindparam` which
refers to a datetime value, and is specified as holding the
:class:`.DateTime` type, may apply conversion needed to the
value (such as stringification on SQLite) before passing the value
to the database.
:param unique:
if True, the key name of this :class:`.BindParameter` will be
modified if another :class:`.BindParameter` of the same name
already has been located within the containing
expression. This flag is used generally by the internals
when producing so-called "anonymous" bound expressions, it
isn't generally applicable to explicitly-named :func:`.bindparam`
constructs.
:param required:
If ``True``, a value is required at execution time. If not passed,
it defaults to ``True`` if neither :paramref:`.bindparam.value`
or :paramref:`.bindparam.callable` were passed. If either of these
parameters are present, then :paramref:`.bindparam.required`
defaults to ``False``.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
support OUT parameters.
:param expanding:
if True, this parameter will be treated as an "expanding" parameter
at execution time; the parameter value is expected to be a sequence,
rather than a scalar value, and the string SQL statement will
be transformed on a per-execution basis to accommodate the sequence
with a variable number of parameter slots passed to the DBAPI.
This is to allow statement caching to be used in conjunction with
an IN clause.
.. seealso::
:meth:`.ColumnOperators.in_`
:ref:`baked_in` - with baked queries
.. note:: The "expanding" feature does not support "executemany"-
style parameter sets.
.. versionadded:: 1.2
.. versionchanged:: 1.3 the "expanding" bound parameter feature now
supports empty lists.
.. seealso::
:ref:`coretutorial_bind_param`
:ref:`coretutorial_insert_expressions`
:func:`.outparam`
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.key
if required is NO_ARG:
required = value is NO_ARG and callable_ is None
if value is NO_ARG:
value = None
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label(
"%%(%d %s)s"
% (
id(self),
re.sub(r"[%\(\) \$]+", "_", key).strip("_")
if key is not None
else "param",
)
)
else:
self.key = key or _anonymous_label("%%(%d param)s" % id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or "param"
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
self.expanding = expanding
if type_ is None:
if _compared_to_type is not None:
self.type = _compared_to_type.coerce_compared_value(
_compared_to_operator, value
)
else:
self.type = type_api._resolve_value_to_type(value)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
|
def __init__(
self,
key,
value=NO_ARG,
type_=None,
unique=False,
required=NO_ARG,
quote=None,
callable_=None,
expanding=False,
isoutparam=False,
_compared_to_operator=None,
_compared_to_type=None,
):
r"""Produce a "bound expression".
The return value is an instance of :class:`.BindParameter`; this
is a :class:`.ColumnElement` subclass which represents a so-called
"placeholder" value in a SQL expression, the value of which is
supplied at the point at which the statement in executed against a
database connection.
In SQLAlchemy, the :func:`.bindparam` construct has
the ability to carry along the actual value that will be ultimately
used at expression time. In this way, it serves not just as
a "placeholder" for eventual population, but also as a means of
representing so-called "unsafe" values which should not be rendered
directly in a SQL statement, but rather should be passed along
to the :term:`DBAPI` as values which need to be correctly escaped
and potentially handled for type-safety.
When using :func:`.bindparam` explicitly, the use case is typically
one of traditional deferment of parameters; the :func:`.bindparam`
construct accepts a name which can then be referred to at execution
time::
from sqlalchemy import bindparam
stmt = select([users_table]).\
where(users_table.c.name == bindparam('username'))
The above statement, when rendered, will produce SQL similar to::
SELECT id, name FROM user WHERE name = :username
In order to populate the value of ``:username`` above, the value
would typically be applied at execution time to a method
like :meth:`.Connection.execute`::
result = connection.execute(stmt, username='wendy')
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = (users_table.update().
where(user_table.c.name == bindparam('username')).
values(fullname=bindparam('fullname'))
)
connection.execute(
stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
]
)
SQLAlchemy's Core expression system makes wide use of
:func:`.bindparam` in an implicit sense. It is typical that Python
literal values passed to virtually all SQL expression functions are
coerced into fixed :func:`.bindparam` constructs. For example, given
a comparison operation such as::
expr = users_table.c.name == 'Wendy'
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`.Column` object
representing the ``name`` column, and the right side is a
:class:`.BindParameter` representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
The expression above will render SQL such as::
user.name = :name_1
Where the ``:name_1`` parameter name is an anonymous name. The
actual string ``Wendy`` is not in the rendered string, but is carried
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select([users_table]).where(users_table.c.name == 'Wendy')
result = connection.execute(stmt)
We would see SQL logging output as::
SELECT "user".id, "user".name
FROM "user"
WHERE "user".name = %(name_1)s
{'name_1': 'Wendy'}
Above, we see that ``Wendy`` is passed as a parameter to the database,
while the placeholder ``:name_1`` is rendered in the appropriate form
for the target database, in this case the PostgreSQL database.
Similarly, :func:`.bindparam` is invoked automatically
when working with :term:`CRUD` statements as far as the "VALUES"
portion is concerned. The :func:`.insert` construct produces an
``INSERT`` expression which will, at statement execution time,
generate bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
The above will produce SQL output as::
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
The :class:`.Insert` construct, at compilation/execution time,
rendered a single :func:`.bindparam` mirroring the column
name ``name`` as a result of the single ``name`` parameter
we passed to the :meth:`.Connection.execute` method.
:param key:
the key (e.g. the name) for this bind param.
Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. Will be used at statement
execution time as the value for this parameter passed to the
DBAPI, if no other value is indicated to the statement execution
method for this particular parameter name. Defaults to ``None``.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A :class:`.TypeEngine` class or instance representing an optional
datatype for this :func:`.bindparam`. If not passed, a type
may be determined automatically for the bind, based on the given
value; for example, trivial Python types such as ``str``,
``int``, ``bool``
may result in the :class:`.String`, :class:`.Integer` or
:class:`.Boolean` types being automatically selected.
The type of a :func:`.bindparam` is significant especially in that
the type will apply pre-processing to the value before it is
passed to the database. For example, a :func:`.bindparam` which
refers to a datetime value, and is specified as holding the
:class:`.DateTime` type, may apply conversion needed to the
value (such as stringification on SQLite) before passing the value
to the database.
:param unique:
if True, the key name of this :class:`.BindParameter` will be
modified if another :class:`.BindParameter` of the same name
already has been located within the containing
expression. This flag is used generally by the internals
when producing so-called "anonymous" bound expressions, it
isn't generally applicable to explicitly-named :func:`.bindparam`
constructs.
:param required:
If ``True``, a value is required at execution time. If not passed,
it defaults to ``True`` if neither :paramref:`.bindparam.value`
or :paramref:`.bindparam.callable` were passed. If either of these
parameters are present, then :paramref:`.bindparam.required`
defaults to ``False``.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
support OUT parameters.
:param expanding:
if True, this parameter will be treated as an "expanding" parameter
at execution time; the parameter value is expected to be a sequence,
rather than a scalar value, and the string SQL statement will
be transformed on a per-execution basis to accommodate the sequence
with a variable number of parameter slots passed to the DBAPI.
This is to allow statement caching to be used in conjunction with
an IN clause.
.. seealso::
:meth:`.ColumnOperators.in_`
:ref:`baked_in` - with baked queries
.. note:: The "expanding" feature does not support "executemany"-
style parameter sets.
.. versionadded:: 1.2
.. versionchanged:: 1.3 the "expanding" bound parameter feature now
supports empty lists.
.. seealso::
:ref:`coretutorial_bind_param`
:ref:`coretutorial_insert_expressions`
:func:`.outparam`
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.key
if required is NO_ARG:
required = value is NO_ARG and callable_ is None
if value is NO_ARG:
value = None
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label("%%(%d %s)s" % (id(self), key or "param"))
else:
self.key = key or _anonymous_label("%%(%d param)s" % id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or "param"
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
self.expanding = expanding
if type_ is None:
if _compared_to_type is not None:
self.type = _compared_to_type.coerce_compared_value(
_compared_to_operator, value
)
else:
self.type = type_api._resolve_value_to_type(value)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
|
https://github.com/sqlalchemy/sqlalchemy/issues/4837
|
Traceback (most recent call last):
File "/Users/yuany/tmp/special_chars.py", line 14, in <module>
engine.execute(select(['*']).where(table.c[colname] > 1))
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 2144, in execute
return connection.execute(statement, *multiparams, **params)
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 980, in execute
return meth(self, multiparams, params)
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 273, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1099, in _execute_clauseelement
distilled_params,
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1240, in _execute_context
e, statement, parameters, cursor, context
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1460, in _handle_dbapi_exception
util.reraise(*exc_info)
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 277, in reraise
raise value
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1236, in _execute_context
cursor, statement, parameters, context
File "/Users/yuany/envs/dbe/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 536, in do_execute
cursor.execute(statement, parameters)
KeyError: 'size(kb'
|
KeyError
|
def create_engine(url, **kwargs):
"""Create a new :class:`.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
and its underlying :class:`.Dialect` and :class:`.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
Once established, the newly resulting :class:`.Engine` will
request a connection from the underlying :class:`.Pool` once
:meth:`.Engine.connect` is called, or a method which depends on it
such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, causes
all :class:`.String` datatypes to act as though the
:paramref:`.String.convert_unicode` flag has been set to ``True``,
regardless of a setting of ``False`` on an individual :class:`.String`
type. This has the effect of causing all :class:`.String` -based
columns to accommodate Python Unicode objects directly as though the
datatype were the :class:`.Unicode` type.
.. deprecated:: 1.3
The :paramref:`.create_engine.convert_unicode` parameter
is deprecated and will be removed in a future release.
All modern DBAPIs now support Python Unicode directly and this
parameter is unnecessary.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
:param echo=False: if True, the Engine will log all statements
as well as a ``repr()`` of their parameter lists to the default log
handler, which defaults to ``sys.stdout`` for output. If set to the
string ``"debug"``, result rows will be printed to the standard output
as well. The ``echo`` attribute of ``Engine`` can be modified at any
time to turn logging on and off; direct control of logging is also
available using the standard Python ``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param echo_pool=False: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
Direct control of logging is also available using the standard Python
``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param empty_in_strategy: The SQL compilation strategy to use when
rendering an IN or NOT IN expression for :meth:`.ColumnOperators.in_`
where the right-hand side
is an empty set. This is a string value that may be one of
``static``, ``dynamic``, or ``dynamic_warn``. The ``static``
strategy is the default, and an IN comparison to an empty set
will generate a simple false expression "1 != 1". The ``dynamic``
strategy behaves like that of SQLAlchemy 1.1 and earlier, emitting
a false expression of the form "expr != expr", which has the effect
of evaluting to NULL in the case of a null expression.
``dynamic_warn`` is the same as ``dynamic``, however also emits a
warning when an empty set is encountered; this because the "dynamic"
comparison is typically poorly performing on most databases.
.. versionadded:: 1.2 Added the ``empty_in_strategy`` setting and
additionally defaulted the behavior for empty-set IN comparisons
to a static boolean expression.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
what you see in Python 2 as a string of the form
``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
To properly configure a system to accommodate Python
``unicode`` objects, the DBAPI should be
configured to handle unicode to the greatest
degree as is appropriate - see
the notes on unicode pertaining to the specific
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
the DBAPI does not specify unicode behavior at all,
so SQLAlchemy must make decisions for each of the above
values on a per-DBAPI basis - implementations are
completely inconsistent in their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including PostgreSQL, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param isolation_level: this string parameter is interpreted by various
dialects in order to affect the transaction isolation level of the
database connection. The parameter essentially accepts some subset of
these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``,
``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``.
Behavior here varies per backend, and
individual dialects should be consulted directly.
Note that the isolation level can also be set on a
per-:class:`.Connection` basis as well, using the
:paramref:`.Connection.execution_options.isolation_level`
feature.
.. seealso::
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param json_deserializer: for dialects that support the :class:`.JSON`
datatype, this is a Python callable that will convert a JSON string
to a Python object. By default, the Python ``json.loads`` function is
used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_deserializer``.
:param json_serializer: for dialects that support the :class:`.JSON`
datatype, this is a Python callable that will render a given object
as JSON. By default, the Python ``json.dumps`` function is used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_serializer``.
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`.Engine`.
:param paramstyle=None: The `paramstyle <http://legacy.python.org/dev/peps/pep-0249/#paramstyle>`_
to use when rendering bound parameters. This style defaults to the
one recommended by the DBAPI itself, which is retrieved from the
``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept
more than one paramstyle, and in particular it may be desirable
to change a "named" paramstyle into a "positional" one, or vice versa.
When this attribute is passed, it should be one of the values
``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or
``"pyformat"``, and should correspond to a parameter style known
to be supported by the DBAPI in use.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_pre_ping: boolean, if True will enable the connection pool
"pre-ping" feature that tests connections for liveness upon
each checkout.
.. versionadded:: 1.2
.. seealso::
:ref:`pool_disconnects_pessimistic`
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
.. seealso::
:ref:`pool_setting_recycle`
:param pool_reset_on_return='rollback': set the
:paramref:`.Pool.reset_on_return` parameter of the underlying
:class:`.Pool` object, which can be set to the values
``"rollback"``, ``"commit"``, or ``None``.
.. seealso::
:paramref:`.Pool.reset_on_return`
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param pool_use_lifo=False: use LIFO (last-in-first-out) when retrieving
connections from :class:`.QueuePool` instead of FIFO
(first-in-first-out). Using LIFO, a server-side timeout scheme can
reduce the number of connections used during non- peak periods of
use. When planning for server-side timeouts, ensure that a recycle or
pre-ping strategy is in use to gracefully handle stale connections.
.. versionadded:: 1.3
.. seealso::
:ref:`pool_use_lifo`
:ref:`pool_disconnects`
:param plugins: string list of plugin names to load. See
:class:`.CreateEnginePlugin` for background.
.. versionadded:: 1.2.3
""" # noqa
if "strategy" in kwargs:
strat = kwargs.pop("strategy")
if strat == "mock":
return create_mock_engine(url, **kwargs)
else:
raise exc.ArgumentError("unknown strategy: %r" % strat)
# create url.URL object
u = _url.make_url(url)
plugins = u._instantiate_plugins(kwargs)
u.query.pop("plugin", None)
kwargs.pop("plugins", None)
entrypoint = u._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(u)
if kwargs.pop("_coerce_config", False):
def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value
else:
pop_kwarg = kwargs.pop
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
dbapi = kwargs.pop("module", None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args)
dialect_args["dbapi"] = dbapi
for plugin in plugins:
plugin.handle_dialect_kwargs(dialect_cls, dialect_args)
# create dialect
dialect = dialect_cls(**dialect_args)
# assemble connection arguments
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg("connect_args", {}))
cargs = list(cargs) # allow mutability
# look for existing pool or create
pool = pop_kwarg("pool", None)
if pool is None:
def connect(connection_record=None):
if dialect._has_events:
for fn in dialect.dispatch.do_connect:
connection = fn(dialect, connection_record, cargs, cparams)
if connection is not None:
return connection
return dialect.connect(*cargs, **cparams)
creator = pop_kwarg("creator", connect)
poolclass = pop_kwarg("poolclass", None)
if poolclass is None:
poolclass = dialect_cls.get_pool_class(u)
pool_args = {"dialect": dialect}
# consume pool arguments from kwargs, translating a few of
# the arguments
translate = {
"logging_name": "pool_logging_name",
"echo": "echo_pool",
"timeout": "pool_timeout",
"recycle": "pool_recycle",
"events": "pool_events",
"reset_on_return": "pool_reset_on_return",
"pre_ping": "pool_pre_ping",
"use_lifo": "pool_use_lifo",
}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk)
for plugin in plugins:
plugin.handle_pool_kwargs(poolclass, pool_args)
pool = poolclass(creator, **pool_args)
else:
if isinstance(pool, poollib.dbapi_proxy._DBProxy):
pool = pool.get_pool(*cargs, **cparams)
else:
pool = pool
pool._dialect = dialect
# create engine.
engineclass = base.Engine
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
if k in kwargs:
engine_args[k] = pop_kwarg(k)
_initialize = kwargs.pop("_initialize", True)
# all kwargs should be consumed
if kwargs:
raise TypeError(
"Invalid argument(s) %s sent to create_engine(), "
"using configuration %s/%s/%s. Please check that the "
"keyword arguments are appropriate for this combination "
"of components."
% (
",".join("'%s'" % k for k in kwargs),
dialect.__class__.__name__,
pool.__class__.__name__,
engineclass.__name__,
)
)
engine = engineclass(pool, dialect, u, **engine_args)
if _initialize:
do_on_connect = dialect.on_connect()
if do_on_connect:
def on_connect(dbapi_connection, connection_record):
conn = getattr(dbapi_connection, "_sqla_unwrap", dbapi_connection)
if conn is None:
return
do_on_connect(conn)
event.listen(pool, "first_connect", on_connect)
event.listen(pool, "connect", on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(engine, connection=dbapi_connection, _has_events=False)
c._execution_options = util.immutabledict()
dialect.initialize(c)
dialect.do_rollback(c.connection)
event.listen(pool, "first_connect", first_connect, _once_unless_exception=True)
dialect_cls.engine_created(engine)
if entrypoint is not dialect_cls:
entrypoint.engine_created(engine)
for plugin in plugins:
plugin.engine_created(engine)
return engine
|
def create_engine(url, **kwargs):
"""Create a new :class:`.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments::
engine = create_engine("postgresql://scott:tiger@localhost/test")
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
and its underlying :class:`.Dialect` and :class:`.Pool`
constructs::
engine = create_engine("mysql://scott:tiger@hostname/dbname",
encoding='latin1', echo=True)
The string form of the URL is
``dialect[+driver]://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be specific to
the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
Once established, the newly resulting :class:`.Engine` will
request a connection from the underlying :class:`.Pool` once
:meth:`.Engine.connect` is called, or a method which depends on it
such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
.. seealso::
:doc:`/core/engines`
:doc:`/dialects/index`
:ref:`connections_toplevel`
:param case_sensitive=True: if False, result column names
will match in a case-insensitive fashion, that is,
``row['SomeColumn']``.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, causes
all :class:`.String` datatypes to act as though the
:paramref:`.String.convert_unicode` flag has been set to ``True``,
regardless of a setting of ``False`` on an individual :class:`.String`
type. This has the effect of causing all :class:`.String` -based
columns to accommodate Python Unicode objects directly as though the
datatype were the :class:`.Unicode` type.
.. deprecated:: 1.3
The :paramref:`.create_engine.convert_unicode` parameter
is deprecated and will be removed in a future release.
All modern DBAPIs now support Python Unicode directly and this
parameter is unnecessary.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
:param echo=False: if True, the Engine will log all statements
as well as a ``repr()`` of their parameter lists to the default log
handler, which defaults to ``sys.stdout`` for output. If set to the
string ``"debug"``, result rows will be printed to the standard output
as well. The ``echo`` attribute of ``Engine`` can be modified at any
time to turn logging on and off; direct control of logging is also
available using the standard Python ``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param echo_pool=False: if True, the connection pool will log
informational output such as when connections are invalidated
as well as when connections are recycled to the default log handler,
which defaults to ``sys.stdout`` for output. If set to the string
``"debug"``, the logging will include pool checkouts and checkins.
Direct control of logging is also available using the standard Python
``logging`` module.
.. seealso::
:ref:`dbengine_logging` - further detail on how to configure
logging.
:param empty_in_strategy: The SQL compilation strategy to use when
rendering an IN or NOT IN expression for :meth:`.ColumnOperators.in_`
where the right-hand side
is an empty set. This is a string value that may be one of
``static``, ``dynamic``, or ``dynamic_warn``. The ``static``
strategy is the default, and an IN comparison to an empty set
will generate a simple false expression "1 != 1". The ``dynamic``
strategy behaves like that of SQLAlchemy 1.1 and earlier, emitting
a false expression of the form "expr != expr", which has the effect
of evaluting to NULL in the case of a null expression.
``dynamic_warn`` is the same as ``dynamic``, however also emits a
warning when an empty set is encountered; this because the "dynamic"
comparison is typically poorly performing on most databases.
.. versionadded:: 1.2 Added the ``empty_in_strategy`` setting and
additionally defaulted the behavior for empty-set IN comparisons
to a static boolean expression.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
what you see in Python 2 as a string of the form
``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
To properly configure a system to accommodate Python
``unicode`` objects, the DBAPI should be
configured to handle unicode to the greatest
degree as is appropriate - see
the notes on unicode pertaining to the specific
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
the DBAPI does not specify unicode behavior at all,
so SQLAlchemy must make decisions for each of the above
values on a per-DBAPI basis - implementations are
completely inconsistent in their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.Connection.execution_options`
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including PostgreSQL, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param isolation_level: this string parameter is interpreted by various
dialects in order to affect the transaction isolation level of the
database connection. The parameter essentially accepts some subset of
these string arguments: ``"SERIALIZABLE"``, ``"REPEATABLE_READ"``,
``"READ_COMMITTED"``, ``"READ_UNCOMMITTED"`` and ``"AUTOCOMMIT"``.
Behavior here varies per backend, and
individual dialects should be consulted directly.
Note that the isolation level can also be set on a
per-:class:`.Connection` basis as well, using the
:paramref:`.Connection.execution_options.isolation_level`
feature.
.. seealso::
:attr:`.Connection.default_isolation_level` - view default level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`.Connection` isolation level
:ref:`SQLite Transaction Isolation <sqlite_isolation_level>`
:ref:`PostgreSQL Transaction Isolation <postgresql_isolation_level>`
:ref:`MySQL Transaction Isolation <mysql_isolation_level>`
:ref:`session_transaction_isolation` - for the ORM
:param json_deserializer: for dialects that support the :class:`.JSON`
datatype, this is a Python callable that will convert a JSON string
to a Python object. By default, the Python ``json.loads`` function is
used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_deserializer``.
:param json_serializer: for dialects that support the :class:`.JSON`
datatype, this is a Python callable that will render a given object
as JSON. By default, the Python ``json.dumps`` function is used.
.. versionchanged:: 1.3.7 The SQLite dialect renamed this from
``_json_serializer``.
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module
itself, not its string name). Specifies an alternate DBAPI module to
be used by the engine's dialect. Each sub-dialect references a
specific DBAPI which will be imported before first connect. This
parameter causes the import to be bypassed, and the given module to
be used instead. Can be used for testing of DBAPIs as well as to
inject "mock" DBAPI implementations into the :class:`.Engine`.
:param paramstyle=None: The `paramstyle <http://legacy.python.org/dev/peps/pep-0249/#paramstyle>`_
to use when rendering bound parameters. This style defaults to the
one recommended by the DBAPI itself, which is retrieved from the
``.paramstyle`` attribute of the DBAPI. However, most DBAPIs accept
more than one paramstyle, and in particular it may be desirable
to change a "named" paramstyle into a "positional" one, or vice versa.
When this attribute is passed, it should be one of the values
``"qmark"``, ``"numeric"``, ``"named"``, ``"format"`` or
``"pyformat"``, and should correspond to a parameter style known
to be supported by the DBAPI in use.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_pre_ping: boolean, if True will enable the connection pool
"pre-ping" feature that tests connections for liveness upon
each checkout.
.. versionadded:: 1.2
.. seealso::
:ref:`pool_disconnects_pessimistic`
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with
:class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
.. seealso::
:ref:`pool_setting_recycle`
:param pool_reset_on_return='rollback': set the
:paramref:`.Pool.reset_on_return` parameter of the underlying
:class:`.Pool` object, which can be set to the values
``"rollback"``, ``"commit"``, or ``None``.
.. seealso::
:paramref:`.Pool.reset_on_return`
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param pool_use_lifo=False: use LIFO (last-in-first-out) when retrieving
connections from :class:`.QueuePool` instead of FIFO
(first-in-first-out). Using LIFO, a server-side timeout scheme can
reduce the number of connections used during non- peak periods of
use. When planning for server-side timeouts, ensure that a recycle or
pre-ping strategy is in use to gracefully handle stale connections.
.. versionadded:: 1.3
.. seealso::
:ref:`pool_use_lifo`
:ref:`pool_disconnects`
:param plugins: string list of plugin names to load. See
:class:`.CreateEnginePlugin` for background.
.. versionadded:: 1.2.3
""" # noqa
if "strategy" in kwargs:
strat = kwargs.pop("strategy")
if strat == "mock":
return create_mock_engine(url, **kwargs)
else:
raise exc.ArgumentError("unknown strategy: %r" % strat)
# create url.URL object
u = _url.make_url(url)
plugins = u._instantiate_plugins(kwargs)
u.query.pop("plugin", None)
kwargs.pop("plugins", None)
entrypoint = u._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(u)
if kwargs.pop("_coerce_config", False):
def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value
else:
pop_kwarg = kwargs.pop
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
dbapi = kwargs.pop("module", None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args)
dialect_args["dbapi"] = dbapi
for plugin in plugins:
plugin.handle_dialect_kwargs(dialect_cls, dialect_args)
# create dialect
dialect = dialect_cls(**dialect_args)
# assemble connection arguments
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg("connect_args", {}))
cargs = list(cargs) # allow mutability
# look for existing pool or create
pool = pop_kwarg("pool", None)
if pool is None:
def connect(connection_record=None):
if dialect._has_events:
for fn in dialect.dispatch.do_connect:
connection = fn(dialect, connection_record, cargs, cparams)
if connection is not None:
return connection
return dialect.connect(*cargs, **cparams)
creator = pop_kwarg("creator", connect)
poolclass = pop_kwarg("poolclass", None)
if poolclass is None:
poolclass = dialect_cls.get_pool_class(u)
pool_args = {"dialect": dialect}
# consume pool arguments from kwargs, translating a few of
# the arguments
translate = {
"logging_name": "pool_logging_name",
"echo": "echo_pool",
"timeout": "pool_timeout",
"recycle": "pool_recycle",
"events": "pool_events",
"reset_on_return": "pool_reset_on_return",
"pre_ping": "pool_pre_ping",
"use_lifo": "pool_use_lifo",
}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk)
for plugin in plugins:
plugin.handle_pool_kwargs(poolclass, pool_args)
pool = poolclass(creator, **pool_args)
else:
if isinstance(pool, poollib.dbapi_proxy._DBProxy):
pool = pool.get_pool(*cargs, **cparams)
else:
pool = pool
pool._dialect = dialect
# create engine.
engineclass = base.Engine
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
if k in kwargs:
engine_args[k] = pop_kwarg(k)
_initialize = kwargs.pop("_initialize", True)
# all kwargs should be consumed
if kwargs:
raise TypeError(
"Invalid argument(s) %s sent to create_engine(), "
"using configuration %s/%s/%s. Please check that the "
"keyword arguments are appropriate for this combination "
"of components."
% (
",".join("'%s'" % k for k in kwargs),
dialect.__class__.__name__,
pool.__class__.__name__,
engineclass.__name__,
)
)
engine = engineclass(pool, dialect, u, **engine_args)
if _initialize:
do_on_connect = dialect.on_connect()
if do_on_connect:
def on_connect(dbapi_connection, connection_record):
conn = getattr(dbapi_connection, "_sqla_unwrap", dbapi_connection)
if conn is None:
return
do_on_connect(conn)
event.listen(pool, "first_connect", on_connect)
event.listen(pool, "connect", on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(engine, connection=dbapi_connection, _has_events=False)
c._execution_options = util.immutabledict()
dialect.initialize(c)
dialect.do_rollback(c.connection)
event.listen(pool, "first_connect", first_connect, once=True)
dialect_cls.engine_created(engine)
if entrypoint is not dialect_cls:
entrypoint.engine_created(engine)
for plugin in plugins:
plugin.engine_created(engine)
return engine
|
https://github.com/sqlalchemy/sqlalchemy/issues/4807
|
Traceback (most recent call last):
File "sandbox.py", line 54, in <module>
print(engine.execute(product.select().where(product.c.id == 709765476)).fetchall())
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 170, in __repr__
return repr(sql_util._repr_row(self))
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/util.py", line 458, in __repr__
", ".join(trunc(value) for value in self.row),
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/sqltypes.py", line 2278, in process
return json_deserializer(value)
File "/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/__init__.py", line 341, in loads
raise TypeError(f'the JSON object must be str, bytes or bytearray, '
TypeError: the JSON object must be str, bytes or bytearray, not dict
|
TypeError
|
def exec_once(self, *args, **kw):
"""Execute this event, but only if it has not been
executed already for this collection."""
if not self._exec_once:
self._exec_once_impl(False, *args, **kw)
|
def exec_once(self, *args, **kw):
"""Execute this event, but only if it has not been
executed already for this collection."""
if not self._exec_once:
with self._exec_once_mutex:
if not self._exec_once:
try:
self(*args, **kw)
finally:
self._exec_once = True
|
https://github.com/sqlalchemy/sqlalchemy/issues/4807
|
Traceback (most recent call last):
File "sandbox.py", line 54, in <module>
print(engine.execute(product.select().where(product.c.id == 709765476)).fetchall())
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 170, in __repr__
return repr(sql_util._repr_row(self))
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/util.py", line 458, in __repr__
", ".join(trunc(value) for value in self.row),
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/sqltypes.py", line 2278, in process
return json_deserializer(value)
File "/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/__init__.py", line 341, in loads
raise TypeError(f'the JSON object must be str, bytes or bytearray, '
TypeError: the JSON object must be str, bytes or bytearray, not dict
|
TypeError
|
def listen(self, *args, **kw):
once = kw.pop("once", False)
once_unless_exception = kw.pop("_once_unless_exception", False)
named = kw.pop("named", False)
target, identifier, fn = (
self.dispatch_target,
self.identifier,
self._listen_fn,
)
dispatch_collection = getattr(target.dispatch, identifier)
adjusted_fn = dispatch_collection._adjust_fn_spec(fn, named)
self = self.with_wrapper(adjusted_fn)
stub_function = getattr(self.dispatch_target.dispatch._events, self.identifier)
if hasattr(stub_function, "_sa_warn"):
stub_function._sa_warn()
if once or once_unless_exception:
self.with_wrapper(
util.only_once(self._listen_fn, retry_on_exception=once_unless_exception)
).listen(*args, **kw)
else:
self.dispatch_target.dispatch._listen(self, *args, **kw)
|
def listen(self, *args, **kw):
once = kw.pop("once", False)
named = kw.pop("named", False)
target, identifier, fn = (
self.dispatch_target,
self.identifier,
self._listen_fn,
)
dispatch_collection = getattr(target.dispatch, identifier)
adjusted_fn = dispatch_collection._adjust_fn_spec(fn, named)
self = self.with_wrapper(adjusted_fn)
stub_function = getattr(self.dispatch_target.dispatch._events, self.identifier)
if hasattr(stub_function, "_sa_warn"):
stub_function._sa_warn()
if once:
self.with_wrapper(util.only_once(self._listen_fn)).listen(*args, **kw)
else:
self.dispatch_target.dispatch._listen(self, *args, **kw)
|
https://github.com/sqlalchemy/sqlalchemy/issues/4807
|
Traceback (most recent call last):
File "sandbox.py", line 54, in <module>
print(engine.execute(product.select().where(product.c.id == 709765476)).fetchall())
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 170, in __repr__
return repr(sql_util._repr_row(self))
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/util.py", line 458, in __repr__
", ".join(trunc(value) for value in self.row),
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/sqltypes.py", line 2278, in process
return json_deserializer(value)
File "/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/__init__.py", line 341, in loads
raise TypeError(f'the JSON object must be str, bytes or bytearray, '
TypeError: the JSON object must be str, bytes or bytearray, not dict
|
TypeError
|
def __connect(self, first_connect_check=False):
pool = self.__pool
# ensure any existing connection is removed, so that if
# creator fails, this attribute stays None
self.connection = None
try:
self.starttime = time.time()
connection = pool._invoke_creator(self)
pool.logger.debug("Created new connection %r", connection)
self.connection = connection
except Exception as e:
pool.logger.debug("Error on connect(): %s", e)
raise
else:
if first_connect_check:
pool.dispatch.first_connect.for_modify(
pool.dispatch
).exec_once_unless_exception(self.connection, self)
if pool.dispatch.connect:
pool.dispatch.connect(self.connection, self)
|
def __connect(self, first_connect_check=False):
pool = self.__pool
# ensure any existing connection is removed, so that if
# creator fails, this attribute stays None
self.connection = None
try:
self.starttime = time.time()
connection = pool._invoke_creator(self)
pool.logger.debug("Created new connection %r", connection)
self.connection = connection
except Exception as e:
pool.logger.debug("Error on connect(): %s", e)
raise
else:
if first_connect_check:
pool.dispatch.first_connect.for_modify(pool.dispatch).exec_once(
self.connection, self
)
if pool.dispatch.connect:
pool.dispatch.connect(self.connection, self)
|
https://github.com/sqlalchemy/sqlalchemy/issues/4807
|
Traceback (most recent call last):
File "sandbox.py", line 54, in <module>
print(engine.execute(product.select().where(product.c.id == 709765476)).fetchall())
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 170, in __repr__
return repr(sql_util._repr_row(self))
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/util.py", line 458, in __repr__
", ".join(trunc(value) for value in self.row),
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/sqltypes.py", line 2278, in process
return json_deserializer(value)
File "/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/__init__.py", line 341, in loads
raise TypeError(f'the JSON object must be str, bytes or bytearray, '
TypeError: the JSON object must be str, bytes or bytearray, not dict
|
TypeError
|
def only_once(fn, retry_on_exception):
"""Decorate the given function to be a no-op after it is called exactly
once."""
once = [fn]
def go(*arg, **kw):
# strong reference fn so that it isn't garbage collected,
# which interferes with the event system's expectations
strong_fn = fn # noqa
if once:
once_fn = once.pop()
try:
return once_fn(*arg, **kw)
except:
if retry_on_exception:
once.insert(0, once_fn)
raise
return go
|
def only_once(fn):
"""Decorate the given function to be a no-op after it is called exactly
once."""
once = [fn]
def go(*arg, **kw):
# strong reference fn so that it isn't garbage collected,
# which interferes with the event system's expectations
strong_fn = fn # noqa
if once:
once_fn = once.pop()
return once_fn(*arg, **kw)
return go
|
https://github.com/sqlalchemy/sqlalchemy/issues/4807
|
Traceback (most recent call last):
File "sandbox.py", line 54, in <module>
print(engine.execute(product.select().where(product.c.id == 709765476)).fetchall())
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 170, in __repr__
return repr(sql_util._repr_row(self))
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/util.py", line 458, in __repr__
", ".join(trunc(value) for value in self.row),
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/sqltypes.py", line 2278, in process
return json_deserializer(value)
File "/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/__init__.py", line 341, in loads
raise TypeError(f'the JSON object must be str, bytes or bytearray, '
TypeError: the JSON object must be str, bytes or bytearray, not dict
|
TypeError
|
def go(*arg, **kw):
# strong reference fn so that it isn't garbage collected,
# which interferes with the event system's expectations
strong_fn = fn # noqa
if once:
once_fn = once.pop()
try:
return once_fn(*arg, **kw)
except:
if retry_on_exception:
once.insert(0, once_fn)
raise
|
def go(*arg, **kw):
# strong reference fn so that it isn't garbage collected,
# which interferes with the event system's expectations
strong_fn = fn # noqa
if once:
once_fn = once.pop()
return once_fn(*arg, **kw)
|
https://github.com/sqlalchemy/sqlalchemy/issues/4807
|
Traceback (most recent call last):
File "sandbox.py", line 54, in <module>
print(engine.execute(product.select().where(product.c.id == 709765476)).fetchall())
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 170, in __repr__
return repr(sql_util._repr_row(self))
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/util.py", line 458, in __repr__
", ".join(trunc(value) for value in self.row),
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/sqltypes.py", line 2278, in process
return json_deserializer(value)
File "/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/__init__.py", line 341, in loads
raise TypeError(f'the JSON object must be str, bytes or bytearray, '
TypeError: the JSON object must be str, bytes or bytearray, not dict
|
TypeError
|
def create(self, name_or_url, **kwargs):
# create url.URL object
u = url.make_url(name_or_url)
plugins = u._instantiate_plugins(kwargs)
u.query.pop("plugin", None)
kwargs.pop("plugins", None)
entrypoint = u._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(u)
if kwargs.pop("_coerce_config", False):
def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value
else:
pop_kwarg = kwargs.pop
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
dbapi = kwargs.pop("module", None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args)
dialect_args["dbapi"] = dbapi
for plugin in plugins:
plugin.handle_dialect_kwargs(dialect_cls, dialect_args)
# create dialect
dialect = dialect_cls(**dialect_args)
# assemble connection arguments
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg("connect_args", {}))
cargs = list(cargs) # allow mutability
# look for existing pool or create
pool = pop_kwarg("pool", None)
if pool is None:
def connect(connection_record=None):
if dialect._has_events:
for fn in dialect.dispatch.do_connect:
connection = fn(dialect, connection_record, cargs, cparams)
if connection is not None:
return connection
return dialect.connect(*cargs, **cparams)
creator = pop_kwarg("creator", connect)
poolclass = pop_kwarg("poolclass", None)
if poolclass is None:
poolclass = dialect_cls.get_pool_class(u)
pool_args = {"dialect": dialect}
# consume pool arguments from kwargs, translating a few of
# the arguments
translate = {
"logging_name": "pool_logging_name",
"echo": "echo_pool",
"timeout": "pool_timeout",
"recycle": "pool_recycle",
"events": "pool_events",
"use_threadlocal": "pool_threadlocal",
"reset_on_return": "pool_reset_on_return",
"pre_ping": "pool_pre_ping",
"use_lifo": "pool_use_lifo",
}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk)
for plugin in plugins:
plugin.handle_pool_kwargs(poolclass, pool_args)
pool = poolclass(creator, **pool_args)
else:
if isinstance(pool, poollib.dbapi_proxy._DBProxy):
pool = pool.get_pool(*cargs, **cparams)
else:
pool = pool
pool._dialect = dialect
# create engine.
engineclass = self.engine_cls
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
if k in kwargs:
engine_args[k] = pop_kwarg(k)
_initialize = kwargs.pop("_initialize", True)
# all kwargs should be consumed
if kwargs:
raise TypeError(
"Invalid argument(s) %s sent to create_engine(), "
"using configuration %s/%s/%s. Please check that the "
"keyword arguments are appropriate for this combination "
"of components."
% (
",".join("'%s'" % k for k in kwargs),
dialect.__class__.__name__,
pool.__class__.__name__,
engineclass.__name__,
)
)
engine = engineclass(pool, dialect, u, **engine_args)
if _initialize:
do_on_connect = dialect.on_connect()
if do_on_connect:
def on_connect(dbapi_connection, connection_record):
conn = getattr(dbapi_connection, "_sqla_unwrap", dbapi_connection)
if conn is None:
return
do_on_connect(conn)
event.listen(pool, "first_connect", on_connect)
event.listen(pool, "connect", on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(engine, connection=dbapi_connection, _has_events=False)
c._execution_options = util.immutabledict()
dialect.initialize(c)
dialect.do_rollback(c.connection)
event.listen(
pool,
"first_connect",
first_connect,
_once_unless_exception=True,
)
dialect_cls.engine_created(engine)
if entrypoint is not dialect_cls:
entrypoint.engine_created(engine)
for plugin in plugins:
plugin.engine_created(engine)
return engine
|
def create(self, name_or_url, **kwargs):
# create url.URL object
u = url.make_url(name_or_url)
plugins = u._instantiate_plugins(kwargs)
u.query.pop("plugin", None)
kwargs.pop("plugins", None)
entrypoint = u._get_entrypoint()
dialect_cls = entrypoint.get_dialect_cls(u)
if kwargs.pop("_coerce_config", False):
def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value
else:
pop_kwarg = kwargs.pop
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
dbapi = kwargs.pop("module", None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args)
dialect_args["dbapi"] = dbapi
for plugin in plugins:
plugin.handle_dialect_kwargs(dialect_cls, dialect_args)
# create dialect
dialect = dialect_cls(**dialect_args)
# assemble connection arguments
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg("connect_args", {}))
cargs = list(cargs) # allow mutability
# look for existing pool or create
pool = pop_kwarg("pool", None)
if pool is None:
def connect(connection_record=None):
if dialect._has_events:
for fn in dialect.dispatch.do_connect:
connection = fn(dialect, connection_record, cargs, cparams)
if connection is not None:
return connection
return dialect.connect(*cargs, **cparams)
creator = pop_kwarg("creator", connect)
poolclass = pop_kwarg("poolclass", None)
if poolclass is None:
poolclass = dialect_cls.get_pool_class(u)
pool_args = {"dialect": dialect}
# consume pool arguments from kwargs, translating a few of
# the arguments
translate = {
"logging_name": "pool_logging_name",
"echo": "echo_pool",
"timeout": "pool_timeout",
"recycle": "pool_recycle",
"events": "pool_events",
"use_threadlocal": "pool_threadlocal",
"reset_on_return": "pool_reset_on_return",
"pre_ping": "pool_pre_ping",
"use_lifo": "pool_use_lifo",
}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk)
for plugin in plugins:
plugin.handle_pool_kwargs(poolclass, pool_args)
pool = poolclass(creator, **pool_args)
else:
if isinstance(pool, poollib.dbapi_proxy._DBProxy):
pool = pool.get_pool(*cargs, **cparams)
else:
pool = pool
pool._dialect = dialect
# create engine.
engineclass = self.engine_cls
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
if k in kwargs:
engine_args[k] = pop_kwarg(k)
_initialize = kwargs.pop("_initialize", True)
# all kwargs should be consumed
if kwargs:
raise TypeError(
"Invalid argument(s) %s sent to create_engine(), "
"using configuration %s/%s/%s. Please check that the "
"keyword arguments are appropriate for this combination "
"of components."
% (
",".join("'%s'" % k for k in kwargs),
dialect.__class__.__name__,
pool.__class__.__name__,
engineclass.__name__,
)
)
engine = engineclass(pool, dialect, u, **engine_args)
if _initialize:
do_on_connect = dialect.on_connect()
if do_on_connect:
def on_connect(dbapi_connection, connection_record):
conn = getattr(dbapi_connection, "_sqla_unwrap", dbapi_connection)
if conn is None:
return
do_on_connect(conn)
event.listen(pool, "first_connect", on_connect)
event.listen(pool, "connect", on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(engine, connection=dbapi_connection, _has_events=False)
c._execution_options = util.immutabledict()
dialect.initialize(c)
dialect.do_rollback(c.connection)
event.listen(pool, "first_connect", first_connect, once=True)
dialect_cls.engine_created(engine)
if entrypoint is not dialect_cls:
entrypoint.engine_created(engine)
for plugin in plugins:
plugin.engine_created(engine)
return engine
|
https://github.com/sqlalchemy/sqlalchemy/issues/4807
|
Traceback (most recent call last):
File "sandbox.py", line 54, in <module>
print(engine.execute(product.select().where(product.c.id == 709765476)).fetchall())
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 170, in __repr__
return repr(sql_util._repr_row(self))
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/util.py", line 458, in __repr__
", ".join(trunc(value) for value in self.row),
File "/Users/vm/ws/app/env/lib/python3.7/site-packages/sqlalchemy/sql/sqltypes.py", line 2278, in process
return json_deserializer(value)
File "/usr/local/Cellar/python/3.7.2_2/Frameworks/Python.framework/Versions/3.7/lib/python3.7/json/__init__.py", line 341, in loads
raise TypeError(f'the JSON object must be str, bytes or bytearray, '
TypeError: the JSON object must be str, bytes or bytearray, not dict
|
TypeError
|
def slice(self, start, stop):
"""Computes the "slice" of the :class:`.Query` represented by
the given indices and returns the resulting :class:`.Query`.
The start and stop indices behave like the argument to Python's
built-in :func:`range` function. This method provides an
alternative to using ``LIMIT``/``OFFSET`` to get a slice of the
query.
For example, ::
session.query(User).order_by(User.id).slice(1, 3)
renders as
.. sourcecode:: sql
SELECT users.id AS users_id,
users.name AS users_name
FROM users ORDER BY users.id
LIMIT ? OFFSET ?
(2, 1)
.. seealso::
:meth:`.Query.limit`
:meth:`.Query.offset`
"""
if start is not None and stop is not None:
self._offset = self._offset if self._offset is not None else 0
if start != 0:
self._offset += start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = self._offset if self._offset is not None else 0
if start != 0:
self._offset += start
if isinstance(self._offset, int) and self._offset == 0:
self._offset = None
|
def slice(self, start, stop):
"""Computes the "slice" of the :class:`.Query` represented by
the given indices and returns the resulting :class:`.Query`.
The start and stop indices behave like the argument to Python's
built-in :func:`range` function. This method provides an
alternative to using ``LIMIT``/``OFFSET`` to get a slice of the
query.
For example, ::
session.query(User).order_by(User.id).slice(1, 3)
renders as
.. sourcecode:: sql
SELECT users.id AS users_id,
users.name AS users_name
FROM users ORDER BY users.id
LIMIT ? OFFSET ?
(2, 1)
.. seealso::
:meth:`.Query.limit`
:meth:`.Query.offset`
"""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
|
https://github.com/sqlalchemy/sqlalchemy/issues/4803
|
/home/artslob/.virtualenvs/sqlalchemy-bug-report/bin/python /home/artslob/projects/python/sqlalchemy-bug-report/main.py
2019-08-11 12:57:29,715 INFO sqlalchemy.engine.base.Engine select version()
2019-08-11 12:57:29,716 INFO sqlalchemy.engine.base.Engine {}
2019-08-11 12:57:29,717 INFO sqlalchemy.engine.base.Engine select current_schema()
2019-08-11 12:57:29,717 INFO sqlalchemy.engine.base.Engine {}
2019-08-11 12:57:29,718 INFO sqlalchemy.engine.base.Engine SELECT CAST('test plain returns' AS VARCHAR(60)) AS anon_1
2019-08-11 12:57:29,718 INFO sqlalchemy.engine.base.Engine {}
2019-08-11 12:57:29,719 INFO sqlalchemy.engine.base.Engine SELECT CAST('test unicode returns' AS VARCHAR(60)) AS anon_1
2019-08-11 12:57:29,719 INFO sqlalchemy.engine.base.Engine {}
2019-08-11 12:57:29,720 INFO sqlalchemy.engine.base.Engine show standard_conforming_strings
2019-08-11 12:57:29,720 INFO sqlalchemy.engine.base.Engine {}
2019-08-11 12:57:29,721 INFO sqlalchemy.engine.base.Engine select relname from pg_class c join pg_namespace n on n.oid=c.relnamespace where pg_catalog.pg_table_is_visible(c.oid) and relname=%(name)s
2019-08-11 12:57:29,721 INFO sqlalchemy.engine.base.Engine {'name': 'users'}
2019-08-11 12:57:29,724 INFO sqlalchemy.engine.base.Engine BEGIN (implicit)
2019-08-11 12:57:29,724 INFO sqlalchemy.engine.base.Engine DELETE FROM users
2019-08-11 12:57:29,724 INFO sqlalchemy.engine.base.Engine {}
2019-08-11 12:57:29,727 INFO sqlalchemy.engine.base.Engine INSERT INTO users (id, name) VALUES (%(id)s, %(name)s)
2019-08-11 12:57:29,727 INFO sqlalchemy.engine.base.Engine ({'id': 1, 'name': 'user 1'}, {'id': 2, 'name': 'user 2'}, {'id': 3, 'name': 'user 3'}, {'id': 4, 'name': 'user 4'}, {'id': 5, 'name': 'user 5'}, {'id': 6, 'name': 'user 6'}, {'id': 7, 'name': 'user 7'}, {'id': 8, 'name': 'user 8'} ... displaying 10 of 15 total bound parameter sets ... {'id': 14, 'name': 'user 14'}, {'id': 15, 'name': 'user 15'})
2019-08-11 12:57:29,731 INFO sqlalchemy.engine.base.Engine COMMIT
2019-08-11 12:57:29,736 INFO sqlalchemy.engine.base.Engine BEGIN (implicit)
2019-08-11 12:57:29,737 INFO sqlalchemy.engine.base.Engine SELECT users.id AS users_id, users.name AS users_name
FROM users
LIMIT %(param_1)s OFFSET floor(random() * (SELECT count(users.id) AS count_1
FROM users))
2019-08-11 12:57:29,737 INFO sqlalchemy.engine.base.Engine {'param_1': 1}
[user: (id=15, name='user 15')]
Traceback (most recent call last):
File "/home/artslob/projects/python/sqlalchemy-bug-report/main.py", line 46, in <module>
main()
File "/home/artslob/projects/python/sqlalchemy-bug-report/main.py", line 34, in main
print(bad(query))
File "/home/artslob/projects/python/sqlalchemy-bug-report/main.py", line 42, in bad
return query.first()
File "/home/artslob/.virtualenvs/sqlalchemy-bug-report/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3228, in first
ret = list(self[0:1])
File "/home/artslob/.virtualenvs/sqlalchemy-bug-report/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3014, in __getitem__
res = self.slice(start, stop)
File "<string>", line 2, in slice
File "/home/artslob/.virtualenvs/sqlalchemy-bug-report/lib/python3.6/site-packages/sqlalchemy/orm/base.py", line 220, in generate
fn(self, *args[1:], **kw)
File "/home/artslob/.virtualenvs/sqlalchemy-bug-report/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3057, in slice
self._offset = (self._offset or 0) + start
File "/home/artslob/.virtualenvs/sqlalchemy-bug-report/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 516, in __bool__
raise TypeError("Boolean value of this clause is not defined")
TypeError: Boolean value of this clause is not defined
Process finished with exit code 1
|
TypeError
|
def _to_schema_column_or_string(element):
if element is None:
return element
elif hasattr(element, "__clause_element__"):
element = element.__clause_element__()
if not isinstance(element, util.string_types + (ColumnElement,)):
msg = "Element %r is not a string name or column element"
raise exc.ArgumentError(msg % element)
return element
|
def _to_schema_column_or_string(element):
if hasattr(element, "__clause_element__"):
element = element.__clause_element__()
if not isinstance(element, util.string_types + (ColumnElement,)):
msg = "Element %r is not a string name or column element"
raise exc.ArgumentError(msg % element)
return element
|
https://github.com/sqlalchemy/sqlalchemy/issues/4778
|
Traceback (most recent call last):
File "temp.py", line 24, in <module>
Base.metadata.create_all(engine)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/schema.py", line 4287, in create_all
ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 2036, in _run_visitor
conn._run_visitor(visitorcallable, element, **kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1607, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 781, in visit_metadata
_is_metadata_operation=True,
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 833, in visit_table
self.traverse_single(index)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 862, in visit_index
self.connection.execute(CreateIndex(index))
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1043, in _execute_ddl
else None,
File "<string>", line 1, in <lambda>
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 462, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 29, in _compiler
return dialect.ddl_compiler(dialect, self, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 319, in __init__
self.string = self.process(self.statement, **compile_kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 350, in process
return obj._compiler_dispatch(self, **kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 91, in _compiler_dispatch
return meth(self, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1943, in visit_create_index
for expr in index.expressions
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1943, in <listcomp>
for expr in index.expressions
AttributeError: 'NoneType' object has no attribute 'self_group'
|
AttributeError
|
def _set_parent(self, table):
for col in self._col_expressions(table):
if col is not None:
self.columns.add(col)
|
def _set_parent(self, table):
for col in self._pending_colargs:
if isinstance(col, util.string_types):
col = table.c[col]
self.columns.add(col)
|
https://github.com/sqlalchemy/sqlalchemy/issues/4778
|
Traceback (most recent call last):
File "temp.py", line 24, in <module>
Base.metadata.create_all(engine)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/schema.py", line 4287, in create_all
ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 2036, in _run_visitor
conn._run_visitor(visitorcallable, element, **kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1607, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 781, in visit_metadata
_is_metadata_operation=True,
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 833, in visit_table
self.traverse_single(index)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 862, in visit_index
self.connection.execute(CreateIndex(index))
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1043, in _execute_ddl
else None,
File "<string>", line 1, in <lambda>
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 462, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 29, in _compiler
return dialect.ddl_compiler(dialect, self, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 319, in __init__
self.string = self.process(self.statement, **compile_kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 350, in process
return obj._compiler_dispatch(self, **kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 91, in _compiler_dispatch
return meth(self, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1943, in visit_create_index
for expr in index.expressions
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1943, in <listcomp>
for expr in index.expressions
AttributeError: 'NoneType' object has no attribute 'self_group'
|
AttributeError
|
def __init__(self, name, *expressions, **kw):
r"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = table = None
columns = []
processed_expressions = []
for (
expr,
column,
strname,
add_element,
) in coercions.expect_col_expression_collection(
roles.DDLConstraintColumnRole, expressions
):
columns.append(add_element)
processed_expressions.append(expr)
self.expressions = processed_expressions
self.name = quoted_name(name, kw.pop("quote", None))
self.unique = kw.pop("unique", False)
_column_flag = kw.pop("_column_flag", False)
if "info" in kw:
self.info = kw.pop("info")
# TODO: consider "table" argument being public, but for
# the purpose of the fix here, it starts as private.
if "_table" in kw:
table = kw.pop("_table")
self._validate_dialect_kwargs(kw)
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(self, *columns, _column_flag=_column_flag)
if table is not None:
self._set_parent(table)
|
def __init__(self, name, *expressions, **kw):
r"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = table = None
columns = []
processed_expressions = []
for (
expr,
column,
strname,
add_element,
) in coercions.expect_col_expression_collection(
roles.DDLConstraintColumnRole, expressions
):
if add_element is not None:
columns.append(add_element)
processed_expressions.append(expr)
self.expressions = processed_expressions
self.name = quoted_name(name, kw.pop("quote", None))
self.unique = kw.pop("unique", False)
_column_flag = kw.pop("_column_flag", False)
if "info" in kw:
self.info = kw.pop("info")
# TODO: consider "table" argument being public, but for
# the purpose of the fix here, it starts as private.
if "_table" in kw:
table = kw.pop("_table")
self._validate_dialect_kwargs(kw)
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(self, *columns, _column_flag=_column_flag)
if table is not None:
self._set_parent(table)
|
https://github.com/sqlalchemy/sqlalchemy/issues/4778
|
Traceback (most recent call last):
File "temp.py", line 24, in <module>
Base.metadata.create_all(engine)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/schema.py", line 4287, in create_all
ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 2036, in _run_visitor
conn._run_visitor(visitorcallable, element, **kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1607, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 781, in visit_metadata
_is_metadata_operation=True,
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 833, in visit_table
self.traverse_single(index)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 862, in visit_index
self.connection.execute(CreateIndex(index))
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1043, in _execute_ddl
else None,
File "<string>", line 1, in <lambda>
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 462, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 29, in _compiler
return dialect.ddl_compiler(dialect, self, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 319, in __init__
self.string = self.process(self.statement, **compile_kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 350, in process
return obj._compiler_dispatch(self, **kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 91, in _compiler_dispatch
return meth(self, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1943, in visit_create_index
for expr in index.expressions
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1943, in <listcomp>
for expr in index.expressions
AttributeError: 'NoneType' object has no attribute 'self_group'
|
AttributeError
|
def _set_parent(self, table):
ColumnCollectionMixin._set_parent(self, table)
if self.table is not None and table is not self.table:
raise exc.ArgumentError(
"Index '%s' is against table '%s', and "
"cannot be associated with table '%s'."
% (self.name, self.table.description, table.description)
)
self.table = table
table.indexes.add(self)
expressions = self.expressions
col_expressions = self._col_expressions(table)
assert len(expressions) == len(col_expressions)
self.expressions = [
expr if isinstance(expr, ClauseElement) else colexpr
for expr, colexpr in zip(expressions, col_expressions)
]
|
def _set_parent(self, table):
ColumnCollectionMixin._set_parent(self, table)
if self.table is not None and table is not self.table:
raise exc.ArgumentError(
"Index '%s' is against table '%s', and "
"cannot be associated with table '%s'."
% (self.name, self.table.description, table.description)
)
self.table = table
table.indexes.add(self)
self.expressions = [
expr if isinstance(expr, ClauseElement) else colexpr
for expr, colexpr in util.zip_longest(self.expressions, self.columns)
]
|
https://github.com/sqlalchemy/sqlalchemy/issues/4778
|
Traceback (most recent call last):
File "temp.py", line 24, in <module>
Base.metadata.create_all(engine)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/schema.py", line 4287, in create_all
ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 2036, in _run_visitor
conn._run_visitor(visitorcallable, element, **kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1607, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 781, in visit_metadata
_is_metadata_operation=True,
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 833, in visit_table
self.traverse_single(index)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 862, in visit_index
self.connection.execute(CreateIndex(index))
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1043, in _execute_ddl
else None,
File "<string>", line 1, in <lambda>
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 462, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 29, in _compiler
return dialect.ddl_compiler(dialect, self, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 319, in __init__
self.string = self.process(self.statement, **compile_kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 350, in process
return obj._compiler_dispatch(self, **kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 91, in _compiler_dispatch
return meth(self, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1943, in visit_create_index
for expr in index.expressions
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1943, in <listcomp>
for expr in index.expressions
AttributeError: 'NoneType' object has no attribute 'self_group'
|
AttributeError
|
def __init__(self, name, *expressions, **kw):
r"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = table = None
columns = []
processed_expressions = []
for (
expr,
column,
strname,
add_element,
) in self._extract_col_expression_collection(expressions):
columns.append(add_element)
processed_expressions.append(expr)
self.expressions = processed_expressions
self.name = quoted_name(name, kw.pop("quote", None))
self.unique = kw.pop("unique", False)
_column_flag = kw.pop("_column_flag", False)
if "info" in kw:
self.info = kw.pop("info")
# TODO: consider "table" argument being public, but for
# the purpose of the fix here, it starts as private.
if "_table" in kw:
table = kw.pop("_table")
self._validate_dialect_kwargs(kw)
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(self, *columns, _column_flag=_column_flag)
if table is not None:
self._set_parent(table)
|
def __init__(self, name, *expressions, **kw):
r"""Construct an index object.
:param name:
The name of the index
:param \*expressions:
Column expressions to include in the index. The expressions
are normally instances of :class:`.Column`, but may also
be arbitrary SQL expressions which ultimately refer to a
:class:`.Column`.
:param unique=False:
Keyword only argument; if True, create a unique index.
:param quote=None:
Keyword only argument; whether to apply quoting to the name of
the index. Works in the same manner as that of
:paramref:`.Column.quote`.
:param info=None: Optional data dictionary which will be populated
into the :attr:`.SchemaItem.info` attribute of this object.
.. versionadded:: 1.0.0
:param \**kw: Additional keyword arguments not mentioned above are
dialect specific, and passed in the form
``<dialectname>_<argname>``. See the documentation regarding an
individual dialect at :ref:`dialect_toplevel` for detail on
documented arguments.
"""
self.table = table = None
columns = []
processed_expressions = []
for (
expr,
column,
strname,
add_element,
) in self._extract_col_expression_collection(expressions):
if add_element is not None:
columns.append(add_element)
processed_expressions.append(expr)
self.expressions = processed_expressions
self.name = quoted_name(name, kw.pop("quote", None))
self.unique = kw.pop("unique", False)
_column_flag = kw.pop("_column_flag", False)
if "info" in kw:
self.info = kw.pop("info")
# TODO: consider "table" argument being public, but for
# the purpose of the fix here, it starts as private.
if "_table" in kw:
table = kw.pop("_table")
self._validate_dialect_kwargs(kw)
# will call _set_parent() if table-bound column
# objects are present
ColumnCollectionMixin.__init__(self, *columns, _column_flag=_column_flag)
if table is not None:
self._set_parent(table)
|
https://github.com/sqlalchemy/sqlalchemy/issues/4778
|
Traceback (most recent call last):
File "temp.py", line 24, in <module>
Base.metadata.create_all(engine)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/schema.py", line 4287, in create_all
ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 2036, in _run_visitor
conn._run_visitor(visitorcallable, element, **kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1607, in _run_visitor
visitorcallable(self.dialect, self, **kwargs).traverse_single(element)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 781, in visit_metadata
_is_metadata_operation=True,
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 833, in visit_table
self.traverse_single(index)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 131, in traverse_single
return meth(obj, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 862, in visit_index
self.connection.execute(CreateIndex(index))
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 988, in execute
return meth(self, multiparams, params)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 72, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1043, in _execute_ddl
else None,
File "<string>", line 1, in <lambda>
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 462, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/ddl.py", line 29, in _compiler
return dialect.ddl_compiler(dialect, self, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 319, in __init__
self.string = self.process(self.statement, **compile_kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/compiler.py", line 350, in process
return obj._compiler_dispatch(self, **kwargs)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/sql/visitors.py", line 91, in _compiler_dispatch
return meth(self, **kw)
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1943, in visit_create_index
for expr in index.expressions
File "/home/jmabey/Code/bug/env/lib/python3.7/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1943, in <listcomp>
for expr in index.expressions
AttributeError: 'NoneType' object has no attribute 'self_group'
|
AttributeError
|
def create_proxied_attribute(descriptor):
"""Create an QueryableAttribute / user descriptor hybrid.
Returns a new QueryableAttribute type that delegates descriptor
behavior and getattr() to the given descriptor.
"""
# TODO: can move this to descriptor_props if the need for this
# function is removed from ext/hybrid.py
class Proxy(QueryableAttribute):
"""Presents the :class:`.QueryableAttribute` interface as a
proxy on top of a Python descriptor / :class:`.PropComparator`
combination.
"""
def __init__(
self,
class_,
key,
descriptor,
comparator,
adapt_to_entity=None,
doc=None,
original_property=None,
):
self.class_ = class_
self.key = key
self.descriptor = descriptor
self.original_property = original_property
self._comparator = comparator
self._adapt_to_entity = adapt_to_entity
self.__doc__ = doc
_is_internal_proxy = True
@property
def _impl_uses_objects(self):
return (
self.original_property is not None
and getattr(self.class_, self.key).impl.uses_objects
)
@property
def property(self):
return self.comparator.property
@util.memoized_property
def comparator(self):
if util.callable(self._comparator):
self._comparator = self._comparator()
if self._adapt_to_entity:
self._comparator = self._comparator.adapt_to_entity(
self._adapt_to_entity
)
return self._comparator
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(
adapt_to_entity.entity,
self.key,
self.descriptor,
self._comparator,
adapt_to_entity,
)
def __get__(self, instance, owner):
retval = self.descriptor.__get__(instance, owner)
# detect if this is a plain Python @property, which just returns
# itself for class level access. If so, then return us.
# Otherwise, return the object returned by the descriptor.
if retval is self.descriptor and instance is None:
return self
else:
return retval
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
def __getattr__(self, attribute):
"""Delegate __getattr__ to the original descriptor and/or
comparator."""
try:
return getattr(descriptor, attribute)
except AttributeError:
if attribute == "comparator":
raise AttributeError("comparator")
try:
# comparator itself might be unreachable
comparator = self.comparator
except AttributeError:
raise AttributeError(
"Neither %r object nor unconfigured comparator "
"object associated with %s has an attribute %r"
% (type(descriptor).__name__, self, attribute)
)
else:
try:
return getattr(comparator, attribute)
except AttributeError:
raise AttributeError(
"Neither %r object nor %r object "
"associated with %s has an attribute %r"
% (
type(descriptor).__name__,
type(comparator).__name__,
self,
attribute,
)
)
Proxy.__name__ = type(descriptor).__name__ + "Proxy"
util.monkeypatch_proxied_specials(
Proxy, type(descriptor), name="descriptor", from_instance=descriptor
)
return Proxy
|
def create_proxied_attribute(descriptor):
"""Create an QueryableAttribute / user descriptor hybrid.
Returns a new QueryableAttribute type that delegates descriptor
behavior and getattr() to the given descriptor.
"""
# TODO: can move this to descriptor_props if the need for this
# function is removed from ext/hybrid.py
class Proxy(QueryableAttribute):
"""Presents the :class:`.QueryableAttribute` interface as a
proxy on top of a Python descriptor / :class:`.PropComparator`
combination.
"""
def __init__(
self,
class_,
key,
descriptor,
comparator,
adapt_to_entity=None,
doc=None,
original_property=None,
):
self.class_ = class_
self.key = key
self.descriptor = descriptor
self.original_property = original_property
self._comparator = comparator
self._adapt_to_entity = adapt_to_entity
self.__doc__ = doc
_is_internal_proxy = True
@property
def _impl_uses_objects(self):
return (
self.original_property is not None
and getattr(self.class_, self.key).impl.uses_objects
)
@property
def property(self):
return self.comparator.property
@util.memoized_property
def comparator(self):
if util.callable(self._comparator):
self._comparator = self._comparator()
if self._adapt_to_entity:
self._comparator = self._comparator.adapt_to_entity(
self._adapt_to_entity
)
return self._comparator
def adapt_to_entity(self, adapt_to_entity):
return self.__class__(
adapt_to_entity.entity,
self.key,
self.descriptor,
self._comparator,
adapt_to_entity,
)
def __get__(self, instance, owner):
retval = self.descriptor.__get__(instance, owner)
# detect if this is a plain Python @property, which just returns
# itself for class level access. If so, then return us.
# Otherwise, return the object returned by the descriptor.
if retval is self.descriptor and instance is None:
return self
else:
return retval
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
def __getattr__(self, attribute):
"""Delegate __getattr__ to the original descriptor and/or
comparator."""
try:
return getattr(descriptor, attribute)
except AttributeError:
try:
return getattr(self.comparator, attribute)
except AttributeError:
raise AttributeError(
"Neither %r object nor %r object associated with %s "
"has an attribute %r"
% (
type(descriptor).__name__,
type(self.comparator).__name__,
self,
attribute,
)
)
Proxy.__name__ = type(descriptor).__name__ + "Proxy"
util.monkeypatch_proxied_specials(
Proxy, type(descriptor), name="descriptor", from_instance=descriptor
)
return Proxy
|
https://github.com/sqlalchemy/sqlalchemy/issues/4767
|
from depth import Child
Child.parent.__name__
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../site-packages/sqlalchemy/orm/attributes.py", line 306, in __getattr__
return getattr(self.comparator, attribute)
File ".../site-packages/sqlalchemy/orm/attributes.py", line 306, in __getattr__
return getattr(self.comparator, attribute)
...
File ".../site-packages/sqlalchemy/util/langhelpers.py", line 853, in __getattr__
return self._fallback_getattr(key)
File ".../site-packages/sqlalchemy/util/langhelpers.py", line 831, in _fallback_getattr
raise AttributeError(key)
RuntimeError: maximum recursion depth exceeded while calling a Python object
|
RuntimeError
|
def __getattr__(self, attribute):
"""Delegate __getattr__ to the original descriptor and/or
comparator."""
try:
return getattr(descriptor, attribute)
except AttributeError:
if attribute == "comparator":
raise AttributeError("comparator")
try:
# comparator itself might be unreachable
comparator = self.comparator
except AttributeError:
raise AttributeError(
"Neither %r object nor unconfigured comparator "
"object associated with %s has an attribute %r"
% (type(descriptor).__name__, self, attribute)
)
else:
try:
return getattr(comparator, attribute)
except AttributeError:
raise AttributeError(
"Neither %r object nor %r object "
"associated with %s has an attribute %r"
% (
type(descriptor).__name__,
type(comparator).__name__,
self,
attribute,
)
)
|
def __getattr__(self, attribute):
"""Delegate __getattr__ to the original descriptor and/or
comparator."""
try:
return getattr(descriptor, attribute)
except AttributeError:
try:
return getattr(self.comparator, attribute)
except AttributeError:
raise AttributeError(
"Neither %r object nor %r object associated with %s "
"has an attribute %r"
% (
type(descriptor).__name__,
type(self.comparator).__name__,
self,
attribute,
)
)
|
https://github.com/sqlalchemy/sqlalchemy/issues/4767
|
from depth import Child
Child.parent.__name__
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../site-packages/sqlalchemy/orm/attributes.py", line 306, in __getattr__
return getattr(self.comparator, attribute)
File ".../site-packages/sqlalchemy/orm/attributes.py", line 306, in __getattr__
return getattr(self.comparator, attribute)
...
File ".../site-packages/sqlalchemy/util/langhelpers.py", line 853, in __getattr__
return self._fallback_getattr(key)
File ".../site-packages/sqlalchemy/util/langhelpers.py", line 831, in _fallback_getattr
raise AttributeError(key)
RuntimeError: maximum recursion depth exceeded while calling a Python object
|
RuntimeError
|
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("SET TRANSACTION ISOLATION LEVEL %s" % level)
cursor.close()
if level == "SNAPSHOT":
connection.commit()
|
def set_isolation_level(self, connection, level):
level = level.replace("_", " ")
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute("SET TRANSACTION ISOLATION LEVEL %s" % level)
cursor.close()
|
https://github.com/sqlalchemy/sqlalchemy/issues/4536
|
from sqlalchemy import create_engine
engine= create_engine("ConnectionUrl")
conn = engine.connect()
conn = engine.connect()
conn.execution_options(
... isolation_level="SNAPSHOT")
<sqlalchemy.engine.base.Connection object at 0x000002269FE28F28>
conn.execute("SELECT TOP 10 * FROM Staging.CoreRespdet")
Traceback (most recent call last):
File "C:\Users\*****\source\repos\*****\ETLApp_Dev\ETLApp\ETLAppEnv_3_7\lib\site-packages\sqlalchemy\engine\base.py", line 1236, in _execute_context
cursor, statement, parameters, context
File "C:\Users\*****\source\repos\*****\ETLApp_Dev\ETLApp\ETLAppEnv_3_7\lib\site-packages\sqlalchemy\engine\default.py", line 536, in do_execute
cursor.execute(statement, parameters)
pyodbc.ProgrammingError: ('42000', "[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Transaction failed in database 'ETL' because the statement was run under snapshot isolation but the transaction did not start in snapshot isolation. You cannot change the isolation level of the transaction to snapshot after the transaction has started unless the transaction was originally started under snapshot isolation level. (3951) (SQLExecDirectW)")
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\*****\source\repos\*****\ETLApp_Dev\ETLApp\ETLAppEnv_3_7\lib\site-packages\sqlalchemy\engine\base.py", line 974, in execute
return self._execute_text(object_, multiparams, params)
File "C:\Users\*****\source\repos\*****\ETLApp_Dev\ETLApp\ETLAppEnv_3_7\lib\site-packages\sqlalchemy\engine\base.py", line 1147, in _execute_text
parameters,
File "C:\Users\*****\source\repos\*****\ETLApp_Dev\ETLApp\ETLAppEnv_3_7\lib\site-packages\sqlalchemy\engine\base.py", line 1240, in _execute_context
e, statement, parameters, cursor, context
File "C:\Users\*****\source\repos\*****\ETLApp_Dev\ETLApp\ETLAppEnv_3_7\lib\site-packages\sqlalchemy\engine\base.py", line 1458, in _handle_dbapi_exception
util.raise_from_cause(sqlalchemy_exception, exc_info)
File "C:\Users\*****\source\repos\*****\ETLApp_Dev\ETLApp\ETLAppEnv_3_7\lib\site-packages\sqlalchemy\util\compat.py", line 296, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "C:\Users\*****\source\repos\*****\ETLApp_Dev\ETLApp\ETLAppEnv_3_7\lib\site-packages\sqlalchemy\util\compat.py", line 276, in reraise
raise value.with_traceback(tb)
File "C:\Users\*****\source\repos\*****\ETLApp_Dev\ETLApp\ETLAppEnv_3_7\lib\site-packages\sqlalchemy\engine\base.py", line 1236, in _execute_context
cursor, statement, parameters, context
File "C:\Users\*****\source\repos\*****\ETLApp_Dev\ETLApp\ETLAppEnv_3_7\lib\site-packages\sqlalchemy\engine\default.py", line 536, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.ProgrammingError: (pyodbc.ProgrammingError) ('42000', "[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Transaction failed in database 'ETL' because the statement was run under snapshot isolation but the transaction did not start in snapshot isolation. You cannot change the isolation level of the transaction to snapshot after the transaction has started unless the transaction was originally started under snapshot isolation level. (3951) (SQLExecDirectW)") [SQL: 'SELECT TOP 10 * FROM Staging.CoreRespdet'] (Background on this error at: http://sqlalche.me/e/f405)
|
pyodbc.ProgrammingError
|
def should_evaluate_none(self, value):
self.none_as_null = not value
|
def should_evaluate_none(self):
return not self.none_as_null
|
https://github.com/sqlalchemy/sqlalchemy/issues/4485
|
AttributeError Traceback (most recent call last)
/src/local_dev/shell_context.pyc in <module>()
----> 1 test_col.type = test_col.type.evaluates_none()
/home/.venv/local/lib/python2.7/site-packages/sqlalchemy/sql/type_api.pyc in evaluates_none(self)
204 """
205 typ = self.copy()
--> 206 typ.should_evaluate_none = True
207 return typ
208
AttributeError: can't set attribute
|
AttributeError
|
def _generate_path(self, path, attr, wildcard_key, raiseerr=True):
existing_of_type = self._of_type
self._of_type = None
if raiseerr and not path.has_entity:
if isinstance(path, TokenRegistry):
raise sa_exc.ArgumentError(
"Wildcard token cannot be followed by another entity"
)
else:
raise sa_exc.ArgumentError(
"Attribute '%s' of entity '%s' does not "
"refer to a mapped entity" % (path.prop.key, path.parent.entity)
)
if isinstance(attr, util.string_types):
default_token = attr.endswith(_DEFAULT_TOKEN)
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if default_token:
self.propagate_to_loaders = False
if wildcard_key:
attr = "%s:%s" % (wildcard_key, attr)
path = path.token(attr)
self.path = path
return path
if existing_of_type:
ent = inspect(existing_of_type)
else:
ent = path.entity
try:
# use getattr on the class to work around
# synonyms, hybrids, etc.
attr = getattr(ent.class_, attr)
except AttributeError:
if raiseerr:
raise sa_exc.ArgumentError(
"Can't find property named '%s' on the "
"mapped entity %s in this Query. " % (attr, ent)
)
else:
return None
else:
attr = attr.property
path = path[attr]
elif _is_mapped_class(attr):
if not attr.common_parent(path.mapper):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
else:
prop = attr.property
if not prop.parent.common_parent(path.mapper):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = of_type_info = inspect(ac)
existing = path.entity_path[prop].get(self.context, "path_with_polymorphic")
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper,
aliased=True,
_use_mapper_path=True,
_existing_alias=existing,
)
ext_info = inspect(ac)
elif not ext_info.with_polymorphic_mappers:
ext_info = orm_util.AliasedInsp(
ext_info.entity,
ext_info.mapper.base_mapper,
ext_info.selectable,
ext_info.name,
ext_info.with_polymorphic_mappers or [ext_info.mapper],
ext_info.polymorphic_on,
ext_info._base_alias,
ext_info._use_mapper_path,
ext_info._adapt_on_names,
ext_info.represents_outer_join,
)
path.entity_path[prop].set(self.context, "path_with_polymorphic", ext_info)
# the path here will go into the context dictionary and
# needs to match up to how the class graph is traversed.
# so we can't put an AliasedInsp in the path here, needs
# to be the base mapper.
path = path[prop][ext_info.mapper]
# but, we need to know what the original of_type()
# argument is for cache key purposes. so....store that too.
# it might be better for "path" to really represent,
# "the path", but trying to keep the impact of the cache
# key feature localized for now
self._of_type = of_type_info
else:
path = path[prop]
if path.has_entity:
path = path.entity_path
self.path = path
return path
|
def _generate_path(self, path, attr, wildcard_key, raiseerr=True):
self._of_type = None
if raiseerr and not path.has_entity:
if isinstance(path, TokenRegistry):
raise sa_exc.ArgumentError(
"Wildcard token cannot be followed by another entity"
)
else:
raise sa_exc.ArgumentError(
"Attribute '%s' of entity '%s' does not "
"refer to a mapped entity" % (path.prop.key, path.parent.entity)
)
if isinstance(attr, util.string_types):
default_token = attr.endswith(_DEFAULT_TOKEN)
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if default_token:
self.propagate_to_loaders = False
if wildcard_key:
attr = "%s:%s" % (wildcard_key, attr)
path = path.token(attr)
self.path = path
return path
try:
# use getattr on the class to work around
# synonyms, hybrids, etc.
attr = getattr(path.entity.class_, attr)
except AttributeError:
if raiseerr:
raise sa_exc.ArgumentError(
"Can't find property named '%s' on the "
"mapped entity %s in this Query. " % (attr, path.entity)
)
else:
return None
else:
attr = attr.property
path = path[attr]
elif _is_mapped_class(attr):
if not attr.common_parent(path.mapper):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
else:
prop = attr.property
if not prop.parent.common_parent(path.mapper):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = of_type_info = inspect(ac)
existing = path.entity_path[prop].get(self.context, "path_with_polymorphic")
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper,
aliased=True,
_use_mapper_path=True,
_existing_alias=existing,
)
ext_info = inspect(ac)
elif not ext_info.with_polymorphic_mappers:
ext_info = orm_util.AliasedInsp(
ext_info.entity,
ext_info.mapper.base_mapper,
ext_info.selectable,
ext_info.name,
ext_info.with_polymorphic_mappers or [ext_info.mapper],
ext_info.polymorphic_on,
ext_info._base_alias,
ext_info._use_mapper_path,
ext_info._adapt_on_names,
ext_info.represents_outer_join,
)
path.entity_path[prop].set(self.context, "path_with_polymorphic", ext_info)
# the path here will go into the context dictionary and
# needs to match up to how the class graph is traversed.
# so we can't put an AliasedInsp in the path here, needs
# to be the base mapper.
path = path[prop][ext_info.mapper]
# but, we need to know what the original of_type()
# argument is for cache key purposes. so....store that too.
# it might be better for "path" to really represent,
# "the path", but trying to keep the impact of the cache
# key feature localized for now
self._of_type = of_type_info
else:
path = path[prop]
if path.has_entity:
path = path.entity_path
self.path = path
return path
|
https://github.com/sqlalchemy/sqlalchemy/issues/4400
|
root@ac36af00d6f9:/service# python main.py
Traceback (most recent call last):
File "/usr/local/lib/python3.5/site-packages/sqlalchemy/orm/strategy_options.py", line 194, in _generate_path
attr = getattr(path.entity.class_, attr)
AttributeError: type object 'Parent' has no attribute 'ext'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "main.py", line 57, in <module>
contains_eager(Main.child1.of_type(child1), alias=child1).load_only('ext')
File "/usr/local/lib/python3.5/site-packages/sqlalchemy/orm/query.py", line 1449, in options
return self._options(False, *args)
File "<string>", line 2, in _options
File "/usr/local/lib/python3.5/site-packages/sqlalchemy/orm/base.py", line 201, in generate
fn(self, *args[1:], **kw)
File "/usr/local/lib/python3.5/site-packages/sqlalchemy/orm/query.py", line 1468, in _options
opt.process_query(self)
File "/usr/local/lib/python3.5/site-packages/sqlalchemy/orm/strategy_options.py", line 151, in process_query
self._process(query, True)
File "/usr/local/lib/python3.5/site-packages/sqlalchemy/orm/strategy_options.py", line 484, in _process
query._current_path, query._attributes, raiseerr)
File "/usr/local/lib/python3.5/site-packages/sqlalchemy/orm/strategy_options.py", line 655, in _bind_loader
loader.path, token, None, raiseerr):
File "/usr/local/lib/python3.5/site-packages/sqlalchemy/orm/strategy_options.py", line 200, in _generate_path
attr, path.entity)
sqlalchemy.exc.ArgumentError: Can't find property named 'ext' on the mapped entity Mapper|Parent|parent in this Query.
|
AttributeError
|
def parse_path(path):
"""Parse a dataset's identifier or path into its parts
Parameters
----------
path : str or path-like object
The path to be parsed.
Returns
-------
ParsedPath or UnparsedPath
Notes
-----
When legacy GDAL filenames are encountered, they will be returned
in a UnparsedPath.
"""
if isinstance(path, Path):
return path
# Windows drive letters (e.g. "C:\") confuse `urlparse` as they look like
# URL schemes
elif sys.platform == "win32" and re.match("^[a-zA-Z]\\:", path):
return UnparsedPath(path)
elif path.startswith("/vsi"):
return UnparsedPath(path)
elif re.match("^[a-z\\+]*://", path):
parts = urlparse(path)
# if the scheme is not one of Rasterio's supported schemes, we
# return an UnparsedPath.
if parts.scheme and not all(p in SCHEMES for p in parts.scheme.split("+")):
return UnparsedPath(path)
else:
return ParsedPath.from_uri(path)
else:
return UnparsedPath(path)
|
def parse_path(path):
"""Parse a dataset's identifier or path into its parts
Parameters
----------
path : str or path-like object
The path to be parsed.
Returns
-------
ParsedPath or UnparsedPath
Notes
-----
When legacy GDAL filenames are encountered, they will be returned
in a UnparsedPath.
"""
if isinstance(path, Path):
return path
# Windows drive letters (e.g. "C:\") confuse `urlparse` as they look like
# URL schemes
elif sys.platform == "win32" and re.match("^[a-zA-Z]\\:", path):
return UnparsedPath(path)
elif path.startswith("/vsi"):
return UnparsedPath(path)
else:
parts = urlparse(path)
# if the scheme is not one of Rasterio's supported schemes, we
# return an UnparsedPath.
if parts.scheme and not all(p in SCHEMES for p in parts.scheme.split("+")):
return UnparsedPath(path)
else:
return ParsedPath.from_uri(path)
|
https://github.com/Toblerity/Fiona/issues/742
|
mkdir !test
copy test.geojson !test\test.geojson
python
Python 3.6.5 |Anaconda, Inc.| (default, Mar 29 2018, 13:32:41) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
import fiona
with fiona.open("!test\test.geojson") as src:
... src.crs
...
Traceback (most recent call last):
File "fiona\_shim.pyx", line 73, in fiona._shim.gdal_open_vector
File "fiona\_err.pyx", line 270, in fiona._err.exc_wrap_pointer
fiona._err.CPLE_OpenFailedError: 'test est.geojson' does not exist in the file system, and is not recognized as a supported dataset name.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "Q:\projects\Mempr\Wind_Mapping\scripts\windsites\venv_36\lib\site-packages\fiona\env.py", line 406, in wrapper
return f(*args, **kwargs)
File "Q:\projects\Mempr\Wind_Mapping\scripts\windsites\venv_36\lib\site-packages\fiona\__init__.py", line 257, in open
layer=layer, enabled_drivers=enabled_drivers, **kwargs)
File "Q:\projects\Mempr\Wind_Mapping\scripts\windsites\venv_36\lib\site-packages\fiona\collection.py", line 159, in __init__
self.session.start(self, **kwargs)
File "fiona\ogrext.pyx", line 484, in fiona.ogrext.Session.start
File "fiona\_shim.pyx", line 80, in fiona._shim.gdal_open_vector
fiona.errors.DriverError: 'test est.geojson' does not exist in the file system, and is not recognized as a supported dataset name.
|
fiona._err.CPLE_OpenFailedError
|
def __init__(
self,
path,
mode="r",
driver=None,
schema=None,
crs=None,
encoding=None,
layer=None,
vsi=None,
archive=None,
enabled_drivers=None,
crs_wkt=None,
**kwargs,
):
"""The required ``path`` is the absolute or relative path to
a file, such as '/data/test_uk.shp'. In ``mode`` 'r', data can
be read only. In ``mode`` 'a', data can be appended to a file.
In ``mode`` 'w', data overwrites the existing contents of
a file.
In ``mode`` 'w', an OGR ``driver`` name and a ``schema`` are
required. A Proj4 ``crs`` string is recommended. If both ``crs``
and ``crs_wkt`` keyword arguments are passed, the latter will
trump the former.
In 'w' mode, kwargs will be mapped to OGR layer creation
options.
"""
if not isinstance(path, string_types):
raise TypeError("invalid path: %r" % path)
if not isinstance(mode, string_types) or mode not in ("r", "w", "a"):
raise TypeError("invalid mode: %r" % mode)
if driver and not isinstance(driver, string_types):
raise TypeError("invalid driver: %r" % driver)
if schema and not hasattr(schema, "get"):
raise TypeError("invalid schema: %r" % schema)
if crs and not isinstance(crs, compat.DICT_TYPES + string_types):
raise TypeError("invalid crs: %r" % crs)
if crs_wkt and not isinstance(crs_wkt, string_types):
raise TypeError("invalid crs_wkt: %r" % crs_wkt)
if encoding and not isinstance(encoding, string_types):
raise TypeError("invalid encoding: %r" % encoding)
if layer and not isinstance(layer, tuple(list(string_types) + [int])):
raise TypeError("invalid name: %r" % layer)
if vsi:
if not isinstance(vsi, string_types) or not vfs.valid_vsi(vsi):
raise TypeError("invalid vsi: %r" % vsi)
if archive and not isinstance(archive, string_types):
raise TypeError("invalid archive: %r" % archive)
# Check GDAL version against drivers
if driver == "GPKG" and get_gdal_version_num() < calc_gdal_version_num(1, 11, 0):
raise DriverError(
"GPKG driver requires GDAL 1.11.0, fiona was compiled against: {}".format(
get_gdal_release_name()
)
)
self.session = None
self.iterator = None
self._len = 0
self._bounds = None
self._driver = None
self._schema = None
self._crs = None
self._crs_wkt = None
self.env = None
self.enabled_drivers = enabled_drivers
self.path = vfs.vsi_path(path, vsi, archive)
if mode == "w":
if layer and not isinstance(layer, string_types):
raise ValueError("in 'w' mode, layer names must be strings")
if driver == "GeoJSON":
if layer is not None:
raise ValueError("the GeoJSON format does not have layers")
self.name = "OgrGeoJSON"
# TODO: raise ValueError as above for other single-layer formats.
else:
self.name = layer or os.path.basename(os.path.splitext(path)[0])
else:
if layer in (0, None):
self.name = 0
else:
self.name = layer or os.path.basename(os.path.splitext(path)[0])
self.mode = mode
if self.mode == "w":
if driver == "Shapefile":
driver = "ESRI Shapefile"
if not driver:
raise DriverError("no driver")
elif driver not in supported_drivers:
raise DriverError("unsupported driver: %r" % driver)
elif self.mode not in supported_drivers[driver]:
raise DriverError("unsupported mode: %r" % self.mode)
self._driver = driver
if not schema:
raise SchemaError("no schema")
elif "properties" not in schema:
raise SchemaError("schema lacks: properties")
elif "geometry" not in schema:
raise SchemaError("schema lacks: geometry")
self._schema = schema
if crs_wkt:
self._crs_wkt = crs_wkt
elif crs:
if "init" in crs or "proj" in crs or "epsg" in crs.lower():
self._crs = crs
else:
raise CRSError("crs lacks init or proj parameter")
if driver_count == 0:
# create a local manager and enter
self.env = AWSGDALEnv()
else:
self.env = AWSGDALEnv()
self.env.__enter__()
self._driver = driver
kwargs.update(encoding=encoding or "")
self.encoding = encoding
try:
if self.mode == "r":
self.session = Session()
self.session.start(self, **kwargs)
elif self.mode in ("a", "w"):
self.session = WritingSession()
self.session.start(self, **kwargs)
except IOError:
self.session = None
raise
if self.session is not None:
self.guard_driver_mode()
if not self.encoding:
self.encoding = self.session.get_fileencoding().lower()
|
def __init__(
self,
path,
mode="r",
driver=None,
schema=None,
crs=None,
encoding=None,
layer=None,
vsi=None,
archive=None,
enabled_drivers=None,
crs_wkt=None,
**kwargs,
):
"""The required ``path`` is the absolute or relative path to
a file, such as '/data/test_uk.shp'. In ``mode`` 'r', data can
be read only. In ``mode`` 'a', data can be appended to a file.
In ``mode`` 'w', data overwrites the existing contents of
a file.
In ``mode`` 'w', an OGR ``driver`` name and a ``schema`` are
required. A Proj4 ``crs`` string is recommended. If both ``crs``
and ``crs_wkt`` keyword arguments are passed, the latter will
trump the former.
In 'w' mode, kwargs will be mapped to OGR layer creation
options.
"""
if not isinstance(path, string_types):
raise TypeError("invalid path: %r" % path)
if not isinstance(mode, string_types) or mode not in ("r", "w", "a"):
raise TypeError("invalid mode: %r" % mode)
if driver and not isinstance(driver, string_types):
raise TypeError("invalid driver: %r" % driver)
if schema and not hasattr(schema, "get"):
raise TypeError("invalid schema: %r" % schema)
if crs and not isinstance(crs, compat.DICT_TYPES + string_types):
raise TypeError("invalid crs: %r" % crs)
if crs_wkt and not isinstance(crs_wkt, string_types):
raise TypeError("invalid crs_wkt: %r" % crs_wkt)
if encoding and not isinstance(encoding, string_types):
raise TypeError("invalid encoding: %r" % encoding)
if layer and not isinstance(layer, tuple(list(string_types) + [int])):
raise TypeError("invalid name: %r" % layer)
if vsi:
if not isinstance(vsi, string_types) or not vfs.valid_vsi(vsi):
raise TypeError("invalid vsi: %r" % vsi)
if archive and not isinstance(archive, string_types):
raise TypeError("invalid archive: %r" % archive)
# Check GDAL version against drivers
if driver == "GPKG" and get_gdal_version_num() < calc_gdal_version_num(1, 11, 0):
raise DriverError(
"GPKG driver requires GDAL 1.11.0, fiona was compiled against: {}".format(
get_gdal_release_name()
)
)
self.session = None
self.iterator = None
self._len = 0
self._bounds = None
self._driver = None
self._schema = None
self._crs = None
self._crs_wkt = None
self.env = None
self.enabled_drivers = enabled_drivers
self.path = vfs.vsi_path(path, vsi, archive)
if mode == "w":
if layer and not isinstance(layer, string_types):
raise ValueError("in 'w' mode, layer names must be strings")
if driver == "GeoJSON":
if layer is not None:
raise ValueError("the GeoJSON format does not have layers")
self.name = "OgrGeoJSON"
# TODO: raise ValueError as above for other single-layer formats.
else:
self.name = layer or os.path.basename(os.path.splitext(path)[0])
else:
if layer in (0, None):
self.name = 0
else:
self.name = layer or os.path.basename(os.path.splitext(path)[0])
self.mode = mode
if self.mode == "w":
if driver == "Shapefile":
driver = "ESRI Shapefile"
if not driver:
raise DriverError("no driver")
elif driver not in supported_drivers:
raise DriverError("unsupported driver: %r" % driver)
elif self.mode not in supported_drivers[driver]:
raise DriverError("unsupported mode: %r" % self.mode)
self._driver = driver
if not schema:
raise SchemaError("no schema")
elif "properties" not in schema:
raise SchemaError("schema lacks: properties")
elif "geometry" not in schema:
raise SchemaError("schema lacks: geometry")
self._schema = schema
if crs_wkt:
self._crs_wkt = crs_wkt
elif crs:
if "init" in crs or "proj" in crs or "epsg" in crs.lower():
self._crs = crs
else:
raise CRSError("crs lacks init or proj parameter")
if driver_count == 0:
# create a local manager and enter
self.env = AWSGDALEnv()
else:
self.env = AWSGDALEnv()
self.env.__enter__()
self._driver = driver
self.encoding = encoding
try:
if self.mode == "r":
self.session = Session()
self.session.start(self)
elif self.mode in ("a", "w"):
self.session = WritingSession()
self.session.start(self, **kwargs)
except IOError:
self.session = None
raise
if self.session is not None:
self.guard_driver_mode()
if not self.encoding:
self.encoding = self.session.get_fileencoding().lower()
|
https://github.com/Toblerity/Fiona/issues/512
|
name = 'Rhône-Alpes'
name
'Rhône-Alpes'
import json
json.dumps(name)
'"Rh\\u00f4ne-Alpes"'
name_utf8 = name.encode('utf-8')
name_utf8
b'Rh\xc3\xb4ne-Alpes'
name_utf8.decode('Windows-1252')
'Rhône-Alpes'
name_utf8.decode('ascii')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 2: ordinal not in range(128)
|
UnicodeDecodeError
|
def open(
path,
mode="r",
driver=None,
schema=None,
crs=None,
encoding=None,
layer=None,
vfs=None,
enabled_drivers=None,
crs_wkt=None,
**kwargs,
):
"""Open file at ``path`` in ``mode`` "r" (read), "a" (append), or
"w" (write) and return a ``Collection`` object.
In write mode, a driver name such as "ESRI Shapefile" or "GPX" (see
OGR docs or ``ogr2ogr --help`` on the command line) and a schema
mapping such as:
{'geometry': 'Point',
'properties': [('class', 'int'), ('label', 'str'),
('value', 'float')]}
must be provided. If a particular ordering of properties ("fields"
in GIS parlance) in the written file is desired, a list of (key,
value) pairs as above or an ordered dict is required. If no ordering
is needed, a standard dict will suffice.
A coordinate reference system for collections in write mode can be
defined by the ``crs`` parameter. It takes Proj4 style mappings like
{'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',
'no_defs': True}
short hand strings like
EPSG:4326
or WKT representations of coordinate reference systems.
The drivers used by Fiona will try to detect the encoding of data
files. If they fail, you may provide the proper ``encoding``, such
as 'Windows-1252' for the Natural Earth datasets.
When the provided path is to a file containing multiple named layers
of data, a layer can be singled out by ``layer``.
A virtual filesystem can be specified. The ``vfs`` parameter may be
an Apache Commons VFS style string beginning with "zip://" or
"tar://"". In this case, the ``path`` must be an absolute path
within that container.
The drivers enabled for opening datasets may be restricted to those
listed in the ``enabled_drivers`` parameter. This and the ``driver``
parameter afford much control over opening of files.
# Trying only the GeoJSON driver when opening to read, the
# following raises ``DataIOError``:
fiona.open('example.shp', driver='GeoJSON')
# Trying first the GeoJSON driver, then the Shapefile driver,
# the following succeeds:
fiona.open(
'example.shp', enabled_drivers=['GeoJSON', 'ESRI Shapefile'])
"""
# Parse the vfs into a vsi and an archive path.
path, vsi, archive = parse_paths(path, vfs)
if mode in ("a", "r"):
if archive:
if not os.path.exists(archive):
raise IOError("no such archive file: %r" % archive)
elif path != "-" and not os.path.exists(path):
raise IOError("no such file or directory: %r" % path)
c = Collection(
path,
mode,
driver=driver,
encoding=encoding,
layer=layer,
vsi=vsi,
archive=archive,
enabled_drivers=enabled_drivers,
**kwargs,
)
elif mode == "w":
if schema:
# Make an ordered dict of schema properties.
this_schema = schema.copy()
this_schema["properties"] = OrderedDict(schema["properties"])
else:
this_schema = None
c = Collection(
path,
mode,
crs=crs,
driver=driver,
schema=this_schema,
encoding=encoding,
layer=layer,
vsi=vsi,
archive=archive,
enabled_drivers=enabled_drivers,
crs_wkt=crs_wkt,
**kwargs,
)
else:
raise ValueError("mode string must be one of 'r', 'w', or 'a', not %s" % mode)
return c
|
def open(
path,
mode="r",
driver=None,
schema=None,
crs=None,
encoding=None,
layer=None,
vfs=None,
enabled_drivers=None,
crs_wkt=None,
):
"""Open file at ``path`` in ``mode`` "r" (read), "a" (append), or
"w" (write) and return a ``Collection`` object.
In write mode, a driver name such as "ESRI Shapefile" or "GPX" (see
OGR docs or ``ogr2ogr --help`` on the command line) and a schema
mapping such as:
{'geometry': 'Point',
'properties': [('class', 'int'), ('label', 'str'),
('value', 'float')]}
must be provided. If a particular ordering of properties ("fields"
in GIS parlance) in the written file is desired, a list of (key,
value) pairs as above or an ordered dict is required. If no ordering
is needed, a standard dict will suffice.
A coordinate reference system for collections in write mode can be
defined by the ``crs`` parameter. It takes Proj4 style mappings like
{'proj': 'longlat', 'ellps': 'WGS84', 'datum': 'WGS84',
'no_defs': True}
short hand strings like
EPSG:4326
or WKT representations of coordinate reference systems.
The drivers used by Fiona will try to detect the encoding of data
files. If they fail, you may provide the proper ``encoding``, such
as 'Windows-1252' for the Natural Earth datasets.
When the provided path is to a file containing multiple named layers
of data, a layer can be singled out by ``layer``.
A virtual filesystem can be specified. The ``vfs`` parameter may be
an Apache Commons VFS style string beginning with "zip://" or
"tar://"". In this case, the ``path`` must be an absolute path
within that container.
The drivers enabled for opening datasets may be restricted to those
listed in the ``enabled_drivers`` parameter. This and the ``driver``
parameter afford much control over opening of files.
# Trying only the GeoJSON driver when opening to read, the
# following raises ``DataIOError``:
fiona.open('example.shp', driver='GeoJSON')
# Trying first the GeoJSON driver, then the Shapefile driver,
# the following succeeds:
fiona.open(
'example.shp', enabled_drivers=['GeoJSON', 'ESRI Shapefile'])
"""
# Parse the vfs into a vsi and an archive path.
path, vsi, archive = parse_paths(path, vfs)
if mode in ("a", "r"):
if archive:
if not os.path.exists(archive):
raise IOError("no such archive file: %r" % archive)
elif path != "-" and not os.path.exists(path):
raise IOError("no such file or directory: %r" % path)
c = Collection(
path,
mode,
driver=driver,
encoding=encoding,
layer=layer,
vsi=vsi,
archive=archive,
enabled_drivers=enabled_drivers,
)
elif mode == "w":
if schema:
# Make an ordered dict of schema properties.
this_schema = schema.copy()
this_schema["properties"] = OrderedDict(schema["properties"])
else:
this_schema = None
c = Collection(
path,
mode,
crs=crs,
driver=driver,
schema=this_schema,
encoding=encoding,
layer=layer,
vsi=vsi,
archive=archive,
enabled_drivers=enabled_drivers,
crs_wkt=crs_wkt,
)
else:
raise ValueError("mode string must be one of 'r', 'w', or 'a', not %s" % mode)
return c
|
https://github.com/Toblerity/Fiona/issues/388
|
WARNING:Fiona:CPLE_NotSupported in dataset /app/tmp/filegdb-bug/test.gdb does not support layer creation option ENCODING
ERROR:Fiona:CPLE_AppDefined in Failed at writing Row to Table in CreateFeature. (The spatial index grid size is invalid.)
ERROR:fio:Exception caught during processing
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/fiona/fio/load.py", line 103, in load
dst.writerecords(source)
File "/usr/local/lib/python2.7/site-packages/fiona/collection.py", line 327, in writerecords
self.session.writerecs(records, self)
File "fiona/ogrext.pyx", line 1038, in fiona.ogrext.WritingSession.writerecs (fiona/ogrext2.c:17532)
RuntimeError: Failed to write record: {u'geometry': {u'type': u'Polygon', u'coordinates': [[[127.29999999999998, -29.01], [119.49, -32.28], [118.43, -27.1], [125.9, -21.44], [127.29999999999998, -29.01]]]}, u'type': u'Feature', u'id': u'1', u'properties': {}}
Aborted!
|
RuntimeError
|
def _git_toplevel(path):
try:
with open(os.devnull, "wb") as devnull:
out = subprocess.check_output(
["git", "rev-parse", "--show-toplevel"],
cwd=(path or "."),
universal_newlines=True,
stderr=devnull,
)
trace("find files toplevel", out)
return os.path.normcase(os.path.realpath(out.strip()))
except subprocess.CalledProcessError:
# git returned error, we are not in a git repo
return None
except OSError:
# git command not found, probably
return None
|
def _git_toplevel(path):
try:
with open(os.devnull, "wb") as devnull:
out = subprocess.check_output(
["git", "rev-parse", "--show-toplevel"],
cwd=(path or "."),
universal_newlines=True,
stderr=devnull,
)
return os.path.normcase(os.path.realpath(out.strip()))
except subprocess.CalledProcessError:
# git returned error, we are not in a git repo
return None
except OSError:
# git command not found, probably
return None
|
https://github.com/pypa/setuptools_scm/issues/298
|
/usr/lib64/python3.7/distutils/dist.py:274: UserWarning: Unknown distribution option: 'test_requires'
warnings.warn(msg)
/home/laura/dev/libs/rio-redis/.eggs/setuptools_scm-3.0.6-py3.7.egg/setuptools_scm/version.py:191: UserWarning: meta invoked without explicit configuration, will use defaults where required.
"meta invoked without explicit configuration,"
running egg_info
writing rio_redis.egg-info/PKG-INFO
writing dependency_links to rio_redis.egg-info/dependency_links.txt
writing requirements to rio_redis.egg-info/requires.txt
writing top-level names to rio_redis.egg-info/top_level.txt
fatal: Not a valid object name
Traceback (most recent call last):
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 2287, in next
tarinfo = self.tarinfo.fromtarfile(self)
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 1093, in fromtarfile
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 1029, in frombuf
raise EmptyHeaderError("empty header")
tarfile.EmptyHeaderError: empty header
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "setup.py", line 39, in <module>
python_requires=">=3.6.0",
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/__init__.py", line 131, in setup
return distutils.core.setup(**attrs)
File "/usr/lib64/python3.7/distutils/core.py", line 148, in setup
dist.run_commands()
File "/usr/lib64/python3.7/distutils/dist.py", line 966, in run_commands
self.run_command(cmd)
File "/usr/lib64/python3.7/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/command/egg_info.py", line 278, in run
self.find_sources()
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/command/egg_info.py", line 293, in find_sources
mm.run()
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/command/egg_info.py", line 524, in run
self.add_defaults()
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/command/egg_info.py", line 563, in add_defaults
rcfiles = list(walk_revctrl())
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/command/sdist.py", line 20, in walk_revctrl
for item in ep.load()(dirname):
File "/home/laura/dev/libs/rio-redis/.eggs/setuptools_scm-3.0.6-py3.7.egg/setuptools_scm/integration.py", line 33, in find_files
res = command(path)
File "/home/laura/dev/libs/rio-redis/.eggs/setuptools_scm-3.0.6-py3.7.egg/setuptools_scm/file_finder_git.py", line 47, in git_find_files
git_files, git_dirs = _git_ls_files_and_dirs(toplevel)
File "/home/laura/dev/libs/rio-redis/.eggs/setuptools_scm-3.0.6-py3.7.egg/setuptools_scm/file_finder_git.py", line 31, in _git_ls_files_and_dirs
tf = tarfile.open(fileobj=proc.stdout, mode="r|*")
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 1601, in open
t = cls(name, filemode, stream, **kwargs)
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 1482, in __init__
self.firstmember = self.next()
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 2302, in next
raise ReadError("empty file")
tarfile.ReadError: empty file
|
tarfile.EmptyHeaderError
|
def _git_ls_files_and_dirs(toplevel):
# use git archive instead of git ls-file to honor
# export-ignore git attribute
cmd = ["git", "archive", "--prefix", toplevel + os.path.sep, "HEAD"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=toplevel)
try:
return _git_interpret_archive(proc.stdout, toplevel)
except Exception:
if proc.wait() != 0:
log.exception("listing git files failed - pretending there aren't any")
return (), ()
|
def _git_ls_files_and_dirs(toplevel):
# use git archive instead of git ls-file to honor
# export-ignore git attribute
cmd = ["git", "archive", "--prefix", toplevel + os.path.sep, "HEAD"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=toplevel)
tf = tarfile.open(fileobj=proc.stdout, mode="r|*")
git_files = set()
git_dirs = {toplevel}
for member in tf.getmembers():
name = os.path.normcase(member.name).replace("/", os.path.sep)
if member.type == tarfile.DIRTYPE:
git_dirs.add(name)
else:
git_files.add(name)
return git_files, git_dirs
|
https://github.com/pypa/setuptools_scm/issues/298
|
/usr/lib64/python3.7/distutils/dist.py:274: UserWarning: Unknown distribution option: 'test_requires'
warnings.warn(msg)
/home/laura/dev/libs/rio-redis/.eggs/setuptools_scm-3.0.6-py3.7.egg/setuptools_scm/version.py:191: UserWarning: meta invoked without explicit configuration, will use defaults where required.
"meta invoked without explicit configuration,"
running egg_info
writing rio_redis.egg-info/PKG-INFO
writing dependency_links to rio_redis.egg-info/dependency_links.txt
writing requirements to rio_redis.egg-info/requires.txt
writing top-level names to rio_redis.egg-info/top_level.txt
fatal: Not a valid object name
Traceback (most recent call last):
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 2287, in next
tarinfo = self.tarinfo.fromtarfile(self)
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 1093, in fromtarfile
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 1029, in frombuf
raise EmptyHeaderError("empty header")
tarfile.EmptyHeaderError: empty header
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "setup.py", line 39, in <module>
python_requires=">=3.6.0",
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/__init__.py", line 131, in setup
return distutils.core.setup(**attrs)
File "/usr/lib64/python3.7/distutils/core.py", line 148, in setup
dist.run_commands()
File "/usr/lib64/python3.7/distutils/dist.py", line 966, in run_commands
self.run_command(cmd)
File "/usr/lib64/python3.7/distutils/dist.py", line 985, in run_command
cmd_obj.run()
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/command/egg_info.py", line 278, in run
self.find_sources()
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/command/egg_info.py", line 293, in find_sources
mm.run()
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/command/egg_info.py", line 524, in run
self.add_defaults()
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/command/egg_info.py", line 563, in add_defaults
rcfiles = list(walk_revctrl())
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/site-packages/setuptools/command/sdist.py", line 20, in walk_revctrl
for item in ep.load()(dirname):
File "/home/laura/dev/libs/rio-redis/.eggs/setuptools_scm-3.0.6-py3.7.egg/setuptools_scm/integration.py", line 33, in find_files
res = command(path)
File "/home/laura/dev/libs/rio-redis/.eggs/setuptools_scm-3.0.6-py3.7.egg/setuptools_scm/file_finder_git.py", line 47, in git_find_files
git_files, git_dirs = _git_ls_files_and_dirs(toplevel)
File "/home/laura/dev/libs/rio-redis/.eggs/setuptools_scm-3.0.6-py3.7.egg/setuptools_scm/file_finder_git.py", line 31, in _git_ls_files_and_dirs
tf = tarfile.open(fileobj=proc.stdout, mode="r|*")
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 1601, in open
t = cls(name, filemode, stream, **kwargs)
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 1482, in __init__
self.firstmember = self.next()
File "/home/laura/.local/share/virtualenvs/rio-redis-wQCA5cC-/lib/python3.7/tarfile.py", line 2302, in next
raise ReadError("empty file")
tarfile.ReadError: empty file
|
tarfile.EmptyHeaderError
|
def parse(root, describe_command=DEFAULT_DESCRIBE, pre_parse=warn_on_shallow):
"""
:param pre_parse: experimental pre_parse action, may change at any time
"""
if not has_command("git"):
return
wd = GitWorkdir.from_potential_worktree(root)
if wd is None:
return
if pre_parse:
pre_parse(wd)
out, err, ret = wd.do_ex(describe_command)
if ret:
# If 'git describe' failed, try to get the information otherwise.
rev_node = wd.node()
dirty = wd.is_dirty()
if rev_node is None:
return meta("0.0", distance=0, dirty=dirty)
return meta(
"0.0",
distance=wd.count_all_nodes(),
node="g" + rev_node,
dirty=dirty,
branch=wd.get_branch(),
)
else:
tag, number, node, dirty = _git_parse_describe(out)
branch = wd.get_branch()
if number:
return meta(tag, distance=number, node=node, dirty=dirty, branch=branch)
else:
return meta(tag, node=node, dirty=dirty, branch=branch)
|
def parse(root, describe_command=DEFAULT_DESCRIBE, pre_parse=warn_on_shallow):
"""
:param pre_parse: experimental pre_parse action, may change at any time
"""
if not has_command("git"):
return
wd = GitWorkdir.from_potential_worktree(root)
if wd is None:
return
if pre_parse:
pre_parse(wd)
out, err, ret = wd.do_ex(describe_command)
if ret:
# If 'git describe' failed, try to get the information otherwise.
rev_node = wd.node()
dirty = wd.is_dirty()
if rev_node is None:
return meta("0.0", distance=0, dirty=dirty)
return meta(
"0.0",
distance=wd.count_all_nodes(),
node="g" + rev_node,
dirty=dirty,
branch=wd.get_branch(),
)
# 'out' looks e.g. like 'v1.5.0-0-g4060507' or
# 'v1.15.1rc1-37-g9bd1298-dirty'.
if out.endswith("-dirty"):
dirty = True
out = out[:-6]
else:
dirty = False
tag, number, node = out.rsplit("-", 2)
number = int(number)
branch = wd.get_branch()
if number:
return meta(tag, distance=number, node=node, dirty=dirty, branch=branch)
else:
return meta(tag, node=node, dirty=dirty, branch=branch)
|
https://github.com/pypa/setuptools_scm/issues/266
|
looking for ep setuptools_scm.parse_scm_fallback .
root '/Users/twall/plex/rd'
looking for ep setuptools_scm.parse_scm /Users/twall/plex/rd
found ep .git = setuptools_scm.git:parse
cmd 'git rev-parse --show-toplevel'
out b'/Users/twall/plex/rd\n'
real root /Users/twall/plex/rd
cmd 'git describe --dirty --tags --long --match *.*'
out b'3.3.1-rc26-0-g9df187b\n'
cmd 'git rev-parse --abbrev-ref HEAD'
out b'pyplex\n'
tag 3.3.1-rc26
version <LegacyVersion('rc26')>
version None
Traceback (most recent call last):
File "setup.py", line 46, in <module>
entry_points = {
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools/__init__.py", line 129, in setup
return distutils.core.setup(**attrs)
File "/anaconda3/lib/python3.6/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools/dist.py", line 370, in __init__
k: v for k, v in attrs.items()
File "/anaconda3/lib/python3.6/distutils/dist.py", line 281, in __init__
self.finalize_options()
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools/dist.py", line 529, in finalize_options
ep.load()(self, ep.name, value)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/integration.py", line 22, in version_keyword
dist.metadata.version = get_version(**value)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/__init__.py", line 119, in get_version
parsed_version = _do_parse(root, parse)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/__init__.py", line 83, in _do_parse
version = version_from_scm(root) or _version_from_entrypoint(
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/__init__.py", line 31, in version_from_scm
return _version_from_entrypoint(root, 'setuptools_scm.parse_scm')
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/__init__.py", line 36, in _version_from_entrypoint
version = ep.load()(root)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/git.py", line 128, in parse
return meta(tag, node=node, dirty=dirty, branch=branch)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/version.py", line 135, in meta
assert tag is not None, 'cant parse version %s' % tag
AssertionError: cant parse version None
|
AssertionError
|
def tag_to_version(tag):
"""
take a tag that might be prefixed with a keyword and return only the version part
"""
trace("tag", tag)
if "+" in tag:
warnings.warn("tag %r will be stripped of the local component" % tag)
tag = tag.split("+")[0]
# lstrip the v because of py2/py3 differences in setuptools
# also required for old versions of setuptools
prefix_match = TAG_PREFIX.match(tag)
if prefix_match is not None:
version = prefix_match.group(1)
else:
version = tag
trace("version pre parse", version)
if VERSION_CLASS is None:
return version
version = pkg_parse_version(version)
trace("version", repr(version))
if isinstance(version, VERSION_CLASS):
return version
|
def tag_to_version(tag):
trace("tag", tag)
if "+" in tag:
warnings.warn("tag %r will be stripped of the local component" % tag)
tag = tag.split("+")[0]
# lstrip the v because of py2/py3 differences in setuptools
# also required for old versions of setuptools
version = tag.rsplit("-", 1)[-1].lstrip("v")
if VERSION_CLASS is None:
return version
version = pkg_parse_version(version)
trace("version", repr(version))
if isinstance(version, VERSION_CLASS):
return version
|
https://github.com/pypa/setuptools_scm/issues/266
|
looking for ep setuptools_scm.parse_scm_fallback .
root '/Users/twall/plex/rd'
looking for ep setuptools_scm.parse_scm /Users/twall/plex/rd
found ep .git = setuptools_scm.git:parse
cmd 'git rev-parse --show-toplevel'
out b'/Users/twall/plex/rd\n'
real root /Users/twall/plex/rd
cmd 'git describe --dirty --tags --long --match *.*'
out b'3.3.1-rc26-0-g9df187b\n'
cmd 'git rev-parse --abbrev-ref HEAD'
out b'pyplex\n'
tag 3.3.1-rc26
version <LegacyVersion('rc26')>
version None
Traceback (most recent call last):
File "setup.py", line 46, in <module>
entry_points = {
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools/__init__.py", line 129, in setup
return distutils.core.setup(**attrs)
File "/anaconda3/lib/python3.6/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools/dist.py", line 370, in __init__
k: v for k, v in attrs.items()
File "/anaconda3/lib/python3.6/distutils/dist.py", line 281, in __init__
self.finalize_options()
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools/dist.py", line 529, in finalize_options
ep.load()(self, ep.name, value)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/integration.py", line 22, in version_keyword
dist.metadata.version = get_version(**value)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/__init__.py", line 119, in get_version
parsed_version = _do_parse(root, parse)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/__init__.py", line 83, in _do_parse
version = version_from_scm(root) or _version_from_entrypoint(
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/__init__.py", line 31, in version_from_scm
return _version_from_entrypoint(root, 'setuptools_scm.parse_scm')
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/__init__.py", line 36, in _version_from_entrypoint
version = ep.load()(root)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/git.py", line 128, in parse
return meta(tag, node=node, dirty=dirty, branch=branch)
File "/Users/twall/plex/rd/search/.venv-3.6/lib/python3.6/site-packages/setuptools_scm/version.py", line 135, in meta
assert tag is not None, 'cant parse version %s' % tag
AssertionError: cant parse version None
|
AssertionError
|
def parse(root, describe_command=DEFAULT_DESCRIBE):
if not has_command("git"):
return
wd = GitWorkdir(root)
rev_node = wd.node()
dirty = wd.is_dirty()
if rev_node is None:
return meta("0.0", distance=0, dirty=dirty)
out, err, ret = do_ex(describe_command, root)
if ret:
return meta(
"0.0",
distance=wd.count_all_nodes(),
node=rev_node,
dirty=dirty,
)
tag, number, node = out.rsplit("-", 2)
number = int(number)
if number:
return meta(tag, distance=number, node=node, dirty=dirty)
else:
return meta(tag, node=node, dirty=dirty)
|
def parse(root, describe_command=DEFAULT_DESCRIBE):
wd = GitWorkdir(root)
rev_node = wd.node()
dirty = wd.is_dirty()
if rev_node is None:
return meta("0.0", distance=0, dirty=dirty)
out, err, ret = do_ex(describe_command, root)
if ret:
return meta(
"0.0",
distance=wd.count_all_nodes(),
node=rev_node,
dirty=dirty,
)
tag, number, node = out.rsplit("-", 2)
number = int(number)
if number:
return meta(tag, distance=number, node=node, dirty=dirty)
else:
return meta(tag, node=node, dirty=dirty)
|
https://github.com/pypa/setuptools_scm/issues/81
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-qg_odikh-build/setup.py", line 234, in <module>
zip_safe=False)
File "/usr/lib/python3.5/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/appenv/lib/python3.5/site-packages/setuptools/dist.py", line 272, in __init__
_Distribution.__init__(self,attrs)
File "/usr/lib/python3.5/distutils/dist.py", line 281, in __init__
self.finalize_options()
File "/appenv/lib/python3.5/site-packages/setuptools/dist.py", line 327, in finalize_options
ep.load()(self, ep.name, value)
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/integration.py", line 19, in version_keyword
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 102, in get_version
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 69, in _do_parse
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 30, in version_from_scm
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 36, in _version_from_entrypoint
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/git.py", line 11, in parse
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/utils.py", line 58, in do_ex
File "/usr/lib/python3.5/subprocess.py", line 947, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.5/subprocess.py", line 1541, in _execute_child
raise child_exception_type(errno_num, err_msg)
FileNotFoundError: [Errno 2] No such file or directory: 'git'
|
FileNotFoundError
|
def parse(root):
if not has_command("hg"):
return
l = do("hg id -i -t", root).split()
node = l.pop(0)
tags = tags_to_versions(l)
# filter tip in degraded mode on old setuptools
tags = [x for x in tags if x != "tip"]
dirty = node[-1] == "+"
if tags:
return meta(tags[0], dirty=dirty)
if node.strip("+") == "0" * 12:
trace("initial node", root)
return meta("0.0", dirty=dirty)
# the newline is needed for merge stae, see issue 72
cmd = 'hg parents --template "{latesttag} {latesttagdistance}\n"'
out = do(cmd, root)
try:
# in merge state we assume parent 1 is fine
tags, dist = out.splitlines()[0].split()
# pick latest tag from tag list
tag = tags.split(":")[-1]
if tag == "null":
tag = "0.0"
dist = int(dist) + 1
return _hg_tagdist_normalize_tagcommit(root, tag, dist, node)
except ValueError:
pass # unpacking failed, old hg
|
def parse(root):
l = do("hg id -i -t", root).split()
node = l.pop(0)
tags = tags_to_versions(l)
# filter tip in degraded mode on old setuptools
tags = [x for x in tags if x != "tip"]
dirty = node[-1] == "+"
if tags:
return meta(tags[0], dirty=dirty)
if node.strip("+") == "0" * 12:
trace("initial node", root)
return meta("0.0", dirty=dirty)
# the newline is needed for merge stae, see issue 72
cmd = 'hg parents --template "{latesttag} {latesttagdistance}\n"'
out = do(cmd, root)
try:
# in merge state we assume parent 1 is fine
tags, dist = out.splitlines()[0].split()
# pick latest tag from tag list
tag = tags.split(":")[-1]
if tag == "null":
tag = "0.0"
dist = int(dist) + 1
return _hg_tagdist_normalize_tagcommit(root, tag, dist, node)
except ValueError:
pass # unpacking failed, old hg
|
https://github.com/pypa/setuptools_scm/issues/81
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-qg_odikh-build/setup.py", line 234, in <module>
zip_safe=False)
File "/usr/lib/python3.5/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/appenv/lib/python3.5/site-packages/setuptools/dist.py", line 272, in __init__
_Distribution.__init__(self,attrs)
File "/usr/lib/python3.5/distutils/dist.py", line 281, in __init__
self.finalize_options()
File "/appenv/lib/python3.5/site-packages/setuptools/dist.py", line 327, in finalize_options
ep.load()(self, ep.name, value)
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/integration.py", line 19, in version_keyword
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 102, in get_version
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 69, in _do_parse
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 30, in version_from_scm
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 36, in _version_from_entrypoint
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/git.py", line 11, in parse
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/utils.py", line 58, in do_ex
File "/usr/lib/python3.5/subprocess.py", line 947, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.5/subprocess.py", line 1541, in _execute_child
raise child_exception_type(errno_num, err_msg)
FileNotFoundError: [Errno 2] No such file or directory: 'git'
|
FileNotFoundError
|
def do_ex(cmd, cwd="."):
trace("cmd", repr(cmd))
if not isinstance(cmd, (list, tuple)):
cmd = shlex.split(cmd)
p = _popen_pipes(cmd, cwd)
out, err = p.communicate()
if out:
trace("out", repr(out))
if err:
trace("err", repr(err))
if p.returncode:
trace("ret", p.returncode)
return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
|
def do_ex(cmd, cwd="."):
trace("cmd", repr(cmd))
if not isinstance(cmd, (list, tuple)):
cmd = shlex.split(cmd)
p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=str(cwd),
env=_always_strings(
dict(
os.environ,
# try to disable i18n
LC_ALL="C",
LANGUAGE="",
HGPLAIN="1",
)
),
)
out, err = p.communicate()
if out:
trace("out", repr(out))
if err:
trace("err", repr(err))
if p.returncode:
trace("ret", p.returncode)
return ensure_stripped_str(out), ensure_stripped_str(err), p.returncode
|
https://github.com/pypa/setuptools_scm/issues/81
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-qg_odikh-build/setup.py", line 234, in <module>
zip_safe=False)
File "/usr/lib/python3.5/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/appenv/lib/python3.5/site-packages/setuptools/dist.py", line 272, in __init__
_Distribution.__init__(self,attrs)
File "/usr/lib/python3.5/distutils/dist.py", line 281, in __init__
self.finalize_options()
File "/appenv/lib/python3.5/site-packages/setuptools/dist.py", line 327, in finalize_options
ep.load()(self, ep.name, value)
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/integration.py", line 19, in version_keyword
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 102, in get_version
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 69, in _do_parse
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 30, in version_from_scm
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/__init__.py", line 36, in _version_from_entrypoint
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/git.py", line 11, in parse
File "/tmp/pip-qg_odikh-build/.eggs/setuptools_scm-1.10.1-py3.5.egg/setuptools_scm/utils.py", line 58, in do_ex
File "/usr/lib/python3.5/subprocess.py", line 947, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.5/subprocess.py", line 1541, in _execute_child
raise child_exception_type(errno_num, err_msg)
FileNotFoundError: [Errno 2] No such file or directory: 'git'
|
FileNotFoundError
|
def parse(root, describe_command=DEFAULT_DESCRIBE):
real_root, _, ret = do_ex("git rev-parse --show-toplevel", root)
if ret:
return
trace("real root", real_root)
if normcase(abspath(realpath(real_root))) != normcase(abspath(realpath(root))):
return
rev_node, _, ret = do_ex("git rev-parse --verify --quiet HEAD", root)
if ret:
return meta("0.0")
rev_node = rev_node[:7]
out, err, ret = do_ex(describe_command, root)
if "-" not in out and "." not in out:
revs = do("git rev-list HEAD", root)
count = revs.count("\n")
if ret:
out = rev_node
return meta("0.0", distance=count + 1, node=out)
if ret:
return
dirty = out.endswith("-dirty")
if dirty:
out = out.rsplit("-", 1)[0]
tag, number, node = out.rsplit("-", 2)
number = int(number)
if number:
return meta(tag, distance=number, node=node, dirty=dirty)
else:
return meta(tag, dirty=dirty, node=node)
|
def parse(root, describe_command=DEFAULT_DESCRIBE):
real_root, _, ret = do_ex("git rev-parse --show-toplevel", root)
if ret:
return
trace("real root", real_root)
if abspath(realpath(real_root)) != abspath(realpath(root)):
return
rev_node, _, ret = do_ex("git rev-parse --verify --quiet HEAD", root)
if ret:
return meta("0.0")
rev_node = rev_node[:7]
out, err, ret = do_ex(describe_command, root)
if "-" not in out and "." not in out:
revs = do("git rev-list HEAD", root)
count = revs.count("\n")
if ret:
out = rev_node
return meta("0.0", distance=count + 1, node=out)
if ret:
return
dirty = out.endswith("-dirty")
if dirty:
out = out.rsplit("-", 1)[0]
tag, number, node = out.rsplit("-", 2)
number = int(number)
if number:
return meta(tag, distance=number, node=node, dirty=dirty)
else:
return meta(tag, dirty=dirty, node=node)
|
https://github.com/pypa/setuptools_scm/issues/89
|
(env35) C:\Users\scrut\code\phi\auth [develop +0 ~4 -0 | +0 ~16 -0 !]> dir
Directory: C:\Users\scrut\code\phi\auth
Mode LastWriteTime Length Name
---- ------------- ------ ----
d----- 6/12/2016 12:38 PM .git
d----- 6/12/2016 12:24 PM .tox
d----- 6/11/2016 9:02 PM doc
d----- 6/11/2016 9:41 PM src
d----- 6/11/2016 9:02 PM test
-a---- 6/11/2016 9:02 PM 395 .gitattributes
-a---- 6/12/2016 12:22 PM 1584 .gitignore
-a---- 6/11/2016 9:02 PM 116 .travis.yml
-a---- 6/11/2016 9:02 PM 1077 LICENSE.rst
-a---- 6/11/2016 9:02 PM 40 MANIFEST.in
-a---- 6/12/2016 12:09 PM 897 README.rst
-a---- 6/12/2016 12:38 PM 1452 setup.py
-a---- 6/11/2016 9:02 PM 610 tox.ini
(env35) C:\Users\scrut\code\phi\auth [develop +0 ~4 -0 | +0 ~16 -0 !]> python setup.py egg_info
Traceback (most recent call last):
File "setup.py", line 58, in <module>
entry_points={},
File "c:\users\scrut\appdata\local\programs\python\python35-32\Lib\distutils\core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "C:\Users\scrut\code\env35\lib\site-packages\setuptools\dist.py", line 272, in __init__
_Distribution.__init__(self,attrs)
File "c:\users\scrut\appdata\local\programs\python\python35-32\Lib\distutils\dist.py", line 281, in __init__
self.finalize_options()
File "C:\Users\scrut\code\env35\lib\site-packages\setuptools\dist.py", line 327, in finalize_options
ep.load()(self, ep.name, value)
File "c:\users\scrut\code\phi\auth\.eggs\setuptools_scm-1.11.0-py3.5.egg\setuptools_scm\integration.py", line 19, in version_keyword
File "c:\users\scrut\code\phi\auth\.eggs\setuptools_scm-1.11.0-py3.5.egg\setuptools_scm\__init__.py", line 104, in get_version
File "c:\users\scrut\code\phi\auth\.eggs\setuptools_scm-1.11.0-py3.5.egg\setuptools_scm\__init__.py", line 82, in _do_parse
LookupError: setuptools-scm was unable to detect version for 'C:\\Users\\scrut\\code\\phi\\auth'.
Make sure you're either building from a fully intact git repository or PyPI tarballs. Most other sources (such as GitHub's tarballs, a git checkout without the .git folder) don't contain the necessary metadata and will not work.
For example, if you're using pip, instead of https://github.com/user/proj/archive/master.zip use git+https://github.com/user/proj.git#egg=proj
|
LookupError
|
def parse(root):
l = do("hg id -i -t", root).split()
node = l.pop(0)
tags = tags_to_versions(l)
# filter tip in degraded mode on old setuptools
tags = [x for x in tags if x != "tip"]
dirty = node[-1] == "+"
if tags:
return meta(tags[0], dirty=dirty)
if node.strip("+") == "0" * 12:
trace("initial node", root)
return meta("0.0", dirty=dirty)
# the newline is needed for merge stae, see issue 72
cmd = 'hg parents --template "{latesttag} {latesttagdistance}\n"'
out = do(cmd, root)
try:
# in merge state we assume parent 1 is fine
tag, dist = out.splitlines()[0].split()
if tag == "null":
tag = "0.0"
dist = int(dist) + 1
return _hg_tagdist_normalize_tagcommit(root, tag, dist, node)
except ValueError:
pass # unpacking failed, old hg
|
def parse(root):
l = do("hg id -i -t", root).split()
node = l.pop(0)
tags = tags_to_versions(l)
# filter tip in degraded mode on old setuptools
tags = [x for x in tags if x != "tip"]
dirty = node[-1] == "+"
if tags:
return meta(tags[0], dirty=dirty)
if node.strip("+") == "0" * 12:
trace("initial node", root)
return meta("0.0", dirty=dirty)
cmd = 'hg parents --template "{latesttag} {latesttagdistance}"'
out = do(cmd, root)
try:
tag, dist = out.split()
if tag == "null":
tag = "0.0"
dist = int(dist) + 1
return _hg_tagdist_normalize_tagcommit(root, tag, dist, node)
except ValueError:
pass # unpacking failed, old hg
|
https://github.com/pypa/setuptools_scm/issues/72
|
$ python setup.py test
Traceback (most recent call last):
File "setup.py", line 61, in <module>
setuptools.setup(**setup_params)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/distutils/core.py", line 108, in setup
_setup_distribution = dist = klass(attrs)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/setuptools/dist.py", line 272, in __init__
_Distribution.__init__(self,attrs)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/distutils/dist.py", line 281, in __init__
self.finalize_options()
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/site-packages/setuptools/dist.py", line 327, in finalize_options
ep.load()(self, ep.name, value)
File "/Users/jaraco/Dropbox/code/yg/support/yg.jenkins/.eggs/setuptools_scm-1.10.0-py3.5.egg/setuptools_scm/integration.py", line 19, in version_keyword
File "/Users/jaraco/Dropbox/code/yg/support/yg.jenkins/.eggs/setuptools_scm-1.10.0-py3.5.egg/setuptools_scm/__init__.py", line 102, in get_version
File "/Users/jaraco/Dropbox/code/yg/support/yg.jenkins/.eggs/setuptools_scm-1.10.0-py3.5.egg/setuptools_scm/__init__.py", line 80, in _do_parse
LookupError: setuptools-scm was unable to detect version for '/Users/jaraco/Dropbox/code/yg/support/yg.jenkins'.
Make sure you're not using GitHub's tarballs (or similar ones), as those don't contain the necessary metadata. Use PyPI's tarballs instead.
For example, if you're using pip, instead of https://github.com/user/proj/archive/master.zip use git+https://github.com/user/proj.git#egg=proj
|
LookupError
|
def load(
cls,
path: Union[str, pathlib.Path, io.BufferedIOBase],
env: Optional[GymEnv] = None,
device: Union[th.device, str] = "auto",
**kwargs,
) -> "BaseAlgorithm":
"""
Load the model from a zip-file
:param path: path to the file (or a file-like) where to
load the agent from
:param env: the new environment to run the loaded model on
(can be None if you only need prediction from a trained model) has priority over any saved environment
:param device: Device on which the code should run.
:param kwargs: extra arguments to change the model when loading
"""
data, params, pytorch_variables = load_from_zip_file(path, device=device)
# Remove stored device information and replace with ours
if "policy_kwargs" in data:
if "device" in data["policy_kwargs"]:
del data["policy_kwargs"]["device"]
if "policy_kwargs" in kwargs and kwargs["policy_kwargs"] != data["policy_kwargs"]:
raise ValueError(
f"The specified policy kwargs do not equal the stored policy kwargs."
f"Stored kwargs: {data['policy_kwargs']}, specified kwargs: {kwargs['policy_kwargs']}"
)
if "observation_space" not in data or "action_space" not in data:
raise KeyError(
"The observation_space and action_space were not given, can't verify new environments"
)
if env is not None:
# Wrap first if needed
env = cls._wrap_env(env, data["verbose"])
# Check if given env is valid
check_for_correct_spaces(env, data["observation_space"], data["action_space"])
else:
# Use stored env, if one exists. If not, continue as is (can be used for predict)
if "env" in data:
env = data["env"]
# noinspection PyArgumentList
model = cls(
policy=data["policy_class"],
env=env,
device=device,
_init_setup_model=False, # pytype: disable=not-instantiable,wrong-keyword-args
)
# load parameters
model.__dict__.update(data)
model.__dict__.update(kwargs)
model._setup_model()
# put state_dicts back in place
model.set_parameters(params, exact_match=True, device=device)
# put other pytorch variables back in place
if pytorch_variables is not None:
for name in pytorch_variables:
recursive_setattr(model, name, pytorch_variables[name])
# Sample gSDE exploration matrix, so it uses the right device
# see issue #44
if model.use_sde:
model.policy.reset_noise() # pytype: disable=attribute-error
return model
|
def load(
cls,
path: Union[str, pathlib.Path, io.BufferedIOBase],
env: Optional[GymEnv] = None,
device: Union[th.device, str] = "auto",
**kwargs,
) -> "BaseAlgorithm":
"""
Load the model from a zip-file
:param path: path to the file (or a file-like) where to
load the agent from
:param env: the new environment to run the loaded model on
(can be None if you only need prediction from a trained model) has priority over any saved environment
:param device: Device on which the code should run.
:param kwargs: extra arguments to change the model when loading
"""
data, params, pytorch_variables = load_from_zip_file(path, device=device)
# Remove stored device information and replace with ours
if "policy_kwargs" in data:
if "device" in data["policy_kwargs"]:
del data["policy_kwargs"]["device"]
if "policy_kwargs" in kwargs and kwargs["policy_kwargs"] != data["policy_kwargs"]:
raise ValueError(
f"The specified policy kwargs do not equal the stored policy kwargs."
f"Stored kwargs: {data['policy_kwargs']}, specified kwargs: {kwargs['policy_kwargs']}"
)
if "observation_space" not in data or "action_space" not in data:
raise KeyError(
"The observation_space and action_space were not given, can't verify new environments"
)
if env is not None:
# Wrap first if needed
cls._wrap_env(env, data["verbose"])
# Check if given env is valid
check_for_correct_spaces(env, data["observation_space"], data["action_space"])
else:
# Use stored env, if one exists. If not, continue as is (can be used for predict)
if "env" in data:
env = data["env"]
# noinspection PyArgumentList
model = cls(
policy=data["policy_class"],
env=env,
device=device,
_init_setup_model=False, # pytype: disable=not-instantiable,wrong-keyword-args
)
# load parameters
model.__dict__.update(data)
model.__dict__.update(kwargs)
model._setup_model()
# put state_dicts back in place
model.set_parameters(params, exact_match=True, device=device)
# put other pytorch variables back in place
if pytorch_variables is not None:
for name in pytorch_variables:
recursive_setattr(model, name, pytorch_variables[name])
# Sample gSDE exploration matrix, so it uses the right device
# see issue #44
if model.use_sde:
model.policy.reset_noise() # pytype: disable=attribute-error
return model
|
https://github.com/DLR-RM/stable-baselines3/issues/202
|
Box(36, 36, 4)
Box(4, 36, 36)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-176-a54409b23a82> in <module>
1 model.save("custom_env")
----> 2 model = PPO.load("custom_env", custom_env)
/opt/conda/lib/python3.7/site-packages/stable_baselines3/common/base_class.py in load(cls, path, env, device, **kwargs)
595 cls._wrap_env(env, data["verbose"])
596 # Check if given env is valid
--> 597 check_for_correct_spaces(env, data["observation_space"], data["action_space"])
598 else:
599 # Use stored env, if one exists. If not, continue as is (can be used for predict)
/opt/conda/lib/python3.7/site-packages/stable_baselines3/common/utils.py in check_for_correct_spaces(env, observation_space, action_space)
204 """
205 if observation_space != env.observation_space:
--> 206 raise ValueError(f"Observation spaces do not match: {observation_space} != {env.observation_space}")
207 if action_space != env.action_space:
208 raise ValueError(f"Action spaces do not match: {action_space} != {env.action_space}")
ValueError: Observation spaces do not match: Box(4, 36, 36) != Box(36, 36, 4)
|
ValueError
|
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
formats = [
("auto", "(default) detect file type automatically"),
("pe", "Windows PE file"),
("sc32", "32-bit shellcode"),
("sc64", "64-bit shellcode"),
("freeze", "features previously frozen by capa"),
]
format_help = ", ".join(["%s: %s" % (f[0], f[1]) for f in formats])
desc = "The FLARE team's open-source tool to identify capabilities in executable files."
epilog = textwrap.dedent(
"""
By default, capa uses a default set of embedded rules.
You can see the rule set here:
https://github.com/fireeye/capa-rules
To provide your own rule set, use the `-r` flag:
capa --rules /path/to/rules suspicious.exe
capa -r /path/to/rules suspicious.exe
examples:
identify capabilities in a binary
capa suspicious.exe
identify capabilities in 32-bit shellcode, see `-f` for all supported formats
capa -f sc32 shellcode.bin
report match locations
capa -v suspicious.exe
report all feature match details
capa -vv suspicious.exe
filter rules by meta fields, e.g. rule name or namespace
capa -t "create TCP socket" suspicious.exe
"""
)
parser = argparse.ArgumentParser(
description=desc,
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
# in #328 we noticed that the sample path is not handled correctly if it contains non-ASCII characters
# https://stackoverflow.com/a/22947334/ offers a solution and decoding using getfilesystemencoding works
# in our testing, however other sources suggest `sys.stdin.encoding` (https://stackoverflow.com/q/4012571/)
"sample",
type=lambda s: s.decode(sys.getfilesystemencoding()),
help="path to sample to analyze",
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s {:s}".format(capa.version.__version__),
)
parser.add_argument(
"-r",
"--rules",
type=str,
default=RULES_PATH_DEFAULT_STRING,
help="path to rule file or directory, use embedded rules by default",
)
parser.add_argument(
"-f",
"--format",
choices=[f[0] for f in formats],
default="auto",
help="select sample format, %s" % format_help,
)
parser.add_argument(
"-t", "--tag", type=str, help="filter on rule meta field values"
)
parser.add_argument(
"-j", "--json", action="store_true", help="emit JSON instead of text"
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="enable verbose result document (no effect with --json)",
)
parser.add_argument(
"-vv",
"--vverbose",
action="store_true",
help="enable very verbose result document (no effect with --json)",
)
parser.add_argument(
"-d", "--debug", action="store_true", help="enable debugging output on STDERR"
)
parser.add_argument(
"-q", "--quiet", action="store_true", help="disable all output but errors"
)
parser.add_argument(
"--color",
type=str,
choices=("auto", "always", "never"),
default="auto",
help="enable ANSI color codes in results, default: only during interactive session",
)
args = parser.parse_args(args=argv)
if args.quiet:
logging.basicConfig(level=logging.WARNING)
logging.getLogger().setLevel(logging.WARNING)
elif args.debug:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
# disable vivisect-related logging, it's verbose and not relevant for capa users
set_vivisect_log_level(logging.CRITICAL)
try:
taste = get_file_taste(args.sample)
except IOError as e:
# per our research there's not a programmatic way to render the IOError with non-ASCII filename unless we
# handle the IOError separately and reach into the args
logger.error("%s", e.args[0])
return -1
# py2 doesn't know about cp65001, which is a variant of utf-8 on windows
# tqdm bails when trying to render the progress bar in this setup.
# because cp65001 is utf-8, we just map that codepage to the utf-8 codec.
# see #380 and: https://stackoverflow.com/a/3259271/87207
import codecs
codecs.register(lambda name: codecs.lookup("utf-8") if name == "cp65001" else None)
if args.rules == RULES_PATH_DEFAULT_STRING:
logger.debug("-" * 80)
logger.debug(" Using default embedded rules.")
logger.debug(
" To provide your own rules, use the form `capa.exe -r ./path/to/rules/ /path/to/mal.exe`."
)
logger.debug(" You can see the current default rule set here:")
logger.debug(" https://github.com/fireeye/capa-rules")
logger.debug("-" * 80)
if hasattr(sys, "frozen") and hasattr(sys, "_MEIPASS"):
logger.debug("detected running under PyInstaller")
rules_path = os.path.join(sys._MEIPASS, "rules")
logger.debug("default rule path (PyInstaller method): %s", rules_path)
else:
logger.debug("detected running from source")
rules_path = os.path.join(os.path.dirname(__file__), "..", "rules")
logger.debug("default rule path (source method): %s", rules_path)
if not os.path.exists(rules_path):
# when a users installs capa via pip,
# this pulls down just the source code - not the default rules.
# i'm not sure the default rules should even be written to the library directory,
# so in this case, we require the user to use -r to specify the rule directory.
logger.error(
"default embedded rules not found! (maybe you installed capa as a library?)"
)
logger.error("provide your own rule set via the `-r` option.")
return -1
else:
rules_path = args.rules
logger.debug("using rules path: %s", rules_path)
try:
rules = get_rules(rules_path, disable_progress=args.quiet)
rules = capa.rules.RuleSet(rules)
logger.debug(
"successfully loaded %s rules",
# during the load of the RuleSet, we extract subscope statements into their own rules
# that are subsequently `match`ed upon. this inflates the total rule count.
# so, filter out the subscope rules when reporting total number of loaded rules.
len(
filter(
lambda r: "capa/subscope-rule" not in r.meta, rules.rules.values()
)
),
)
if args.tag:
rules = rules.filter_rules_by_meta(args.tag)
logger.debug("selected %s rules", len(rules))
for i, r in enumerate(rules.rules, 1):
# TODO don't display subscope rules?
logger.debug(" %d. %s", i, r)
except (IOError, capa.rules.InvalidRule, capa.rules.InvalidRuleSet) as e:
logger.error("%s", str(e))
return -1
if (args.format == "freeze") or (
args.format == "auto" and capa.features.freeze.is_freeze(taste)
):
format = "freeze"
with open(args.sample, "rb") as f:
extractor = capa.features.freeze.load(f.read())
else:
format = args.format
try:
extractor = get_extractor(
args.sample, args.format, disable_progress=args.quiet
)
except UnsupportedFormatError:
logger.error("-" * 80)
logger.error(" Input file does not appear to be a PE file.")
logger.error(" ")
logger.error(
" capa currently only supports analyzing PE files (or shellcode, when using --format sc32|sc64)."
)
logger.error(
" If you don't know the input file type, you can try using the `file` utility to guess it."
)
logger.error("-" * 80)
return -1
except UnsupportedRuntimeError:
logger.error("-" * 80)
logger.error(" Unsupported runtime or Python interpreter.")
logger.error(" ")
logger.error(
" capa supports running under Python 2.7 using Vivisect for binary analysis."
)
logger.error(
" It can also run within IDA Pro, using either Python 2.7 or 3.5+."
)
logger.error(" ")
logger.error(
" If you're seeing this message on the command line, please ensure you're running Python 2.7."
)
logger.error("-" * 80)
return -1
meta = collect_metadata(argv, args.sample, args.rules, format, extractor)
capabilities, counts = find_capabilities(
rules, extractor, disable_progress=args.quiet
)
meta["analysis"].update(counts)
if has_file_limitation(rules, capabilities):
# bail if capa encountered file limitation e.g. a packed binary
# do show the output in verbose mode, though.
if not (args.verbose or args.vverbose or args.json):
return -1
if args.color == "always":
colorama.init(strip=False)
elif args.color == "auto":
# colorama will detect:
# - when on Windows console, and fixup coloring, and
# - when not an interactive session, and disable coloring
# renderers should use coloring and assume it will be stripped out if necessary.
colorama.init()
elif args.color == "never":
colorama.init(strip=True)
else:
raise RuntimeError("unexpected --color value: " + args.color)
if args.json:
print(capa.render.render_json(meta, rules, capabilities))
elif args.vverbose:
print(capa.render.render_vverbose(meta, rules, capabilities))
elif args.verbose:
print(capa.render.render_verbose(meta, rules, capabilities))
else:
print(capa.render.render_default(meta, rules, capabilities))
colorama.deinit()
logger.debug("done.")
return 0
|
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
formats = [
("auto", "(default) detect file type automatically"),
("pe", "Windows PE file"),
("sc32", "32-bit shellcode"),
("sc64", "64-bit shellcode"),
("freeze", "features previously frozen by capa"),
]
format_help = ", ".join(["%s: %s" % (f[0], f[1]) for f in formats])
desc = "The FLARE team's open-source tool to identify capabilities in executable files."
epilog = textwrap.dedent(
"""
By default, capa uses a default set of embedded rules.
You can see the rule set here:
https://github.com/fireeye/capa-rules
To provide your own rule set, use the `-r` flag:
capa --rules /path/to/rules suspicious.exe
capa -r /path/to/rules suspicious.exe
examples:
identify capabilities in a binary
capa suspicious.exe
identify capabilities in 32-bit shellcode, see `-f` for all supported formats
capa -f sc32 shellcode.bin
report match locations
capa -v suspicious.exe
report all feature match details
capa -vv suspicious.exe
filter rules by meta fields, e.g. rule name or namespace
capa -t "create TCP socket" suspicious.exe
"""
)
parser = argparse.ArgumentParser(
description=desc,
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("sample", type=str, help="path to sample to analyze")
parser.add_argument(
"--version",
action="version",
version="%(prog)s {:s}".format(capa.version.__version__),
)
parser.add_argument(
"-r",
"--rules",
type=str,
default=RULES_PATH_DEFAULT_STRING,
help="path to rule file or directory, use embedded rules by default",
)
parser.add_argument(
"-f",
"--format",
choices=[f[0] for f in formats],
default="auto",
help="select sample format, %s" % format_help,
)
parser.add_argument(
"-t", "--tag", type=str, help="filter on rule meta field values"
)
parser.add_argument(
"-j", "--json", action="store_true", help="emit JSON instead of text"
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="enable verbose result document (no effect with --json)",
)
parser.add_argument(
"-vv",
"--vverbose",
action="store_true",
help="enable very verbose result document (no effect with --json)",
)
parser.add_argument(
"-d", "--debug", action="store_true", help="enable debugging output on STDERR"
)
parser.add_argument(
"-q", "--quiet", action="store_true", help="disable all output but errors"
)
parser.add_argument(
"--color",
type=str,
choices=("auto", "always", "never"),
default="auto",
help="enable ANSI color codes in results, default: only during interactive session",
)
args = parser.parse_args(args=argv)
if args.quiet:
logging.basicConfig(level=logging.WARNING)
logging.getLogger().setLevel(logging.WARNING)
elif args.debug:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
# disable vivisect-related logging, it's verbose and not relevant for capa users
set_vivisect_log_level(logging.CRITICAL)
try:
taste = get_file_taste(args.sample)
except IOError as e:
logger.error("%s", str(e))
return -1
# py2 doesn't know about cp65001, which is a variant of utf-8 on windows
# tqdm bails when trying to render the progress bar in this setup.
# because cp65001 is utf-8, we just map that codepage to the utf-8 codec.
# see #380 and: https://stackoverflow.com/a/3259271/87207
import codecs
codecs.register(lambda name: codecs.lookup("utf-8") if name == "cp65001" else None)
if args.rules == RULES_PATH_DEFAULT_STRING:
logger.debug("-" * 80)
logger.debug(" Using default embedded rules.")
logger.debug(
" To provide your own rules, use the form `capa.exe -r ./path/to/rules/ /path/to/mal.exe`."
)
logger.debug(" You can see the current default rule set here:")
logger.debug(" https://github.com/fireeye/capa-rules")
logger.debug("-" * 80)
if hasattr(sys, "frozen") and hasattr(sys, "_MEIPASS"):
logger.debug("detected running under PyInstaller")
rules_path = os.path.join(sys._MEIPASS, "rules")
logger.debug("default rule path (PyInstaller method): %s", rules_path)
else:
logger.debug("detected running from source")
rules_path = os.path.join(os.path.dirname(__file__), "..", "rules")
logger.debug("default rule path (source method): %s", rules_path)
if not os.path.exists(rules_path):
# when a users installs capa via pip,
# this pulls down just the source code - not the default rules.
# i'm not sure the default rules should even be written to the library directory,
# so in this case, we require the user to use -r to specify the rule directory.
logger.error(
"default embedded rules not found! (maybe you installed capa as a library?)"
)
logger.error("provide your own rule set via the `-r` option.")
return -1
else:
rules_path = args.rules
logger.debug("using rules path: %s", rules_path)
try:
rules = get_rules(rules_path, disable_progress=args.quiet)
rules = capa.rules.RuleSet(rules)
logger.debug(
"successfully loaded %s rules",
# during the load of the RuleSet, we extract subscope statements into their own rules
# that are subsequently `match`ed upon. this inflates the total rule count.
# so, filter out the subscope rules when reporting total number of loaded rules.
len(
filter(
lambda r: "capa/subscope-rule" not in r.meta, rules.rules.values()
)
),
)
if args.tag:
rules = rules.filter_rules_by_meta(args.tag)
logger.debug("selected %s rules", len(rules))
for i, r in enumerate(rules.rules, 1):
# TODO don't display subscope rules?
logger.debug(" %d. %s", i, r)
except (IOError, capa.rules.InvalidRule, capa.rules.InvalidRuleSet) as e:
logger.error("%s", str(e))
return -1
if (args.format == "freeze") or (
args.format == "auto" and capa.features.freeze.is_freeze(taste)
):
format = "freeze"
with open(args.sample, "rb") as f:
extractor = capa.features.freeze.load(f.read())
else:
format = args.format
try:
extractor = get_extractor(
args.sample, args.format, disable_progress=args.quiet
)
except UnsupportedFormatError:
logger.error("-" * 80)
logger.error(" Input file does not appear to be a PE file.")
logger.error(" ")
logger.error(
" capa currently only supports analyzing PE files (or shellcode, when using --format sc32|sc64)."
)
logger.error(
" If you don't know the input file type, you can try using the `file` utility to guess it."
)
logger.error("-" * 80)
return -1
except UnsupportedRuntimeError:
logger.error("-" * 80)
logger.error(" Unsupported runtime or Python interpreter.")
logger.error(" ")
logger.error(
" capa supports running under Python 2.7 using Vivisect for binary analysis."
)
logger.error(
" It can also run within IDA Pro, using either Python 2.7 or 3.5+."
)
logger.error(" ")
logger.error(
" If you're seeing this message on the command line, please ensure you're running Python 2.7."
)
logger.error("-" * 80)
return -1
meta = collect_metadata(argv, args.sample, args.rules, format, extractor)
capabilities, counts = find_capabilities(
rules, extractor, disable_progress=args.quiet
)
meta["analysis"].update(counts)
if has_file_limitation(rules, capabilities):
# bail if capa encountered file limitation e.g. a packed binary
# do show the output in verbose mode, though.
if not (args.verbose or args.vverbose or args.json):
return -1
if args.color == "always":
colorama.init(strip=False)
elif args.color == "auto":
# colorama will detect:
# - when on Windows console, and fixup coloring, and
# - when not an interactive session, and disable coloring
# renderers should use coloring and assume it will be stripped out if necessary.
colorama.init()
elif args.color == "never":
colorama.init(strip=True)
else:
raise RuntimeError("unexpected --color value: " + args.color)
if args.json:
print(capa.render.render_json(meta, rules, capabilities))
elif args.vverbose:
print(capa.render.render_vverbose(meta, rules, capabilities))
elif args.verbose:
print(capa.render.render_verbose(meta, rules, capabilities))
else:
print(capa.render.render_default(meta, rules, capabilities))
colorama.deinit()
logger.debug("done.")
return 0
|
https://github.com/fireeye/capa/issues/328
|
./capa ../temp/一202009253623543164364364534.exe
loading : 100%|██████████████████████████████████████████████████████████████████████████████████████| 345/345 [00:02<00:00, 160.84 rules/s]
matching: 100%|█████████████████████████████████████████████████████████████████████████████████████| 1592/1592 [00:28<00:00, 54.95 functions/s]
Traceback (most recent call last):
File "capa/main.py", line 675, in <module>
File "capa/main.py", line 610, in main
File "capa/render/__init__.py", line 245, in render_default
File "capa/render/default.py", line 167, in render_default
File "capa/render/default.py", line 35, in render_meta
File "site-packages/tabulate.py", line 1450, in tabulate
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe4 in position 8: ordinal not in range(128)
|
UnicodeDecodeError
|
def slot_custom_context_menu_requested(self, pos):
"""slot connected to custom context menu request
displays custom context menu to user containing action relevant to the item selected
@param pos: cursor position
"""
model_index = self.indexAt(pos)
if not model_index.isValid():
return
item = self.map_index_to_source_item(model_index)
column = model_index.column()
menu = None
if CapaExplorerDataModel.COLUMN_INDEX_RULE_INFORMATION == column and isinstance(
item, CapaExplorerFunctionItem
):
# user hovered function item
menu = self.load_function_item_context_menu(pos, item, model_index)
else:
# user hovered default item
menu = self.load_default_context_menu(pos, item, model_index)
# show custom context menu at view position
self.show_custom_context_menu(menu, pos)
|
def slot_custom_context_menu_requested(self, pos):
"""slot connected to custom context menu request
displays custom context menu to user containing action relevant to the item selected
@param pos: cursor position
"""
model_index = self.indexAt(pos)
item = self.map_index_to_source_item(model_index)
column = model_index.column()
menu = None
if CapaExplorerDataModel.COLUMN_INDEX_RULE_INFORMATION == column and isinstance(
item, CapaExplorerFunctionItem
):
# user hovered function item
menu = self.load_function_item_context_menu(pos, item, model_index)
else:
# user hovered default item
menu = self.load_default_context_menu(pos, item, model_index)
# show custom context menu at view position
self.show_custom_context_menu(menu, pos)
|
https://github.com/fireeye/capa/issues/315
|
Traceback (most recent call last):
File "c:\users\steve\source\repos\capa\capa\ida\plugin\view.py", line 257, in slot_custom_context_menu_requested
item = self.map_index_to_source_item(model_index)
File "c:\users\steve\source\repos\capa\capa\ida\plugin\view.py", line 104, in map_index_to_source_item
raise ValueError("invalid index")
ValueError: invalid index
|
ValueError
|
def get_shellcode_vw(sample, arch="auto", should_save=True):
"""
Return shellcode workspace using explicit arch or via auto detect
"""
import viv_utils
with open(sample, "rb") as f:
sample_bytes = f.read()
if arch == "auto":
# choose arch with most functions, idea by Jay G.
vw_cands = []
for arch in ["i386", "amd64"]:
vw_cands.append(
viv_utils.getShellcodeWorkspace(
sample_bytes, arch, base=SHELLCODE_BASE, should_save=should_save
)
)
if not vw_cands:
raise ValueError("could not generate vivisect workspace")
vw = max(vw_cands, key=lambda vw: len(vw.getFunctions()))
else:
vw = viv_utils.getShellcodeWorkspace(
sample_bytes, arch, base=SHELLCODE_BASE, should_save=should_save
)
vw.setMeta("StorageName", "%s.viv" % sample)
return vw
|
def get_shellcode_vw(sample, arch="auto"):
"""
Return shellcode workspace using explicit arch or via auto detect
"""
import viv_utils
with open(sample, "rb") as f:
sample_bytes = f.read()
if arch == "auto":
# choose arch with most functions, idea by Jay G.
vw_cands = []
for arch in ["i386", "amd64"]:
vw_cands.append(
viv_utils.getShellcodeWorkspace(sample_bytes, arch, base=SHELLCODE_BASE)
)
if not vw_cands:
raise ValueError("could not generate vivisect workspace")
vw = max(vw_cands, key=lambda vw: len(vw.getFunctions()))
else:
vw = viv_utils.getShellcodeWorkspace(sample_bytes, arch, base=SHELLCODE_BASE)
return vw
|
https://github.com/fireeye/capa/issues/168
|
C:\tools\capa>capa-v1.0.0-win.exe -r capa-rules-master c:\windows\notepad.exe
WARNING:capa:skipping non-.yml file: LICENSE.txt
Unwind Info Version: 2 (bailing on .pdata)
Traceback (most recent call last):
File "capa\main.py", line 646, in <module>
File "capa\main.py", line 532, in main
File "capa\main.py", line 286, in get_extractor
File "capa\main.py", line 266, in get_extractor_py2
File "capa\main.py", line 252, in get_workspace
File "site-packages\viv_utils\__init__.py", line 86, in getWorkspace
File "site-packages\vivisect\__init__.py", line 2345, in saveWorkspace
File "site-packages\vivisect\storage\basicfile.py", line 15, in saveWorkspace
File "site-packages\vivisect\storage\basicfile.py", line 24, in vivEventsToFile
IOError: [Errno 13] Permission denied: 'c:\\windows\\notepad.exe.viv'
[2616] Failed to execute script main
|
IOError
|
def get_workspace(path, format, should_save=True):
import viv_utils
logger.debug("generating vivisect workspace for: %s", path)
if format == "auto":
if not is_supported_file_type(path):
raise UnsupportedFormatError()
vw = viv_utils.getWorkspace(path, should_save=should_save)
elif format == "pe":
vw = viv_utils.getWorkspace(path, should_save=should_save)
elif format == "sc32":
vw = get_shellcode_vw(path, arch="i386", should_save=should_save)
elif format == "sc64":
vw = get_shellcode_vw(path, arch="amd64", should_save=should_save)
logger.debug("%s", get_meta_str(vw))
return vw
|
def get_workspace(path, format):
import viv_utils
logger.debug("generating vivisect workspace for: %s", path)
if format == "auto":
if not is_supported_file_type(path):
raise UnsupportedFormatError()
vw = viv_utils.getWorkspace(path)
elif format == "pe":
vw = viv_utils.getWorkspace(path)
elif format == "sc32":
vw = get_shellcode_vw(path, arch="i386")
elif format == "sc64":
vw = get_shellcode_vw(path, arch="amd64")
logger.debug("%s", get_meta_str(vw))
return vw
|
https://github.com/fireeye/capa/issues/168
|
C:\tools\capa>capa-v1.0.0-win.exe -r capa-rules-master c:\windows\notepad.exe
WARNING:capa:skipping non-.yml file: LICENSE.txt
Unwind Info Version: 2 (bailing on .pdata)
Traceback (most recent call last):
File "capa\main.py", line 646, in <module>
File "capa\main.py", line 532, in main
File "capa\main.py", line 286, in get_extractor
File "capa\main.py", line 266, in get_extractor_py2
File "capa\main.py", line 252, in get_workspace
File "site-packages\viv_utils\__init__.py", line 86, in getWorkspace
File "site-packages\vivisect\__init__.py", line 2345, in saveWorkspace
File "site-packages\vivisect\storage\basicfile.py", line 15, in saveWorkspace
File "site-packages\vivisect\storage\basicfile.py", line 24, in vivEventsToFile
IOError: [Errno 13] Permission denied: 'c:\\windows\\notepad.exe.viv'
[2616] Failed to execute script main
|
IOError
|
def get_extractor_py2(path, format):
import capa.features.extractors.viv
vw = get_workspace(path, format, should_save=False)
try:
vw.saveWorkspace()
except IOError:
# see #168 for discussion around how to handle non-writable directories
logger.info(
"source directory is not writable, won't save intermediate workspace"
)
return capa.features.extractors.viv.VivisectFeatureExtractor(vw, path)
|
def get_extractor_py2(path, format):
import capa.features.extractors.viv
vw = get_workspace(path, format)
return capa.features.extractors.viv.VivisectFeatureExtractor(vw, path)
|
https://github.com/fireeye/capa/issues/168
|
C:\tools\capa>capa-v1.0.0-win.exe -r capa-rules-master c:\windows\notepad.exe
WARNING:capa:skipping non-.yml file: LICENSE.txt
Unwind Info Version: 2 (bailing on .pdata)
Traceback (most recent call last):
File "capa\main.py", line 646, in <module>
File "capa\main.py", line 532, in main
File "capa\main.py", line 286, in get_extractor
File "capa\main.py", line 266, in get_extractor_py2
File "capa\main.py", line 252, in get_workspace
File "site-packages\viv_utils\__init__.py", line 86, in getWorkspace
File "site-packages\vivisect\__init__.py", line 2345, in saveWorkspace
File "site-packages\vivisect\storage\basicfile.py", line 15, in saveWorkspace
File "site-packages\vivisect\storage\basicfile.py", line 24, in vivEventsToFile
IOError: [Errno 13] Permission denied: 'c:\\windows\\notepad.exe.viv'
[2616] Failed to execute script main
|
IOError
|
def render_statement(ostream, match, statement, indent=0):
ostream.write(" " * indent)
if statement["type"] in ("and", "or", "optional"):
ostream.write(statement["type"])
ostream.writeln(":")
elif statement["type"] == "not":
# this statement is handled specially in `render_match` using the MODE_SUCCESS/MODE_FAILURE flags.
ostream.writeln("not:")
elif statement["type"] == "some":
ostream.write("%d or more" % (statement["count"]))
ostream.writeln(":")
elif statement["type"] == "range":
# `range` is a weird node, its almost a hybrid of statement+feature.
# it is a specific feature repeated multiple times.
# there's no additional logic in the feature part, just the existence of a feature.
# so, we have to inline some of the feature rendering here.
child = statement["child"]
if child[child["type"]]:
value = rutils.bold2(child[child["type"]])
if child.get("description"):
ostream.write(
"count(%s(%s = %s)): "
% (child["type"], value, child["description"])
)
else:
ostream.write("count(%s(%s)): " % (child["type"], value))
else:
ostream.write("count(%s): " % child["type"])
if statement["max"] == statement["min"]:
ostream.write("%d" % (statement["min"]))
elif statement["min"] == 0:
ostream.write("%d or fewer" % (statement["max"]))
elif statement["max"] == (1 << 64 - 1):
ostream.write("%d or more" % (statement["min"]))
else:
ostream.write("between %d and %d" % (statement["min"], statement["max"]))
render_locations(ostream, match)
ostream.write("\n")
elif statement["type"] == "subscope":
ostream.write(statement["subscope"])
ostream.writeln(":")
else:
raise RuntimeError("unexpected match statement type: " + str(statement))
|
def render_statement(ostream, match, statement, indent=0):
ostream.write(" " * indent)
if statement["type"] in ("and", "or", "optional"):
ostream.write(statement["type"])
ostream.writeln(":")
elif statement["type"] == "not":
# this statement is handled specially in `render_match` using the MODE_SUCCESS/MODE_FAILURE flags.
ostream.writeln("not:")
elif statement["type"] == "some":
ostream.write(statement["count"] + " or more")
ostream.writeln(":")
elif statement["type"] == "range":
# `range` is a weird node, its almost a hybrid of statement+feature.
# it is a specific feature repeated multiple times.
# there's no additional logic in the feature part, just the existence of a feature.
# so, we have to inline some of the feature rendering here.
child = statement["child"]
if child[child["type"]]:
value = rutils.bold2(child[child["type"]])
if child.get("description"):
ostream.write(
"count(%s(%s = %s)): "
% (child["type"], value, child["description"])
)
else:
ostream.write("count(%s(%s)): " % (child["type"], value))
else:
ostream.write("count(%s): " % child["type"])
if statement["max"] == statement["min"]:
ostream.write("%d" % (statement["min"]))
elif statement["min"] == 0:
ostream.write("%d or fewer" % (statement["max"]))
elif statement["max"] == (1 << 64 - 1):
ostream.write("%d or more" % (statement["min"]))
else:
ostream.write("between %d and %d" % (statement["min"], statement["max"]))
render_locations(ostream, match)
ostream.write("\n")
elif statement["type"] == "subscope":
ostream.write(statement["subscope"])
ostream.writeln(":")
else:
raise RuntimeError("unexpected match statement type: " + str(statement))
|
https://github.com/fireeye/capa/issues/182
|
61 functions [00:00, 130.17 functions/s]
Traceback (most recent call last):
File "/usr/local/bin/capa", line 11, in <module>
load_entry_point('flare-capa', 'console_scripts', 'capa')()
File "/opt/capa/capa/main.py", line 581, in main
print(capa.render.render_vverbose(meta, rules, capabilities))
File "/opt/capa/capa/render/__init__.py", line 235, in render_vverbose
return capa.render.vverbose.render_vverbose(doc)
File "/opt/capa/capa/render/vverbose.py", line 221, in render_vverbose
render_rules(ostream, doc)
File "/opt/capa/capa/render/vverbose.py", line 208, in render_rules
render_match(ostream, match, indent=1)
File "/opt/capa/capa/render/vverbose.py", line 146, in render_match
render_node(ostream, match, match["node"], indent=indent)
File "/opt/capa/capa/render/vverbose.py", line 102, in render_node
render_statement(ostream, match, node["statement"], indent=indent)
File "/opt/capa/capa/render/vverbose.py", line 40, in render_statement
ostream.write(statement["count"] + " or more")
TypeError: unsupported operand type(s) for +: 'int' and 'str'
|
TypeError
|
def render_rules(ostream, doc):
"""
like:
## rules
check for OutputDebugString error
namespace anti-analysis/anti-debugging/debugger-detection
author michael.hunhoff@fireeye.com
scope function
mbc Anti-Behavioral Analysis::Detect Debugger::OutputDebugString
examples Practical Malware Analysis Lab 16-02.exe_:0x401020
function @ 0x10004706
and:
api: kernel32.SetLastError @ 0x100047C2
api: kernel32.GetLastError @ 0x10004A87
api: kernel32.OutputDebugString @ 0x10004767, 0x10004787, 0x10004816, 0x10004895
"""
had_match = False
for rule in rutils.capability_rules(doc):
count = len(rule["matches"])
if count == 1:
capability = rutils.bold(rule["meta"]["name"])
else:
capability = "%s (%d matches)" % (rutils.bold(rule["meta"]["name"]), count)
ostream.writeln(capability)
had_match = True
rows = []
for key in capa.rules.META_KEYS:
if key == "name" or key not in rule["meta"]:
continue
v = rule["meta"][key]
if isinstance(v, list) and len(v) == 1:
v = v[0]
elif isinstance(v, list) and len(v) > 1:
v = ", ".join(v)
rows.append((key, v))
ostream.writeln(tabulate.tabulate(rows, tablefmt="plain"))
if rule["meta"]["scope"] == capa.rules.FILE_SCOPE:
matches = list(doc["rules"][rule["meta"]["name"]]["matches"].values())
if len(matches) != 1:
# i think there should only ever be one match per file-scope rule,
# because we do the file-scope evaluation a single time.
# but i'm not 100% sure if this is/will always be true.
# so, lets be explicit about our assumptions and raise an exception if they fail.
raise RuntimeError(
"unexpected file scope match count: %d" % (len(matches))
)
render_match(ostream, matches[0], indent=0)
else:
for location, match in sorted(
doc["rules"][rule["meta"]["name"]]["matches"].items()
):
ostream.write(rule["meta"]["scope"])
ostream.write(" @ ")
ostream.writeln(rutils.hex(location))
render_match(ostream, match, indent=1)
ostream.write("\n")
if not had_match:
ostream.writeln(rutils.bold("no capabilities found"))
|
def render_rules(ostream, doc):
"""
like:
## rules
check for OutputDebugString error
namespace anti-analysis/anti-debugging/debugger-detection
author michael.hunhoff@fireeye.com
scope function
mbc Anti-Behavioral Analysis::Detect Debugger::OutputDebugString
examples Practical Malware Analysis Lab 16-02.exe_:0x401020
function @ 0x10004706
and:
api: kernel32.SetLastError @ 0x100047C2
api: kernel32.GetLastError @ 0x10004A87
api: kernel32.OutputDebugString @ 0x10004767, 0x10004787, 0x10004816, 0x10004895
"""
had_match = False
for rule in rutils.capability_rules(doc):
count = len(rule["matches"])
if count == 1:
capability = rutils.bold(rule["meta"]["name"])
else:
capability = "%s (%d matches)" % (rutils.bold(rule["meta"]["name"]), count)
ostream.writeln(capability)
had_match = True
rows = []
for key in capa.rules.META_KEYS:
if key == "name" or key not in rule["meta"]:
continue
v = rule["meta"][key]
if isinstance(v, list) and len(v) == 1:
v = v[0]
elif isinstance(v, list) and len(v) > 1:
v = ", ".join(v)
rows.append((key, v))
ostream.writeln(tabulate.tabulate(rows, tablefmt="plain"))
if rule["meta"]["scope"] == capa.rules.FILE_SCOPE:
matches = list(doc["rules"][rule["meta"]["name"]]["matches"].values())
if len(matches) != 1:
# i think there should only ever be one match per file-scope rule,
# because we do the file-scope evaluation a single time.
# but i'm not 100% sure if this is/will always be true.
# so, lets be explicit about our assumptions and raise an exception if they fail.
raise RuntimeError("unexpected file scope match count: " + len(matches))
render_match(ostream, matches[0], indent=0)
else:
for location, match in sorted(
doc["rules"][rule["meta"]["name"]]["matches"].items()
):
ostream.write(rule["meta"]["scope"])
ostream.write(" @ ")
ostream.writeln(rutils.hex(location))
render_match(ostream, match, indent=1)
ostream.write("\n")
if not had_match:
ostream.writeln(rutils.bold("no capabilities found"))
|
https://github.com/fireeye/capa/issues/182
|
61 functions [00:00, 130.17 functions/s]
Traceback (most recent call last):
File "/usr/local/bin/capa", line 11, in <module>
load_entry_point('flare-capa', 'console_scripts', 'capa')()
File "/opt/capa/capa/main.py", line 581, in main
print(capa.render.render_vverbose(meta, rules, capabilities))
File "/opt/capa/capa/render/__init__.py", line 235, in render_vverbose
return capa.render.vverbose.render_vverbose(doc)
File "/opt/capa/capa/render/vverbose.py", line 221, in render_vverbose
render_rules(ostream, doc)
File "/opt/capa/capa/render/vverbose.py", line 208, in render_rules
render_match(ostream, match, indent=1)
File "/opt/capa/capa/render/vverbose.py", line 146, in render_match
render_node(ostream, match, match["node"], indent=indent)
File "/opt/capa/capa/render/vverbose.py", line 102, in render_node
render_statement(ostream, match, node["statement"], indent=indent)
File "/opt/capa/capa/render/vverbose.py", line 40, in render_statement
ostream.write(statement["count"] + " or more")
TypeError: unsupported operand type(s) for +: 'int' and 'str'
|
TypeError
|
def get_functions(self):
import capa.features.extractors.ida.helpers as ida_helpers
for f in ida_helpers.get_functions(ignore_thunks=True, ignore_libs=True):
yield add_va_int_cast(f)
|
def get_functions(self):
for f in capa.features.extractors.ida.helpers.get_functions(
ignore_thunks=True, ignore_libs=True
):
yield add_va_int_cast(f)
|
https://github.com/fireeye/capa/issues/93
|
IDAPython: Error while calling Python callback <OnCreate>:
Traceback (most recent call last):
File "ida_capa_explorer.py", line 99, in OnCreate
self.load_capa_results()
File "capa/capa/ida/ida_capa_explorer.py", line 342, in load_capa_results
capabilities = capa.main.find_capabilities(rules, capa.features.extractors.ida.IdaFeatureExtractor(), True)
File "capa\capa\main.py", line 99, in find_capabilities
for f in tqdm.tqdm(extractor.get_functions(), disable=disable_progress, unit=" functions"):
File "C:\Python27\lib\site-packages\tqdm\_tqdm.py", line 997, in __iter__
for obj in iterable:
File "capa\capa\features\extractors\ida\__init__.py", line 54, in get_functions
from capa.features.extractors.ida import helpers
ImportError: cannot import name helpers
INFO:capa:form closed.
Python>sys.version
'2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]'
|
ImportError
|
def get_instructions(self, f, bb):
import capa.features.extractors.ida.helpers as ida_helpers
for insn in ida_helpers.get_instructions_in_range(bb.start_ea, bb.end_ea):
yield add_va_int_cast(insn)
|
def get_instructions(self, f, bb):
for insn in capa.features.extractors.ida.helpers.get_instructions_in_range(
bb.start_ea, bb.end_ea
):
yield add_va_int_cast(insn)
|
https://github.com/fireeye/capa/issues/93
|
IDAPython: Error while calling Python callback <OnCreate>:
Traceback (most recent call last):
File "ida_capa_explorer.py", line 99, in OnCreate
self.load_capa_results()
File "capa/capa/ida/ida_capa_explorer.py", line 342, in load_capa_results
capabilities = capa.main.find_capabilities(rules, capa.features.extractors.ida.IdaFeatureExtractor(), True)
File "capa\capa\main.py", line 99, in find_capabilities
for f in tqdm.tqdm(extractor.get_functions(), disable=disable_progress, unit=" functions"):
File "C:\Python27\lib\site-packages\tqdm\_tqdm.py", line 997, in __iter__
for obj in iterable:
File "capa\capa\features\extractors\ida\__init__.py", line 54, in get_functions
from capa.features.extractors.ida import helpers
ImportError: cannot import name helpers
INFO:capa:form closed.
Python>sys.version
'2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]'
|
ImportError
|
def wrapped_strategy(self):
if self.__wrapped_strategy is None:
if not inspect.isfunction(self.__definition):
raise InvalidArgument(
(
"Excepted a definition to be a function but got %r of type"
" %s instead."
)
% (self.__definition, type(self.__definition).__name__)
)
result = self.__definition()
if result is self:
raise InvalidArgument("Cannot define a deferred strategy to be itself")
if not isinstance(result, SearchStrategy):
raise InvalidArgument(
(
"Expected definition to return a SearchStrategy but "
"returned %r of type %s"
)
% (result, type(result).__name__)
)
self.__wrapped_strategy = result
self.__definition = None
return self.__wrapped_strategy
|
def wrapped_strategy(self):
if self.__wrapped_strategy is None:
if not inspect.isfunction(self.__definition):
raise InvalidArgument(
(
"Excepted a definition to be a function but got %r of type"
" %s instead."
)
% (self.__definition, type(self.__definition).__name__)
)
result = self.__definition()
if result is self:
raise InvalidArgument("Cannot define a deferred strategy to be itself")
if not isinstance(result, SearchStrategy):
raise InvalidArgument(
(
"Expected definition to return a SearchStrategy but "
"returned %r of type %s"
)
% (result, type(result).__name__)
)
self.__wrapped_strategy = result
del self.__definition
return self.__wrapped_strategy
|
https://github.com/HypothesisWorks/hypothesis/issues/2722
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "...\hypothesis\strategies\_internal\strategies.py", line 314, in example
example_generating_inner_function()
File "...\hypothesis\strategies\_internal\strategies.py", line 302, in example_generating_inner_function
@settings(
File "...\hypothesis\core.py", line 1023, in wrapped_test
processed_args = process_arguments_to_given(
File "...\hypothesis\core.py", line 441, in process_arguments_to_given
search_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\collections.py", line 39, in do_validate
s.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 638, in do_validate
self.mapped_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\lazy.py", line 118, in do_validate
w.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 638, in do_validate
self.mapped_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\collections.py", line 39, in do_validate
s.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 595, in do_validate
for e in self.element_strategies:
File "...\hypothesis\strategies\_internal\strategies.py", line 570, in element_strategies
if not arg.is_empty:
File "...\hypothesis\strategies\_internal\strategies.py", line 125, in accept
recur(self)
File "...\hypothesis\strategies\_internal\strategies.py", line 121, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "...\hypothesis\strategies\_internal\lazy.py", line 86, in calc_is_empty
return recur(self.wrapped_strategy)
File "...\hypothesis\strategies\_internal\strategies.py", line 121, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "...\hypothesis\strategies\_internal\deferred.py", line 80, in calc_is_empty
return recur(self.wrapped_strategy)
File "...\hypothesis\strategies\_internal\deferred.py", line 55, in wrapped_strategy
del self.__definition
AttributeError: _DeferredStrategy__definition
|
AttributeError
|
def assert_can_release():
assert not IS_PULL_REQUEST, "Cannot release from pull requests"
|
def assert_can_release():
assert not IS_PULL_REQUEST, "Cannot release from pull requests"
assert has_travis_secrets(), "Cannot release without travis secure vars"
|
https://github.com/HypothesisWorks/hypothesis/issues/2722
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "...\hypothesis\strategies\_internal\strategies.py", line 314, in example
example_generating_inner_function()
File "...\hypothesis\strategies\_internal\strategies.py", line 302, in example_generating_inner_function
@settings(
File "...\hypothesis\core.py", line 1023, in wrapped_test
processed_args = process_arguments_to_given(
File "...\hypothesis\core.py", line 441, in process_arguments_to_given
search_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\collections.py", line 39, in do_validate
s.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 638, in do_validate
self.mapped_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\lazy.py", line 118, in do_validate
w.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 638, in do_validate
self.mapped_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\collections.py", line 39, in do_validate
s.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 595, in do_validate
for e in self.element_strategies:
File "...\hypothesis\strategies\_internal\strategies.py", line 570, in element_strategies
if not arg.is_empty:
File "...\hypothesis\strategies\_internal\strategies.py", line 125, in accept
recur(self)
File "...\hypothesis\strategies\_internal\strategies.py", line 121, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "...\hypothesis\strategies\_internal\lazy.py", line 86, in calc_is_empty
return recur(self.wrapped_strategy)
File "...\hypothesis\strategies\_internal\strategies.py", line 121, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "...\hypothesis\strategies\_internal\deferred.py", line 80, in calc_is_empty
return recur(self.wrapped_strategy)
File "...\hypothesis\strategies\_internal\deferred.py", line 55, in wrapped_strategy
del self.__definition
AttributeError: _DeferredStrategy__definition
|
AttributeError
|
def deploy():
print("Current head: ", HEAD)
print("Current master:", MASTER)
if not tools.is_ancestor(HEAD, MASTER):
print("Not deploying due to not being on master")
sys.exit(0)
if "PYPI_TOKEN" not in os.environ:
print("Running without access to secure variables, so no deployment")
sys.exit(0)
tools.configure_git()
for project in tools.all_projects():
do_release(project)
sys.exit(0)
|
def deploy():
print("Current head: ", HEAD)
print("Current master:", MASTER)
if not tools.is_ancestor(HEAD, MASTER):
print("Not deploying due to not being on master")
sys.exit(0)
if not tools.has_travis_secrets():
print("Running without access to secure variables, so no deployment")
sys.exit(0)
print("Decrypting secrets")
tools.decrypt_secrets()
tools.configure_git()
for project in tools.all_projects():
do_release(project)
sys.exit(0)
|
https://github.com/HypothesisWorks/hypothesis/issues/2722
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "...\hypothesis\strategies\_internal\strategies.py", line 314, in example
example_generating_inner_function()
File "...\hypothesis\strategies\_internal\strategies.py", line 302, in example_generating_inner_function
@settings(
File "...\hypothesis\core.py", line 1023, in wrapped_test
processed_args = process_arguments_to_given(
File "...\hypothesis\core.py", line 441, in process_arguments_to_given
search_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\collections.py", line 39, in do_validate
s.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 638, in do_validate
self.mapped_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\lazy.py", line 118, in do_validate
w.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 638, in do_validate
self.mapped_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\collections.py", line 39, in do_validate
s.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 595, in do_validate
for e in self.element_strategies:
File "...\hypothesis\strategies\_internal\strategies.py", line 570, in element_strategies
if not arg.is_empty:
File "...\hypothesis\strategies\_internal\strategies.py", line 125, in accept
recur(self)
File "...\hypothesis\strategies\_internal\strategies.py", line 121, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "...\hypothesis\strategies\_internal\lazy.py", line 86, in calc_is_empty
return recur(self.wrapped_strategy)
File "...\hypothesis\strategies\_internal\strategies.py", line 121, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "...\hypothesis\strategies\_internal\deferred.py", line 80, in calc_is_empty
return recur(self.wrapped_strategy)
File "...\hypothesis\strategies\_internal\deferred.py", line 55, in wrapped_strategy
del self.__definition
AttributeError: _DeferredStrategy__definition
|
AttributeError
|
def upload_distribution():
"""Upload the built package to crates.io."""
tools.assert_can_release()
# Yes, cargo really will only look in this file. Yes this is terrible.
# This only runs in CI, so we may be assumed to own it, but still.
unlink_if_present(CARGO_CREDENTIALS)
# symlink so that the actual secret credentials can't be leaked via the
# cache.
os.symlink(tools.CARGO_API_KEY, CARGO_CREDENTIALS)
# Give the key the right permissions.
os.chmod(CARGO_CREDENTIALS, int("0600", 8))
cargo("publish")
|
def upload_distribution():
"""Upload the built package to crates.io."""
tools.assert_can_release()
# Yes, cargo really will only look in this file. Yes this is terrible.
# This only runs on Travis, so we may be assumed to own it, but still.
unlink_if_present(CARGO_CREDENTIALS)
# symlink so that the actual secret credentials can't be leaked via the
# cache.
os.symlink(tools.CARGO_API_KEY, CARGO_CREDENTIALS)
# Give the key the right permissions.
os.chmod(CARGO_CREDENTIALS, int("0600", 8))
cargo("publish")
|
https://github.com/HypothesisWorks/hypothesis/issues/2722
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "...\hypothesis\strategies\_internal\strategies.py", line 314, in example
example_generating_inner_function()
File "...\hypothesis\strategies\_internal\strategies.py", line 302, in example_generating_inner_function
@settings(
File "...\hypothesis\core.py", line 1023, in wrapped_test
processed_args = process_arguments_to_given(
File "...\hypothesis\core.py", line 441, in process_arguments_to_given
search_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\collections.py", line 39, in do_validate
s.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 638, in do_validate
self.mapped_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\lazy.py", line 118, in do_validate
w.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 638, in do_validate
self.mapped_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\collections.py", line 39, in do_validate
s.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 595, in do_validate
for e in self.element_strategies:
File "...\hypothesis\strategies\_internal\strategies.py", line 570, in element_strategies
if not arg.is_empty:
File "...\hypothesis\strategies\_internal\strategies.py", line 125, in accept
recur(self)
File "...\hypothesis\strategies\_internal\strategies.py", line 121, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "...\hypothesis\strategies\_internal\lazy.py", line 86, in calc_is_empty
return recur(self.wrapped_strategy)
File "...\hypothesis\strategies\_internal\strategies.py", line 121, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "...\hypothesis\strategies\_internal\deferred.py", line 80, in calc_is_empty
return recur(self.wrapped_strategy)
File "...\hypothesis\strategies\_internal\deferred.py", line 55, in wrapped_strategy
del self.__definition
AttributeError: _DeferredStrategy__definition
|
AttributeError
|
def upload_distribution():
tools.assert_can_release()
subprocess.check_call(
[
sys.executable,
"-m",
"twine",
"upload",
"--skip-existing",
"--username=__token__",
os.path.join(DIST, "*"),
]
)
# Construct plain-text + markdown version of this changelog entry,
# with link to canonical source.
build_docs(builder="text")
textfile = os.path.join(HYPOTHESIS_PYTHON, "docs", "_build", "text", "changes.txt")
with open(textfile) as f:
lines = f.readlines()
entries = [i for i, l in enumerate(lines) if CHANGELOG_HEADER.match(l)]
changelog_body = "".join(lines[entries[0] + 2 : entries[1]]).strip() + (
"\n\n*[The canonical version of these notes (with links) is on readthedocs.]"
"(https://hypothesis.readthedocs.io/en/latest/changes.html#v%s)*"
% (current_version().replace(".", "-"),)
)
# Create a GitHub release, to trigger Zenodo DOI minting. See
# https://developer.github.com/v3/repos/releases/#create-a-release
requests.post(
"https://api.github.com/repos/HypothesisWorks/hypothesis/releases",
json={
"tag_name": tag_name(),
"name": "Hypothesis for Python - version " + current_version(),
"body": changelog_body,
},
timeout=120, # seconds
).raise_for_status()
|
def upload_distribution():
tools.assert_can_release()
subprocess.check_call(
[
sys.executable,
"-m",
"twine",
"upload",
"--skip-existing",
"--config-file",
tools.PYPIRC,
os.path.join(DIST, "*"),
]
)
# Construct plain-text + markdown version of this changelog entry,
# with link to canonical source.
build_docs(builder="text")
textfile = os.path.join(HYPOTHESIS_PYTHON, "docs", "_build", "text", "changes.txt")
with open(textfile) as f:
lines = f.readlines()
entries = [i for i, l in enumerate(lines) if CHANGELOG_HEADER.match(l)]
changelog_body = "".join(lines[entries[0] + 2 : entries[1]]).strip() + (
"\n\n*[The canonical version of these notes (with links) is on readthedocs.]"
"(https://hypothesis.readthedocs.io/en/latest/changes.html#v%s)*"
% (current_version().replace(".", "-"),)
)
# Create a GitHub release, to trigger Zenodo DOI minting. See
# https://developer.github.com/v3/repos/releases/#create-a-release
requests.post(
"https://api.github.com/repos/HypothesisWorks/hypothesis/releases",
json={
"tag_name": tag_name(),
"name": "Hypothesis for Python - version " + current_version(),
"body": changelog_body,
},
timeout=120, # seconds
# Scoped personal access token, stored in Travis environ variable
auth=("Zac-HD", os.environ["Zac_release_token"]),
).raise_for_status()
# Post the release notes to Tidelift too - see https://tidelift.com/docs/api
requests.post(
"https://api.tidelift.com/external-api/lifting/pypi/hypothesis/release-notes/"
+ current_version(),
json={"body": changelog_body},
headers={"Authorization": "Bearer {}".format(os.environ["TIDELIFT_API_TOKEN"])},
timeout=120, # seconds
).raise_for_status()
|
https://github.com/HypothesisWorks/hypothesis/issues/2722
|
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "...\hypothesis\strategies\_internal\strategies.py", line 314, in example
example_generating_inner_function()
File "...\hypothesis\strategies\_internal\strategies.py", line 302, in example_generating_inner_function
@settings(
File "...\hypothesis\core.py", line 1023, in wrapped_test
processed_args = process_arguments_to_given(
File "...\hypothesis\core.py", line 441, in process_arguments_to_given
search_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\collections.py", line 39, in do_validate
s.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 638, in do_validate
self.mapped_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\lazy.py", line 118, in do_validate
w.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 638, in do_validate
self.mapped_strategy.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\collections.py", line 39, in do_validate
s.validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 377, in validate
self.do_validate()
File "...\hypothesis\strategies\_internal\strategies.py", line 595, in do_validate
for e in self.element_strategies:
File "...\hypothesis\strategies\_internal\strategies.py", line 570, in element_strategies
if not arg.is_empty:
File "...\hypothesis\strategies\_internal\strategies.py", line 125, in accept
recur(self)
File "...\hypothesis\strategies\_internal\strategies.py", line 121, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "...\hypothesis\strategies\_internal\lazy.py", line 86, in calc_is_empty
return recur(self.wrapped_strategy)
File "...\hypothesis\strategies\_internal\strategies.py", line 121, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "...\hypothesis\strategies\_internal\deferred.py", line 80, in calc_is_empty
return recur(self.wrapped_strategy)
File "...\hypothesis\strategies\_internal\deferred.py", line 55, in wrapped_strategy
del self.__definition
AttributeError: _DeferredStrategy__definition
|
AttributeError
|
def execute_explicit_examples(state, wrapped_test, arguments, kwargs):
original_argspec = getfullargspec(state.test)
for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
example_kwargs = dict(original_argspec.kwonlydefaults or {})
if example.args:
if len(example.args) > len(original_argspec.args):
raise InvalidArgument(
"example has too many arguments for test. "
"Expected at most %d but got %d"
% (len(original_argspec.args), len(example.args))
)
example_kwargs.update(
dict(zip(original_argspec.args[-len(example.args) :], example.args))
)
else:
example_kwargs.update(example.kwargs)
if Phase.explicit not in state.settings.phases:
continue
example_kwargs.update(kwargs)
with local_settings(state.settings):
fragments_reported = []
try:
with with_reporter(fragments_reported.append):
state.execute_once(
ArtificialDataForExample(example_kwargs),
is_final=True,
print_example=True,
)
except UnsatisfiedAssumption:
# Odd though it seems, we deliberately support explicit examples that
# are then rejected by a call to `assume()`. As well as iterative
# development, this is rather useful to replay Hypothesis' part of
# a saved failure when other arguments are supplied by e.g. pytest.
# See https://github.com/HypothesisWorks/hypothesis/issues/2125
pass
except BaseException as err:
# In order to support reporting of multiple failing examples, we yield
# each of the (report text, error) pairs we find back to the top-level
# runner. This also ensures that user-facing stack traces have as few
# frames of Hypothesis internals as possible.
err = err.with_traceback(get_trimmed_traceback())
# One user error - whether misunderstanding or typo - we've seen a few
# times is to pass strategies to @example() where values are expected.
# Checking is easy, and false-positives not much of a problem, so:
if any(
isinstance(arg, SearchStrategy)
for arg in example.args + tuple(example.kwargs.values())
):
new = HypothesisWarning(
"The @example() decorator expects to be passed values, but "
"you passed strategies instead. See https://hypothesis."
"readthedocs.io/en/latest/reproducing.html for details."
)
new.__cause__ = err
err = new
yield (fragments_reported, err)
if state.settings.report_multiple_bugs:
continue
break
finally:
if fragments_reported:
assert fragments_reported[0].startswith("Falsifying example")
fragments_reported[0] = fragments_reported[0].replace(
"Falsifying example", "Falsifying explicit example", 1
)
if fragments_reported:
verbose_report(fragments_reported[0].replace("Falsifying", "Trying", 1))
for f in fragments_reported[1:]:
verbose_report(f)
|
def execute_explicit_examples(state, wrapped_test, arguments, kwargs):
original_argspec = getfullargspec(state.test)
for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
example_kwargs = dict(original_argspec.kwonlydefaults or {})
if example.args:
if len(example.args) > len(original_argspec.args):
raise InvalidArgument(
"example has too many arguments for test. "
"Expected at most %d but got %d"
% (len(original_argspec.args), len(example.args))
)
example_kwargs.update(
dict(zip(original_argspec.args[-len(example.args) :], example.args))
)
else:
example_kwargs.update(example.kwargs)
if Phase.explicit not in state.settings.phases:
continue
example_kwargs.update(kwargs)
with local_settings(state.settings):
fragments_reported = []
try:
with with_reporter(fragments_reported.append):
state.execute_once(
ArtificialDataForExample(example_kwargs),
is_final=True,
print_example=True,
)
except UnsatisfiedAssumption:
# Odd though it seems, we deliberately support explicit examples that
# are then rejected by a call to `assume()`. As well as iterative
# development, this is rather useful to replay Hypothesis' part of
# a saved failure when other arguments are supplied by e.g. pytest.
# See https://github.com/HypothesisWorks/hypothesis/issues/2125
pass
except BaseException as err:
# In order to support reporting of multiple failing examples, we yield
# each of the (report text, error) pairs we find back to the top-level
# runner. This also ensures that user-facing stack traces have as few
# frames of Hypothesis internals as possible.
err = err.with_traceback(get_trimmed_traceback())
# One user error - whether misunderstanding or typo - we've seen a few
# times is to pass strategies to @example() where values are expected.
# Checking is easy, and false-positives not much of a problem, so:
if any(
isinstance(arg, SearchStrategy)
for arg in example.args + tuple(example.kwargs.values())
):
new = HypothesisWarning(
"The @example() decorator expects to be passed values, but "
"you passed strategies instead. See https://hypothesis."
"readthedocs.io/en/latest/reproducing.html for details."
)
new.__cause__ = err
err = new
yield (fragments_reported, err)
if state.settings.report_multiple_bugs:
continue
break
finally:
assert fragments_reported[0].startswith("Falsifying example")
fragments_reported[0] = fragments_reported[0].replace(
"Falsifying example", "Falsifying explicit example", 1
)
verbose_report(fragments_reported[0].replace("Falsifying", "Trying", 1))
for f in fragments_reported[1:]:
verbose_report(f)
|
https://github.com/HypothesisWorks/hypothesis/issues/2696
|
Traceback (most recent call last):
File "tmp.py", line 10, in <module>
f()
File "tmp.py", line 5, in f
@hypothesis.settings(verbosity=hypothesis.Verbosity.quiet)
File "/home/gram/.local/lib/python3.8/site-packages/hypothesis/core.py", line 1090, in wrapped_test
errors = list(
File "/home/gram/.local/lib/python3.8/site-packages/hypothesis/core.py", line 379, in execute_explicit_examples
assert fragments_reported[0].startswith("Falsifying example")
IndexError: list index out of range
|
IndexError
|
def get_entry_points():
yield from ()
|
def get_entry_points():
yield from pkg_resources.iter_entry_points("hypothesis")
|
https://github.com/HypothesisWorks/hypothesis/issues/2668
|
Traceback (most recent call last):
File "/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/entry_points.py", line 27, in <module>
from importlib import metadata as importlib_metadata
ImportError: cannot import name 'metadata'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/entry_points.py", line 29, in <module>
import importlib_metadata # type: ignore # mypy thinks this is a redefinition
ModuleNotFoundError: No module named 'importlib_metadata'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "common/python/runtime/python3_wrapper.py", line 36, in <module>
eval(compiled_code, module.__dict__)
File "/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/__main__/perception/cloud/proto_format_test.py", line 7, in <module>
import hypothesis as hyp
File "/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/__init__.py", line 27, in <module>
from hypothesis.entry_points import run
File "/run/shm/bazel-sandbox.58afe50bce693a45c42be3530f1579c8f12116963ef7e303ad72c1a2b06ed6f2/processwrapper-sandbox/4962/execroot/__main__/bazel-out/k8-fastbuild/bin/perception/cloud/proto_format_test.runfiles/internal_pip_dependency_hypothesis/pypi__hypothesis/hypothesis/entry_points.py", line 38, in <module>
import pkg_resources
ModuleNotFoundError: No module named 'pkg_resources'
|
ImportError
|
def given(
*_given_arguments: Union[SearchStrategy, InferType],
**_given_kwargs: Union[SearchStrategy, InferType],
) -> Callable[[Callable[..., None]], Callable[..., None]]:
"""A decorator for turning a test function that accepts arguments into a
randomized test.
This is the main entry point to Hypothesis.
"""
def run_test_as_given(test):
if inspect.isclass(test):
# Provide a meaningful error to users, instead of exceptions from
# internals that assume we're dealing with a function.
raise InvalidArgument("@given cannot be applied to a class.")
given_arguments = tuple(_given_arguments)
given_kwargs = dict(_given_kwargs)
original_argspec = getfullargspec(test)
check_invalid = is_invalid_test(
test.__name__, original_argspec, given_arguments, given_kwargs
)
# If the argument check found problems, return a dummy test function
# that will raise an error if it is actually called.
if check_invalid is not None:
return check_invalid
# Because the argument check succeeded, we can convert @given's
# positional arguments into keyword arguments for simplicity.
if given_arguments:
assert not given_kwargs
for name, strategy in zip(
reversed(original_argspec.args), reversed(given_arguments)
):
given_kwargs[name] = strategy
# These have been converted, so delete them to prevent accidental use.
del given_arguments
argspec = new_given_argspec(original_argspec, given_kwargs)
# Use type information to convert "infer" arguments into appropriate strategies.
if infer in given_kwargs.values():
hints = get_type_hints(test)
for name in [name for name, value in given_kwargs.items() if value is infer]:
if name not in hints:
# As usual, we want to emit this error when the test is executed,
# not when it's decorated.
@impersonate(test)
@define_function_signature(test.__name__, test.__doc__, argspec)
def wrapped_test(*arguments, **kwargs):
__tracebackhide__ = True
raise InvalidArgument(
"passed %s=infer for %s, but %s has no type annotation"
% (name, test.__name__, name)
)
return wrapped_test
given_kwargs[name] = st.from_type(hints[name])
@impersonate(test)
@define_function_signature(test.__name__, test.__doc__, argspec)
def wrapped_test(*arguments, **kwargs):
# Tell pytest to omit the body of this function from tracebacks
__tracebackhide__ = True
test = wrapped_test.hypothesis.inner_test
if getattr(test, "is_hypothesis_test", False):
raise InvalidArgument(
(
"You have applied @given to the test %s more than once, which "
"wraps the test several times and is extremely slow. A "
"similar effect can be gained by combining the arguments "
"of the two calls to given. For example, instead of "
"@given(booleans()) @given(integers()), you could write "
"@given(booleans(), integers())"
)
% (test.__name__,)
)
settings = wrapped_test._hypothesis_internal_use_settings
random = get_random_for_wrapped_test(test, wrapped_test)
processed_args = process_arguments_to_given(
wrapped_test,
arguments,
kwargs,
given_kwargs,
argspec,
settings,
)
arguments, kwargs, test_runner, search_strategy = processed_args
runner = getattr(search_strategy, "runner", None)
if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
msg = (
"You have applied @given to the method %s, which is "
"used by the unittest runner but is not itself a test."
" This is not useful in any way." % test.__name__
)
fail_health_check(settings, msg, HealthCheck.not_a_test_method)
if bad_django_TestCase(runner): # pragma: no cover
# Covered by the Django tests, but not the pytest coverage task
raise InvalidArgument(
"You have applied @given to a method on %s, but this "
"class does not inherit from the supported versions in "
"`hypothesis.extra.django`. Use the Hypothesis variants "
"to ensure that each example is run in a separate "
"database transaction." % qualname(type(runner))
)
state = StateForActualGivenExecution(
test_runner,
search_strategy,
test,
settings,
random,
wrapped_test,
)
reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure
# If there was a @reproduce_failure decorator, use it to reproduce
# the error (or complain that we couldn't). Either way, this will
# always raise some kind of error.
if reproduce_failure is not None:
expected_version, failure = reproduce_failure
if expected_version != __version__:
raise InvalidArgument(
(
"Attempting to reproduce a failure from a different "
"version of Hypothesis. This failure is from %s, but "
"you are currently running %r. Please change your "
"Hypothesis version to a matching one."
)
% (expected_version, __version__)
)
try:
state.execute_once(
ConjectureData.for_buffer(decode_failure(failure)),
print_example=True,
is_final=True,
)
raise DidNotReproduce(
"Expected the test to raise an error, but it "
"completed successfully."
)
except StopTest:
raise DidNotReproduce(
"The shape of the test data has changed in some way "
"from where this blob was defined. Are you sure "
"you're running the same test?"
)
except UnsatisfiedAssumption:
raise DidNotReproduce(
"The test data failed to satisfy an assumption in the "
"test. Have you added it since this blob was "
"generated?"
)
# There was no @reproduce_failure, so start by running any explicit
# examples from @example decorators.
errors = list(
execute_explicit_examples(state, wrapped_test, arguments, kwargs)
)
with local_settings(state.settings):
if len(errors) > 1:
# If we're not going to report multiple bugs, we would have
# stopped running explicit examples at the first failure.
assert state.settings.report_multiple_bugs
for fragments, err in errors:
for f in fragments:
report(f)
tb_lines = traceback.format_exception(
type(err), err, err.__traceback__
)
report("".join(tb_lines))
msg = "Hypothesis found %d failures in explicit examples."
raise MultipleFailures(msg % (len(errors)))
elif errors:
fragments, the_error_hypothesis_found = errors[0]
for f in fragments:
report(f)
raise the_error_hypothesis_found
# If there were any explicit examples, they all ran successfully.
# The next step is to use the Conjecture engine to run the test on
# many different inputs.
if not (
Phase.reuse in settings.phases or Phase.generate in settings.phases
):
return
try:
if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
subTest = runner.subTest
try:
runner.subTest = types.MethodType(fake_subTest, runner)
state.run_engine()
finally:
runner.subTest = subTest
else:
state.run_engine()
except BaseException as e:
# The exception caught here should either be an actual test
# failure (or MultipleFailures), or some kind of fatal error
# that caused the engine to stop.
generated_seed = wrapped_test._hypothesis_internal_use_generated_seed
with local_settings(settings):
if not (state.failed_normally or generated_seed is None):
if running_under_pytest:
report(
"You can add @seed(%(seed)d) to this test or "
"run pytest with --hypothesis-seed=%(seed)d "
"to reproduce this failure." % {"seed": generated_seed}
)
else:
report(
"You can add @seed(%d) to this test to "
"reproduce this failure." % (generated_seed,)
)
# The dance here is to avoid showing users long tracebacks
# full of Hypothesis internals they don't care about.
# We have to do this inline, to avoid adding another
# internal stack frame just when we've removed the rest.
#
# Using a variable for our trimmed error ensures that the line
# which will actually appear in tracebacks is as clear as
# possible - "raise the_error_hypothesis_found".
the_error_hypothesis_found = e.with_traceback(
get_trimmed_traceback()
)
raise the_error_hypothesis_found
def _get_fuzz_target() -> Callable[
[Union[bytes, bytearray, memoryview, BinaryIO]], Optional[bytes]
]:
# Because fuzzing interfaces are very performance-sensitive, we use a
# somewhat more complicated structure here. `_get_fuzz_target()` is
# called by the `HypothesisHandle.fuzz_one_input` property, allowing
# us to defer our collection of the settings, random instance, and
# reassignable `inner_test` (etc) until `fuzz_one_input` is accessed.
#
# We then share the performance cost of setting up `state` between
# many invocations of the target. We explicitly force `deadline=None`
# for performance reasons, saving ~40% the runtime of an empty test.
test = wrapped_test.hypothesis.inner_test
settings = Settings(
parent=wrapped_test._hypothesis_internal_use_settings, deadline=None
)
random = get_random_for_wrapped_test(test, wrapped_test)
_args, _kwargs, test_runner, search_strategy = process_arguments_to_given(
wrapped_test,
(),
{},
given_kwargs,
argspec,
settings,
)
assert not _args
assert not _kwargs
state = StateForActualGivenExecution(
test_runner,
search_strategy,
test,
settings,
random,
wrapped_test,
)
digest = function_digest(test)
def fuzz_one_input(
buffer: Union[bytes, bytearray, memoryview, BinaryIO],
) -> Optional[bytes]:
# This inner part is all that the fuzzer will actually run,
# so we keep it as small and as fast as possible.
if isinstance(buffer, io.IOBase):
buffer = buffer.read()
assert isinstance(buffer, (bytes, bytearray, memoryview))
data = ConjectureData.for_buffer(buffer)
try:
state.execute_once(data)
except (StopTest, UnsatisfiedAssumption):
return None
except BaseException:
if settings.database is not None:
settings.database.save(digest, bytes(data.buffer))
raise
return bytes(data.buffer)
fuzz_one_input.__doc__ = HypothesisHandle.fuzz_one_input.__doc__
return fuzz_one_input
# After having created the decorated test function, we need to copy
# over some attributes to make the switch as seamless as possible.
for attrib in dir(test):
if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)):
setattr(wrapped_test, attrib, getattr(test, attrib))
wrapped_test.is_hypothesis_test = True
if hasattr(test, "_hypothesis_internal_settings_applied"):
# Used to check if @settings is applied twice.
wrapped_test._hypothesis_internal_settings_applied = True
wrapped_test._hypothesis_internal_use_seed = getattr(
test, "_hypothesis_internal_use_seed", None
)
wrapped_test._hypothesis_internal_use_settings = (
getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default
)
wrapped_test._hypothesis_internal_use_reproduce_failure = getattr(
test, "_hypothesis_internal_use_reproduce_failure", None
)
wrapped_test.hypothesis = HypothesisHandle(test, _get_fuzz_target)
return wrapped_test
return run_test_as_given
|
def given(
*_given_arguments: Union[SearchStrategy, InferType],
**_given_kwargs: Union[SearchStrategy, InferType],
) -> Callable[[Callable[..., None]], Callable[..., None]]:
"""A decorator for turning a test function that accepts arguments into a
randomized test.
This is the main entry point to Hypothesis.
"""
def run_test_as_given(test):
if inspect.isclass(test):
# Provide a meaningful error to users, instead of exceptions from
# internals that assume we're dealing with a function.
raise InvalidArgument("@given cannot be applied to a class.")
given_arguments = tuple(_given_arguments)
given_kwargs = dict(_given_kwargs)
original_argspec = getfullargspec(test)
check_invalid = is_invalid_test(
test.__name__, original_argspec, given_arguments, given_kwargs
)
# If the argument check found problems, return a dummy test function
# that will raise an error if it is actually called.
if check_invalid is not None:
return check_invalid
# Because the argument check succeeded, we can convert @given's
# positional arguments into keyword arguments for simplicity.
if given_arguments:
assert not given_kwargs
for name, strategy in zip(
reversed(original_argspec.args), reversed(given_arguments)
):
given_kwargs[name] = strategy
# These have been converted, so delete them to prevent accidental use.
del given_arguments
argspec = new_given_argspec(original_argspec, given_kwargs)
# Use type information to convert "infer" arguments into appropriate strategies.
if infer in given_kwargs.values():
hints = get_type_hints(test)
for name in [name for name, value in given_kwargs.items() if value is infer]:
if name not in hints:
# As usual, we want to emit this error when the test is executed,
# not when it's decorated.
@impersonate(test)
@define_function_signature(test.__name__, test.__doc__, argspec)
def wrapped_test(*arguments, **kwargs):
__tracebackhide__ = True
raise InvalidArgument(
"passed %s=infer for %s, but %s has no type annotation"
% (name, test.__name__, name)
)
return wrapped_test
given_kwargs[name] = st.from_type(hints[name])
@impersonate(test)
@define_function_signature(test.__name__, test.__doc__, argspec)
def wrapped_test(*arguments, **kwargs):
# Tell pytest to omit the body of this function from tracebacks
__tracebackhide__ = True
test = wrapped_test.hypothesis.inner_test
if getattr(test, "is_hypothesis_test", False):
raise InvalidArgument(
(
"You have applied @given to the test %s more than once, which "
"wraps the test several times and is extremely slow. A "
"similar effect can be gained by combining the arguments "
"of the two calls to given. For example, instead of "
"@given(booleans()) @given(integers()), you could write "
"@given(booleans(), integers())"
)
% (test.__name__,)
)
settings = wrapped_test._hypothesis_internal_use_settings
random = get_random_for_wrapped_test(test, wrapped_test)
processed_args = process_arguments_to_given(
wrapped_test,
arguments,
kwargs,
given_kwargs,
argspec,
settings,
)
arguments, kwargs, test_runner, search_strategy = processed_args
runner = getattr(search_strategy, "runner", None)
if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
msg = (
"You have applied @given to the method %s, which is "
"used by the unittest runner but is not itself a test."
" This is not useful in any way." % test.__name__
)
fail_health_check(settings, msg, HealthCheck.not_a_test_method)
if bad_django_TestCase(runner): # pragma: no cover
# Covered by the Django tests, but not the pytest coverage task
raise InvalidArgument(
"You have applied @given to a method on %s, but this "
"class does not inherit from the supported versions in "
"`hypothesis.extra.django`. Use the Hypothesis variants "
"to ensure that each example is run in a separate "
"database transaction." % qualname(type(runner))
)
state = StateForActualGivenExecution(
test_runner,
search_strategy,
test,
settings,
random,
wrapped_test,
)
reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure
# If there was a @reproduce_failure decorator, use it to reproduce
# the error (or complain that we couldn't). Either way, this will
# always raise some kind of error.
if reproduce_failure is not None:
expected_version, failure = reproduce_failure
if expected_version != __version__:
raise InvalidArgument(
(
"Attempting to reproduce a failure from a different "
"version of Hypothesis. This failure is from %s, but "
"you are currently running %r. Please change your "
"Hypothesis version to a matching one."
)
% (expected_version, __version__)
)
try:
state.execute_once(
ConjectureData.for_buffer(decode_failure(failure)),
print_example=True,
is_final=True,
)
raise DidNotReproduce(
"Expected the test to raise an error, but it "
"completed successfully."
)
except StopTest:
raise DidNotReproduce(
"The shape of the test data has changed in some way "
"from where this blob was defined. Are you sure "
"you're running the same test?"
)
except UnsatisfiedAssumption:
raise DidNotReproduce(
"The test data failed to satisfy an assumption in the "
"test. Have you added it since this blob was "
"generated?"
)
# There was no @reproduce_failure, so start by running any explicit
# examples from @example decorators.
errors = list(
execute_explicit_examples(state, wrapped_test, arguments, kwargs)
)
with local_settings(state.settings):
if len(errors) > 1:
# If we're not going to report multiple bugs, we would have
# stopped running explicit examples at the first failure.
assert state.settings.report_multiple_bugs
for fragments, err in errors:
for f in fragments:
report(f)
tb_lines = traceback.format_exception(
type(err), err, err.__traceback__
)
report("".join(tb_lines))
msg = "Hypothesis found %d failures in explicit examples."
raise MultipleFailures(msg % (len(errors)))
elif errors:
fragments, the_error_hypothesis_found = errors[0]
for f in fragments:
report(f)
raise the_error_hypothesis_found
# If there were any explicit examples, they all ran successfully.
# The next step is to use the Conjecture engine to run the test on
# many different inputs.
if not (
Phase.reuse in settings.phases or Phase.generate in settings.phases
):
return
try:
if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
subTest = runner.subTest
try:
runner.subTest = fake_subTest
state.run_engine()
finally:
runner.subTest = subTest
else:
state.run_engine()
except BaseException as e:
# The exception caught here should either be an actual test
# failure (or MultipleFailures), or some kind of fatal error
# that caused the engine to stop.
generated_seed = wrapped_test._hypothesis_internal_use_generated_seed
with local_settings(settings):
if not (state.failed_normally or generated_seed is None):
if running_under_pytest:
report(
"You can add @seed(%(seed)d) to this test or "
"run pytest with --hypothesis-seed=%(seed)d "
"to reproduce this failure." % {"seed": generated_seed}
)
else:
report(
"You can add @seed(%d) to this test to "
"reproduce this failure." % (generated_seed,)
)
# The dance here is to avoid showing users long tracebacks
# full of Hypothesis internals they don't care about.
# We have to do this inline, to avoid adding another
# internal stack frame just when we've removed the rest.
#
# Using a variable for our trimmed error ensures that the line
# which will actually appear in tracebacks is as clear as
# possible - "raise the_error_hypothesis_found".
the_error_hypothesis_found = e.with_traceback(
get_trimmed_traceback()
)
raise the_error_hypothesis_found
def _get_fuzz_target() -> Callable[
[Union[bytes, bytearray, memoryview, BinaryIO]], Optional[bytes]
]:
# Because fuzzing interfaces are very performance-sensitive, we use a
# somewhat more complicated structure here. `_get_fuzz_target()` is
# called by the `HypothesisHandle.fuzz_one_input` property, allowing
# us to defer our collection of the settings, random instance, and
# reassignable `inner_test` (etc) until `fuzz_one_input` is accessed.
#
# We then share the performance cost of setting up `state` between
# many invocations of the target. We explicitly force `deadline=None`
# for performance reasons, saving ~40% the runtime of an empty test.
test = wrapped_test.hypothesis.inner_test
settings = Settings(
parent=wrapped_test._hypothesis_internal_use_settings, deadline=None
)
random = get_random_for_wrapped_test(test, wrapped_test)
_args, _kwargs, test_runner, search_strategy = process_arguments_to_given(
wrapped_test,
(),
{},
given_kwargs,
argspec,
settings,
)
assert not _args
assert not _kwargs
state = StateForActualGivenExecution(
test_runner,
search_strategy,
test,
settings,
random,
wrapped_test,
)
digest = function_digest(test)
def fuzz_one_input(
buffer: Union[bytes, bytearray, memoryview, BinaryIO],
) -> Optional[bytes]:
# This inner part is all that the fuzzer will actually run,
# so we keep it as small and as fast as possible.
if isinstance(buffer, io.IOBase):
buffer = buffer.read()
assert isinstance(buffer, (bytes, bytearray, memoryview))
data = ConjectureData.for_buffer(buffer)
try:
state.execute_once(data)
except (StopTest, UnsatisfiedAssumption):
return None
except BaseException:
if settings.database is not None:
settings.database.save(digest, bytes(data.buffer))
raise
return bytes(data.buffer)
fuzz_one_input.__doc__ = HypothesisHandle.fuzz_one_input.__doc__
return fuzz_one_input
# After having created the decorated test function, we need to copy
# over some attributes to make the switch as seamless as possible.
for attrib in dir(test):
if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)):
setattr(wrapped_test, attrib, getattr(test, attrib))
wrapped_test.is_hypothesis_test = True
if hasattr(test, "_hypothesis_internal_settings_applied"):
# Used to check if @settings is applied twice.
wrapped_test._hypothesis_internal_settings_applied = True
wrapped_test._hypothesis_internal_use_seed = getattr(
test, "_hypothesis_internal_use_seed", None
)
wrapped_test._hypothesis_internal_use_settings = (
getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default
)
wrapped_test._hypothesis_internal_use_reproduce_failure = getattr(
test, "_hypothesis_internal_use_reproduce_failure", None
)
wrapped_test.hypothesis = HypothesisHandle(test, _get_fuzz_target)
return wrapped_test
return run_test_as_given
|
https://github.com/HypothesisWorks/hypothesis/issues/2462
|
Traceback (most recent call last):
File "/tests/common/test_components.py", line 91, in test__located_component_multiple_components_registration
c1_name=registry_identifier_strategy(),
File "/lib/python3.8/site-packages/hypothesis/core.py", line 1102, in wrapped_test
raise the_error_hypothesis_found
File "/tests/common/test_components.py", line 150, in test__located_component_multiple_components_registration
with self.subTest(action='count'):
File "/usr/lib/python3.8/contextlib.py", line 240, in helper
return _GeneratorContextManager(func, args, kwds)
File "/usr/lib/python3.8/contextlib.py", line 83, in __init__
self.gen = func(*args, **kwds)
TypeError: fake_subTest() missing 1 required positional argument: 'self'
|
TypeError
|
def resolve_Type(thing):
if getattr(thing, "__args__", None) is None:
return st.just(type)
args = (thing.__args__[0],)
if getattr(args[0], "__origin__", None) is typing.Union:
args = args[0].__args__
elif hasattr(args[0], "__union_params__"): # pragma: no cover
args = args[0].__union_params__
if isinstance(ForwardRef, type): # pragma: no cover
# Duplicate check from from_type here - only paying when needed.
for a in args:
if type(a) == ForwardRef:
raise ResolutionFailed(
"thing=%s cannot be resolved. Upgrading to "
"python>=3.6 may fix this problem via improvements "
"to the typing module." % (thing,)
)
return st.sampled_from(sorted(args, key=type_sorting_key))
|
def resolve_Type(thing):
if thing.__args__ is None:
return st.just(type)
args = (thing.__args__[0],)
if getattr(args[0], "__origin__", None) is typing.Union:
args = args[0].__args__
elif hasattr(args[0], "__union_params__"): # pragma: no cover
args = args[0].__union_params__
if isinstance(ForwardRef, type): # pragma: no cover
# Duplicate check from from_type here - only paying when needed.
for a in args:
if type(a) == ForwardRef:
raise ResolutionFailed(
"thing=%s cannot be resolved. Upgrading to "
"python>=3.6 may fix this problem via improvements "
"to the typing module." % (thing,)
)
return st.sampled_from(sorted(args, key=type_sorting_key))
|
https://github.com/HypothesisWorks/hypothesis/issues/2444
|
______________________________________________________ test_resolves_weird_types[typ0] _______________________________________________________
Traceback (most recent call last):
File "/home/pviktori/dev/hypothesis/hypothesis-python/tests/cover/test_lookup.py", line 249, in test_resolves_weird_types
from_type(typ).example()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/strategies.py", line 315, in example
example_generating_inner_function()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/strategies.py", line 303, in example_generating_inner_function
@settings(
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/core.py", line 1102, in wrapped_test
raise the_error_hypothesis_found
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/core.py", line 1071, in wrapped_test
state.run_engine()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/core.py", line 732, in run_engine
runner.run()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 458, in run
self._run()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 857, in _run
self.generate_new_examples()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 661, in generate_new_examples
minimal_example = self.cached_test_function(
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 1034, in cached_test_function
self.test_function(data)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 180, in test_function
self.__stoppable_test_function(data)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 162, in __stoppable_test_function
self._test_function(data)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/core.py", line 685, in _execute_once_for_engine
escalate_hypothesis_internal_error()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/core.py", line 656, in _execute_once_for_engine
result = self.execute_once(data)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/core.py", line 611, in execute_once
result = self.test_runner(data, run)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/executors.py", line 52, in default_new_style_executor
return function(data)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/core.py", line 552, in run
args, kwargs = data.draw(self.search_strategy)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/data.py", line 889, in draw
return strategy.do_draw(self)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/collections.py", line 57, in do_draw
return tuple(data.draw(e) for e in self.element_strategies)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/collections.py", line 57, in <genexpr>
return tuple(data.draw(e) for e in self.element_strategies)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/data.py", line 884, in draw
return strategy.do_draw(self)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/strategies.py", line 658, in do_draw
result = self.pack(data.draw(self.mapped_strategy))
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/data.py", line 884, in draw
return strategy.do_draw(self)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/lazy.py", line 150, in do_draw
return data.draw(self.wrapped_strategy)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/data.py", line 884, in draw
return strategy.do_draw(self)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/strategies.py", line 658, in do_draw
result = self.pack(data.draw(self.mapped_strategy))
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/data.py", line 884, in draw
return strategy.do_draw(self)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/collections.py", line 57, in do_draw
return tuple(data.draw(e) for e in self.element_strategies)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/collections.py", line 57, in <genexpr>
return tuple(data.draw(e) for e in self.element_strategies)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/data.py", line 884, in draw
return strategy.do_draw(self)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/flatmapped.py", line 42, in do_draw
return data.draw(expanded_source)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/data.py", line 871, in draw
strategy.validate()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/strategies.py", line 378, in validate
self.do_validate()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/lazy.py", line 118, in do_validate
w.validate()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/strategies.py", line 379, in validate
self.is_empty
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/strategies.py", line 126, in accept
recur(self)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/strategies.py", line 122, in recur
mapping[strat] = getattr(strat, calculation)(recur)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/deferred.py", line 80, in calc_is_empty
return recur(self.wrapped_strategy)
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/deferred.py", line 43, in wrapped_strategy
result = self.__definition()
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/core.py", line 1338, in <lambda>
lambda thing: deferred(lambda: _from_type(thing)),
File "/home/pviktori/dev/hypothesis/hypothesis-python/src/hypothesis/strategies/_internal/core.py", line 1452, in _from_type
raise ResolutionFailed(
hypothesis.errors.ResolutionFailed: Could not resolve <class 'sortedcontainers.sorteddict.SortedItemsView'> to a strategy; consider using register_type_strategy
|
hypothesis.errors.ResolutionFailed
|
def cacheable(fn: T) -> T:
@proxies(fn)
def cached_strategy(*args, **kwargs):
try:
kwargs_cache_key = {(k, convert_value(v)) for k, v in kwargs.items()}
except TypeError:
return fn(*args, **kwargs)
cache_key = (fn, tuple(map(convert_value, args)), frozenset(kwargs_cache_key))
cache = get_cache()
try:
if cache_key in cache:
return cache[cache_key]
except TypeError:
return fn(*args, **kwargs)
else:
result = fn(*args, **kwargs)
if not isinstance(result, SearchStrategy) or result.is_cacheable:
cache[cache_key] = result
return result
cached_strategy.__clear_cache = clear_cache
return cached_strategy
|
def cacheable(fn: T) -> T:
@proxies(fn)
def cached_strategy(*args, **kwargs):
try:
kwargs_cache_key = {(k, convert_value(v)) for k, v in kwargs.items()}
except TypeError:
return fn(*args, **kwargs)
cache_key = (fn, tuple(map(convert_value, args)), frozenset(kwargs_cache_key))
try:
if cache_key in STRATEGY_CACHE:
return STRATEGY_CACHE[cache_key]
except TypeError:
return fn(*args, **kwargs)
else:
result = fn(*args, **kwargs)
if not isinstance(result, SearchStrategy) or result.is_cacheable:
STRATEGY_CACHE[cache_key] = result
return result
cached_strategy.__clear_cache = STRATEGY_CACHE.clear
return cached_strategy
|
https://github.com/HypothesisWorks/hypothesis/issues/2433
|
File /.virtualenvs/schemathesis/lib/python3.8/site-packages/hypothesis/strategies/_internal/core.py", line 172, in cached_strategy
E STRATEGY_CACHE[cache_key] = result
E File "/.virtualenvs/schemathesis/lib/python3.8/site-packages/hypothesis/internal/cache.py", line 121, in __setitem__
E assert evicted.score <= self.data[0].score
E AssertionError: assert [1, 10062] <= [1, 6901]
|
AssertionError
|
def cached_strategy(*args, **kwargs):
try:
kwargs_cache_key = {(k, convert_value(v)) for k, v in kwargs.items()}
except TypeError:
return fn(*args, **kwargs)
cache_key = (fn, tuple(map(convert_value, args)), frozenset(kwargs_cache_key))
cache = get_cache()
try:
if cache_key in cache:
return cache[cache_key]
except TypeError:
return fn(*args, **kwargs)
else:
result = fn(*args, **kwargs)
if not isinstance(result, SearchStrategy) or result.is_cacheable:
cache[cache_key] = result
return result
|
def cached_strategy(*args, **kwargs):
try:
kwargs_cache_key = {(k, convert_value(v)) for k, v in kwargs.items()}
except TypeError:
return fn(*args, **kwargs)
cache_key = (fn, tuple(map(convert_value, args)), frozenset(kwargs_cache_key))
try:
if cache_key in STRATEGY_CACHE:
return STRATEGY_CACHE[cache_key]
except TypeError:
return fn(*args, **kwargs)
else:
result = fn(*args, **kwargs)
if not isinstance(result, SearchStrategy) or result.is_cacheable:
STRATEGY_CACHE[cache_key] = result
return result
|
https://github.com/HypothesisWorks/hypothesis/issues/2433
|
File /.virtualenvs/schemathesis/lib/python3.8/site-packages/hypothesis/strategies/_internal/core.py", line 172, in cached_strategy
E STRATEGY_CACHE[cache_key] = result
E File "/.virtualenvs/schemathesis/lib/python3.8/site-packages/hypothesis/internal/cache.py", line 121, in __setitem__
E assert evicted.score <= self.data[0].score
E AssertionError: assert [1, 10062] <= [1, 6901]
|
AssertionError
|
def generate_new_examples(self):
if Phase.generate not in self.settings.phases:
return
if self.interesting_examples:
# The example database has failing examples from a previous run,
# so we'd rather report that they're still failing ASAP than take
# the time to look for additional failures.
return
zero_data = self.cached_test_function(hbytes(self.settings.buffer_size))
if zero_data.status > Status.OVERRUN:
self.__data_cache.pin(zero_data.buffer)
if zero_data.status == Status.OVERRUN or (
zero_data.status == Status.VALID
and len(zero_data.buffer) * 2 > self.settings.buffer_size
):
fail_health_check(
self.settings,
"The smallest natural example for your test is extremely "
"large. This makes it difficult for Hypothesis to generate "
"good examples, especially when trying to reduce failing ones "
"at the end. Consider reducing the size of your data if it is "
"of a fixed size. You could also fix this by improving how "
"your data shrinks (see https://hypothesis.readthedocs.io/en/"
"latest/data.html#shrinking for details), or by introducing "
"default values inside your strategy. e.g. could you replace "
"some arguments with their defaults by using "
"one_of(none(), some_complex_strategy)?",
HealthCheck.large_base_example,
)
if zero_data is not Overrun:
# If the language starts with writes of length >= cap then there is
# only one string in it: Everything after cap is forced to be zero (or
# to be whatever value is written there). That means that once we've
# tried the zero value, there's nothing left for us to do, so we
# exit early here.
has_non_forced = False
# It's impossible to fall out of this loop normally because if we
# did then that would mean that all blocks are writes, so we would
# already have triggered the exhaustedness check on the tree and
# finished running.
for b in zero_data.blocks: # pragma: no branch
if b.start >= self.cap:
break
if not b.forced:
has_non_forced = True
break
if not has_non_forced:
self.exit_with(ExitReason.finished)
self.health_check_state = HealthCheckState()
def should_generate_more():
# If we haven't found a bug, keep looking. We check this before
# doing anything else as it's by far the most common case.
if not self.interesting_examples:
return True
# If we've found a bug and won't report more than one, stop looking.
elif not self.settings.report_multiple_bugs:
return False
assert self.first_bug_found_at <= self.last_bug_found_at <= self.call_count
# End the generation phase where we would have ended it if no bugs had
# been found. This reproduces the exit logic in `self.test_function`,
# but with the important distinction that this clause will move on to
# the shrinking phase having found one or more bugs, while the other
# will exit having found zero bugs.
if self.valid_examples >= self.settings.max_examples or self.call_count >= max(
self.settings.max_examples * 10, 1000
): # pragma: no cover
return False
# Otherwise, keep searching for between ten and 'a heuristic' calls.
# We cap 'calls after first bug' so errors are reported reasonably
# soon even for tests that are allowed to run for a very long time,
# or sooner if the latest half of our test effort has been fruitless.
return self.call_count < MIN_TEST_CALLS or self.call_count < min(
self.first_bug_found_at + 1000, self.last_bug_found_at * 2
)
count = 0
while should_generate_more() and (
count < 10
or self.health_check_state is not None
# If we have not found a valid prefix yet, the target selector will
# be empty and the mutation stage will fail with a very rare internal
# error. We therefore continue this initial random generation step
# until we have found at least one prefix to mutate.
or len(self.target_selector) == 0
):
prefix = self.generate_novel_prefix()
def draw_bytes(data, n):
if data.index < len(prefix):
result = prefix[data.index : data.index + n]
# We always draw prefixes as a whole number of blocks
assert len(result) == n
else:
result = uniform(self.random, n)
return self.__zero_bound(data, result)
last_data = self.new_conjecture_data(draw_bytes)
self.test_function(last_data)
last_data.freeze()
count += 1
mutations = 0
mutator = self._new_mutator()
zero_bound_queue = []
while should_generate_more():
if zero_bound_queue:
# Whenever we generated an example and it hits a bound
# which forces zero blocks into it, this creates a weird
# distortion effect by making certain parts of the data
# stream (especially ones to the right) much more likely
# to be zero. We fix this by redistributing the generated
# data by shuffling it randomly. This results in the
# zero data being spread evenly throughout the buffer.
# Hopefully the shrinking this causes will cause us to
# naturally fail to hit the bound.
# If it doesn't then we will queue the new version up again
# (now with more zeros) and try again.
overdrawn = zero_bound_queue.pop()
buffer = bytearray(overdrawn.buffer)
# These will have values written to them that are different
# from what's in them anyway, so the value there doesn't
# really "count" for distributional purposes, and if we
# leave them in then they can cause the fraction of non
# zero bytes to increase on redraw instead of decrease.
for i in overdrawn.forced_indices:
buffer[i] = 0
self.random.shuffle(buffer)
buffer = hbytes(buffer)
def draw_bytes(data, n):
result = buffer[data.index : data.index + n]
if len(result) < n:
result += hbytes(n - len(result))
return self.__rewrite(data, result)
data = self.new_conjecture_data(draw_bytes=draw_bytes)
self.test_function(data)
data.freeze()
else:
origin = self.target_selector.select()
mutations += 1
data = self.new_conjecture_data(draw_bytes=mutator(origin))
self.test_function(data)
data.freeze()
if data.status > origin.status:
mutations = 0
elif data.status < origin.status or mutations >= 10:
# Cap the variations of a single example and move on to
# an entirely fresh start. Ten is an entirely arbitrary
# constant, but it's been working well for years.
mutations = 0
mutator = self._new_mutator()
if getattr(data, "hit_zero_bound", False):
zero_bound_queue.append(data)
mutations += 1
|
def generate_new_examples(self):
if Phase.generate not in self.settings.phases:
return
if self.interesting_examples:
# The example database has failing examples from a previous run,
# so we'd rather report that they're still failing ASAP than take
# the time to look for additional failures.
return
zero_data = self.cached_test_function(hbytes(self.settings.buffer_size))
if zero_data.status > Status.OVERRUN:
self.__data_cache.pin(zero_data.buffer)
if zero_data.status == Status.OVERRUN or (
zero_data.status == Status.VALID
and len(zero_data.buffer) * 2 > self.settings.buffer_size
):
fail_health_check(
self.settings,
"The smallest natural example for your test is extremely "
"large. This makes it difficult for Hypothesis to generate "
"good examples, especially when trying to reduce failing ones "
"at the end. Consider reducing the size of your data if it is "
"of a fixed size. You could also fix this by improving how "
"your data shrinks (see https://hypothesis.readthedocs.io/en/"
"latest/data.html#shrinking for details), or by introducing "
"default values inside your strategy. e.g. could you replace "
"some arguments with their defaults by using "
"one_of(none(), some_complex_strategy)?",
HealthCheck.large_base_example,
)
if zero_data is not Overrun:
# If the language starts with writes of length >= cap then there is
# only one string in it: Everything after cap is forced to be zero (or
# to be whatever value is written there). That means that once we've
# tried the zero value, there's nothing left for us to do, so we
# exit early here.
has_non_forced = False
# It's impossible to fall out of this loop normally because if we
# did then that would mean that all blocks are writes, so we would
# already have triggered the exhaustedness check on the tree and
# finished running.
for b in zero_data.blocks: # pragma: no branch
if b.start >= self.cap:
break
if not b.forced:
has_non_forced = True
break
if not has_non_forced:
self.exit_with(ExitReason.finished)
self.health_check_state = HealthCheckState()
def should_generate_more():
# If we haven't found a bug, keep looking. We check this before
# doing anything else as it's by far the most common case.
if not self.interesting_examples:
return True
# If we've found a bug and won't report more than one, stop looking.
elif not self.settings.report_multiple_bugs:
return False
assert self.first_bug_found_at <= self.last_bug_found_at <= self.call_count
# End the generation phase where we would have ended it if no bugs had
# been found. This reproduces the exit logic in `self.test_function`,
# but with the important distinction that this clause will move on to
# the shrinking phase having found one or more bugs, while the other
# will exit having found zero bugs.
if self.valid_examples >= self.settings.max_examples or self.call_count >= max(
self.settings.max_examples * 10, 1000
): # pragma: no cover
return False
# Otherwise, keep searching for between ten and 'a heuristic' calls.
# We cap 'calls after first bug' so errors are reported reasonably
# soon even for tests that are allowed to run for a very long time,
# or sooner if the latest half of our test effort has been fruitless.
return self.call_count < MIN_TEST_CALLS or self.call_count < min(
self.first_bug_found_at + 1000, self.last_bug_found_at * 2
)
count = 0
while should_generate_more() and (
count < 10 or self.health_check_state is not None
):
prefix = self.generate_novel_prefix()
def draw_bytes(data, n):
if data.index < len(prefix):
result = prefix[data.index : data.index + n]
# We always draw prefixes as a whole number of blocks
assert len(result) == n
else:
result = uniform(self.random, n)
return self.__zero_bound(data, result)
last_data = self.new_conjecture_data(draw_bytes)
self.test_function(last_data)
last_data.freeze()
count += 1
mutations = 0
mutator = self._new_mutator()
zero_bound_queue = []
while should_generate_more():
if zero_bound_queue:
# Whenever we generated an example and it hits a bound
# which forces zero blocks into it, this creates a weird
# distortion effect by making certain parts of the data
# stream (especially ones to the right) much more likely
# to be zero. We fix this by redistributing the generated
# data by shuffling it randomly. This results in the
# zero data being spread evenly throughout the buffer.
# Hopefully the shrinking this causes will cause us to
# naturally fail to hit the bound.
# If it doesn't then we will queue the new version up again
# (now with more zeros) and try again.
overdrawn = zero_bound_queue.pop()
buffer = bytearray(overdrawn.buffer)
# These will have values written to them that are different
# from what's in them anyway, so the value there doesn't
# really "count" for distributional purposes, and if we
# leave them in then they can cause the fraction of non
# zero bytes to increase on redraw instead of decrease.
for i in overdrawn.forced_indices:
buffer[i] = 0
self.random.shuffle(buffer)
buffer = hbytes(buffer)
def draw_bytes(data, n):
result = buffer[data.index : data.index + n]
if len(result) < n:
result += hbytes(n - len(result))
return self.__rewrite(data, result)
data = self.new_conjecture_data(draw_bytes=draw_bytes)
self.test_function(data)
data.freeze()
else:
origin = self.target_selector.select()
mutations += 1
data = self.new_conjecture_data(draw_bytes=mutator(origin))
self.test_function(data)
data.freeze()
if data.status > origin.status:
mutations = 0
elif data.status < origin.status or mutations >= 10:
# Cap the variations of a single example and move on to
# an entirely fresh start. Ten is an entirely arbitrary
# constant, but it's been working well for years.
mutations = 0
mutator = self._new_mutator()
if getattr(data, "hit_zero_bound", False):
zero_bound_queue.append(data)
mutations += 1
|
https://github.com/HypothesisWorks/hypothesis/issues/1937
|
______________________ test_explore_an_arbitrary_language ______________________
[gw1] linux2 -- Python 2.7.14 /home/vsts/work/1/s/hypothesis-python/.tox/py27-full/bin/python
Traceback (most recent call last):
File "/home/vsts/work/1/s/hypothesis-python/tests/nocover/test_explore_arbitrary_languages.py", line 119, in test_explore_an_arbitrary_language
suppress_health_check=HealthCheck.all(),
File "/home/vsts/work/1/s/hypothesis-python/.tox/py27-full/lib/python2.7/site-packages/hypothesis/core.py", line 987, in wrapped_test
state.run()
File "/home/vsts/work/1/s/hypothesis-python/.tox/py27-full/lib/python2.7/site-packages/hypothesis/core.py", line 704, in run
info.__expected_traceback,
File "/home/vsts/work/1/s/hypothesis-python/.tox/py27-full/lib/python2.7/site-packages/hypothesis/core.py", line 571, in execute
result = self.test_runner(data, run)
File "/home/vsts/work/1/s/hypothesis-python/.tox/py27-full/lib/python2.7/site-packages/hypothesis/executors.py", line 56, in default_new_style_executor
return function(data)
File "/home/vsts/work/1/s/hypothesis-python/.tox/py27-full/lib/python2.7/site-packages/hypothesis/core.py", line 569, in run
return test(*args, **kwargs)
File "/home/vsts/work/1/s/hypothesis-python/tests/nocover/test_explore_arbitrary_languages.py", line 127, in test_explore_an_arbitrary_language
run_language_test_for(root, data, seed)
File "/home/vsts/work/1/s/hypothesis-python/tests/nocover/test_explore_arbitrary_languages.py", line 111, in run_language_test_for
runner.run()
File "/home/vsts/work/1/s/hypothesis-python/.tox/py27-full/lib/python2.7/site-packages/hypothesis/internal/conjecture/engine.py", line 398, in run
self._run()
File "/home/vsts/work/1/s/hypothesis-python/.tox/py27-full/lib/python2.7/site-packages/hypothesis/internal/conjecture/engine.py", line 763, in _run
self.generate_new_examples()
File "/home/vsts/work/1/s/hypothesis-python/.tox/py27-full/lib/python2.7/site-packages/hypothesis/internal/conjecture/engine.py", line 744, in generate_new_examples
origin = self.target_selector.select()
File "/home/vsts/work/1/s/hypothesis-python/.tox/py27-full/lib/python2.7/site-packages/hypothesis/internal/conjecture/engine.py", line 1003, in select
return self.random.choice(self.used_examples)
File "/home/vsts/.cache/hypothesis-build-runtimes/python-versions/2.7.14/lib/python2.7/random.py", line 277, in choice
return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty
IndexError: list index out of range
---------------------------------- Hypothesis ----------------------------------
Falsifying example: test_explore_an_arbitrary_language(data=data(...))
Draw 1: Branch(bits=11, children={})
Draw 2: 187
Branch(bits=11, children={
0: Terminal(status=Status.INTERESTING, payload=5),
64: Terminal(status=Status.INTERESTING, payload=2),
1891: Terminal(status=Status.INTERESTING, payload=5),
1894: Terminal(status=Status.INTERESTING, payload=7),
1575: Terminal(status=Status.INTERESTING, payload=2),
1224: Terminal(status=Status.INTERESTING, payload=3),
528: Terminal(status=Status.INTERESTING, payload=2),
439: Terminal(status=Status.INTERESTING, payload=7),
1753: Terminal(status=Status.INTERESTING, payload=2),
1658: Write(value='\x05', child=Terminal(status=Status.INTERESTING, payload=5)),
543: Terminal(status=Status.INTERESTING, payload=7)
})
|
IndexError
|
def floats(
min_value=None, # type: Real
max_value=None, # type: Real
allow_nan=None, # type: bool
allow_infinity=None, # type: bool
width=64, # type: int
exclude_min=False, # type: bool
exclude_max=False, # type: bool
):
# type: (...) -> SearchStrategy[float]
"""Returns a strategy which generates floats.
- If min_value is not None, all values will be >= min_value.
- If max_value is not None, all values will be <= max_value.
- If min_value or max_value is not None, it is an error to enable
allow_nan.
- If both min_value and max_value are not None, it is an error to enable
allow_infinity.
Where not explicitly ruled out by the bounds, all of infinity, -infinity
and NaN are possible values generated by this strategy.
The width argument specifies the maximum number of bits of precision
required to represent the generated float. Valid values are 16, 32, or 64.
Passing ``width=32`` will still use the builtin 64-bit ``float`` class,
but always for values which can be exactly represented as a 32-bit float.
Half-precision floats (``width=16``) are only supported on Python 3.6, or
if :pypi:`Numpy` is installed.
The exclude_min and exclude_max argument can be used to generate numbers
from open or half-open intervals, by excluding the respective endpoints.
Attempting to exclude an endpoint which is None will raise an error;
use ``allow_infinity=False`` to generate finite floats. You can however
use e.g. ``min_value=float("-inf"), exclude_min=True`` to exclude only
one infinite endpoint.
Examples from this strategy have a complicated and hard to explain
shrinking behaviour, but it tries to improve "human readability". Finite
numbers will be preferred to infinity and infinity will be preferred to
NaN.
"""
check_type(bool, exclude_min, "exclude_min")
check_type(bool, exclude_max, "exclude_max")
if allow_nan is None:
allow_nan = bool(min_value is None and max_value is None)
elif allow_nan:
if min_value is not None or max_value is not None:
raise InvalidArgument(
"Cannot have allow_nan=%r, with min_value or max_value" % (allow_nan)
)
if width not in (16, 32, 64):
raise InvalidArgument(
"Got width=%r, but the only valid values are the integers 16, "
"32, and 64." % (width,)
)
if width == 16 and sys.version_info[:2] < (3, 6) and numpy is None:
raise InvalidArgument( # pragma: no cover
"width=16 requires either Numpy, or Python >= 3.6"
)
check_valid_bound(min_value, "min_value")
check_valid_bound(max_value, "max_value")
min_arg, max_arg = min_value, max_value
if min_value is not None:
min_value = float_of(min_value, width)
assert isinstance(min_value, float)
if max_value is not None:
max_value = float_of(max_value, width)
assert isinstance(max_value, float)
if min_value != min_arg:
note_deprecation(
"min_value=%r cannot be exactly represented as a float of width "
"%d, which will be an error in a future version. Use min_value=%r "
"instead." % (min_arg, width, min_value),
since="2018-10-10",
)
if max_value != max_arg:
note_deprecation(
"max_value=%r cannot be exactly represented as a float of width "
"%d, which will be an error in a future version. Use max_value=%r "
"instead" % (max_arg, width, max_value),
since="2018-10-10",
)
if exclude_min and (min_value is None or min_value == float("inf")):
raise InvalidArgument("Cannot exclude min_value=%r" % (min_value,))
if exclude_max and (max_value is None or max_value == float("-inf")):
raise InvalidArgument("Cannot exclude max_value=%r" % (max_value,))
if min_value is not None and (
exclude_min or (min_arg is not None and min_value < min_arg)
):
min_value = next_up(min_value, width)
assert min_value > min_arg or min_value == min_arg == 0 # type: ignore
if max_value is not None and (
exclude_max or (max_arg is not None and max_value > max_arg)
):
max_value = next_down(max_value, width)
assert max_value < max_arg or max_value == max_arg == 0 # type: ignore
if min_value == float("-inf"):
min_value = None
if max_value == float("inf"):
max_value = None
bad_zero_bounds = (
min_value == max_value == 0
and is_negative(max_value)
and not is_negative(min_value)
)
if (
min_value is not None
and max_value is not None
and (min_value > max_value or bad_zero_bounds)
):
# This is a custom alternative to check_valid_interval, because we want
# to include the bit-width and exclusion information in the message.
msg = (
"There are no %s-bit floating-point values between min_value=%r "
"and max_value=%r" % (width, min_arg, max_arg)
)
if exclude_min or exclude_max:
msg += ", exclude_min=%r and exclude_max=%r" % (exclude_min, exclude_max)
if bad_zero_bounds:
note_deprecation(msg, since="RELEASEDAY")
else:
raise InvalidArgument(msg)
if allow_infinity is None:
allow_infinity = bool(min_value is None or max_value is None)
elif allow_infinity:
if min_value is not None and max_value is not None:
raise InvalidArgument(
"Cannot have allow_infinity=%r, with both min_value and "
"max_value" % (allow_infinity)
)
elif min_value == float("inf"):
raise InvalidArgument("allow_infinity=False excludes min_value=inf")
elif max_value == float("-inf"):
raise InvalidArgument("allow_infinity=False excludes max_value=-inf")
if min_value is None and max_value is None:
result = FloatStrategy(allow_infinity=allow_infinity, allow_nan=allow_nan) # type: SearchStrategy[float]
elif min_value is not None and max_value is not None:
if min_value == max_value:
assert isinstance(min_value, float)
result = just(min_value)
elif is_negative(min_value):
if is_negative(max_value):
result = floats(min_value=-max_value, max_value=-min_value).map(
operator.neg
)
else:
result = floats(min_value=0.0, max_value=max_value) | floats(
min_value=0.0, max_value=-min_value
).map(operator.neg)
elif count_between_floats(min_value, max_value) > 1000:
result = FixedBoundedFloatStrategy(
lower_bound=min_value, upper_bound=max_value
)
else:
ub_int = float_to_int(max_value, width)
lb_int = float_to_int(min_value, width)
assert lb_int <= ub_int
result = integers(min_value=lb_int, max_value=ub_int).map(
lambda x: int_to_float(x, width)
)
elif min_value is not None:
assert isinstance(min_value, float)
if min_value < 0:
result = floats(min_value=0.0, allow_infinity=allow_infinity) | floats(
min_value=min_value, max_value=-0.0
)
else:
result = floats(allow_infinity=allow_infinity, allow_nan=False).map(
lambda x: min_value + abs(x) # type: ignore
)
if not allow_infinity:
result = result.filter(lambda x: not math.isinf(x))
if min_value == 0 and not is_negative(min_value):
result = result.filter(lambda x: math.copysign(1.0, x) == 1)
else:
assert isinstance(max_value, float)
if max_value > 0:
result = floats(min_value=0.0, max_value=max_value) | floats(
max_value=-0.0, allow_infinity=allow_infinity
)
else:
result = floats(allow_infinity=allow_infinity, allow_nan=False).map(
lambda x: max_value - abs(x) # type: ignore
)
if not allow_infinity:
result = result.filter(lambda x: not math.isinf(x))
if max_value == 0 and is_negative(max_value):
result = result.filter(is_negative)
if width < 64:
def downcast(x):
try:
return float_of(x, width)
except OverflowError:
reject()
return result.map(downcast)
return result
|
def floats(
min_value=None, # type: Real
max_value=None, # type: Real
allow_nan=None, # type: bool
allow_infinity=None, # type: bool
width=64, # type: int
exclude_min=False, # type: bool
exclude_max=False, # type: bool
):
# type: (...) -> SearchStrategy[float]
"""Returns a strategy which generates floats.
- If min_value is not None, all values will be >= min_value.
- If max_value is not None, all values will be <= max_value.
- If min_value or max_value is not None, it is an error to enable
allow_nan.
- If both min_value and max_value are not None, it is an error to enable
allow_infinity.
Where not explicitly ruled out by the bounds, all of infinity, -infinity
and NaN are possible values generated by this strategy.
The width argument specifies the maximum number of bits of precision
required to represent the generated float. Valid values are 16, 32, or 64.
Passing ``width=32`` will still use the builtin 64-bit ``float`` class,
but always for values which can be exactly represented as a 32-bit float.
Half-precision floats (``width=16``) are only supported on Python 3.6, or
if :pypi:`Numpy` is installed.
The exclude_min and exclude_max argument can be used to generate numbers
from open or half-open intervals, by excluding the respective endpoints.
Attempting to exclude an endpoint which is None will raise an error;
use ``allow_infinity=False`` to generate finite floats. You can however
use e.g. ``min_value=float("-inf"), exclude_min=True`` to exclude only
one infinite endpoint.
Examples from this strategy have a complicated and hard to explain
shrinking behaviour, but it tries to improve "human readability". Finite
numbers will be preferred to infinity and infinity will be preferred to
NaN.
"""
check_type(bool, exclude_min, "exclude_min")
check_type(bool, exclude_max, "exclude_max")
if allow_nan is None:
allow_nan = bool(min_value is None and max_value is None)
elif allow_nan:
if min_value is not None or max_value is not None:
raise InvalidArgument(
"Cannot have allow_nan=%r, with min_value or max_value" % (allow_nan)
)
if width not in (16, 32, 64):
raise InvalidArgument(
"Got width=%r, but the only valid values are the integers 16, "
"32, and 64." % (width,)
)
if width == 16 and sys.version_info[:2] < (3, 6) and numpy is None:
raise InvalidArgument( # pragma: no cover
"width=16 requires either Numpy, or Python >= 3.6"
)
check_valid_bound(min_value, "min_value")
check_valid_bound(max_value, "max_value")
min_arg, max_arg = min_value, max_value
if min_value is not None:
min_value = float_of(min_value, width)
assert isinstance(min_value, float)
if max_value is not None:
max_value = float_of(max_value, width)
assert isinstance(max_value, float)
if min_value != min_arg:
note_deprecation(
"min_value=%r cannot be exactly represented as a float of width "
"%d, which will be an error in a future version. Use min_value=%r "
"instead." % (min_arg, width, min_value),
since="2018-10-10",
)
if max_value != max_arg:
note_deprecation(
"max_value=%r cannot be exactly represented as a float of width "
"%d, which will be an error in a future version. Use max_value=%r "
"instead" % (max_arg, width, max_value),
since="2018-10-10",
)
if exclude_min:
if min_value is None:
raise InvalidArgument("Cannot exclude min_value=None")
min_value = next_up(min_value, width=width)
if exclude_max:
if max_value is None:
raise InvalidArgument("Cannot exclude max_value=None")
max_value = next_down(max_value, width=width)
check_valid_interval(min_value, max_value, "min_value", "max_value")
if min_value == float("-inf"):
min_value = None
if max_value == float("inf"):
max_value = None
if min_value is not None and min_arg is not None and min_value < min_arg:
min_value = next_up(min_value, width)
assert min_value > min_arg # type: ignore
if max_value is not None and max_arg is not None and max_value > max_arg:
max_value = next_down(max_value, width)
assert max_value < max_arg # type: ignore
if min_value is not None and max_value is not None and min_value > max_value:
raise InvalidArgument(
"There are no %s-bit floating-point values between min_value=%r "
"and max_value=%r" % (width, min_arg, max_arg)
)
if allow_infinity is None:
allow_infinity = bool(min_value is None or max_value is None)
elif allow_infinity:
if min_value is not None and max_value is not None:
raise InvalidArgument(
"Cannot have allow_infinity=%r, with both min_value and "
"max_value" % (allow_infinity)
)
elif min_value == float("inf"):
raise InvalidArgument("allow_infinity=False excludes min_value=inf")
elif max_value == float("-inf"):
raise InvalidArgument("allow_infinity=False excludes max_value=-inf")
if min_value is None and max_value is None:
result = FloatStrategy(allow_infinity=allow_infinity, allow_nan=allow_nan) # type: SearchStrategy[float]
elif min_value is not None and max_value is not None:
if min_value == max_value:
assert isinstance(min_value, float)
result = just(min_value)
elif is_negative(min_value):
if is_negative(max_value):
result = floats(min_value=-max_value, max_value=-min_value).map(
operator.neg
)
else:
result = floats(min_value=0.0, max_value=max_value) | floats(
min_value=0.0, max_value=-min_value
).map(operator.neg)
elif count_between_floats(min_value, max_value) > 1000:
result = FixedBoundedFloatStrategy(
lower_bound=min_value, upper_bound=max_value
)
else:
ub_int = float_to_int(max_value, width)
lb_int = float_to_int(min_value, width)
assert lb_int <= ub_int
result = integers(min_value=lb_int, max_value=ub_int).map(
lambda x: int_to_float(x, width)
)
elif min_value is not None:
assert isinstance(min_value, float)
if min_value < 0:
result = floats(min_value=0.0, allow_infinity=allow_infinity) | floats(
min_value=min_value, max_value=-0.0
)
else:
result = floats(allow_infinity=allow_infinity, allow_nan=False).map(
lambda x: min_value + abs(x) # type: ignore
)
if not allow_infinity:
result = result.filter(lambda x: not math.isinf(x))
if min_value == 0 and not is_negative(min_value):
result = result.filter(lambda x: math.copysign(1.0, x) == 1)
else:
assert isinstance(max_value, float)
if max_value > 0:
result = floats(min_value=0.0, max_value=max_value) | floats(
max_value=-0.0, allow_infinity=allow_infinity
)
else:
result = floats(allow_infinity=allow_infinity, allow_nan=False).map(
lambda x: max_value - abs(x) # type: ignore
)
if not allow_infinity:
result = result.filter(lambda x: not math.isinf(x))
if max_value == 0 and is_negative(max_value):
result = result.filter(is_negative)
if width < 64:
def downcast(x):
try:
return float_of(x, width)
except OverflowError:
reject()
return result.map(downcast)
return result
|
https://github.com/HypothesisWorks/hypothesis/issues/1859
|
================================================================================================================================= FAILURES =================================================================================================================================
________________________________________________________________________________________________________________________ test_should_be_always_true ________________________________________________________________________________________________________________________
@given(val=floats(max_value=float('-inf'), allow_nan=False, exclude_max=True))
@settings(verbosity=Verbosity.verbose)
def test_should_be_always_true(val):
float_inf_strat.py:6:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
val = -inf
@given(val=floats(max_value=float('-inf'), allow_nan=False, exclude_max=True))
@settings(verbosity=Verbosity.verbose)
def test_should_be_always_true(val):
assert val != float('-inf')
E AssertionError: assert -inf != -inf
E + where -inf = float('-inf')
float_inf_strat.py:8: AssertionError
-------------------------------------------------------------------------------------------------------------------------------- Hypothesis --------------------------------------------------------------------------------------------------------------------------------
Trying example: test_should_be_always_true(val=-inf)
Traceback (most recent call last):
File "/home/alexv/Projects/tmp/float_inf_strat.py", line 8, in test_should_be_always_true
assert val != float('-inf')
AssertionError: assert -inf != -inf
+ where -inf = float('-inf')
Trying example: test_should_be_always_true(val=-inf)
Traceback (most recent call last):
File "/home/alexv/Projects/tmp/float_inf_strat.py", line 8, in test_should_be_always_true
assert val != float('-inf')
AssertionError: assert -inf != -inf
+ where -inf = float('-inf')
Falsifying example: test_should_be_always_true(val=-inf)
____________________________________________________________________________________________________________________ test_should_be_always_true_aswell _____________________________________________________________________________________________________________________
@given(val=floats(min_value=float('inf'), allow_nan=False, exclude_min=True))
@settings(verbosity=Verbosity.verbose)
def test_should_be_always_true_aswell(val):
float_inf_strat.py:11:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
val = inf
@given(val=floats(min_value=float('inf'), allow_nan=False, exclude_min=True))
@settings(verbosity=Verbosity.verbose)
def test_should_be_always_true_aswell(val):
assert val != float('inf')
E AssertionError: assert inf != inf
E + where inf = float('inf')
float_inf_strat.py:13: AssertionError
-------------------------------------------------------------------------------------------------------------------------------- Hypothesis --------------------------------------------------------------------------------------------------------------------------------
Trying example: test_should_be_always_true_aswell(val=inf)
Traceback (most recent call last):
File "/home/alexv/Projects/tmp/float_inf_strat.py", line 13, in test_should_be_always_true_aswell
assert val != float('inf')
AssertionError: assert inf != inf
+ where inf = float('inf')
Trying example: test_should_be_always_true_aswell(val=inf)
Traceback (most recent call last):
File "/home/alexv/Projects/tmp/float_inf_strat.py", line 13, in test_should_be_always_true_aswell
assert val != float('inf')
AssertionError: assert inf != inf
+ where inf = float('inf')
Falsifying example: test_should_be_always_true_aswell(val=inf)
============================================================================================================================= warnings summary =============================================================================================================================
/home/alexv/.virtualenvs/tmptesthyp/lib/python3.6/site-packages/_pytest/config/__init__.py:752
/home/alexv/.virtualenvs/tmptesthyp/lib/python3.6/site-packages/_pytest/config/__init__.py:752: PytestWarning: Module already imported so cannot be rewritten: hypothesis
self._mark_plugins_for_rewrite(hook)
-- Docs: https://docs.pytest.org/en/latest/warnings.html
=================================================================================================================== 2 failed, 1 warnings in 0.08 seconds ===================================================================================================================
|
AssertionError
|
def next_up(value, width=64):
"""Return the first float larger than finite `val` - IEEE 754's `nextUp`.
From https://stackoverflow.com/a/10426033, with thanks to Mark Dickinson.
"""
assert isinstance(value, float)
if math.isnan(value) or (math.isinf(value) and value > 0):
return value
if value == 0.0 and is_negative(value):
return 0.0
fmt_int, fmt_flt = STRUCT_FORMATS[width]
# Note: n is signed; float_to_int returns unsigned
fmt_int = fmt_int.lower()
n = reinterpret_bits(value, fmt_flt, fmt_int)
if n >= 0:
n += 1
else:
n -= 1
return reinterpret_bits(n, fmt_int, fmt_flt)
|
def next_up(value, width=64):
"""Return the first float larger than finite `val` - IEEE 754's `nextUp`.
From https://stackoverflow.com/a/10426033, with thanks to Mark Dickinson.
"""
assert isinstance(value, float)
if math.isnan(value) or (math.isinf(value) and value > 0):
return value
if value == 0.0:
value = 0.0
fmt_int, fmt_flt = STRUCT_FORMATS[width]
# Note: n is signed; float_to_int returns unsigned
fmt_int = fmt_int.lower()
n = reinterpret_bits(value, fmt_flt, fmt_int)
if n >= 0:
n += 1
else:
n -= 1
return reinterpret_bits(n, fmt_int, fmt_flt)
|
https://github.com/HypothesisWorks/hypothesis/issues/1859
|
================================================================================================================================= FAILURES =================================================================================================================================
________________________________________________________________________________________________________________________ test_should_be_always_true ________________________________________________________________________________________________________________________
@given(val=floats(max_value=float('-inf'), allow_nan=False, exclude_max=True))
@settings(verbosity=Verbosity.verbose)
def test_should_be_always_true(val):
float_inf_strat.py:6:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
val = -inf
@given(val=floats(max_value=float('-inf'), allow_nan=False, exclude_max=True))
@settings(verbosity=Verbosity.verbose)
def test_should_be_always_true(val):
assert val != float('-inf')
E AssertionError: assert -inf != -inf
E + where -inf = float('-inf')
float_inf_strat.py:8: AssertionError
-------------------------------------------------------------------------------------------------------------------------------- Hypothesis --------------------------------------------------------------------------------------------------------------------------------
Trying example: test_should_be_always_true(val=-inf)
Traceback (most recent call last):
File "/home/alexv/Projects/tmp/float_inf_strat.py", line 8, in test_should_be_always_true
assert val != float('-inf')
AssertionError: assert -inf != -inf
+ where -inf = float('-inf')
Trying example: test_should_be_always_true(val=-inf)
Traceback (most recent call last):
File "/home/alexv/Projects/tmp/float_inf_strat.py", line 8, in test_should_be_always_true
assert val != float('-inf')
AssertionError: assert -inf != -inf
+ where -inf = float('-inf')
Falsifying example: test_should_be_always_true(val=-inf)
____________________________________________________________________________________________________________________ test_should_be_always_true_aswell _____________________________________________________________________________________________________________________
@given(val=floats(min_value=float('inf'), allow_nan=False, exclude_min=True))
@settings(verbosity=Verbosity.verbose)
def test_should_be_always_true_aswell(val):
float_inf_strat.py:11:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
val = inf
@given(val=floats(min_value=float('inf'), allow_nan=False, exclude_min=True))
@settings(verbosity=Verbosity.verbose)
def test_should_be_always_true_aswell(val):
assert val != float('inf')
E AssertionError: assert inf != inf
E + where inf = float('inf')
float_inf_strat.py:13: AssertionError
-------------------------------------------------------------------------------------------------------------------------------- Hypothesis --------------------------------------------------------------------------------------------------------------------------------
Trying example: test_should_be_always_true_aswell(val=inf)
Traceback (most recent call last):
File "/home/alexv/Projects/tmp/float_inf_strat.py", line 13, in test_should_be_always_true_aswell
assert val != float('inf')
AssertionError: assert inf != inf
+ where inf = float('inf')
Trying example: test_should_be_always_true_aswell(val=inf)
Traceback (most recent call last):
File "/home/alexv/Projects/tmp/float_inf_strat.py", line 13, in test_should_be_always_true_aswell
assert val != float('inf')
AssertionError: assert inf != inf
+ where inf = float('inf')
Falsifying example: test_should_be_always_true_aswell(val=inf)
============================================================================================================================= warnings summary =============================================================================================================================
/home/alexv/.virtualenvs/tmptesthyp/lib/python3.6/site-packages/_pytest/config/__init__.py:752
/home/alexv/.virtualenvs/tmptesthyp/lib/python3.6/site-packages/_pytest/config/__init__.py:752: PytestWarning: Module already imported so cannot be rewritten: hypothesis
self._mark_plugins_for_rewrite(hook)
-- Docs: https://docs.pytest.org/en/latest/warnings.html
=================================================================================================================== 2 failed, 1 warnings in 0.08 seconds ===================================================================================================================
|
AssertionError
|
def extract_lambda_source(f):
"""Extracts a single lambda expression from the string source. Returns a
string indicating an unknown body if it gets confused in any way.
This is not a good function and I am sorry for it. Forgive me my
sins, oh lord
"""
argspec = getfullargspec(f)
arg_strings = []
# In Python 2 you can have destructuring arguments to functions. This
# results in an argspec with non-string values. I'm not very interested in
# handling these properly, but it's important to not crash on them.
bad_lambda = False
for a in argspec.args:
if isinstance(a, (tuple, list)): # pragma: no cover
arg_strings.append("(%s)" % (", ".join(a),))
bad_lambda = True
else:
assert isinstance(a, str)
arg_strings.append(a)
if argspec.varargs:
arg_strings.append("*" + argspec.varargs)
elif argspec.kwonlyargs:
arg_strings.append("*")
for a in argspec.kwonlyargs or []:
default = (argspec.kwonlydefaults or {}).get(a)
if default:
arg_strings.append("{}={}".format(a, default))
else:
arg_strings.append(a)
if_confused = "lambda %s: <unknown>" % (", ".join(arg_strings),)
if bad_lambda: # pragma: no cover
return if_confused
try:
source = inspect.getsource(f)
except IOError:
return if_confused
source = LINE_CONTINUATION.sub(" ", source)
source = WHITESPACE.sub(" ", source)
source = source.strip()
assert "lambda" in source
tree = None
try:
tree = ast.parse(source)
except SyntaxError:
for i in hrange(len(source) - 1, len("lambda"), -1):
prefix = source[:i]
if "lambda" not in prefix:
break
try:
tree = ast.parse(prefix)
source = prefix
break
except SyntaxError:
continue
if tree is None:
if source.startswith("@"):
# This will always eventually find a valid expression because
# the decorator must be a valid Python function call, so will
# eventually be syntactically valid and break out of the loop. Thus
# this loop can never terminate normally, so a no branch pragma is
# appropriate.
for i in hrange(len(source) + 1): # pragma: no branch
p = source[1:i]
if "lambda" in p:
try:
tree = ast.parse(p)
source = p
break
except SyntaxError:
pass
if tree is None:
return if_confused
all_lambdas = extract_all_lambdas(tree)
aligned_lambdas = [l for l in all_lambdas if args_for_lambda_ast(l) == argspec.args]
if len(aligned_lambdas) != 1:
return if_confused
lambda_ast = aligned_lambdas[0]
assert lambda_ast.lineno == 1
# If the source code contains Unicode characters, the bytes of the original
# file don't line up with the string indexes, and `col_offset` doesn't match
# the string we're using. We need to convert the source code into bytes
# before slicing.
#
# Under the hood, the inspect module is using `tokenize.detect_encoding` to
# detect the encoding of the original source file. We'll use the same
# approach to get the source code as bytes.
#
# See https://github.com/HypothesisWorks/hypothesis/issues/1700 for an
# example of what happens if you don't correct for this.
#
# Note: if the code doesn't come from a file (but, for example, a doctest),
# `getsourcefile` will return `None` and the `open()` call will fail with
# an OSError. Or if `f` is a built-in function, in which case we get a
# TypeError. In both cases, fall back to splitting the Unicode string.
# It's not perfect, but it's the best we can do.
#
# Note 2: You can only detect the encoding with `tokenize.detect_encoding`
# in Python 3.2 or later. But that's okay, because the only version that
# affects for us is Python 2.7, and 2.7 doesn't support non-ASCII identifiers:
# https://www.python.org/dev/peps/pep-3131/. In this case we'll get an
# AttributeError.
#
try:
encoding, _ = tokenize.detect_encoding(
open(inspect.getsourcefile(f), "rb").readline
)
source_bytes = source.encode(encoding)
source_bytes = source_bytes[lambda_ast.col_offset :].strip()
source = source_bytes.decode(encoding)
except (AttributeError, OSError, TypeError):
source = source[lambda_ast.col_offset :].strip()
# This ValueError can be thrown in Python 3 if:
#
# - There's a Unicode character in the line before the Lambda, and
# - For some reason we can't detect the source encoding of the file
#
# because slicing on `lambda_ast.col_offset` will account for bytes, but
# the slice will be on Unicode characters.
#
# In practice this seems relatively rare, so we just give up rather than
# trying to recover.
try:
source = source[source.index("lambda") :]
except ValueError:
return if_confused
for i in hrange(len(source), len("lambda"), -1): # pragma: no branch
try:
parsed = ast.parse(source[:i])
assert len(parsed.body) == 1
assert parsed.body
if isinstance(parsed.body[0].value, ast.Lambda):
source = source[:i]
break
except SyntaxError:
pass
lines = source.split("\n")
lines = [PROBABLY_A_COMMENT.sub("", l) for l in lines]
source = "\n".join(lines)
source = WHITESPACE.sub(" ", source)
source = SPACE_FOLLOWS_OPEN_BRACKET.sub("(", source)
source = SPACE_PRECEDES_CLOSE_BRACKET.sub(")", source)
source = source.strip()
return source
|
def extract_lambda_source(f):
"""Extracts a single lambda expression from the string source. Returns a
string indicating an unknown body if it gets confused in any way.
This is not a good function and I am sorry for it. Forgive me my
sins, oh lord
"""
argspec = getfullargspec(f)
arg_strings = []
# In Python 2 you can have destructuring arguments to functions. This
# results in an argspec with non-string values. I'm not very interested in
# handling these properly, but it's important to not crash on them.
bad_lambda = False
for a in argspec.args:
if isinstance(a, (tuple, list)): # pragma: no cover
arg_strings.append("(%s)" % (", ".join(a),))
bad_lambda = True
else:
assert isinstance(a, str)
arg_strings.append(a)
if argspec.varargs:
arg_strings.append("*" + argspec.varargs)
elif argspec.kwonlyargs:
arg_strings.append("*")
for a in argspec.kwonlyargs or []:
default = (argspec.kwonlydefaults or {}).get(a)
if default:
arg_strings.append("{}={}".format(a, default))
else:
arg_strings.append(a)
if_confused = "lambda %s: <unknown>" % (", ".join(arg_strings),)
if bad_lambda: # pragma: no cover
return if_confused
try:
source = inspect.getsource(f)
except IOError:
return if_confused
source = LINE_CONTINUATION.sub(" ", source)
source = WHITESPACE.sub(" ", source)
source = source.strip()
assert "lambda" in source
tree = None
try:
tree = ast.parse(source)
except SyntaxError:
for i in hrange(len(source) - 1, len("lambda"), -1):
prefix = source[:i]
if "lambda" not in prefix:
break
try:
tree = ast.parse(prefix)
source = prefix
break
except SyntaxError:
continue
if tree is None:
if source.startswith("@"):
# This will always eventually find a valid expression because
# the decorator must be a valid Python function call, so will
# eventually be syntactically valid and break out of the loop. Thus
# this loop can never terminate normally, so a no branch pragma is
# appropriate.
for i in hrange(len(source) + 1): # pragma: no branch
p = source[1:i]
if "lambda" in p:
try:
tree = ast.parse(p)
source = p
break
except SyntaxError:
pass
if tree is None:
return if_confused
all_lambdas = extract_all_lambdas(tree)
aligned_lambdas = [l for l in all_lambdas if args_for_lambda_ast(l) == argspec.args]
if len(aligned_lambdas) != 1:
return if_confused
lambda_ast = aligned_lambdas[0]
assert lambda_ast.lineno == 1
source = source[lambda_ast.col_offset :].strip()
source = source[source.index("lambda") :]
for i in hrange(len(source), len("lambda"), -1): # pragma: no branch
try:
parsed = ast.parse(source[:i])
assert len(parsed.body) == 1
assert parsed.body
if isinstance(parsed.body[0].value, ast.Lambda):
source = source[:i]
break
except SyntaxError:
pass
lines = source.split("\n")
lines = [PROBABLY_A_COMMENT.sub("", l) for l in lines]
source = "\n".join(lines)
source = WHITESPACE.sub(" ", source)
source = SPACE_FOLLOWS_OPEN_BRACKET.sub("(", source)
source = SPACE_PRECEDES_CLOSE_BRACKET.sub(")", source)
source = source.strip()
return source
|
https://github.com/HypothesisWorks/hypothesis/issues/1700
|
$ python3.6 -m pytest test2.py --tb=short
pyenv-implicit: found multiple python3.6 in pyenv. Use version 3.6.
================================================================ test session starts ================================================================
platform darwin -- Python 3.6.5, pytest-4.0.2, py-1.7.0, pluggy-0.8.0
hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/Users/lwiman/personal/nerdsniped/sine_testing/.hypothesis/examples')
rootdir: /Users/lwiman/personal/nerdsniped/sine_testing, inifile:
plugins: hypothesis-3.83.2
collected 1 item
test2.py F [100%]
===================================================================== FAILURES ======================================================================
_____________________________________________________________________ test_foo ______________________________________________________________________
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/engine.py:174: in test_function
self._test_function(data)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/core.py:625: in evaluate_test_data
escalate_hypothesis_internal_error()
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/core.py:605: in evaluate_test_data
result = self.execute(data)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/core.py:574: in execute
result = self.test_runner(data, run)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/executors.py:56: in default_new_style_executor
return function(data)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/core.py:551: in run
args, kwargs = data.draw(self.search_strategy)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:224: in draw
return self.__draw(strategy, label=label)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:239: in __draw
return strategy.do_draw(self)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/searchstrategy/collections.py:55: in do_draw
return tuple(data.draw(e) for e in self.element_strategies)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/searchstrategy/collections.py:55: in <genexpr>
return tuple(data.draw(e) for e in self.element_strategies)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:224: in draw
return self.__draw(strategy, label=label)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:233: in __draw
return strategy.do_draw(self)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/searchstrategy/strategies.py:576: in do_draw
result = self.pack(data.draw(self.mapped_strategy))
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:224: in draw
return self.__draw(strategy, label=label)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:233: in __draw
return strategy.do_draw(self)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/searchstrategy/lazy.py:156: in do_draw
return data.draw(self.wrapped_strategy)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:224: in draw
return self.__draw(strategy, label=label)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:233: in __draw
return strategy.do_draw(self)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/searchstrategy/strategies.py:576: in do_draw
result = self.pack(data.draw(self.mapped_strategy))
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:224: in draw
return self.__draw(strategy, label=label)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:233: in __draw
return strategy.do_draw(self)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/searchstrategy/collections.py:55: in do_draw
return tuple(data.draw(e) for e in self.element_strategies)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/searchstrategy/collections.py:55: in <genexpr>
return tuple(data.draw(e) for e in self.element_strategies)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:224: in draw
return self.__draw(strategy, label=label)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/conjecture/data.py:233: in __draw
return strategy.do_draw(self)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/searchstrategy/strategies.py:633: in do_draw
data.note_event("Aborted test because unable to satisfy %r" % (self,))
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/searchstrategy/strategies.py:610: in __repr__
get_pretty_function_description(self.condition),
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/reflection.py:375: in get_pretty_function_description
result = extract_lambda_source(f)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/reflection.py:348: in extract_lambda_source
source = source[source.index("lambda") :]
E ValueError: substring not found
During handling of the above exception, another exception occurred:
test2.py:7: in test_foo
???
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/lazyformat.py:29: in __str__
return self.__format_string % self.__args
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/searchstrategy/strategies.py:610: in __repr__
get_pretty_function_description(self.condition),
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/reflection.py:375: in get_pretty_function_description
result = extract_lambda_source(f)
../../../.pyenv/versions/3.6/lib/python3.6/site-packages/hypothesis/internal/reflection.py:348: in extract_lambda_source
source = source[source.index("lambda") :]
E ValueError: substring not found
-------------------------------------------------------------------- Hypothesis ---------------------------------------------------------------------
You can add @seed(306900588268218898824024975885321058451) to this test or run pytest with --hypothesis-seed=306900588268218898824024975885321058451 to reproduce this failure.
============================================================= 1 failed in 0.32 seconds ==============================================================
|
ValueError
|
def given(
*given_arguments, # type: Union[SearchStrategy, InferType]
**given_kwargs, # type: Union[SearchStrategy, InferType]
):
# type: (...) -> Callable[[Callable[..., None]], Callable[..., None]]
"""A decorator for turning a test function that accepts arguments into a
randomized test.
This is the main entry point to Hypothesis.
"""
def run_test_with_generator(test):
if hasattr(test, "_hypothesis_internal_test_function_without_warning"):
# Pull out the original test function to avoid the warning we
# stuck in about using @settings without @given.
test = test._hypothesis_internal_test_function_without_warning
if inspect.isclass(test):
# Provide a meaningful error to users, instead of exceptions from
# internals that assume we're dealing with a function.
raise InvalidArgument("@given cannot be applied to a class.")
generator_arguments = tuple(given_arguments)
generator_kwargs = dict(given_kwargs)
original_argspec = getfullargspec(test)
check_invalid = is_invalid_test(
test.__name__, original_argspec, generator_arguments, generator_kwargs
)
if check_invalid is not None:
return check_invalid
for name, strategy in zip(
reversed(original_argspec.args), reversed(generator_arguments)
):
generator_kwargs[name] = strategy
argspec = new_given_argspec(original_argspec, generator_kwargs)
@impersonate(test)
@define_function_signature(test.__name__, test.__doc__, argspec)
def wrapped_test(*arguments, **kwargs):
# Tell pytest to omit the body of this function from tracebacks
__tracebackhide__ = True
test = wrapped_test.hypothesis.inner_test
if getattr(test, "is_hypothesis_test", False):
note_deprecation(
(
"You have applied @given to test: %s more than once. In "
"future this will be an error. Applying @given twice "
"wraps the test twice, which can be extremely slow. A "
"similar effect can be gained by combining the arguments "
"of the two calls to given. For example, instead of "
"@given(booleans()) @given(integers()), you could write "
"@given(booleans(), integers())"
)
% (test.__name__,)
)
settings = wrapped_test._hypothesis_internal_use_settings
random = get_random_for_wrapped_test(test, wrapped_test)
if infer in generator_kwargs.values():
hints = get_type_hints(test)
for name in [
name for name, value in generator_kwargs.items() if value is infer
]:
if name not in hints:
raise InvalidArgument(
"passed %s=infer for %s, but %s has no type annotation"
% (name, test.__name__, name)
)
generator_kwargs[name] = st.from_type(hints[name])
processed_args = process_arguments_to_given(
wrapped_test,
arguments,
kwargs,
generator_arguments,
generator_kwargs,
argspec,
test,
settings,
)
arguments, kwargs, test_runner, search_strategy = processed_args
runner = getattr(search_strategy, "runner", None)
if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
msg = (
"You have applied @given to the method %s, which is "
"used by the unittest runner but is not itself a test."
" This is not useful in any way." % test.__name__
)
fail_health_check(settings, msg, HealthCheck.not_a_test_method)
if bad_django_TestCase(runner): # pragma: no cover
# Covered by the Django tests, but not the pytest coverage task
raise InvalidArgument(
"You have applied @given to a method on %s, but this "
"class does not inherit from the supported versions in "
"`hypothesis.extra.django`. Use the Hypothesis variants "
"to ensure that each example is run in a separate "
"database transaction." % qualname(type(runner))
)
state = StateForActualGivenExecution(
test_runner,
search_strategy,
test,
settings,
random,
had_seed=wrapped_test._hypothesis_internal_use_seed,
)
reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure
if reproduce_failure is not None:
expected_version, failure = reproduce_failure
if expected_version != __version__:
raise InvalidArgument(
(
"Attempting to reproduce a failure from a different "
"version of Hypothesis. This failure is from %s, but "
"you are currently running %r. Please change your "
"Hypothesis version to a matching one."
)
% (expected_version, __version__)
)
try:
state.execute(
ConjectureData.for_buffer(decode_failure(failure)),
print_example=True,
is_final=True,
)
raise DidNotReproduce(
"Expected the test to raise an error, but it "
"completed successfully."
)
except StopTest:
raise DidNotReproduce(
"The shape of the test data has changed in some way "
"from where this blob was defined. Are you sure "
"you're running the same test?"
)
except UnsatisfiedAssumption:
raise DidNotReproduce(
"The test data failed to satisfy an assumption in the "
"test. Have you added it since this blob was "
"generated?"
)
execute_explicit_examples(
test_runner, test, wrapped_test, settings, arguments, kwargs
)
if settings.max_examples <= 0:
return
if not (
Phase.reuse in settings.phases or Phase.generate in settings.phases
):
return
try:
if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
subTest = runner.subTest
try:
setattr(runner, "subTest", fake_subTest)
state.run()
finally:
setattr(runner, "subTest", subTest)
else:
state.run()
except BaseException as e:
generated_seed = wrapped_test._hypothesis_internal_use_generated_seed
with local_settings(settings):
if not (state.failed_normally or generated_seed is None):
if running_under_pytest:
report(
"You can add @seed(%(seed)d) to this test or "
"run pytest with --hypothesis-seed=%(seed)d "
"to reproduce this failure." % {"seed": generated_seed}
)
else:
report(
"You can add @seed(%d) to this test to "
"reproduce this failure." % (generated_seed,)
)
# The dance here is to avoid showing users long tracebacks
# full of Hypothesis internals they don't care about.
# We have to do this inline, to avoid adding another
# internal stack frame just when we've removed the rest.
if PY2:
# Python 2 doesn't have Exception.with_traceback(...);
# instead it has a three-argument form of the `raise`
# statement. Unfortunately this is a SyntaxError on
# Python 3, and before Python 2.7.9 it was *also* a
# SyntaxError to use it in a nested function so we
# can't `exec` or `eval` our way out (BPO-21591).
# So unless we break some versions of Python 2, none
# of them get traceback elision.
raise
# On Python 3, we swap out the real traceback for our
# trimmed version. Using a variable ensures that the line
# which will actually appear in trackbacks is as clear as
# possible - "raise the_error_hypothesis_found".
the_error_hypothesis_found = e.with_traceback(
get_trimmed_traceback()
)
raise the_error_hypothesis_found
for attrib in dir(test):
if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)):
setattr(wrapped_test, attrib, getattr(test, attrib))
wrapped_test.is_hypothesis_test = True
if hasattr(test, "_hypothesis_internal_settings_applied"):
# Used to check if @settings is applied twice.
wrapped_test._hypothesis_internal_settings_applied = True
wrapped_test._hypothesis_internal_use_seed = getattr(
test, "_hypothesis_internal_use_seed", None
)
wrapped_test._hypothesis_internal_use_settings = (
getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default
)
wrapped_test._hypothesis_internal_use_reproduce_failure = getattr(
test, "_hypothesis_internal_use_reproduce_failure", None
)
wrapped_test.hypothesis = HypothesisHandle(test)
return wrapped_test
return run_test_with_generator
|
def given(
*given_arguments, # type: Union[SearchStrategy, InferType]
**given_kwargs, # type: Union[SearchStrategy, InferType]
):
# type: (...) -> Callable[[Callable[..., None]], Callable[..., None]]
"""A decorator for turning a test function that accepts arguments into a
randomized test.
This is the main entry point to Hypothesis.
"""
def run_test_with_generator(test):
if hasattr(test, "_hypothesis_internal_test_function_without_warning"):
# Pull out the original test function to avoid the warning we
# stuck in about using @settings without @given.
test = test._hypothesis_internal_test_function_without_warning
if inspect.isclass(test):
# Provide a meaningful error to users, instead of exceptions from
# internals that assume we're dealing with a function.
raise InvalidArgument("@given cannot be applied to a class.")
generator_arguments = tuple(given_arguments)
generator_kwargs = dict(given_kwargs)
original_argspec = getfullargspec(test)
check_invalid = is_invalid_test(
test.__name__, original_argspec, generator_arguments, generator_kwargs
)
if check_invalid is not None:
return check_invalid
for name, strategy in zip(
reversed(original_argspec.args), reversed(generator_arguments)
):
generator_kwargs[name] = strategy
argspec = new_given_argspec(original_argspec, generator_kwargs)
@impersonate(test)
@define_function_signature(test.__name__, test.__doc__, argspec)
def wrapped_test(*arguments, **kwargs):
# Tell pytest to omit the body of this function from tracebacks
__tracebackhide__ = True
test = wrapped_test.hypothesis.inner_test
if getattr(test, "is_hypothesis_test", False):
note_deprecation(
(
"You have applied @given to test: %s more than once. In "
"future this will be an error. Applying @given twice "
"wraps the test twice, which can be extremely slow. A "
"similar effect can be gained by combining the arguments "
"of the two calls to given. For example, instead of "
"@given(booleans()) @given(integers()), you could write "
"@given(booleans(), integers())"
)
% (test.__name__,)
)
settings = wrapped_test._hypothesis_internal_use_settings
random = get_random_for_wrapped_test(test, wrapped_test)
if infer in generator_kwargs.values():
hints = get_type_hints(test)
for name in [
name for name, value in generator_kwargs.items() if value is infer
]:
if name not in hints:
raise InvalidArgument(
"passed %s=infer for %s, but %s has no type annotation"
% (name, test.__name__, name)
)
generator_kwargs[name] = st.from_type(hints[name])
processed_args = process_arguments_to_given(
wrapped_test,
arguments,
kwargs,
generator_arguments,
generator_kwargs,
argspec,
test,
settings,
)
arguments, kwargs, test_runner, search_strategy = processed_args
runner = getattr(search_strategy, "runner", None)
if isinstance(runner, TestCase) and test.__name__ in dir(TestCase):
msg = (
"You have applied @given to the method %s, which is "
"used by the unittest runner but is not itself a test."
" This is not useful in any way." % test.__name__
)
fail_health_check(settings, msg, HealthCheck.not_a_test_method)
if bad_django_TestCase(runner): # pragma: no cover
# Covered by the Django tests, but not the pytest coverage task
raise InvalidArgument(
"You have applied @given to a method on %s, but this "
"class does not inherit from the supported versions in "
"`hypothesis.extra.django`. Use the Hypothesis variants "
"to ensure that each example is run in a separate "
"database transaction." % qualname(type(runner))
)
state = StateForActualGivenExecution(
test_runner,
search_strategy,
test,
settings,
random,
had_seed=wrapped_test._hypothesis_internal_use_seed,
)
reproduce_failure = wrapped_test._hypothesis_internal_use_reproduce_failure
if reproduce_failure is not None:
expected_version, failure = reproduce_failure
if expected_version != __version__:
raise InvalidArgument(
(
"Attempting to reproduce a failure from a different "
"version of Hypothesis. This failure is from %s, but "
"you are currently running %r. Please change your "
"Hypothesis version to a matching one."
)
% (expected_version, __version__)
)
try:
state.execute(
ConjectureData.for_buffer(decode_failure(failure)),
print_example=True,
is_final=True,
)
raise DidNotReproduce(
"Expected the test to raise an error, but it "
"completed successfully."
)
except StopTest:
raise DidNotReproduce(
"The shape of the test data has changed in some way "
"from where this blob was defined. Are you sure "
"you're running the same test?"
)
except UnsatisfiedAssumption:
raise DidNotReproduce(
"The test data failed to satisfy an assumption in the "
"test. Have you added it since this blob was "
"generated?"
)
execute_explicit_examples(
test_runner, test, wrapped_test, settings, arguments, kwargs
)
if settings.max_examples <= 0:
return
if not (
Phase.reuse in settings.phases or Phase.generate in settings.phases
):
return
try:
if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
subTest = runner.subTest
try:
setattr(runner, "subTest", fake_subTest)
state.run()
finally:
setattr(runner, "subTest", subTest)
else:
state.run()
except BaseException as e:
generated_seed = wrapped_test._hypothesis_internal_use_generated_seed
with local_settings(settings):
if not (state.failed_normally or generated_seed is None):
if running_under_pytest:
report(
"You can add @seed(%(seed)d) to this test or "
"run pytest with --hypothesis-seed=%(seed)d "
"to reproduce this failure." % {"seed": generated_seed}
)
else:
report(
"You can add @seed(%d) to this test to "
"reproduce this failure." % (generated_seed,)
)
# The dance here is to avoid showing users long tracebacks
# full of Hypothesis internals they don't care about.
# We have to do this inline, to avoid adding another
# internal stack frame just when we've removed the rest.
if PY2:
# Python 2 doesn't have Exception.with_traceback(...);
# instead it has a three-argument form of the `raise`
# statement. Which is a SyntaxError on Python 3.
exec(
"raise type(e), e, get_trimmed_traceback()",
globals(),
locals(),
)
# On Python 3, we swap out the real traceback for our
# trimmed version. Using a variable ensures that the line
# which will actually appear in trackbacks is as clear as
# possible - "raise the_error_hypothesis_found".
the_error_hypothesis_found = e.with_traceback(
get_trimmed_traceback()
)
raise the_error_hypothesis_found
for attrib in dir(test):
if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)):
setattr(wrapped_test, attrib, getattr(test, attrib))
wrapped_test.is_hypothesis_test = True
if hasattr(test, "_hypothesis_internal_settings_applied"):
# Used to check if @settings is applied twice.
wrapped_test._hypothesis_internal_settings_applied = True
wrapped_test._hypothesis_internal_use_seed = getattr(
test, "_hypothesis_internal_use_seed", None
)
wrapped_test._hypothesis_internal_use_settings = (
getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default
)
wrapped_test._hypothesis_internal_use_reproduce_failure = getattr(
test, "_hypothesis_internal_use_reproduce_failure", None
)
wrapped_test.hypothesis = HypothesisHandle(test)
return wrapped_test
return run_test_with_generator
|
https://github.com/HypothesisWorks/hypothesis/issues/1648
|
Traceback (most recent call last):
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/pytest.py", line 77, in <module>
raise SystemExit(pytest.main())
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/_pytest/config/__init__.py", line 49, in main
config = _prepareconfig(args, plugins)
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/_pytest/config/__init__.py", line 186, in _prepareconfig
pluginmanager=pluginmanager, args=args
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/pluggy/hooks.py", line 284, in __call__
return self._hookexec(self, self.get_hookimpls(), kwargs)
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/pluggy/manager.py", line 67, in _hookexec
return self._inner_hookexec(hook, methods, kwargs)
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/pluggy/manager.py", line 61, in <lambda>
firstresult=hook.spec.opts.get("firstresult") if hook.spec else False,
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/pluggy/callers.py", line 203, in _multicall
gen.send(outcome)
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/_pytest/helpconfig.py", line 89, in pytest_cmdline_parse
config = outcome.get_result()
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/pluggy/callers.py", line 81, in get_result
_reraise(*ex) # noqa
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/pluggy/callers.py", line 187, in _multicall
res = hook_impl.function(*args)
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/_pytest/config/__init__.py", line 656, in pytest_cmdline_parse
self.parse(args)
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/_pytest/config/__init__.py", line 828, in parse
self._preparse(args, addopts=addopts)
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/_pytest/config/__init__.py", line 780, in _preparse
self.pluginmanager.load_setuptools_entrypoints("pytest11")
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/pluggy/manager.py", line 267, in load_setuptools_entrypoints
plugin = ep.load()
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/pkg_resources/__init__.py", line 2332, in load
return self.resolve()
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/pkg_resources/__init__.py", line 2338, in resolve
module = __import__(self.module_name, fromlist=['__name__'], level=0)
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/hypothesis/__init__.py", line 30, in <module>
from hypothesis.core import given, find, example, seed, reproduce_failure, \
File "/home/jenkins/workspace/4485-S2JEK2AOWI4X57D@2/cryptography/.tox/py27/local/lib/python2.7/site-packages/hypothesis/core.py", line 948
exec(
SyntaxError: unqualified exec is not allowed in function 'wrapped_test' it is a nested function
|
SyntaxError
|
def __init__(self, max_length, draw_bytes):
self.max_length = max_length
self.is_find = False
self._draw_bytes = draw_bytes
self.overdraw = 0
self.level = 0
self.block_starts = {}
self.blocks = []
self.buffer = bytearray()
self.output = ""
self.status = Status.VALID
self.frozen = False
global global_test_counter
self.testcounter = global_test_counter
global_test_counter += 1
self.start_time = benchmark_time()
self.events = set()
self.forced_indices = set()
self.forced_blocks = set()
self.capped_indices = {}
self.interesting_origin = None
self.tags = set()
self.draw_times = []
self.__intervals = None
self.examples = []
self.example_stack = []
self.has_discards = False
self.start_example(TOP_LABEL)
|
def __init__(self, max_length, draw_bytes):
self.max_length = max_length
self.is_find = False
self._draw_bytes = draw_bytes
self.overdraw = 0
self.level = 0
self.block_starts = {}
self.blocks = []
self.buffer = bytearray()
self.output = ""
self.status = Status.VALID
self.frozen = False
global global_test_counter
self.testcounter = global_test_counter
global_test_counter += 1
self.start_time = benchmark_time()
self.events = set()
self.forced_indices = set()
self.capped_indices = {}
self.interesting_origin = None
self.tags = set()
self.draw_times = []
self.__intervals = None
self.examples = []
self.example_stack = []
self.has_discards = False
self.start_example(TOP_LABEL)
|
https://github.com/HypothesisWorks/hypothesis/issues/1299
|
File "/home/david/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 1325, in greedy_shrink
self.shrink_offset_pairs()
File "/home/david/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 1581, in shrink_offset_pairs
block_val = int_from_block(j)
File "/home/david/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 1551, in int_from_block
return int_from_bytes(current[i])
IndexError: list index out of range
|
IndexError
|
def write(self, string):
self.__assert_not_frozen("write")
self.__check_capacity(len(string))
assert isinstance(string, hbytes)
original = self.index
self.__write(string)
self.forced_indices.update(hrange(original, self.index))
self.forced_blocks.add(len(self.blocks) - 1)
return string
|
def write(self, string):
self.__assert_not_frozen("write")
self.__check_capacity(len(string))
assert isinstance(string, hbytes)
original = self.index
self.__write(string)
self.forced_indices.update(hrange(original, self.index))
return string
|
https://github.com/HypothesisWorks/hypothesis/issues/1299
|
File "/home/david/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 1325, in greedy_shrink
self.shrink_offset_pairs()
File "/home/david/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 1581, in shrink_offset_pairs
block_val = int_from_block(j)
File "/home/david/hypothesis/hypothesis-python/src/hypothesis/internal/conjecture/engine.py", line 1551, in int_from_block
return int_from_bytes(current[i])
IndexError: list index out of range
|
IndexError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.