after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def set_status(self, href, status, account):
"""sets the status of vcard"""
self._check_account(account)
sql_s = "UPDATE {0} SET STATUS = ? WHERE href = ?".format(account + "_m")
self.sql_ex(
sql_s,
(
status,
href,
),
)
|
def set_status(self, href, status, account):
"""sets the status of vcard"""
sql_s = "UPDATE {0} SET STATUS = ? WHERE href = ?".format(account + "_m")
self.sql_ex(
sql_s,
(
status,
href,
),
)
|
https://github.com/pimutils/khal/issues/7
|
Traceback (most recent call last):
File "/home/catern/src/khal/virt/bin/khal", line 5, in <module>
pkg_resources.run_script('khal==0.1.0.dev', 'khal')
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 540, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 1462, in run_script
exec_(script_code, namespace, namespace)
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 41, in exec_
exec("""exec code in globs, locs""")
File "<string>", line 1, in <module>
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/khal-0.1.0.dev-py2.7.egg/EGG-INFO/scripts/khal", line 72, in <module>
File "build/bdist.linux-x86_64/egg/khal/controllers.py", line 178, in __init__
File "build/bdist.linux-x86_64/egg/khal/backend.py", line 440, in get_allday_range
File "build/bdist.linux-x86_64/egg/khal/backend.py", line 164, in sql_ex
sqlite3.OperationalError: no such table: simple_d
|
sqlite3.OperationalError
|
def reset_flag(self, href, account):
"""
resets the status for a given href to 0 (=not edited locally)
"""
self._check_account(account)
sql_s = "UPDATE {0} SET status = ? WHERE href = ?".format(account + "_m")
self.sql_ex(
sql_s,
(
OK,
href,
),
)
|
def reset_flag(self, href, account):
"""
resets the status for a given href to 0 (=not edited locally)
"""
sql_s = "UPDATE {0} SET status = ? WHERE href = ?".format(account + "_m")
self.sql_ex(
sql_s,
(
OK,
href,
),
)
|
https://github.com/pimutils/khal/issues/7
|
Traceback (most recent call last):
File "/home/catern/src/khal/virt/bin/khal", line 5, in <module>
pkg_resources.run_script('khal==0.1.0.dev', 'khal')
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 540, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 1462, in run_script
exec_(script_code, namespace, namespace)
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 41, in exec_
exec("""exec code in globs, locs""")
File "<string>", line 1, in <module>
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/khal-0.1.0.dev-py2.7.egg/EGG-INFO/scripts/khal", line 72, in <module>
File "build/bdist.linux-x86_64/egg/khal/controllers.py", line 178, in __init__
File "build/bdist.linux-x86_64/egg/khal/backend.py", line 440, in get_allday_range
File "build/bdist.linux-x86_64/egg/khal/backend.py", line 164, in sql_ex
sqlite3.OperationalError: no such table: simple_d
|
sqlite3.OperationalError
|
def get_status(self, href, account):
"""
gets the status of the event associated with href in `account`
"""
self._check_account(account)
sql_s = "SELECT status FROM {0} WHERE href = (?)".format(account + "_m")
return self.sql_ex(sql_s, (href,))[0][0]
|
def get_status(self, href, account):
"""
gets the status of the event associated with href in `account`
"""
sql_s = "SELECT status FROM {0} WHERE href = (?)".format(account + "_m")
return self.sql_ex(sql_s, (href,))[0][0]
|
https://github.com/pimutils/khal/issues/7
|
Traceback (most recent call last):
File "/home/catern/src/khal/virt/bin/khal", line 5, in <module>
pkg_resources.run_script('khal==0.1.0.dev', 'khal')
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 540, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 1462, in run_script
exec_(script_code, namespace, namespace)
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 41, in exec_
exec("""exec code in globs, locs""")
File "<string>", line 1, in <module>
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/khal-0.1.0.dev-py2.7.egg/EGG-INFO/scripts/khal", line 72, in <module>
File "build/bdist.linux-x86_64/egg/khal/controllers.py", line 178, in __init__
File "build/bdist.linux-x86_64/egg/khal/backend.py", line 440, in get_allday_range
File "build/bdist.linux-x86_64/egg/khal/backend.py", line 164, in sql_ex
sqlite3.OperationalError: no such table: simple_d
|
sqlite3.OperationalError
|
def _sync_caldav(self):
syncer = caldav.Syncer(
self._resource,
user=self._username,
password=self._password,
verify=self._ssl_verify,
auth=self._auth,
)
# self._dbtool.check_account_table(self.name)
logging.debug("syncing events in the next 365 days")
start = datetime.datetime.utcnow() - datetime.timedelta(days=30)
start_utc = self._local_timezone.localize(start).astimezone(pytz.UTC)
end_utc = start_utc + datetime.timedelta(days=365)
href_etag_list = syncer.get_hel(start=start_utc, end=end_utc)
need_update = self._dbtool.needs_update(self.name, href_etag_list)
logging.debug("{number} event(s) need(s) an update".format(number=len(need_update)))
vhe_list = syncer.get_vevents(need_update)
for vevent, href, etag in vhe_list:
try:
self._dbtool.update(vevent, self.name, href=href, etag=etag)
except backend.UpdateFailed as error:
logging.error(error)
# syncing local new events
hrefs = self._dbtool.get_new(self.name)
logging.debug("{number} new events need to be uploaded".format(number=len(hrefs)))
try:
for href in hrefs:
event = self._dbtool.get_vevent_from_db(href, self.name)
(href_new, etag_new) = syncer.upload(event.vevent, self._default_timezone)
self._dbtool.update_href(href, href_new, self.name, status=OK)
except caldav.NoWriteSupport:
logging.info(
"failed to upload a new event, "
"you need to enable write support to use this feature"
", see the documentation."
)
# syncing locally modified events
hrefs = self._dbtool.get_changed(self.name)
for href in hrefs:
event = self._dbtool.get_vevent_from_db(href, self.name)
etag = syncer.update(event.vevent, event.href, event.etag)
# looking for events deleted on the server but still in the local db
locale_hrefs = self._dbtool.hrefs_by_time_range(start_utc, end_utc, self.name)
remote_hrefs = [href for href, _ in href_etag_list]
may_be_deleted = list(set(locale_hrefs) - set(remote_hrefs))
if may_be_deleted != list():
for href in may_be_deleted:
if (
syncer.test_deleted(href)
and self._dbtool.get_status(href, self.name) != NEW
):
logging.debug(
"removing remotely deleted event {0} from the local db".format(href)
)
self._dbtool.delete(href, self.name)
|
def _sync_caldav(self):
syncer = caldav.Syncer(
self._resource,
user=self._username,
password=self._password,
verify=self._ssl_verify,
auth=self._auth,
)
self._dbtool.check_account_table(self.name)
logging.debug("syncing events in the next 365 days")
start = datetime.datetime.utcnow() - datetime.timedelta(days=30)
start_utc = self._local_timezone.localize(start).astimezone(pytz.UTC)
end_utc = start_utc + datetime.timedelta(days=365)
href_etag_list = syncer.get_hel(start=start_utc, end=end_utc)
need_update = self._dbtool.needs_update(self.name, href_etag_list)
logging.debug("{number} event(s) need(s) an update".format(number=len(need_update)))
vhe_list = syncer.get_vevents(need_update)
for vevent, href, etag in vhe_list:
try:
self._dbtool.update(vevent, self.name, href=href, etag=etag)
except backend.UpdateFailed as error:
logging.error(error)
# syncing local new events
hrefs = self._dbtool.get_new(self.name)
logging.debug("{number} new events need to be uploaded".format(number=len(hrefs)))
try:
for href in hrefs:
event = self._dbtool.get_vevent_from_db(href, self.name)
(href_new, etag_new) = syncer.upload(event.vevent, self._default_timezone)
self._dbtool.update_href(href, href_new, self.name, status=OK)
except caldav.NoWriteSupport:
logging.info(
"failed to upload a new event, "
"you need to enable write support to use this feature"
", see the documentation."
)
# syncing locally modified events
hrefs = self._dbtool.get_changed(self.name)
for href in hrefs:
event = self._dbtool.get_vevent_from_db(href, self.name)
etag = syncer.update(event.vevent, event.href, event.etag)
# looking for events deleted on the server but still in the local db
locale_hrefs = self._dbtool.hrefs_by_time_range(start_utc, end_utc, self.name)
remote_hrefs = [href for href, _ in href_etag_list]
may_be_deleted = list(set(locale_hrefs) - set(remote_hrefs))
if may_be_deleted != list():
for href in may_be_deleted:
if (
syncer.test_deleted(href)
and self._dbtool.get_status(href, self.name) != NEW
):
logging.debug(
"removing remotely deleted event {0} from the local db".format(href)
)
self._dbtool.delete(href, self.name)
|
https://github.com/pimutils/khal/issues/7
|
Traceback (most recent call last):
File "/home/catern/src/khal/virt/bin/khal", line 5, in <module>
pkg_resources.run_script('khal==0.1.0.dev', 'khal')
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 540, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 1462, in run_script
exec_(script_code, namespace, namespace)
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 41, in exec_
exec("""exec code in globs, locs""")
File "<string>", line 1, in <module>
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/khal-0.1.0.dev-py2.7.egg/EGG-INFO/scripts/khal", line 72, in <module>
File "build/bdist.linux-x86_64/egg/khal/controllers.py", line 178, in __init__
File "build/bdist.linux-x86_64/egg/khal/backend.py", line 440, in get_allday_range
File "build/bdist.linux-x86_64/egg/khal/backend.py", line 164, in sql_ex
sqlite3.OperationalError: no such table: simple_d
|
sqlite3.OperationalError
|
def _sync_http(self):
"""
simple syncer to import events from .ics files
"""
import icalendar
self.syncer = caldav.HTTPSyncer(
self._resource,
user=self._username,
password=self._password,
verify=self._ssl_verify,
auth=self._auth,
)
# self._dbtool.check_account_table(self.name)
ics = self.syncer.get_ics()
cal = icalendar.Calendar.from_ical(ics)
remote_uids = list()
for component in cal.walk():
if component.name in ["VEVENT"]:
remote_uids.append(str(component["UID"]))
try:
self._dbtool.update(
component, self.name, href=str(component["UID"]), etag="", status=OK
)
except backend.UpdateFailed as error:
logging.error(error)
# events from an icalendar retrieved over stupid http have no href
# themselves, so their uid is safed in the `href` column
locale_uids = [
uid for uid, account in self._dbtool.get_all_href_from_db([self.name])
]
remote_deleted = list(set(locale_uids) - set(remote_uids))
if remote_deleted != list():
for uid in remote_deleted:
logging.debug(
"removing remotely deleted event {0} from the local db".format(uid)
)
self._dbtool.delete(uid, self.name)
|
def _sync_http(self):
"""
simple syncer to import events from .ics files
"""
import icalendar
self.syncer = caldav.HTTPSyncer(
self._resource,
user=self._username,
password=self._password,
verify=self._ssl_verify,
auth=self._auth,
)
self._dbtool.check_account_table(self.name)
ics = self.syncer.get_ics()
cal = icalendar.Calendar.from_ical(ics)
remote_uids = list()
for component in cal.walk():
if component.name in ["VEVENT"]:
remote_uids.append(str(component["UID"]))
try:
self._dbtool.update(
component, self.name, href=str(component["UID"]), etag="", status=OK
)
except backend.UpdateFailed as error:
logging.error(error)
# events from an icalendar retrieved over stupid http have no href
# themselves, so their uid is safed in the `href` column
locale_uids = [
uid for uid, account in self._dbtool.get_all_href_from_db([self.name])
]
remote_deleted = list(set(locale_uids) - set(remote_uids))
if remote_deleted != list():
for uid in remote_deleted:
logging.debug(
"removing remotely deleted event {0} from the local db".format(uid)
)
self._dbtool.delete(uid, self.name)
|
https://github.com/pimutils/khal/issues/7
|
Traceback (most recent call last):
File "/home/catern/src/khal/virt/bin/khal", line 5, in <module>
pkg_resources.run_script('khal==0.1.0.dev', 'khal')
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 540, in run_script
self.require(requires)[0].run_script(script_name, ns)
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 1462, in run_script
exec_(script_code, namespace, namespace)
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/pkg_resources.py", line 41, in exec_
exec("""exec code in globs, locs""")
File "<string>", line 1, in <module>
File "/home/catern/src/khal/virt/lib/python2.7/site-packages/khal-0.1.0.dev-py2.7.egg/EGG-INFO/scripts/khal", line 72, in <module>
File "build/bdist.linux-x86_64/egg/khal/controllers.py", line 178, in __init__
File "build/bdist.linux-x86_64/egg/khal/backend.py", line 440, in get_allday_range
File "build/bdist.linux-x86_64/egg/khal/backend.py", line 164, in sql_ex
sqlite3.OperationalError: no such table: simple_d
|
sqlite3.OperationalError
|
def __init__(
self,
url: str = "sqlite://",
index: str = "document",
label_index: str = "label",
update_existing_documents: bool = False,
):
"""
An SQL backed DocumentStore. Currently supports SQLite, PostgreSQL and MySQL backends.
:param url: URL for SQL database as expected by SQLAlchemy. More info here: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
:param index: The documents are scoped to an index attribute that can be used when writing, querying, or deleting documents.
This parameter sets the default value for document index.
:param label_index: The default value of index attribute for the labels.
:param update_existing_documents: Whether to update any existing documents with the same ID when adding
documents. When set as True, any document with an existing ID gets updated.
If set to False, an error is raised if the document ID of the document being
added already exists. Using this parameter could cause performance degradation
for document insertion.
"""
engine = create_engine(url)
ORMBase.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
self.session = Session()
self.index = index
self.label_index = label_index
self.update_existing_documents = update_existing_documents
if getattr(self, "similarity", None) is None:
self.similarity = None
self.use_windowed_query = True
if "sqlite" in url:
import sqlite3
if sqlite3.sqlite_version < "3.25":
self.use_windowed_query = False
|
def __init__(
self,
url: str = "sqlite://",
index: str = "document",
label_index: str = "label",
update_existing_documents: bool = False,
):
"""
An SQL backed DocumentStore. Currently supports SQLite, PostgreSQL and MySQL backends.
:param url: URL for SQL database as expected by SQLAlchemy. More info here: https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
:param index: The documents are scoped to an index attribute that can be used when writing, querying, or deleting documents.
This parameter sets the default value for document index.
:param label_index: The default value of index attribute for the labels.
:param update_existing_documents: Whether to update any existing documents with the same ID when adding
documents. When set as True, any document with an existing ID gets updated.
If set to False, an error is raised if the document ID of the document being
added already exists. Using this parameter could cause performance degradation
for document insertion.
"""
engine = create_engine(url)
ORMBase.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
self.session = Session()
self.index = index
self.label_index = label_index
self.update_existing_documents = update_existing_documents
if getattr(self, "similarity", None) is None:
self.similarity = None
|
https://github.com/deepset-ai/haystack/issues/758
|
01/21/2021 22:03:34 - INFO - haystack.document_store.faiss - Updating embeddings for 75 docs...
0%| | 0/75 [00:00<?, ?it/s]
---------------------------------------------------------------------------
OperationalError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _execute_context(self, dialect, constructor, statement, parameters, *args)
1276 self.dialect.do_execute(
-> 1277 cursor, statement, parameters, context
1278 )
17 frames
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py in do_execute(self, cursor, statement, parameters, context)
608 def do_execute(self, cursor, statement, parameters, context=None):
--> 609 cursor.execute(statement, parameters)
610
OperationalError: near "(": syntax error
The above exception was the direct cause of the following exception:
OperationalError Traceback (most recent call last)
<ipython-input-8-0ae6e12f6b98> in <module>()
6
7 # Add documents embeddings to index
----> 8 document_store.update_embeddings(retriever=retriever)
/usr/local/lib/python3.6/dist-packages/haystack/document_store/faiss.py in update_embeddings(self, retriever, index, batch_size)
188 batched_documents = get_batches_from_generator(result, batch_size)
189 with tqdm(total=document_count) as progress_bar:
--> 190 for document_batch in batched_documents:
191 embeddings = retriever.embed_passages(document_batch) # type: ignore
192 assert len(document_batch) == len(embeddings)
/usr/local/lib/python3.6/dist-packages/haystack/utils.py in get_batches_from_generator(iterable, n)
122 """
123 it = iter(iterable)
--> 124 x = tuple(islice(it, n))
125 while x:
126 yield x
/usr/local/lib/python3.6/dist-packages/haystack/document_store/faiss.py in get_all_documents_generator(self, index, filters, return_embedding, batch_size)
241 return_embedding = self.return_embedding
242
--> 243 for doc in documents:
244 if return_embedding:
245 if doc.meta and doc.meta.get("vector_id") is not None:
/usr/local/lib/python3.6/dist-packages/haystack/document_store/sql.py in get_all_documents_generator(self, index, filters, return_embedding, batch_size)
183
184 documents_map = {}
--> 185 for i, row in enumerate(self._windowed_query(documents_query, DocumentORM.id, batch_size), start=1):
186 documents_map[row.id] = Document(
187 id=row.id,
/usr/local/lib/python3.6/dist-packages/haystack/document_store/sql.py in _windowed_query(self, q, column, windowsize)
464 for whereclause in self._column_windows(
465 q.session,
--> 466 column, windowsize):
467 for row in q.filter(whereclause).order_by(column):
468 yield row
/usr/local/lib/python3.6/dist-packages/haystack/document_store/sql.py in _column_windows(self, session, column, windowsize)
449 q = q.filter(text("rownum %% %d=1" % windowsize))
450
--> 451 intervals = [id for id, in q]
452
453 while intervals:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py in __iter__(self)
3533 if self._autoflush and not self._populate_existing:
3534 self.session._autoflush()
-> 3535 return self._execute_and_instances(context)
3536
3537 def __str__(self):
/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py in _execute_and_instances(self, querycontext)
3558 )
3559
-> 3560 result = conn.execute(querycontext.statement, self._params)
3561 return loading.instances(querycontext.query, result, querycontext)
3562
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in execute(self, object_, *multiparams, **params)
1009 )
1010 else:
-> 1011 return meth(self, multiparams, params)
1012
1013 def _execute_function(self, func, multiparams, params):
/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/elements.py in _execute_on_connection(self, connection, multiparams, params)
296 def _execute_on_connection(self, connection, multiparams, params):
297 if self.supports_execution:
--> 298 return connection._execute_clauseelement(self, multiparams, params)
299 else:
300 raise exc.ObjectNotExecutableError(self)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _execute_clauseelement(self, elem, multiparams, params)
1128 distilled_params,
1129 compiled_sql,
-> 1130 distilled_params,
1131 )
1132 if self._has_events or self.engine._has_events:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _execute_context(self, dialect, constructor, statement, parameters, *args)
1315 except BaseException as e:
1316 self._handle_dbapi_exception(
-> 1317 e, statement, parameters, cursor, context
1318 )
1319
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _handle_dbapi_exception(self, e, statement, parameters, cursor, context)
1509 elif should_wrap:
1510 util.raise_(
-> 1511 sqlalchemy_exception, with_traceback=exc_info[2], from_=e
1512 )
1513 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
180
181 try:
--> 182 raise exception
183 finally:
184 # credit to
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _execute_context(self, dialect, constructor, statement, parameters, *args)
1275 if not evt_handled:
1276 self.dialect.do_execute(
-> 1277 cursor, statement, parameters, context
1278 )
1279
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py in do_execute(self, cursor, statement, parameters, context)
607
608 def do_execute(self, cursor, statement, parameters, context=None):
--> 609 cursor.execute(statement, parameters)
610
611 def do_execute_no_params(self, cursor, statement, context=None):
OperationalError: (sqlite3.OperationalError) near "(": syntax error
[SQL: SELECT anon_1.document_id AS anon_1_document_id
FROM (SELECT document.id AS document_id, row_number() OVER (ORDER BY document.id) AS rownum
FROM document) AS anon_1
WHERE rownum % 10000=1]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
|
OperationalError
|
def get_all_documents_generator(
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) -> Generator[Document, None, None]:
"""
Get documents from the document store. Under-the-hood, documents are fetched in batches from the
document store and yielded as individual documents. This method can be used to iteratively process
a large number of documents without having to load all documents in memory.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
"""
index = index or self.index
# Generally ORM objects kept in memory cause performance issue
# Hence using directly column name improve memory and performance.
# Refer https://stackoverflow.com/questions/23185319/why-is-loading-sqlalchemy-objects-via-the-orm-5-8x-slower-than-rows-via-a-raw-my
documents_query = self.session.query(
DocumentORM.id, DocumentORM.text, DocumentORM.vector_id
).filter_by(index=index)
if filters:
documents_query = documents_query.join(MetaORM)
for key, values in filters.items():
documents_query = documents_query.filter(
MetaORM.name == key,
MetaORM.value.in_(values),
DocumentORM.id == MetaORM.document_id,
)
documents_map = {}
if self.use_windowed_query:
documents_query = self._windowed_query(
documents_query, DocumentORM.id, batch_size
)
for i, row in enumerate(documents_query, start=1):
documents_map[row.id] = Document(
id=row.id,
text=row.text,
meta=None if row.vector_id is None else {"vector_id": row.vector_id}, # type: ignore
)
if i % batch_size == 0:
documents_map = self._get_documents_meta(documents_map)
yield from documents_map.values()
documents_map = {}
if documents_map:
documents_map = self._get_documents_meta(documents_map)
yield from documents_map.values()
|
def get_all_documents_generator(
self,
index: Optional[str] = None,
filters: Optional[Dict[str, List[str]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) -> Generator[Document, None, None]:
"""
Get documents from the document store. Under-the-hood, documents are fetched in batches from the
document store and yielded as individual documents. This method can be used to iteratively process
a large number of documents without having to load all documents in memory.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
"""
index = index or self.index
# Generally ORM objects kept in memory cause performance issue
# Hence using directly column name improve memory and performance.
# Refer https://stackoverflow.com/questions/23185319/why-is-loading-sqlalchemy-objects-via-the-orm-5-8x-slower-than-rows-via-a-raw-my
documents_query = self.session.query(
DocumentORM.id, DocumentORM.text, DocumentORM.vector_id
).filter_by(index=index)
if filters:
documents_query = documents_query.join(MetaORM)
for key, values in filters.items():
documents_query = documents_query.filter(
MetaORM.name == key,
MetaORM.value.in_(values),
DocumentORM.id == MetaORM.document_id,
)
documents_map = {}
for i, row in enumerate(
self._windowed_query(documents_query, DocumentORM.id, batch_size), start=1
):
documents_map[row.id] = Document(
id=row.id,
text=row.text,
meta=None if row.vector_id is None else {"vector_id": row.vector_id}, # type: ignore
)
if i % batch_size == 0:
documents_map = self._get_documents_meta(documents_map)
yield from documents_map.values()
documents_map = {}
if documents_map:
documents_map = self._get_documents_meta(documents_map)
yield from documents_map.values()
|
https://github.com/deepset-ai/haystack/issues/758
|
01/21/2021 22:03:34 - INFO - haystack.document_store.faiss - Updating embeddings for 75 docs...
0%| | 0/75 [00:00<?, ?it/s]
---------------------------------------------------------------------------
OperationalError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _execute_context(self, dialect, constructor, statement, parameters, *args)
1276 self.dialect.do_execute(
-> 1277 cursor, statement, parameters, context
1278 )
17 frames
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py in do_execute(self, cursor, statement, parameters, context)
608 def do_execute(self, cursor, statement, parameters, context=None):
--> 609 cursor.execute(statement, parameters)
610
OperationalError: near "(": syntax error
The above exception was the direct cause of the following exception:
OperationalError Traceback (most recent call last)
<ipython-input-8-0ae6e12f6b98> in <module>()
6
7 # Add documents embeddings to index
----> 8 document_store.update_embeddings(retriever=retriever)
/usr/local/lib/python3.6/dist-packages/haystack/document_store/faiss.py in update_embeddings(self, retriever, index, batch_size)
188 batched_documents = get_batches_from_generator(result, batch_size)
189 with tqdm(total=document_count) as progress_bar:
--> 190 for document_batch in batched_documents:
191 embeddings = retriever.embed_passages(document_batch) # type: ignore
192 assert len(document_batch) == len(embeddings)
/usr/local/lib/python3.6/dist-packages/haystack/utils.py in get_batches_from_generator(iterable, n)
122 """
123 it = iter(iterable)
--> 124 x = tuple(islice(it, n))
125 while x:
126 yield x
/usr/local/lib/python3.6/dist-packages/haystack/document_store/faiss.py in get_all_documents_generator(self, index, filters, return_embedding, batch_size)
241 return_embedding = self.return_embedding
242
--> 243 for doc in documents:
244 if return_embedding:
245 if doc.meta and doc.meta.get("vector_id") is not None:
/usr/local/lib/python3.6/dist-packages/haystack/document_store/sql.py in get_all_documents_generator(self, index, filters, return_embedding, batch_size)
183
184 documents_map = {}
--> 185 for i, row in enumerate(self._windowed_query(documents_query, DocumentORM.id, batch_size), start=1):
186 documents_map[row.id] = Document(
187 id=row.id,
/usr/local/lib/python3.6/dist-packages/haystack/document_store/sql.py in _windowed_query(self, q, column, windowsize)
464 for whereclause in self._column_windows(
465 q.session,
--> 466 column, windowsize):
467 for row in q.filter(whereclause).order_by(column):
468 yield row
/usr/local/lib/python3.6/dist-packages/haystack/document_store/sql.py in _column_windows(self, session, column, windowsize)
449 q = q.filter(text("rownum %% %d=1" % windowsize))
450
--> 451 intervals = [id for id, in q]
452
453 while intervals:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py in __iter__(self)
3533 if self._autoflush and not self._populate_existing:
3534 self.session._autoflush()
-> 3535 return self._execute_and_instances(context)
3536
3537 def __str__(self):
/usr/local/lib/python3.6/dist-packages/sqlalchemy/orm/query.py in _execute_and_instances(self, querycontext)
3558 )
3559
-> 3560 result = conn.execute(querycontext.statement, self._params)
3561 return loading.instances(querycontext.query, result, querycontext)
3562
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in execute(self, object_, *multiparams, **params)
1009 )
1010 else:
-> 1011 return meth(self, multiparams, params)
1012
1013 def _execute_function(self, func, multiparams, params):
/usr/local/lib/python3.6/dist-packages/sqlalchemy/sql/elements.py in _execute_on_connection(self, connection, multiparams, params)
296 def _execute_on_connection(self, connection, multiparams, params):
297 if self.supports_execution:
--> 298 return connection._execute_clauseelement(self, multiparams, params)
299 else:
300 raise exc.ObjectNotExecutableError(self)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _execute_clauseelement(self, elem, multiparams, params)
1128 distilled_params,
1129 compiled_sql,
-> 1130 distilled_params,
1131 )
1132 if self._has_events or self.engine._has_events:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _execute_context(self, dialect, constructor, statement, parameters, *args)
1315 except BaseException as e:
1316 self._handle_dbapi_exception(
-> 1317 e, statement, parameters, cursor, context
1318 )
1319
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _handle_dbapi_exception(self, e, statement, parameters, cursor, context)
1509 elif should_wrap:
1510 util.raise_(
-> 1511 sqlalchemy_exception, with_traceback=exc_info[2], from_=e
1512 )
1513 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
180
181 try:
--> 182 raise exception
183 finally:
184 # credit to
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _execute_context(self, dialect, constructor, statement, parameters, *args)
1275 if not evt_handled:
1276 self.dialect.do_execute(
-> 1277 cursor, statement, parameters, context
1278 )
1279
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py in do_execute(self, cursor, statement, parameters, context)
607
608 def do_execute(self, cursor, statement, parameters, context=None):
--> 609 cursor.execute(statement, parameters)
610
611 def do_execute_no_params(self, cursor, statement, context=None):
OperationalError: (sqlite3.OperationalError) near "(": syntax error
[SQL: SELECT anon_1.document_id AS anon_1_document_id
FROM (SELECT document.id AS document_id, row_number() OVER (ORDER BY document.id) AS rownum
FROM document) AS anon_1
WHERE rownum % 10000=1]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
|
OperationalError
|
def __init__(
self,
model_name_or_path: str = "facebook/rag-token-nq",
retriever: Optional[DensePassageRetriever] = None,
generator_type: RAGeneratorType = RAGeneratorType.TOKEN,
top_k_answers: int = 2,
max_length: int = 200,
min_length: int = 2,
num_beams: int = 2,
embed_title: bool = True,
prefix: Optional[str] = None,
use_gpu: bool = True,
):
"""
Load a RAG model from Transformers along with passage_embedding_model.
See https://huggingface.co/transformers/model_doc/rag.html for more details
:param model_name_or_path: Directory of a saved model or the name of a public model e.g.
'facebook/rag-token-nq', 'facebook/rag-sequence-nq'.
See https://huggingface.co/models for full list of available models.
:param retriever: `DensePassageRetriever` used to embedded passage
:param generator_type: Which RAG generator implementation to use? RAG-TOKEN or RAG-SEQUENCE
:param top_k_answers: Number of independently generated text to return
:param max_length: Maximum length of generated text
:param min_length: Minimum length of generated text
:param num_beams: Number of beams for beam search. 1 means no beam search.
:param embed_title: Embedded the title of passage while generating embedding
:param prefix: The prefix used by the generator's tokenizer.
:param use_gpu: Whether to use GPU (if available)
"""
self.model_name_or_path = model_name_or_path
self.max_length = max_length
self.min_length = min_length
self.generator_type = generator_type
self.num_beams = num_beams
self.embed_title = embed_title
self.prefix = prefix
self.retriever = retriever
if top_k_answers > self.num_beams:
top_k_answers = self.num_beams
logger.warning(
f"top_k_answers value should not be greater than num_beams, hence setting it to {num_beams}"
)
self.top_k_answers = top_k_answers
if use_gpu and torch.cuda.is_available():
self.device = torch.device("cuda")
raise AttributeError(
"Currently RAGenerator does not support GPU, try with use_gpu=False"
)
else:
self.device = torch.device("cpu")
self.tokenizer = RagTokenizer.from_pretrained(model_name_or_path)
if self.generator_type == RAGeneratorType.SEQUENCE:
raise NotImplementedError("RagSequenceForGeneration is not implemented yet")
# TODO: Enable when transformers have it. Refer https://github.com/huggingface/transformers/issues/7905
# Also refer refer https://github.com/huggingface/transformers/issues/7829
# self.model = RagSequenceForGeneration.from_pretrained(model_name_or_path)
else:
self.model = RagTokenForGeneration.from_pretrained(model_name_or_path)
|
def __init__(
self,
model_name_or_path: str = "facebook/rag-token-nq",
retriever: Optional[DensePassageRetriever] = None,
generator_type: RAGeneratorType = RAGeneratorType.TOKEN,
top_k_answers: int = 2,
max_length: int = 200,
min_length: int = 2,
num_beams: int = 2,
embed_title: bool = True,
prefix: Optional[str] = None,
use_gpu: bool = True,
):
"""
Load a RAG model from Transformers along with passage_embedding_model.
See https://huggingface.co/transformers/model_doc/rag.html for more details
:param model_name_or_path: Directory of a saved model or the name of a public model e.g.
'facebook/rag-token-nq', 'facebook/rag-sequence-nq'.
See https://huggingface.co/models for full list of available models.
:param retriever: `DensePassageRetriever` used to embedded passage
:param generator_type: Which RAG generator implementation to use? RAG-TOKEN or RAG-SEQUENCE
:param top_k_answers: Number of independently generated text to return
:param max_length: Maximum length of generated text
:param min_length: Minimum length of generated text
:param num_beams: Number of beams for beam search. 1 means no beam search.
:param embed_title: Embedded the title of passage while generating embedding
:param prefix: The prefix used by the generator's tokenizer.
:param use_gpu: Whether to use GPU (if available)
"""
self.model_name_or_path = model_name_or_path
self.top_k_answers = top_k_answers
self.max_length = max_length
self.min_length = min_length
self.generator_type = generator_type
self.num_beams = num_beams
self.embed_title = embed_title
self.prefix = prefix
self.retriever = retriever
if use_gpu and torch.cuda.is_available():
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
self.tokenizer = RagTokenizer.from_pretrained(model_name_or_path)
if self.generator_type == RAGeneratorType.SEQUENCE:
raise NotImplementedError("RagSequenceForGeneration is not implemented yet")
# TODO: Enable when transformers have it. Refer https://github.com/huggingface/transformers/issues/7905
# Also refer refer https://github.com/huggingface/transformers/issues/7829
# self.model = RagSequenceForGeneration.from_pretrained(model_name_or_path)
else:
self.model = RagTokenForGeneration.from_pretrained(model_name_or_path)
|
https://github.com/deepset-ai/haystack/issues/587
|
Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 56.41 Batches/s]
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-17-42737cee0272> in <module>()
10 question=question,
11 documents=retriever_results,
---> 12 top_k=1
13 )
14
/usr/local/lib/python3.6/dist-packages/haystack/generator/transformers.py in predict(self, question, documents, top_k)
236 # Compute doc scores from docs_embedding
237 doc_scores = torch.bmm(question_embedding.unsqueeze(1),
--> 238 passage_embeddings.unsqueeze(0).transpose(1, 2)).squeeze(1)
239
240 # TODO Need transformers 3.4.0
RuntimeError: Expected object of device type cuda but got device type cpu for argument #0 'result' in call to _th_bmm_out
|
RuntimeError
|
def predict(
self, question: str, documents: List[Document], top_k: Optional[int] = None
) -> Dict:
"""
Generate the answer to the input question. The generation will be conditioned on the supplied documents.
These document can for example be retrieved via the Retriever.
:param question: Question
:param documents: Related documents (e.g. coming from a retriever) that the answer shall be conditioned on.
:param top_k: Number of returned answers
:return: Generated answers plus additional infos in a dict like this:
```python
> {'question': 'who got the first nobel prize in physics',
> 'answers':
> [{'question': 'who got the first nobel prize in physics',
> 'answer': ' albert einstein',
> 'meta': { 'doc_ids': [...],
> 'doc_scores': [80.42758 ...],
> 'doc_probabilities': [40.71379089355469, ...
> 'texts': ['Albert Einstein was a ...]
> 'titles': ['"Albert Einstein"', ...]
> }}]}
```
"""
if len(documents) == 0:
raise AttributeError("generator need documents to predict the answer")
top_k_answers = top_k if top_k is not None else self.top_k_answers
if top_k_answers > self.num_beams:
top_k_answers = self.num_beams
logger.warning(
f"top_k_answers value should not be greater than num_beams, "
f"hence setting it to {top_k_answers}"
)
# Flatten the documents so easy to reference
flat_docs_dict: Dict[str, Any] = {}
for document in documents:
for k, v in document.__dict__.items():
if k not in flat_docs_dict:
flat_docs_dict[k] = []
flat_docs_dict[k].append(v)
# Extract title
titles = [d.meta["name"] if d.meta and "name" in d.meta else "" for d in documents]
# Raw document embedding and set device of question_embedding
passage_embeddings = self._prepare_passage_embeddings(
docs=documents, embeddings=flat_docs_dict["embedding"]
)
# Question tokenization
input_dict = self.tokenizer.prepare_seq2seq_batch(
src_texts=[question], return_tensors="pt"
)
# Question embedding
question_embedding = self.model.question_encoder(input_dict["input_ids"])[0]
# Prepare contextualized input_ids of documents
# (will be transformed into contextualized inputs inside generator)
context_input_ids, context_attention_mask = self._get_contextualized_inputs(
texts=flat_docs_dict["text"], titles=titles, question=question
)
# Compute doc scores from docs_embedding
doc_scores = torch.bmm(
question_embedding.unsqueeze(1), passage_embeddings.unsqueeze(0).transpose(1, 2)
).squeeze(1)
# TODO Need transformers 3.4.0
# Refer https://github.com/huggingface/transformers/issues/7874
# Pass it as parameter to generate function as follows -
# n_docs=len(flat_docs_dict["text"])
self.model.config.n_docs = len(flat_docs_dict["text"])
# Get generated ids from generator
generator_ids = self.model.generate(
# TODO: Need transformers 3.4.0
# Refer https://github.com/huggingface/transformers/issues/7871
# Remove input_ids parameter once upgraded to 3.4.0
input_ids=input_dict["input_ids"],
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
num_return_sequences=top_k_answers,
num_beams=self.num_beams,
max_length=self.max_length,
min_length=self.min_length,
)
generated_answers = self.tokenizer.batch_decode(
generator_ids, skip_special_tokens=True
)
answers: List[Any] = []
for generated_answer in generated_answers:
cur_answer = {
"question": question,
"answer": generated_answer,
"meta": {
"doc_ids": flat_docs_dict["id"],
"doc_scores": flat_docs_dict["score"],
"doc_probabilities": flat_docs_dict["probability"],
"texts": flat_docs_dict["text"],
"titles": titles,
},
}
answers.append(cur_answer)
result = {"question": question, "answers": answers}
return result
|
def predict(
self, question: str, documents: List[Document], top_k: Optional[int] = None
) -> Dict:
"""
Generate the answer to the input question. The generation will be conditioned on the supplied documents.
These document can for example be retrieved via the Retriever.
:param question: Question
:param documents: Related documents (e.g. coming from a retriever) that the answer shall be conditioned on.
:param top_k: Number of returned answers
:return: Generated answers plus additional infos in a dict like this:
```python
> {'question': 'who got the first nobel prize in physics',
> 'answers':
> [{'question': 'who got the first nobel prize in physics',
> 'answer': ' albert einstein',
> 'meta': { 'doc_ids': [...],
> 'doc_scores': [80.42758 ...],
> 'doc_probabilities': [40.71379089355469, ...
> 'texts': ['Albert Einstein was a ...]
> 'titles': ['"Albert Einstein"', ...]
> }}]}
```
"""
if len(documents) == 0:
raise AttributeError("generator need documents to predict the answer")
top_k_answers = top_k if top_k is not None else self.top_k_answers
# Flatten the documents so easy to reference
flat_docs_dict: Dict[str, Any] = {}
for document in documents:
for k, v in document.__dict__.items():
if k not in flat_docs_dict:
flat_docs_dict[k] = []
flat_docs_dict[k].append(v)
# Extract title
titles = [d.meta["name"] if d.meta and "name" in d.meta else "" for d in documents]
# Raw document embedding and set device of question_embedding
passage_embeddings = self._prepare_passage_embeddings(
docs=documents, embeddings=flat_docs_dict["embedding"]
)
# Question tokenization
input_dict = self.tokenizer.prepare_seq2seq_batch(
src_texts=[question], return_tensors="pt"
)
# Question embedding
question_embedding = self.model.question_encoder(input_dict["input_ids"])[0]
# Prepare contextualized input_ids of documents
# (will be transformed into contextualized inputs inside generator)
context_input_ids, context_attention_mask = self._get_contextualized_inputs(
texts=flat_docs_dict["text"], titles=titles, question=question
)
# Compute doc scores from docs_embedding
doc_scores = torch.bmm(
question_embedding.unsqueeze(1), passage_embeddings.unsqueeze(0).transpose(1, 2)
).squeeze(1)
# TODO Need transformers 3.4.0
# Refer https://github.com/huggingface/transformers/issues/7874
# Pass it as parameter to generate function as follows -
# n_docs=len(flat_docs_dict["text"])
self.model.config.n_docs = len(flat_docs_dict["text"])
# Get generated ids from generator
generator_ids = self.model.generate(
# TODO: Need transformers 3.4.0
# Refer https://github.com/huggingface/transformers/issues/7871
# Remove input_ids parameter once upgraded to 3.4.0
input_ids=input_dict["input_ids"],
context_input_ids=context_input_ids,
context_attention_mask=context_attention_mask,
doc_scores=doc_scores,
num_return_sequences=top_k_answers,
num_beams=self.num_beams,
max_length=self.max_length,
min_length=self.min_length,
)
generated_answers = self.tokenizer.batch_decode(
generator_ids, skip_special_tokens=True
)
answers: List[Any] = []
for generated_answer in generated_answers:
cur_answer = {
"question": question,
"answer": generated_answer,
"meta": {
"doc_ids": flat_docs_dict["id"],
"doc_scores": flat_docs_dict["score"],
"doc_probabilities": flat_docs_dict["probability"],
"texts": flat_docs_dict["text"],
"titles": titles,
},
}
answers.append(cur_answer)
result = {"question": question, "answers": answers}
return result
|
https://github.com/deepset-ai/haystack/issues/587
|
Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 56.41 Batches/s]
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-17-42737cee0272> in <module>()
10 question=question,
11 documents=retriever_results,
---> 12 top_k=1
13 )
14
/usr/local/lib/python3.6/dist-packages/haystack/generator/transformers.py in predict(self, question, documents, top_k)
236 # Compute doc scores from docs_embedding
237 doc_scores = torch.bmm(question_embedding.unsqueeze(1),
--> 238 passage_embeddings.unsqueeze(0).transpose(1, 2)).squeeze(1)
239
240 # TODO Need transformers 3.4.0
RuntimeError: Expected object of device type cuda but got device type cpu for argument #0 'result' in call to _th_bmm_out
|
RuntimeError
|
def query_by_embedding(
self,
query_emb: np.array,
filters: Optional[Dict[str, List[str]]] = None,
top_k: int = 10,
index: Optional[str] = None,
) -> List[Document]:
if index is None:
index = self.index
if not self.embedding_field:
raise RuntimeError(
"Please specify arg `embedding_field` in ElasticsearchDocumentStore()"
)
else:
# +1 in similarity to avoid negative numbers (for cosine sim)
body = {
"size": top_k,
"query": {
"script_score": {
"query": {"match_all": {}},
"script": {
# offset score to ensure a positive range as required by Elasticsearch
"source": f"{self.similarity_fn_name}(params.query_vector,'{self.embedding_field}') + 1000",
"params": {"query_vector": query_emb.tolist()},
},
}
},
} # type: Dict[str,Any]
if filters:
for key, values in filters.items():
if type(values) != list:
raise ValueError(
f'Wrong filter format for key "{key}": Please provide a list of allowed values for each key. '
'Example: {"name": ["some", "more"], "category": ["only_one"]} '
)
body["query"]["script_score"]["query"] = {"terms": filters}
if self.excluded_meta_data:
body["_source"] = {"excludes": self.excluded_meta_data}
logger.debug(f"Retriever query: {body}")
result = self.client.search(index=index, body=body, request_timeout=300)[
"hits"
]["hits"]
documents = [
self._convert_es_hit_to_document(hit, adapt_score_for_embedding=True)
for hit in result
]
return documents
|
def query_by_embedding(
self,
query_emb: np.array,
filters: Optional[Dict[str, List[str]]] = None,
top_k: int = 10,
index: Optional[str] = None,
) -> List[Document]:
if index is None:
index = self.index
if not self.embedding_field:
raise RuntimeError(
"Please specify arg `embedding_field` in ElasticsearchDocumentStore()"
)
else:
# +1 in similarity to avoid negative numbers (for cosine sim)
body = {
"size": top_k,
"query": {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": f"{self.similarity_fn_name}(params.query_vector,'{self.embedding_field}') + 1.0",
"params": {"query_vector": query_emb.tolist()},
},
}
},
} # type: Dict[str,Any]
if filters:
for key, values in filters.items():
if type(values) != list:
raise ValueError(
f'Wrong filter format for key "{key}": Please provide a list of allowed values for each key. '
'Example: {"name": ["some", "more"], "category": ["only_one"]} '
)
body["query"]["script_score"]["query"] = {"terms": filters}
if self.excluded_meta_data:
body["_source"] = {"excludes": self.excluded_meta_data}
logger.debug(f"Retriever query: {body}")
result = self.client.search(index=index, body=body, request_timeout=300)[
"hits"
]["hits"]
documents = [
self._convert_es_hit_to_document(hit, adapt_score_for_embedding=True)
for hit in result
]
return documents
|
https://github.com/deepset-ai/haystack/issues/483
|
10/13/2020 06:43:06 - WARNING - elasticsearch - POST http://localhost:9200/faq/_search [status:400 request:0.130s]
Traceback (most recent call last):
File "chatbots/haystack/faqbot.py", line 214, in <module>
main()
File "chatbots/haystack/faqbot.py", line 186, in main
faqbot.interact(question)
File "chatbots/haystack/faqbot.py", line 57, in interact
prediction = finder.get_answers_via_similar_questions(question=question, top_k_retriever=1)
File "/Users/aaronbriel/chatbots/haystack/.venv/src/farm-haystack/haystack/finder.py", line 95, in get_answers_via_similar_questions
documents = self.retriever.retrieve(question, top_k=top_k_retriever, filters=filters, index=index)
File "/Users/aaronbriel/chatbots/haystack/.venv/src/farm-haystack/haystack/retriever/dense.py", line 327, in retrieve
top_k=top_k, index=index)
File "/Users/aaronbriel/chatbots/haystack/.venv/src/farm-haystack/haystack/document_store/elasticsearch.py", line 465, in query_by_embedding
result = self.client.search(index=index, body=body, request_timeout=300)["hits"]["hits"]
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/client/utils.py", line 152, in _wrapped
return func(*args, params=params, headers=headers, **kwargs)
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/client/__init__.py", line 1617, in search
body=body,
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/transport.py", line 392, in perform_request
raise e
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/transport.py", line 365, in perform_request
timeout=timeout,
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/connection/http_urllib3.py", line 269, in perform_request
self._raise_error(response.status, raw_data)
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/connection/base.py", line 301, in _raise_error
status_code, error_message, additional_info
elasticsearch.exceptions.RequestError: RequestError(400, 'search_phase_execution_exception', 'script_score script returned an invalid score [-3.4487913] for doc [41]. Must be a non-negative score!')
make: *** [ask_faq] Error 1
|
elasticsearch.exceptions.RequestError
|
def _convert_es_hit_to_document(
self, hit: dict, adapt_score_for_embedding: bool = False
) -> Document:
# We put all additional data of the doc into meta_data and return it in the API
meta_data = {
k: v
for k, v in hit["_source"].items()
if k not in (self.text_field, self.faq_question_field, self.embedding_field)
}
name = meta_data.pop(self.name_field, None)
if name:
meta_data["name"] = name
score = hit["_score"] if hit["_score"] else None
if score:
if adapt_score_for_embedding:
score -= 1000
probability = (score + 1) / 2 # scaling probability from cosine similarity
else:
probability = float(
expit(np.asarray(score / 8))
) # scaling probability from TFIDF/BM25
else:
probability = None
document = Document(
id=hit["_id"],
text=hit["_source"].get(self.text_field),
meta=meta_data,
score=score,
probability=probability,
question=hit["_source"].get(self.faq_question_field),
embedding=hit["_source"].get(self.embedding_field),
)
return document
|
def _convert_es_hit_to_document(
self, hit: dict, adapt_score_for_embedding: bool = False
) -> Document:
# We put all additional data of the doc into meta_data and return it in the API
meta_data = {
k: v
for k, v in hit["_source"].items()
if k not in (self.text_field, self.faq_question_field, self.embedding_field)
}
name = meta_data.pop(self.name_field, None)
if name:
meta_data["name"] = name
score = hit["_score"] if hit["_score"] else None
if score:
if adapt_score_for_embedding:
score -= 1
probability = (score + 1) / 2 # scaling probability from cosine similarity
else:
probability = float(
expit(np.asarray(score / 8))
) # scaling probability from TFIDF/BM25
else:
probability = None
document = Document(
id=hit["_id"],
text=hit["_source"].get(self.text_field),
meta=meta_data,
score=score,
probability=probability,
question=hit["_source"].get(self.faq_question_field),
embedding=hit["_source"].get(self.embedding_field),
)
return document
|
https://github.com/deepset-ai/haystack/issues/483
|
10/13/2020 06:43:06 - WARNING - elasticsearch - POST http://localhost:9200/faq/_search [status:400 request:0.130s]
Traceback (most recent call last):
File "chatbots/haystack/faqbot.py", line 214, in <module>
main()
File "chatbots/haystack/faqbot.py", line 186, in main
faqbot.interact(question)
File "chatbots/haystack/faqbot.py", line 57, in interact
prediction = finder.get_answers_via_similar_questions(question=question, top_k_retriever=1)
File "/Users/aaronbriel/chatbots/haystack/.venv/src/farm-haystack/haystack/finder.py", line 95, in get_answers_via_similar_questions
documents = self.retriever.retrieve(question, top_k=top_k_retriever, filters=filters, index=index)
File "/Users/aaronbriel/chatbots/haystack/.venv/src/farm-haystack/haystack/retriever/dense.py", line 327, in retrieve
top_k=top_k, index=index)
File "/Users/aaronbriel/chatbots/haystack/.venv/src/farm-haystack/haystack/document_store/elasticsearch.py", line 465, in query_by_embedding
result = self.client.search(index=index, body=body, request_timeout=300)["hits"]["hits"]
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/client/utils.py", line 152, in _wrapped
return func(*args, params=params, headers=headers, **kwargs)
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/client/__init__.py", line 1617, in search
body=body,
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/transport.py", line 392, in perform_request
raise e
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/transport.py", line 365, in perform_request
timeout=timeout,
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/connection/http_urllib3.py", line 269, in perform_request
self._raise_error(response.status, raw_data)
File "/Users/aaronbriel/chatbots/haystack/.venv/lib/python3.7/site-packages/elasticsearch/connection/base.py", line 301, in _raise_error
status_code, error_message, additional_info
elasticsearch.exceptions.RequestError: RequestError(400, 'search_phase_execution_exception', 'script_score script returned an invalid score [-3.4487913] for doc [41]. Must be a non-negative score!')
make: *** [ask_faq] Error 1
|
elasticsearch.exceptions.RequestError
|
def convert_files_to_dicts(
dir_path: str, clean_func: Optional[Callable] = None, split_paragraphs: bool = False
) -> List[dict]:
"""
Convert all files(.txt, .pdf, .docx) in the sub-directories of the given path to Python dicts that can be written to a
Document Store.
:param dir_path: path for the documents to be written to the DocumentStore
:param clean_func: a custom cleaning function that gets applied to each doc (input: str, output:str)
:param split_paragraphs: split text in paragraphs.
:return: None
"""
file_paths = [p for p in Path(dir_path).glob("**/*")]
allowed_suffixes = [".pdf", ".txt", ".docx"]
suffix2converter: Dict[str, BaseConverter] = {}
suffix2paths: Dict[str, List[Path]] = {}
for path in file_paths:
file_suffix = path.suffix.lower()
if file_suffix in allowed_suffixes:
if file_suffix not in suffix2paths:
suffix2paths[file_suffix] = []
suffix2paths[file_suffix].append(path)
elif not path.is_dir():
logger.warning(
"Skipped file {0} as type {1} is not supported here. "
"See haystack.file_converter for support of more file types".format(
path, file_suffix
)
)
# No need to initialize converter if file type not present
for file_suffix in suffix2paths.keys():
if file_suffix == ".pdf":
suffix2converter[file_suffix] = PDFToTextConverter()
if file_suffix == ".txt":
suffix2converter[file_suffix] = TextConverter()
if file_suffix == ".docx":
suffix2converter[file_suffix] = DocxToTextConverter()
documents = []
for suffix, paths in suffix2paths.items():
for path in paths:
logger.info("Converting {}".format(path))
document = suffix2converter[suffix].convert(file_path=path, meta=None)
text = document["text"]
if clean_func:
text = clean_func(text)
if split_paragraphs:
for para in text.split("\n\n"):
if not para.strip(): # skip empty paragraphs
continue
documents.append({"text": para, "meta": {"name": path.name}})
else:
documents.append({"text": text, "meta": {"name": path.name}})
return documents
|
def convert_files_to_dicts(
dir_path: str, clean_func: Optional[Callable] = None, split_paragraphs: bool = False
) -> List[dict]:
"""
Convert all files(.txt, .pdf) in the sub-directories of the given path to Python dicts that can be written to a
Document Store.
:param dir_path: path for the documents to be written to the DocumentStore
:param clean_func: a custom cleaning function that gets applied to each doc (input: str, output:str)
:param split_paragraphs: split text in paragraphs.
:return: None
"""
file_paths = [p for p in Path(dir_path).glob("**/*")]
if ".pdf" in [p.suffix.lower() for p in file_paths]:
pdf_converter = PDFToTextConverter() # type: Optional[PDFToTextConverter]
else:
pdf_converter = None
documents = []
for path in file_paths:
if path.suffix.lower() == ".txt":
with open(path) as doc:
text = doc.read()
elif path.suffix.lower() == ".pdf" and pdf_converter:
document = pdf_converter.convert(path)
text = document["text"]
else:
raise Exception(
f"Indexing of {path.suffix} files is not currently supported."
)
if clean_func:
text = clean_func(text)
if split_paragraphs:
for para in text.split("\n\n"):
if not para.strip(): # skip empty paragraphs
continue
documents.append({"text": para, "meta": {"name": path.name}})
else:
documents.append({"text": text, "meta": {"name": path.name}})
return documents
|
https://github.com/deepset-ai/haystack/issues/453
|
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-3-fd0550140fb8> in <module>
5
6 # Convert files to dicts
----> 7 dicts = convert_files_to_dicts(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)
8
9 # Now, let's write the dicts containing documents to our DB.
~/Library/Python/3.8/lib/python/site-packages/haystack/preprocessor/utils.py in convert_files_to_dicts(dir_path, clean_func, split_paragraphs)
101 text = document["text"]
102 else:
--> 103 raise Exception(f"Indexing of {path.suffix} files is not currently supported.")
104
105 if clean_func:
Exception: Indexing of files is not currently supported.
|
Exception
|
def tika_convert_files_to_dicts(
dir_path: str,
clean_func: Optional[Callable] = None,
split_paragraphs: bool = False,
merge_short: bool = True,
merge_lowercase: bool = True,
) -> List[dict]:
"""
Convert all files(.txt, .pdf) in the sub-directories of the given path to Python dicts that can be written to a
Document Store.
:param merge_lowercase: allow conversion of merged paragraph to lowercase
:param merge_short: allow merging of short paragraphs
:param dir_path: path for the documents to be written to the DocumentStore
:param clean_func: a custom cleaning function that gets applied to each doc (input: str, output:str)
:param split_paragraphs: split text in paragraphs.
:return: None
"""
converter = TikaConverter(remove_header_footer=True)
paths = [p for p in Path(dir_path).glob("**/*")]
allowed_suffixes = [".pdf", ".txt"]
file_paths: List[Path] = []
for path in paths:
file_suffix = path.suffix.lower()
if file_suffix in allowed_suffixes:
file_paths.append(path)
elif not path.is_dir():
logger.warning(
"Skipped file {0} as type {1} is not supported here. "
"See haystack.file_converter for support of more file types".format(
path, file_suffix
)
)
documents = []
for path in file_paths:
logger.info("Converting {}".format(path))
document = converter.convert(path)
meta = document["meta"] or {}
meta["name"] = path.name
text = document["text"]
pages = text.split("\f")
if split_paragraphs:
if pages:
paras = pages[0].split("\n\n")
# pop the last paragraph from the first page
last_para = paras.pop(-1) if paras else ""
for page in pages[1:]:
page_paras = page.split("\n\n")
# merge the last paragraph in previous page to the first paragraph in this page
if page_paras:
page_paras[0] = last_para + " " + page_paras[0]
last_para = page_paras.pop(-1)
paras += page_paras
if last_para:
paras.append(last_para)
if paras:
last_para = ""
for para in paras:
para = para.strip()
if not para:
continue
# merge paragraphs to improve qa
# merge this paragraph if less than 10 characters or 2 words
# or this paragraph starts with a lower case and last paragraph does not end with a punctuation
if (
merge_short
and len(para) < 10
or len(re.findall("\s+", para)) < 2
or merge_lowercase
and para
and para[0].islower()
and last_para
and last_para[-1] not in ".?!\"'\]\)"
):
last_para += " " + para
else:
if last_para:
documents.append({"text": last_para, "meta": meta})
last_para = para
# don't forget the last one
if last_para:
documents.append({"text": last_para, "meta": meta})
else:
if clean_func:
text = clean_func(text)
documents.append({"text": text, "meta": meta})
return documents
|
def tika_convert_files_to_dicts(
dir_path: str,
clean_func: Optional[Callable] = None,
split_paragraphs: bool = False,
merge_short: bool = True,
merge_lowercase: bool = True,
) -> List[dict]:
"""
Convert all files(.txt, .pdf) in the sub-directories of the given path to Python dicts that can be written to a
Document Store.
:param dir_path: path for the documents to be written to the DocumentStore
:param clean_func: a custom cleaning function that gets applied to each doc (input: str, output:str)
:param split_paragraphs: split text in paragraphs.
:return: None
"""
converter = TikaConverter(remove_header_footer=True)
file_paths = [p for p in Path(dir_path).glob("**/*")]
documents = []
for path in file_paths:
document = converter.convert(path)
meta = document["meta"] or {}
meta["name"] = path.name
text = document["text"]
pages = text.split("\f")
if split_paragraphs:
if pages:
paras = pages[0].split("\n\n")
# pop the last paragraph from the first page
last_para = paras.pop(-1) if paras else ""
for page in pages[1:]:
page_paras = page.split("\n\n")
# merge the last paragraph in previous page to the first paragraph in this page
if page_paras:
page_paras[0] = last_para + " " + page_paras[0]
last_para = page_paras.pop(-1)
paras += page_paras
if last_para:
paras.append(last_para)
if paras:
last_para = ""
for para in paras:
para = para.strip()
if not para:
continue
# merge paragraphs to improve qa
# merge this paragraph if less than 10 characters or 2 words
# or this paragraph starts with a lower case and last paragraph does not end with a punctuation
if (
merge_short
and len(para) < 10
or len(re.findall("\s+", para)) < 2
or merge_lowercase
and para
and para[0].islower()
and last_para
and last_para[-1] not in ".?!\"'\]\)"
):
last_para += " " + para
else:
if last_para:
documents.append({"text": last_para, "meta": meta})
last_para = para
# don't forget the last one
if last_para:
documents.append({"text": last_para, "meta": meta})
else:
if clean_func:
text = clean_func(text)
documents.append({"text": text, "meta": meta})
return documents
|
https://github.com/deepset-ai/haystack/issues/453
|
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-3-fd0550140fb8> in <module>
5
6 # Convert files to dicts
----> 7 dicts = convert_files_to_dicts(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)
8
9 # Now, let's write the dicts containing documents to our DB.
~/Library/Python/3.8/lib/python/site-packages/haystack/preprocessor/utils.py in convert_files_to_dicts(dir_path, clean_func, split_paragraphs)
101 text = document["text"]
102 else:
--> 103 raise Exception(f"Indexing of {path.suffix} files is not currently supported.")
104
105 if clean_func:
Exception: Indexing of files is not currently supported.
|
Exception
|
def fetch_archive_from_http(url: str, output_dir: str, proxies: Optional[dict] = None):
"""
Fetch an archive (zip or tar.gz) from a url via http and extract content to an output directory.
:param url: http address
:type url: str
:param output_dir: local path
:type output_dir: str
:param proxies: proxies details as required by requests library
:type proxies: dict
:return: bool if anything got fetched
"""
# verify & prepare local directory
path = Path(output_dir)
if not path.exists():
path.mkdir(parents=True)
is_not_empty = len(list(Path(path).rglob("*"))) > 0
if is_not_empty:
logger.info(
f"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data."
)
return False
else:
logger.info(f"Fetching from {url} to `{output_dir}`")
# download & extract
with tempfile.NamedTemporaryFile() as temp_file:
http_get(url, temp_file, proxies=proxies)
temp_file.flush()
temp_file.seek(0) # making tempfile accessible
# extract
if url[-4:] == ".zip":
zip_archive = zipfile.ZipFile(temp_file.name)
zip_archive.extractall(output_dir)
elif url[-7:] == ".tar.gz":
tar_archive = tarfile.open(temp_file.name)
tar_archive.extractall(output_dir)
else:
logger.warning(
"Skipped url {0} as file type is not supported here. "
"See haystack documentation for support of more file types".format(
url
)
)
# temp_file gets deleted here
return True
|
def fetch_archive_from_http(url: str, output_dir: str, proxies: Optional[dict] = None):
"""
Fetch an archive (zip or tar.gz) from a url via http and extract content to an output directory.
:param url: http address
:type url: str
:param output_dir: local path
:type output_dir: str
:param proxies: proxies details as required by requests library
:type proxies: dict
:return: bool if anything got fetched
"""
# verify & prepare local directory
path = Path(output_dir)
if not path.exists():
path.mkdir(parents=True)
is_not_empty = len(list(Path(path).rglob("*"))) > 0
if is_not_empty:
logger.info(
f"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data."
)
return False
else:
logger.info(f"Fetching from {url} to `{output_dir}`")
# download & extract
with tempfile.NamedTemporaryFile() as temp_file:
http_get(url, temp_file, proxies=proxies)
temp_file.flush()
temp_file.seek(0) # making tempfile accessible
# extract
if url[-4:] == ".zip":
zip_archive = zipfile.ZipFile(temp_file.name)
zip_archive.extractall(output_dir)
elif url[-7:] == ".tar.gz":
tar_archive = tarfile.open(temp_file.name)
tar_archive.extractall(output_dir)
# temp_file gets deleted here
return True
|
https://github.com/deepset-ai/haystack/issues/453
|
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-3-fd0550140fb8> in <module>
5
6 # Convert files to dicts
----> 7 dicts = convert_files_to_dicts(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)
8
9 # Now, let's write the dicts containing documents to our DB.
~/Library/Python/3.8/lib/python/site-packages/haystack/preprocessor/utils.py in convert_files_to_dicts(dir_path, clean_func, split_paragraphs)
101 text = document["text"]
102 else:
--> 103 raise Exception(f"Indexing of {path.suffix} files is not currently supported.")
104
105 if clean_func:
Exception: Indexing of files is not currently supported.
|
Exception
|
def __init__(
self,
text: str,
id: str = None,
query_score: Optional[float] = None,
question: Optional[str] = None,
meta: Dict[str, Any] = None,
tags: Optional[Dict[str, Any]] = None,
embedding: Optional[List[float]] = None,
):
"""
Object used to represent documents / passages in a standardized way within Haystack.
For example, this is what the retriever will return from the DocumentStore,
regardless if it's ElasticsearchDocumentStore or InMemoryDocumentStore.
Note that there can be multiple Documents originating from one file (e.g. PDF),
if you split the text into smaller passages. We'll have one Document per passage in this case.
:param id: ID used within the DocumentStore
:param text: Text of the document
:param query_score: Retriever's query score for a retrieved document
:param question: Question text for FAQs.
:param meta: Meta fields for a document like name, url, or author.
:param tags: Tags that allow filtering of the data
:param embedding: Vector encoding of the text
"""
self.text = text
# Create a unique ID (either new one, or one from user input)
if id:
self.id = str(id)
else:
self.id = str(uuid4())
self.query_score = query_score
self.question = question
self.meta = meta
self.tags = tags # deprecate?
self.embedding = embedding
|
def __init__(
self,
text: str,
id: Optional[Union[str, UUID]] = None,
query_score: Optional[float] = None,
question: Optional[str] = None,
meta: Dict[str, Any] = None,
tags: Optional[Dict[str, Any]] = None,
embedding: Optional[List[float]] = None,
):
"""
Object used to represent documents / passages in a standardized way within Haystack.
For example, this is what the retriever will return from the DocumentStore,
regardless if it's ElasticsearchDocumentStore or InMemoryDocumentStore.
Note that there can be multiple Documents originating from one file (e.g. PDF),
if you split the text into smaller passages. We'll have one Document per passage in this case.
:param id: ID used within the DocumentStore
:param text: Text of the document
:param query_score: Retriever's query score for a retrieved document
:param question: Question text for FAQs.
:param meta: Meta fields for a document like name, url, or author.
:param tags: Tags that allow filtering of the data
:param embedding: Vector encoding of the text
"""
self.text = text
# Create a unique ID (either new one, or one from user input)
if id:
if isinstance(id, str):
self.id = UUID(hex=str(id), version=4)
if isinstance(id, UUID):
self.id = id
else:
self.id = uuid4()
self.query_score = query_score
self.question = question
self.meta = meta
self.tags = tags # deprecate?
self.embedding = embedding
|
https://github.com/deepset-ai/haystack/issues/278
|
07/31/2020 16:12:59 - INFO - haystack.retriever.dpr_utils - Loading saved model from models/dpr/checkpoint/retriever/single/nq/bert-base-encoder.cp
07/31/2020 16:12:59 - INFO - haystack.retriever.dense - Loaded encoder params: {'do_lower_case': True, 'pretrained_model_cfg': 'bert-base-uncased', 'encoder_model_type': 'hf_bert', 'pretrained_file': None, 'projection_dim': 0, 'sequence_length': 256}
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - elasticsearch - POST https://df4bc7e5f2a54314ac10223dd343fe94.us-central1.gcp.cloud.es.io:9243/document/_search?scroll=5m&size=1000 [status:200 request:0.139s]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-22-9372aaabee19> in <module>()
11 # At query time, we only need to embed the query and compare it the existing doc embeddings which is very fast.
12
---> 13 document_store.update_embeddings(retriever)
14
15 # ES retreivar
5 frames
/usr/lib/python3.6/uuid.py in __init__(self, hex, bytes, bytes_le, fields, int, version)
138 hex = hex.strip('{}').replace('-', '')
139 if len(hex) != 32:
--> 140 raise ValueError('badly formed hexadecimal UUID string')
141 int = int_(hex, 16)
142 if bytes_le is not None:
**ValueError: badly formed hexadecimal UUID string**
|
ValueError
|
def __init__(
self,
question: str,
answer: str,
is_correct_answer: bool,
is_correct_document: bool,
origin: str,
document_id: Optional[str] = None,
offset_start_in_doc: Optional[int] = None,
no_answer: Optional[bool] = None,
model_id: Optional[int] = None,
):
"""
Object used to represent label/feedback in a standardized way within Haystack.
This includes labels from dataset like SQuAD, annotations from labeling tools,
or, user-feedback from the Haystack REST API.
:param question: the question(or query) for finding answers.
:param answer: teh answer string.
:param is_correct_answer: whether the sample is positive or negative.
:param is_correct_document: in case of negative sample(is_correct_answer is False), there could be two cases;
incorrect answer but correct document & incorrect document. This flag denotes if
the returned document was correct.
:param origin: the source for the labels. It can be used to later for filtering.
:param document_id: the document_store's ID for the returned answer document.
:param offset_start_in_doc: the answer start offset in the document.
:param no_answer: whether the question in unanswerable.
:param model_id: model_id used for prediction(in-case of user feedback).
"""
self.no_answer = no_answer
self.origin = origin
self.question = question
self.is_correct_answer = is_correct_answer
self.is_correct_document = is_correct_document
self.document_id = document_id
self.answer = answer
self.offset_start_in_doc = offset_start_in_doc
self.model_id = model_id
|
def __init__(
self,
question: str,
answer: str,
is_correct_answer: bool,
is_correct_document: bool,
origin: str,
document_id: Optional[UUID] = None,
offset_start_in_doc: Optional[int] = None,
no_answer: Optional[bool] = None,
model_id: Optional[int] = None,
):
"""
Object used to represent label/feedback in a standardized way within Haystack.
This includes labels from dataset like SQuAD, annotations from labeling tools,
or, user-feedback from the Haystack REST API.
:param question: the question(or query) for finding answers.
:param answer: teh answer string.
:param is_correct_answer: whether the sample is positive or negative.
:param is_correct_document: in case of negative sample(is_correct_answer is False), there could be two cases;
incorrect answer but correct document & incorrect document. This flag denotes if
the returned document was correct.
:param origin: the source for the labels. It can be used to later for filtering.
:param document_id: the document_store's ID for the returned answer document.
:param offset_start_in_doc: the answer start offset in the document.
:param no_answer: whether the question in unanswerable.
:param model_id: model_id used for prediction(in-case of user feedback).
"""
self.no_answer = no_answer
self.origin = origin
self.question = question
self.is_correct_answer = is_correct_answer
self.is_correct_document = is_correct_document
if document_id:
if isinstance(document_id, str):
self.document_id: Optional[UUID] = UUID(hex=str(document_id), version=4)
if isinstance(document_id, UUID):
self.document_id = document_id
else:
self.document_id = document_id
self.answer = answer
self.offset_start_in_doc = offset_start_in_doc
self.model_id = model_id
|
https://github.com/deepset-ai/haystack/issues/278
|
07/31/2020 16:12:59 - INFO - haystack.retriever.dpr_utils - Loading saved model from models/dpr/checkpoint/retriever/single/nq/bert-base-encoder.cp
07/31/2020 16:12:59 - INFO - haystack.retriever.dense - Loaded encoder params: {'do_lower_case': True, 'pretrained_model_cfg': 'bert-base-uncased', 'encoder_model_type': 'hf_bert', 'pretrained_file': None, 'projection_dim': 0, 'sequence_length': 256}
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - elasticsearch - POST https://df4bc7e5f2a54314ac10223dd343fe94.us-central1.gcp.cloud.es.io:9243/document/_search?scroll=5m&size=1000 [status:200 request:0.139s]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-22-9372aaabee19> in <module>()
11 # At query time, we only need to embed the query and compare it the existing doc embeddings which is very fast.
12
---> 13 document_store.update_embeddings(retriever)
14
15 # ES retreivar
5 frames
/usr/lib/python3.6/uuid.py in __init__(self, hex, bytes, bytes_le, fields, int, version)
138 hex = hex.strip('{}').replace('-', '')
139 if len(hex) != 32:
--> 140 raise ValueError('badly formed hexadecimal UUID string')
141 int = int_(hex, 16)
142 if bytes_le is not None:
**ValueError: badly formed hexadecimal UUID string**
|
ValueError
|
def get_document_by_id(
self, id: str, index: Optional[str] = None
) -> Optional[Document]:
pass
|
def get_document_by_id(
self, id: UUID, index: Optional[str] = None
) -> Optional[Document]:
pass
|
https://github.com/deepset-ai/haystack/issues/278
|
07/31/2020 16:12:59 - INFO - haystack.retriever.dpr_utils - Loading saved model from models/dpr/checkpoint/retriever/single/nq/bert-base-encoder.cp
07/31/2020 16:12:59 - INFO - haystack.retriever.dense - Loaded encoder params: {'do_lower_case': True, 'pretrained_model_cfg': 'bert-base-uncased', 'encoder_model_type': 'hf_bert', 'pretrained_file': None, 'projection_dim': 0, 'sequence_length': 256}
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - elasticsearch - POST https://df4bc7e5f2a54314ac10223dd343fe94.us-central1.gcp.cloud.es.io:9243/document/_search?scroll=5m&size=1000 [status:200 request:0.139s]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-22-9372aaabee19> in <module>()
11 # At query time, we only need to embed the query and compare it the existing doc embeddings which is very fast.
12
---> 13 document_store.update_embeddings(retriever)
14
15 # ES retreivar
5 frames
/usr/lib/python3.6/uuid.py in __init__(self, hex, bytes, bytes_le, fields, int, version)
138 hex = hex.strip('{}').replace('-', '')
139 if len(hex) != 32:
--> 140 raise ValueError('badly formed hexadecimal UUID string')
141 int = int_(hex, 16)
142 if bytes_le is not None:
**ValueError: badly formed hexadecimal UUID string**
|
ValueError
|
def get_document_by_id(self, id: str, index=None) -> Optional[Document]:
if index is None:
index = self.index
query = {"query": {"ids": {"values": [id]}}}
result = self.client.search(index=index, body=query)["hits"]["hits"]
document = self._convert_es_hit_to_document(result[0]) if result else None
return document
|
def get_document_by_id(self, id: Union[UUID, str], index=None) -> Optional[Document]:
if index is None:
index = self.index
query = {"query": {"ids": {"values": [id]}}}
result = self.client.search(index=index, body=query)["hits"]["hits"]
document = self._convert_es_hit_to_document(result[0]) if result else None
return document
|
https://github.com/deepset-ai/haystack/issues/278
|
07/31/2020 16:12:59 - INFO - haystack.retriever.dpr_utils - Loading saved model from models/dpr/checkpoint/retriever/single/nq/bert-base-encoder.cp
07/31/2020 16:12:59 - INFO - haystack.retriever.dense - Loaded encoder params: {'do_lower_case': True, 'pretrained_model_cfg': 'bert-base-uncased', 'encoder_model_type': 'hf_bert', 'pretrained_file': None, 'projection_dim': 0, 'sequence_length': 256}
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - elasticsearch - POST https://df4bc7e5f2a54314ac10223dd343fe94.us-central1.gcp.cloud.es.io:9243/document/_search?scroll=5m&size=1000 [status:200 request:0.139s]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-22-9372aaabee19> in <module>()
11 # At query time, we only need to embed the query and compare it the existing doc embeddings which is very fast.
12
---> 13 document_store.update_embeddings(retriever)
14
15 # ES retreivar
5 frames
/usr/lib/python3.6/uuid.py in __init__(self, hex, bytes, bytes_le, fields, int, version)
138 hex = hex.strip('{}').replace('-', '')
139 if len(hex) != 32:
--> 140 raise ValueError('badly formed hexadecimal UUID string')
141 int = int_(hex, 16)
142 if bytes_le is not None:
**ValueError: badly formed hexadecimal UUID string**
|
ValueError
|
def write_labels(
self, labels: Union[List[dict], List[Label]], index: Optional[str] = None
):
index = index or self.label_index
label_objects = [Label.from_dict(l) if isinstance(l, dict) else l for l in labels]
for label in label_objects:
label_id = str(uuid4())
self.indexes[index][label_id] = label
|
def write_labels(
self, labels: Union[List[dict], List[Label]], index: Optional[str] = None
):
index = index or self.label_index
label_objects = [Label.from_dict(l) if isinstance(l, dict) else l for l in labels]
for label in label_objects:
label_id = uuid.uuid4()
self.indexes[index][label_id] = label
|
https://github.com/deepset-ai/haystack/issues/278
|
07/31/2020 16:12:59 - INFO - haystack.retriever.dpr_utils - Loading saved model from models/dpr/checkpoint/retriever/single/nq/bert-base-encoder.cp
07/31/2020 16:12:59 - INFO - haystack.retriever.dense - Loaded encoder params: {'do_lower_case': True, 'pretrained_model_cfg': 'bert-base-uncased', 'encoder_model_type': 'hf_bert', 'pretrained_file': None, 'projection_dim': 0, 'sequence_length': 256}
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - elasticsearch - POST https://df4bc7e5f2a54314ac10223dd343fe94.us-central1.gcp.cloud.es.io:9243/document/_search?scroll=5m&size=1000 [status:200 request:0.139s]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-22-9372aaabee19> in <module>()
11 # At query time, we only need to embed the query and compare it the existing doc embeddings which is very fast.
12
---> 13 document_store.update_embeddings(retriever)
14
15 # ES retreivar
5 frames
/usr/lib/python3.6/uuid.py in __init__(self, hex, bytes, bytes_le, fields, int, version)
138 hex = hex.strip('{}').replace('-', '')
139 if len(hex) != 32:
--> 140 raise ValueError('badly formed hexadecimal UUID string')
141 int = int_(hex, 16)
142 if bytes_le is not None:
**ValueError: badly formed hexadecimal UUID string**
|
ValueError
|
def get_document_by_id(self, id: str, index: Optional[str] = None) -> Document:
index = index or self.index
return self.indexes[index][id]
|
def get_document_by_id(
self, id: Union[str, UUID], index: Optional[str] = None
) -> Document:
index = index or self.index
return self.indexes[index][id]
|
https://github.com/deepset-ai/haystack/issues/278
|
07/31/2020 16:12:59 - INFO - haystack.retriever.dpr_utils - Loading saved model from models/dpr/checkpoint/retriever/single/nq/bert-base-encoder.cp
07/31/2020 16:12:59 - INFO - haystack.retriever.dense - Loaded encoder params: {'do_lower_case': True, 'pretrained_model_cfg': 'bert-base-uncased', 'encoder_model_type': 'hf_bert', 'pretrained_file': None, 'projection_dim': 0, 'sequence_length': 256}
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - elasticsearch - POST https://df4bc7e5f2a54314ac10223dd343fe94.us-central1.gcp.cloud.es.io:9243/document/_search?scroll=5m&size=1000 [status:200 request:0.139s]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-22-9372aaabee19> in <module>()
11 # At query time, we only need to embed the query and compare it the existing doc embeddings which is very fast.
12
---> 13 document_store.update_embeddings(retriever)
14
15 # ES retreivar
5 frames
/usr/lib/python3.6/uuid.py in __init__(self, hex, bytes, bytes_le, fields, int, version)
138 hex = hex.strip('{}').replace('-', '')
139 if len(hex) != 32:
--> 140 raise ValueError('badly formed hexadecimal UUID string')
141 int = int_(hex, 16)
142 if bytes_le is not None:
**ValueError: badly formed hexadecimal UUID string**
|
ValueError
|
def get_document_by_id(self, id: str, index=None) -> Optional[Document]:
index = index or self.index
document_row = self.session.query(DocumentORM).filter_by(index=index, id=id).first()
document = document_row or self._convert_sql_row_to_document(document_row)
return document
|
def get_document_by_id(self, id: UUID, index=None) -> Optional[Document]:
index = index or self.index
document_row = self.session.query(DocumentORM).filter_by(index=index, id=id).first()
document = document_row or self._convert_sql_row_to_document(document_row)
return document
|
https://github.com/deepset-ai/haystack/issues/278
|
07/31/2020 16:12:59 - INFO - haystack.retriever.dpr_utils - Loading saved model from models/dpr/checkpoint/retriever/single/nq/bert-base-encoder.cp
07/31/2020 16:12:59 - INFO - haystack.retriever.dense - Loaded encoder params: {'do_lower_case': True, 'pretrained_model_cfg': 'bert-base-uncased', 'encoder_model_type': 'hf_bert', 'pretrained_file': None, 'projection_dim': 0, 'sequence_length': 256}
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - elasticsearch - POST https://df4bc7e5f2a54314ac10223dd343fe94.us-central1.gcp.cloud.es.io:9243/document/_search?scroll=5m&size=1000 [status:200 request:0.139s]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-22-9372aaabee19> in <module>()
11 # At query time, we only need to embed the query and compare it the existing doc embeddings which is very fast.
12
---> 13 document_store.update_embeddings(retriever)
14
15 # ES retreivar
5 frames
/usr/lib/python3.6/uuid.py in __init__(self, hex, bytes, bytes_le, fields, int, version)
138 hex = hex.strip('{}').replace('-', '')
139 if len(hex) != 32:
--> 140 raise ValueError('badly formed hexadecimal UUID string')
141 int = int_(hex, 16)
142 if bytes_le is not None:
**ValueError: badly formed hexadecimal UUID string**
|
ValueError
|
def write_labels(self, labels, index=None):
labels = [Label.from_dict(l) if isinstance(l, dict) else l for l in labels]
index = index or self.index
for label in labels:
label_orm = LabelORM(
id=str(uuid4()),
document_id=label.document_id,
no_answer=label.no_answer,
origin=label.origin,
question=label.question,
is_correct_answer=label.is_correct_answer,
is_correct_document=label.is_correct_document,
answer=label.answer,
offset_start_in_doc=label.offset_start_in_doc,
model_id=label.model_id,
index=index,
)
self.session.add(label_orm)
self.session.commit()
|
def write_labels(self, labels, index=None):
labels = [Label.from_dict(l) if isinstance(l, dict) else l for l in labels]
index = index or self.index
for label in labels:
label_orm = LabelORM(
document_id=label.document_id,
no_answer=label.no_answer,
origin=label.origin,
question=label.question,
is_correct_answer=label.is_correct_answer,
is_correct_document=label.is_correct_document,
answer=label.answer,
offset_start_in_doc=label.offset_start_in_doc,
model_id=label.model_id,
index=index,
)
self.session.add(label_orm)
self.session.commit()
|
https://github.com/deepset-ai/haystack/issues/278
|
07/31/2020 16:12:59 - INFO - haystack.retriever.dpr_utils - Loading saved model from models/dpr/checkpoint/retriever/single/nq/bert-base-encoder.cp
07/31/2020 16:12:59 - INFO - haystack.retriever.dense - Loaded encoder params: {'do_lower_case': True, 'pretrained_model_cfg': 'bert-base-uncased', 'encoder_model_type': 'hf_bert', 'pretrained_file': None, 'projection_dim': 0, 'sequence_length': 256}
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - haystack.retriever.dense - Loading saved model state ...
07/31/2020 16:13:09 - INFO - elasticsearch - POST https://df4bc7e5f2a54314ac10223dd343fe94.us-central1.gcp.cloud.es.io:9243/document/_search?scroll=5m&size=1000 [status:200 request:0.139s]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-22-9372aaabee19> in <module>()
11 # At query time, we only need to embed the query and compare it the existing doc embeddings which is very fast.
12
---> 13 document_store.update_embeddings(retriever)
14
15 # ES retreivar
5 frames
/usr/lib/python3.6/uuid.py in __init__(self, hex, bytes, bytes_le, fields, int, version)
138 hex = hex.strip('{}').replace('-', '')
139 if len(hex) != 32:
--> 140 raise ValueError('badly formed hexadecimal UUID string')
141 int = int_(hex, 16)
142 if bytes_le is not None:
**ValueError: badly formed hexadecimal UUID string**
|
ValueError
|
def __init__(
self,
document_store: Type[BaseDocumentStore],
embedding_model: str,
gpu: bool = True,
model_format: str = "farm",
pooling_strategy: str = "reduce_mean",
emb_extraction_layer: int = -1,
):
"""
TODO
:param document_store:
:param embedding_model:
:param gpu:
:param model_format:
"""
self.document_store = document_store
self.model_format = model_format
self.embedding_model = embedding_model
self.pooling_strategy = pooling_strategy
self.emb_extraction_layer = emb_extraction_layer
logger.info(f"Init retriever using embeddings of model {embedding_model}")
if model_format == "farm" or model_format == "transformers":
self.embedding_model = Inferencer.load(
embedding_model,
task_type="embeddings",
extraction_strategy=self.pooling_strategy,
extraction_layer=self.emb_extraction_layer,
gpu=gpu,
batch_size=4,
max_seq_len=512,
num_processes=0,
)
elif model_format == "sentence_transformers":
from sentence_transformers import SentenceTransformer
# pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models
# e.g. 'roberta-base-nli-stsb-mean-tokens'
if gpu:
device = "gpu"
else:
device = "cpu"
self.embedding_model = SentenceTransformer(embedding_model, device=device)
else:
raise NotImplementedError
|
def __init__(
self,
document_store: Type[BaseDocumentStore],
embedding_model: str,
gpu: bool = True,
model_format: str = "farm",
pooling_strategy: str = "reduce_mean",
emb_extraction_layer: int = -1,
):
"""
TODO
:param document_store:
:param embedding_model:
:param gpu:
:param model_format:
"""
self.document_store = document_store
self.model_format = model_format
self.embedding_model = embedding_model
self.pooling_strategy = pooling_strategy
self.emb_extraction_layer = emb_extraction_layer
logger.info(f"Init retriever using embeddings of model {embedding_model}")
if model_format == "farm" or model_format == "transformers":
self.embedding_model = Inferencer.load(
embedding_model,
task_type="embeddings",
extraction_strategy=self.pooling_strategy,
extraction_layer=self.emb_extraction_layer,
gpu=gpu,
batch_size=4,
max_seq_len=512,
num_processes=0,
)
elif model_format == "sentence_transformers":
from sentence_transformers import SentenceTransformer
# pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models
# e.g. 'roberta-base-nli-stsb-mean-tokens'
self.embedding_model = SentenceTransformer(embedding_model)
else:
raise NotImplementedError
|
https://github.com/deepset-ai/haystack/issues/116
|
Traceback (most recent call last):
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/serializer.py", line 50, in dumps
return json.dumps(
File "/home/pedro/.local/lib/python3.8/site-packages/simplejson/__init__.py", line 398, in dumps
return cls(
File "/home/pedro/.local/lib/python3.8/site-packages/simplejson/encoder.py", line 296, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/pedro/.local/lib/python3.8/site-packages/simplejson/encoder.py", line 378, in iterencode
return _iterencode(o, 0)
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/serializer.py", line 36, in default
raise TypeError("Unable to serialize %r (type: %s)" % (data, type(data)))
TypeError: Unable to serialize -0.065520875 (type: <class 'numpy.float32'>)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "sdk/accuracy_retriever.py", line 75, in <module>
top_docs = haystack_retriever(q, doc_store)
File "/mnt/Documents/Projets/BotPress/R_D/R_D_q_a/sdk/retrievers.py", line 113, in haystack_retriever
top_docs = retriever.retrieve(query=q, top_k=10)
File "/mnt/Documents/Projets/git_clones/haystack/haystack/retriever/elasticsearch.py", line 92, in retrieve
documents = self.document_store.query_by_embedding(query_emb[0], top_k, candidate_doc_ids)
File "/mnt/Documents/Projets/git_clones/haystack/haystack/database/elasticsearch.py", line 184, in query_by_embedding
result = self.client.search(index=self.index, body=body)["hits"]["hits"]
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/client/utils.py", line 92, in _wrapped
return func(*args, params=params, headers=headers, **kwargs)
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/client/__init__.py", line 1622, in search
return self.transport.perform_request(
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/transport.py", line 321, in perform_request
body = self.serializer.dumps(body)
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/serializer.py", line 54, in dumps
raise SerializationError(data, e)
elasticsearch.exceptions.SerializationError: ({'size': 10, 'query': {'script_score': {'query': {'match_all': {}}, 'script': {'source': "cosineSimilarity(params.query_vector,doc['question_emb']) + 1.0", 'params': {'query_vector': [-0.065520875, 0.023728848, ... lot of numbers ..., 0.047961414]}}}}, '_source': {'excludes': ['question_emb']}}, TypeError("Unable to serialize -0.065520875 (type: <class 'numpy.float32'>)"))
|
TypeError
|
def create_embedding(self, texts: [str]):
"""
Create embeddings for each text in a list of texts using the retrievers model (`self.embedding_model`)
:param texts: texts to embed
:return: list of embeddings (one per input text). Each embedding is a list of floats.
"""
# for backward compatibility: cast pure str input
if type(texts) == str:
texts = [texts]
assert type(texts) == list, (
"Expecting a list of texts, i.e. create_embeddings(texts=['text1',...])"
)
if self.model_format == "farm":
res = self.embedding_model.inference_from_dicts(
dicts=[{"text": t} for t in texts]
)
emb = [list(r["vec"]) for r in res] # cast from numpy
elif self.model_format == "sentence_transformers":
# text is single string, sentence-transformers needs a list of strings
res = self.embedding_model.encode(
texts
) # get back list of numpy embedding vectors
emb = [list(r.astype("float64")) for r in res] # cast from numpy
return emb
|
def create_embedding(self, texts: [str]):
"""
Create embeddings for each text in a list of texts using the retrievers model (`self.embedding_model`)
:param texts: texts to embed
:return: list of embeddings (one per input text). Each embedding is a list of floats.
"""
# for backward compatibility: cast pure str input
if type(texts) == str:
texts = [texts]
assert type(texts) == list, (
"Expecting a list of texts, i.e. create_embeddings(texts=['text1',...])"
)
if self.model_format == "farm":
res = self.embedding_model.inference_from_dicts(
dicts=[{"text": t} for t in texts]
)
emb = [list(r["vec"]) for r in res] # cast from numpy
elif self.model_format == "sentence_transformers":
# text is single string, sentence-transformers needs a list of strings
res = self.embedding_model.encode(
texts
) # get back list of numpy embedding vectors
emb = [list(r) for r in res] # cast from numpy
return emb
|
https://github.com/deepset-ai/haystack/issues/116
|
Traceback (most recent call last):
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/serializer.py", line 50, in dumps
return json.dumps(
File "/home/pedro/.local/lib/python3.8/site-packages/simplejson/__init__.py", line 398, in dumps
return cls(
File "/home/pedro/.local/lib/python3.8/site-packages/simplejson/encoder.py", line 296, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/pedro/.local/lib/python3.8/site-packages/simplejson/encoder.py", line 378, in iterencode
return _iterencode(o, 0)
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/serializer.py", line 36, in default
raise TypeError("Unable to serialize %r (type: %s)" % (data, type(data)))
TypeError: Unable to serialize -0.065520875 (type: <class 'numpy.float32'>)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "sdk/accuracy_retriever.py", line 75, in <module>
top_docs = haystack_retriever(q, doc_store)
File "/mnt/Documents/Projets/BotPress/R_D/R_D_q_a/sdk/retrievers.py", line 113, in haystack_retriever
top_docs = retriever.retrieve(query=q, top_k=10)
File "/mnt/Documents/Projets/git_clones/haystack/haystack/retriever/elasticsearch.py", line 92, in retrieve
documents = self.document_store.query_by_embedding(query_emb[0], top_k, candidate_doc_ids)
File "/mnt/Documents/Projets/git_clones/haystack/haystack/database/elasticsearch.py", line 184, in query_by_embedding
result = self.client.search(index=self.index, body=body)["hits"]["hits"]
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/client/utils.py", line 92, in _wrapped
return func(*args, params=params, headers=headers, **kwargs)
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/client/__init__.py", line 1622, in search
return self.transport.perform_request(
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/transport.py", line 321, in perform_request
body = self.serializer.dumps(body)
File "/home/pedro/.local/lib/python3.8/site-packages/elasticsearch/serializer.py", line 54, in dumps
raise SerializationError(data, e)
elasticsearch.exceptions.SerializationError: ({'size': 10, 'query': {'script_score': {'query': {'match_all': {}}, 'script': {'source': "cosineSimilarity(params.query_vector,doc['question_emb']) + 1.0", 'params': {'query_vector': [-0.065520875, 0.023728848, ... lot of numbers ..., 0.047961414]}}}}, '_source': {'excludes': ['question_emb']}}, TypeError("Unable to serialize -0.065520875 (type: <class 'numpy.float32'>)"))
|
TypeError
|
def install_environment(
prefix: Prefix,
version: str,
additional_dependencies: Sequence[str],
) -> None:
helpers.assert_version_default("golang", version)
directory = prefix.path(
helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
)
with clean_path_on_failure(directory):
remote = git.get_remote_url(prefix.prefix_dir)
repo_src_dir = os.path.join(directory, "src", guess_go_dir(remote))
# Clone into the goenv we'll create
cmd = ("git", "clone", "--recursive", ".", repo_src_dir)
helpers.run_setup_cmd(prefix, cmd)
if sys.platform == "cygwin": # pragma: no cover
_, gopath, _ = cmd_output("cygpath", "-w", directory)
gopath = gopath.strip()
else:
gopath = directory
env = dict(os.environ, GOPATH=gopath)
env.pop("GOBIN", None)
cmd_output_b("go", "get", "./...", cwd=repo_src_dir, env=env)
for dependency in additional_dependencies:
cmd_output_b("go", "get", dependency, cwd=repo_src_dir, env=env)
# Same some disk space, we don't need these after installation
rmtree(prefix.path(directory, "src"))
pkgdir = prefix.path(directory, "pkg")
if os.path.exists(pkgdir): # pragma: no cover (go<1.10)
rmtree(pkgdir)
|
def install_environment(
prefix: Prefix,
version: str,
additional_dependencies: Sequence[str],
) -> None:
helpers.assert_version_default("golang", version)
directory = prefix.path(
helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
)
with clean_path_on_failure(directory):
remote = git.get_remote_url(prefix.prefix_dir)
repo_src_dir = os.path.join(directory, "src", guess_go_dir(remote))
# Clone into the goenv we'll create
helpers.run_setup_cmd(prefix, ("git", "clone", ".", repo_src_dir))
if sys.platform == "cygwin": # pragma: no cover
_, gopath, _ = cmd_output("cygpath", "-w", directory)
gopath = gopath.strip()
else:
gopath = directory
env = dict(os.environ, GOPATH=gopath)
env.pop("GOBIN", None)
cmd_output_b("go", "get", "./...", cwd=repo_src_dir, env=env)
for dependency in additional_dependencies:
cmd_output_b("go", "get", dependency, cwd=repo_src_dir, env=env)
# Same some disk space, we don't need these after installation
rmtree(prefix.path(directory, "src"))
pkgdir = prefix.path(directory, "pkg")
if os.path.exists(pkgdir): # pragma: no cover (go<1.10)
rmtree(pkgdir)
|
https://github.com/pre-commit/pre-commit/issues/1788
|
Traceback (most recent call last):
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py", line 65, in error_handler
yield
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/main.py", line 378, in main
return run(args.config, store, args)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py", line 403, in run
install_hook_envs(hooks, store)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 224, in install_hook_envs
_hook_install(hook)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 82, in _hook_install
lang.install_environment(
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/languages/golang.py", line 81, in install_environment
cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)
File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/util.py", line 154, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: command: ('/usr/local/bin/go', 'get', './...')
return code: 2
expected return code: 0
stdout: (none)
stderr:
go: downloading github.com/sergi/go-diff v1.1.0
go: downloading github.com/fatih/color v1.9.0
go: downloading github.com/mattn/go-colorable v0.1.4
go: downloading github.com/mattn/go-isatty v0.0.11
go: downloading golang.org/x/sys v0.0.0-20191026070338-33540a1f6037
# github.com/google/go-jsonnet/c-bindings
libjsonnet.cpp:5:14: fatal error: 'libjsonnet.h' file not found
|
pre_commit.util.CalledProcessError
|
def get_root() -> str:
# Git 2.25 introduced a change to "rev-parse --show-toplevel" that exposed
# underlying volumes for Windows drives mapped with SUBST. We use
# "rev-parse --show-cdup" to get the appropriate path, but must perform
# an extra check to see if we are in the .git directory.
try:
root = os.path.realpath(
cmd_output("git", "rev-parse", "--show-cdup")[1].strip(),
)
git_dir = os.path.realpath(get_git_dir())
except CalledProcessError:
raise FatalError(
"git failed. Is it installed, and are you in a Git repository directory?",
)
if os.path.samefile(root, git_dir):
raise FatalError(
"git toplevel unexpectedly empty! make sure you are not "
"inside the `.git` directory of your repository.",
)
return root
|
def get_root() -> str:
# Git 2.25 introduced a change to "rev-parse --show-toplevel" that exposed
# underlying volumes for Windows drives mapped with SUBST. We use
# "rev-parse --show-cdup" to get the appropriate path, but must perform
# an extra check to see if we are in the .git directory.
try:
root = os.path.realpath(
cmd_output("git", "rev-parse", "--show-cdup")[1].strip(),
)
git_dir = os.path.realpath(get_git_dir())
except CalledProcessError:
raise FatalError(
"git failed. Is it installed, and are you in a Git repository directory?",
)
if os.path.commonpath((root, git_dir)) == git_dir:
raise FatalError(
"git toplevel unexpectedly empty! make sure you are not "
"inside the `.git` directory of your repository.",
)
return root
|
https://github.com/pre-commit/pre-commit/issues/1777
|
Traceback (most recent call last):
File "/home/vampas/.dotfiles/.ext/pyenv/versions/3.6.8/envs/SaltPriv-3.6/lib/python3.6/site-packages/pre_commit/error_handler.py", line 65, in error_handler
yield
File "/home/vampas/.dotfiles/.ext/pyenv/versions/3.6.8/envs/SaltPriv-3.6/lib/python3.6/site-packages/pre_commit/main.py", line 330, in main
_adjust_args_and_chdir(args)
File "/home/vampas/.dotfiles/.ext/pyenv/versions/3.6.8/envs/SaltPriv-3.6/lib/python3.6/site-packages/pre_commit/main.py", line 147, in _adjust_args_and_chdir
toplevel = git.get_root()
File "/home/vampas/.dotfiles/.ext/pyenv/versions/3.6.8/envs/SaltPriv-3.6/lib/python3.6/site-packages/pre_commit/git.py", line 66, in get_root
'git toplevel unexpectedly empty! make sure you are not '
pre_commit.errors.FatalError: git toplevel unexpectedly empty! make sure you are not inside the `.git` directory of your repository.
|
pre_commit.errors.FatalError
|
def get_env_patch(
venv: str,
language_version: str,
) -> PatchesT:
patches: PatchesT = (
("GEM_HOME", os.path.join(venv, "gems")),
("GEM_PATH", UNSET),
("BUNDLE_IGNORE_CONFIG", "1"),
)
if language_version == "system":
patches += (
(
"PATH",
(
os.path.join(venv, "gems", "bin"),
os.pathsep,
Var("PATH"),
),
),
)
else: # pragma: win32 no cover
patches += (
("RBENV_ROOT", venv),
(
"PATH",
(
os.path.join(venv, "gems", "bin"),
os.pathsep,
os.path.join(venv, "shims"),
os.pathsep,
os.path.join(venv, "bin"),
os.pathsep,
Var("PATH"),
),
),
)
if language_version not in {"system", "default"}: # pragma: win32 no cover
patches += (("RBENV_VERSION", language_version),)
return patches
|
def get_env_patch(
venv: str,
language_version: str,
) -> PatchesT:
patches: PatchesT = (
("GEM_HOME", os.path.join(venv, "gems")),
("GEM_PATH", UNSET),
("BUNDLE_IGNORE_CONFIG", "1"),
)
if language_version == "system":
patches += (
(
"PATH",
(
os.path.join(venv, "gems", "bin"),
os.pathsep,
Var("PATH"),
),
),
)
else: # pragma: win32 no cover
patches += (
("RBENV_ROOT", venv),
("RBENV_VERSION", language_version),
(
"PATH",
(
os.path.join(venv, "gems", "bin"),
os.pathsep,
os.path.join(venv, "shims"),
os.pathsep,
os.path.join(venv, "bin"),
os.pathsep,
Var("PATH"),
),
),
)
return patches
|
https://github.com/pre-commit/pre-commit/issues/1699
|
### version information
```
pre-commit version: 2.8.2
sys.version:
3.8.2 (default, Jul 7 2020, 11:55:37)
[Clang 11.0.3 (clang-1103.0.32.62)]
sys.executable: /Users/abuxton/.pyenv/versions/3.8.2/bin/python3.8
os.name: posix
sys.platform: darwin
```
### error information
```
An unexpected error has occurred: CalledProcessError: command: ('/usr/local/bin/bash', '/Users/abuxton/.rbenv/shims/gem', 'build', 'mdl.gemspec')
return code: 1
expected return code: 0
stdout: (none)
stderr:
rbenv: version `default' is not installed (set by RBENV_VERSION environment variable)
```
```
Traceback (most recent call last):
File "/Users/abuxton/.pyenv/versions/3.8.2/lib/python3.8/site-packages/pre_commit/error_handler.py", line 65, in error_handler
yield
File "/Users/abuxton/.pyenv/versions/3.8.2/lib/python3.8/site-packages/pre_commit/main.py", line 375, in main
return run(args.config, store, args)
File "/Users/abuxton/.pyenv/versions/3.8.2/lib/python3.8/site-packages/pre_commit/commands/run.py", line 388, in run
install_hook_envs(hooks, store)
File "/Users/abuxton/.pyenv/versions/3.8.2/lib/python3.8/site-packages/pre_commit/repository.py", line 206, in install_hook_envs
_hook_install(hook)
File "/Users/abuxton/.pyenv/versions/3.8.2/lib/python3.8/site-packages/pre_commit/repository.py", line 82, in _hook_install
lang.install_environment(
File "/Users/abuxton/.pyenv/versions/3.8.2/lib/python3.8/site-packages/pre_commit/languages/ruby.py", line 130, in install_environment
helpers.run_setup_cmd(
File "/Users/abuxton/.pyenv/versions/3.8.2/lib/python3.8/site-packages/pre_commit/languages/helpers.py", line 52, in run_setup_cmd
cmd_output_b(*cmd, cwd=prefix.prefix_dir)
File "/Users/abuxton/.pyenv/versions/3.8.2/lib/python3.8/site-packages/pre_commit/util.py", line 154, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: command: ('/usr/local/bin/bash', '/Users/abuxton/.rbenv/shims/gem', 'build', 'mdl.gemspec')
return code: 1
expected return code: 0
stdout: (none)
stderr:
rbenv: version `default' is not installed (set by RBENV_VERSION environment variable)
```
|
pre_commit.util.CalledProcessError
|
def get_default_version() -> str:
# nodeenv does not yet support `-n system` on windows
if sys.platform == "win32":
return C.DEFAULT
# if node is already installed, we can save a bunch of setup time by
# using the installed version
elif all(helpers.exe_exists(exe) for exe in ("node", "npm")):
return "system"
else:
return C.DEFAULT
|
def get_default_version() -> str:
# nodeenv does not yet support `-n system` on windows
if sys.platform == "win32":
return C.DEFAULT
# if node is already installed, we can save a bunch of setup time by
# using the installed version
elif all(parse_shebang.find_executable(exe) for exe in ("node", "npm")):
return "system"
else:
return C.DEFAULT
|
https://github.com/pre-commit/pre-commit/issues/1658
|
Traceback (most recent call last):
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py", line 63, in error_handler
yield
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/main.py", line 390, in main
return run(args.config, store, args)
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py", line 388, in run
install_hook_envs(hooks, store)
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 206, in install_hook_envs
_hook_install(hook)
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 92, in _hook_install
_write_state(hook.prefix, venv, _state(hook.additional_dependencies))
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 48, in _write_state
with open(staging, 'w') as state_file:
FileNotFoundError: [Errno 2] No such file or directory: '/Users/adithyabalaji/.cache/pre-commit/repo09b5y8iv/rbenv-system/.install_state_v1staging'
|
FileNotFoundError
|
def get_default_version() -> str:
if all(helpers.exe_exists(exe) for exe in ("ruby", "gem")):
return "system"
else:
return C.DEFAULT
|
def get_default_version() -> str:
if all(parse_shebang.find_executable(exe) for exe in ("ruby", "gem")):
return "system"
else:
return C.DEFAULT
|
https://github.com/pre-commit/pre-commit/issues/1658
|
Traceback (most recent call last):
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py", line 63, in error_handler
yield
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/main.py", line 390, in main
return run(args.config, store, args)
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py", line 388, in run
install_hook_envs(hooks, store)
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 206, in install_hook_envs
_hook_install(hook)
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 92, in _hook_install
_write_state(hook.prefix, venv, _state(hook.additional_dependencies))
File "/usr/local/Cellar/pre-commit/2.7.1_1/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 48, in _write_state
with open(staging, 'w') as state_file:
FileNotFoundError: [Errno 2] No such file or directory: '/Users/adithyabalaji/.cache/pre-commit/repo09b5y8iv/rbenv-system/.install_state_v1staging'
|
FileNotFoundError
|
def _find_by_py_launcher(
version: str,
) -> Optional[str]: # pragma: no cover (windows only)
if version.startswith("python"):
num = version[len("python") :]
cmd = ("py", f"-{num}", "-c", "import sys; print(sys.executable)")
env = dict(os.environ, PYTHONIOENCODING="UTF-8")
try:
return cmd_output(*cmd, env=env)[1].strip()
except CalledProcessError:
pass
return None
|
def _find_by_py_launcher(
version: str,
) -> Optional[str]: # pragma: no cover (windows only)
if version.startswith("python"):
num = version[len("python") :]
try:
cmd = ("py", f"-{num}", "-c", "import sys; print(sys.executable)")
return cmd_output(*cmd)[1].strip()
except CalledProcessError:
pass
return None
|
https://github.com/pre-commit/pre-commit/issues/1472
|
Traceback (most recent call last):
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\error_handler.py", line 56, in error_handler
yield
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\main.py", line 372, in main
args=args.rest[1:],
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\commands\hook_impl.py", line 217, in hook_impl
return retv | run(config, store, ns)
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\commands\run.py", line 357, in run
for hook in all_hooks(config, store)
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 206, in all_hooks
for repo in root_config['repos']
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 207, in <genexpr>
for hook in _repository_hooks(repo, store, root_config)
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 182, in _repository_hooks
return _cloned_repository_hooks(repo_config, store, root_config)
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 162, in _cloned_repository_hooks
for hook in repo_config['hooks']
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 162, in <listcomp>
for hook in repo_config['hooks']
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\repository.py", line 110, in _hook
ret['language_version'] = languages[lang].get_default_version()
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\languages\python.py", line 113, in get_default_version
if _find_by_py_launcher(exe):
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\languages\python.py", line 72, in _find_by_py_launcher
return cmd_output(*cmd)[1].strip()
File "c:\program files\git\dev\core\venv\lib\site-packages\pre_commit\util.py", line 164, in cmd_output
stdout = stdout_b.decode() if stdout_b is not None else None
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc0 in position 9: invalid start byte
|
UnicodeDecodeError
|
def migrate_config(config_file: str, quiet: bool = False) -> int:
# ensure that the configuration is a valid pre-commit configuration
load_config(config_file)
with open(config_file) as f:
orig_contents = contents = f.read()
contents = _migrate_map(contents)
contents = _migrate_sha_to_rev(contents)
if contents != orig_contents:
with open(config_file, "w") as f:
f.write(contents)
print("Configuration has been migrated.")
elif not quiet:
print("Configuration is already migrated.")
return 0
|
def migrate_config(config_file: str, quiet: bool = False) -> int:
with open(config_file) as f:
orig_contents = contents = f.read()
contents = _migrate_map(contents)
contents = _migrate_sha_to_rev(contents)
if contents != orig_contents:
with open(config_file, "w") as f:
f.write(contents)
print("Configuration has been migrated.")
elif not quiet:
print("Configuration is already migrated.")
return 0
|
https://github.com/pre-commit/pre-commit/issues/1447
|
Traceback (most recent call last):
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/error_handler.py", line 56, in error_handler
yield
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/main.py", line 354, in main
return autoupdate(
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/autoupdate.py", line 141, in autoupdate
migrate_config(config_file, quiet=True)
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py", line 49, in migrate_config
contents = _migrate_map(contents)
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py", line 28, in _migrate_map
if isinstance(yaml_load(contents), list):
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/__init__.py", line 114, in load
return loader.get_single_data()
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/constructor.py", line 49, in get_single_data
node = self.get_single_node()
File "ext/_yaml.pyx", line 707, in _yaml.CParser.get_single_node
File "ext/_yaml.pyx", line 726, in _yaml.CParser._compose_document
File "ext/_yaml.pyx", line 905, in _yaml.CParser._parse_next_event
yaml.scanner.ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 2, column 6
|
yaml.scanner.ScannerError
|
def py_interface(
_dir: str,
_make_venv: Callable[[str, str], None],
) -> Tuple[
Callable[[Prefix, str], ContextManager[None]],
Callable[[Prefix, str], bool],
Callable[[Hook, Sequence[str], bool], Tuple[int, bytes]],
Callable[[Prefix, str, Sequence[str]], None],
]:
@contextlib.contextmanager
def in_env(
prefix: Prefix,
language_version: str,
) -> Generator[None, None, None]:
envdir = prefix.path(helpers.environment_dir(_dir, language_version))
with envcontext(get_env_patch(envdir)):
yield
def healthy(prefix: Prefix, language_version: str) -> bool:
envdir = helpers.environment_dir(_dir, language_version)
exe_name = "python.exe" if sys.platform == "win32" else "python"
py_exe = prefix.path(bin_dir(envdir), exe_name)
with in_env(prefix, language_version):
retcode, _, _ = cmd_output_b(
py_exe,
"-c",
"import ctypes, datetime, io, os, ssl, weakref",
cwd="/",
retcode=None,
)
return retcode == 0
def run_hook(
hook: Hook,
file_args: Sequence[str],
color: bool,
) -> Tuple[int, bytes]:
with in_env(hook.prefix, hook.language_version):
return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
def install_environment(
prefix: Prefix,
version: str,
additional_dependencies: Sequence[str],
) -> None:
directory = helpers.environment_dir(_dir, version)
install = ("python", "-mpip", "install", ".", *additional_dependencies)
env_dir = prefix.path(directory)
with clean_path_on_failure(env_dir):
if version != C.DEFAULT:
python = norm_version(version)
else:
python = os.path.realpath(sys.executable)
_make_venv(env_dir, python)
with in_env(prefix, version):
helpers.run_setup_cmd(prefix, install)
return in_env, healthy, run_hook, install_environment
|
def py_interface(
_dir: str,
_make_venv: Callable[[str, str], None],
) -> Tuple[
Callable[[Prefix, str], ContextManager[None]],
Callable[[Prefix, str], bool],
Callable[[Hook, Sequence[str], bool], Tuple[int, bytes]],
Callable[[Prefix, str, Sequence[str]], None],
]:
@contextlib.contextmanager
def in_env(
prefix: Prefix,
language_version: str,
) -> Generator[None, None, None]:
envdir = prefix.path(helpers.environment_dir(_dir, language_version))
with envcontext(get_env_patch(envdir)):
yield
def healthy(prefix: Prefix, language_version: str) -> bool:
envdir = helpers.environment_dir(_dir, language_version)
exe_name = "python.exe" if sys.platform == "win32" else "python"
py_exe = prefix.path(bin_dir(envdir), exe_name)
with in_env(prefix, language_version):
retcode, _, _ = cmd_output_b(
py_exe,
"-c",
"import ctypes, datetime, io, os, ssl, weakref",
cwd="/",
retcode=None,
)
return retcode == 0
def run_hook(
hook: Hook,
file_args: Sequence[str],
color: bool,
) -> Tuple[int, bytes]:
with in_env(hook.prefix, hook.language_version):
return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
def install_environment(
prefix: Prefix,
version: str,
additional_dependencies: Sequence[str],
) -> None:
additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(_dir, version)
env_dir = prefix.path(directory)
with clean_path_on_failure(env_dir):
if version != C.DEFAULT:
python = norm_version(version)
else:
python = os.path.realpath(sys.executable)
_make_venv(env_dir, python)
with in_env(prefix, version):
helpers.run_setup_cmd(
prefix,
("pip", "install", ".") + additional_dependencies,
)
return in_env, healthy, run_hook, install_environment
|
https://github.com/pre-commit/pre-commit/issues/1398
|
Traceback (most recent call last):
File "c:\program files\python37\lib\site-packages\pre_commit\error_handler.py", line 56, in error_handler
yield
File "c:\program files\python37\lib\site-packages\pre_commit\main.py", line 372, in main
args=args.rest[1:],
File "c:\program files\python37\lib\site-packages\pre_commit\commands\hook_impl.py", line 187, in hook_impl
return retv | run(config, store, ns)
File "c:\program files\python37\lib\site-packages\pre_commit\commands\run.py", line 355, in run
install_hook_envs(hooks, store)
File "c:\program files\python37\lib\site-packages\pre_commit\repository.py", line 200, in install_hook_envs
_hook_install(hook)
File "c:\program files\python37\lib\site-packages\pre_commit\repository.py", line 83, in _hook_install
hook.prefix, hook.language_version, hook.additional_dependencies,
File "c:\program files\python37\lib\site-packages\pre_commit\languages\python.py", line 197, in install_environment
prefix, ('pip', 'install', '.') + additional_dependencies,
File "c:\program files\python37\lib\site-packages\pre_commit\languages\helpers.py", line 25, in run_setup_cmd
cmd_output_b(*cmd, cwd=prefix.prefix_dir)
File "c:\program files\python37\lib\site-packages\pre_commit\util.py", line 156, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: <unprintable CalledProcessError object>
|
pre_commit.util.CalledProcessError
|
def install_environment(
prefix: Prefix,
version: str,
additional_dependencies: Sequence[str],
) -> None:
directory = helpers.environment_dir(_dir, version)
install = ("python", "-mpip", "install", ".", *additional_dependencies)
env_dir = prefix.path(directory)
with clean_path_on_failure(env_dir):
if version != C.DEFAULT:
python = norm_version(version)
else:
python = os.path.realpath(sys.executable)
_make_venv(env_dir, python)
with in_env(prefix, version):
helpers.run_setup_cmd(prefix, install)
|
def install_environment(
prefix: Prefix,
version: str,
additional_dependencies: Sequence[str],
) -> None:
additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(_dir, version)
env_dir = prefix.path(directory)
with clean_path_on_failure(env_dir):
if version != C.DEFAULT:
python = norm_version(version)
else:
python = os.path.realpath(sys.executable)
_make_venv(env_dir, python)
with in_env(prefix, version):
helpers.run_setup_cmd(
prefix,
("pip", "install", ".") + additional_dependencies,
)
|
https://github.com/pre-commit/pre-commit/issues/1398
|
Traceback (most recent call last):
File "c:\program files\python37\lib\site-packages\pre_commit\error_handler.py", line 56, in error_handler
yield
File "c:\program files\python37\lib\site-packages\pre_commit\main.py", line 372, in main
args=args.rest[1:],
File "c:\program files\python37\lib\site-packages\pre_commit\commands\hook_impl.py", line 187, in hook_impl
return retv | run(config, store, ns)
File "c:\program files\python37\lib\site-packages\pre_commit\commands\run.py", line 355, in run
install_hook_envs(hooks, store)
File "c:\program files\python37\lib\site-packages\pre_commit\repository.py", line 200, in install_hook_envs
_hook_install(hook)
File "c:\program files\python37\lib\site-packages\pre_commit\repository.py", line 83, in _hook_install
hook.prefix, hook.language_version, hook.additional_dependencies,
File "c:\program files\python37\lib\site-packages\pre_commit\languages\python.py", line 197, in install_environment
prefix, ('pip', 'install', '.') + additional_dependencies,
File "c:\program files\python37\lib\site-packages\pre_commit\languages\helpers.py", line 25, in run_setup_cmd
cmd_output_b(*cmd, cwd=prefix.prefix_dir)
File "c:\program files\python37\lib\site-packages\pre_commit\util.py", line 156, in cmd_output_b
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
pre_commit.util.CalledProcessError: <unprintable CalledProcessError object>
|
pre_commit.util.CalledProcessError
|
def _log_and_exit(msg: str, exc: BaseException, formatted: str) -> None:
error_msg = f"{msg}: {type(exc).__name__}: ".encode() + force_bytes(exc)
output.write_line_b(error_msg)
log_path = os.path.join(Store().directory, "pre-commit.log")
output.write_line(f"Check the log at {log_path}")
with open(log_path, "wb") as log:
_log_line = functools.partial(output.write_line, stream=log)
_log_line_b = functools.partial(output.write_line_b, stream=log)
_log_line("### version information")
_log_line()
_log_line("```")
_log_line(f"pre-commit version: {C.VERSION}")
_log_line("sys.version:")
for line in sys.version.splitlines():
_log_line(f" {line}")
_log_line(f"sys.executable: {sys.executable}")
_log_line(f"os.name: {os.name}")
_log_line(f"sys.platform: {sys.platform}")
_log_line("```")
_log_line()
_log_line("### error information")
_log_line()
_log_line("```")
_log_line_b(error_msg)
_log_line("```")
_log_line()
_log_line("```")
_log_line(formatted)
_log_line("```")
raise SystemExit(1)
|
def _log_and_exit(msg: str, exc: BaseException, formatted: str) -> None:
error_msg = f"{msg}: {type(exc).__name__}: ".encode()
error_msg += _exception_to_bytes(exc)
output.write_line_b(error_msg)
log_path = os.path.join(Store().directory, "pre-commit.log")
output.write_line(f"Check the log at {log_path}")
with open(log_path, "wb") as log:
_log_line = functools.partial(output.write_line, stream=log)
_log_line_b = functools.partial(output.write_line_b, stream=log)
_log_line("### version information")
_log_line()
_log_line("```")
_log_line(f"pre-commit version: {C.VERSION}")
_log_line("sys.version:")
for line in sys.version.splitlines():
_log_line(f" {line}")
_log_line(f"sys.executable: {sys.executable}")
_log_line(f"os.name: {os.name}")
_log_line(f"sys.platform: {sys.platform}")
_log_line("```")
_log_line()
_log_line("### error information")
_log_line()
_log_line("```")
_log_line_b(error_msg)
_log_line("```")
_log_line()
_log_line("```")
_log_line(formatted)
_log_line("```")
raise SystemExit(1)
|
https://github.com/pre-commit/pre-commit/issues/1350
|
Traceback (most recent call last):
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/error_handler.py", line 54, in error_handler
yield
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/main.py", line 371, in main
return run(args.config, store, args)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/commands/run.py", line 339, in run
return _run_hooks(config, hooks, args, environ)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/commands/run.py", line 249, in _run_hooks
verbose=args.verbose, use_color=args.color,
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/commands/run.py", line 165, in _run_single_hook
retcode, out = language.run_hook(hook, filenames, use_color)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/languages/script.py", line 19, in run_hook
return helpers.run_xargs(hook, cmd, file_args, color=color)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/languages/helpers.py", line 109, in run_xargs
return xargs(cmd, file_args, **kwargs)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/xargs.py", line 153, in xargs
for proc_retcode, proc_out, _ in results:
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 598, in result_iterator
yield fs.pop().result()
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 428, in result
return self.__get_result()
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/xargs.py", line 146, in run_cmd_partition
*run_cmd, retcode=None, stderr=subprocess.STDOUT, **kwargs,
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/util.py", line 208, in cmd_output_p
proc = subprocess.Popen(cmd, **kwargs)
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/subprocess.py", line 800, in __init__
restore_signals, start_new_session)
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/subprocess.py", line 1551, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
OSError: [Errno 8] Exec format error: '/[redacted]/foo.py'
|
OSError
|
def cmd_output_b(
*cmd: str,
retcode: Optional[int] = 0,
**kwargs: Any,
) -> Tuple[int, bytes, Optional[bytes]]:
_setdefault_kwargs(kwargs)
try:
cmd = parse_shebang.normalize_cmd(cmd)
except parse_shebang.ExecutableNotFoundError as e:
returncode, stdout_b, stderr_b = e.to_output()
else:
try:
proc = subprocess.Popen(cmd, **kwargs)
except OSError as e:
returncode, stdout_b, stderr_b = _oserror_to_output(e)
else:
stdout_b, stderr_b = proc.communicate()
returncode = proc.returncode
if retcode is not None and retcode != returncode:
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
return returncode, stdout_b, stderr_b
|
def cmd_output_b(
*cmd: str,
retcode: Optional[int] = 0,
**kwargs: Any,
) -> Tuple[int, bytes, Optional[bytes]]:
_setdefault_kwargs(kwargs)
try:
cmd = parse_shebang.normalize_cmd(cmd)
except parse_shebang.ExecutableNotFoundError as e:
returncode, stdout_b, stderr_b = e.to_output()
else:
proc = subprocess.Popen(cmd, **kwargs)
stdout_b, stderr_b = proc.communicate()
returncode = proc.returncode
if retcode is not None and retcode != returncode:
raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)
return returncode, stdout_b, stderr_b
|
https://github.com/pre-commit/pre-commit/issues/1350
|
Traceback (most recent call last):
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/error_handler.py", line 54, in error_handler
yield
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/main.py", line 371, in main
return run(args.config, store, args)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/commands/run.py", line 339, in run
return _run_hooks(config, hooks, args, environ)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/commands/run.py", line 249, in _run_hooks
verbose=args.verbose, use_color=args.color,
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/commands/run.py", line 165, in _run_single_hook
retcode, out = language.run_hook(hook, filenames, use_color)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/languages/script.py", line 19, in run_hook
return helpers.run_xargs(hook, cmd, file_args, color=color)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/languages/helpers.py", line 109, in run_xargs
return xargs(cmd, file_args, **kwargs)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/xargs.py", line 153, in xargs
for proc_retcode, proc_out, _ in results:
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 598, in result_iterator
yield fs.pop().result()
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 428, in result
return self.__get_result()
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/xargs.py", line 146, in run_cmd_partition
*run_cmd, retcode=None, stderr=subprocess.STDOUT, **kwargs,
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/util.py", line 208, in cmd_output_p
proc = subprocess.Popen(cmd, **kwargs)
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/subprocess.py", line 800, in __init__
restore_signals, start_new_session)
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/subprocess.py", line 1551, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
OSError: [Errno 8] Exec format error: '/[redacted]/foo.py'
|
OSError
|
def cmd_output_p(
*cmd: str,
retcode: Optional[int] = 0,
**kwargs: Any,
) -> Tuple[int, bytes, Optional[bytes]]:
assert retcode is None
assert kwargs["stderr"] == subprocess.STDOUT, kwargs["stderr"]
_setdefault_kwargs(kwargs)
try:
cmd = parse_shebang.normalize_cmd(cmd)
except parse_shebang.ExecutableNotFoundError as e:
return e.to_output()
with open(os.devnull) as devnull, Pty() as pty:
assert pty.r is not None
kwargs.update({"stdin": devnull, "stdout": pty.w, "stderr": pty.w})
try:
proc = subprocess.Popen(cmd, **kwargs)
except OSError as e:
return _oserror_to_output(e)
pty.close_w()
buf = b""
while True:
try:
bts = os.read(pty.r, 4096)
except OSError as e:
if e.errno == errno.EIO:
bts = b""
else:
raise
else:
buf += bts
if not bts:
break
return proc.wait(), buf, None
|
def cmd_output_p(
*cmd: str,
retcode: Optional[int] = 0,
**kwargs: Any,
) -> Tuple[int, bytes, Optional[bytes]]:
assert retcode is None
assert kwargs["stderr"] == subprocess.STDOUT, kwargs["stderr"]
_setdefault_kwargs(kwargs)
try:
cmd = parse_shebang.normalize_cmd(cmd)
except parse_shebang.ExecutableNotFoundError as e:
return e.to_output()
with open(os.devnull) as devnull, Pty() as pty:
assert pty.r is not None
kwargs.update({"stdin": devnull, "stdout": pty.w, "stderr": pty.w})
proc = subprocess.Popen(cmd, **kwargs)
pty.close_w()
buf = b""
while True:
try:
bts = os.read(pty.r, 4096)
except OSError as e:
if e.errno == errno.EIO:
bts = b""
else:
raise
else:
buf += bts
if not bts:
break
return proc.wait(), buf, None
|
https://github.com/pre-commit/pre-commit/issues/1350
|
Traceback (most recent call last):
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/error_handler.py", line 54, in error_handler
yield
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/main.py", line 371, in main
return run(args.config, store, args)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/commands/run.py", line 339, in run
return _run_hooks(config, hooks, args, environ)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/commands/run.py", line 249, in _run_hooks
verbose=args.verbose, use_color=args.color,
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/commands/run.py", line 165, in _run_single_hook
retcode, out = language.run_hook(hook, filenames, use_color)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/languages/script.py", line 19, in run_hook
return helpers.run_xargs(hook, cmd, file_args, color=color)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/languages/helpers.py", line 109, in run_xargs
return xargs(cmd, file_args, **kwargs)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/xargs.py", line 153, in xargs
for proc_retcode, proc_out, _ in results:
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 598, in result_iterator
yield fs.pop().result()
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 428, in result
return self.__get_result()
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/xargs.py", line 146, in run_cmd_partition
*run_cmd, retcode=None, stderr=subprocess.STDOUT, **kwargs,
File "/[redacted]/venv/lib/python3.7/site-packages/pre_commit/util.py", line 208, in cmd_output_p
proc = subprocess.Popen(cmd, **kwargs)
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/subprocess.py", line 800, in __init__
restore_signals, start_new_session)
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/subprocess.py", line 1551, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
OSError: [Errno 8] Exec format error: '/[redacted]/foo.py'
|
OSError
|
def no_git_env(_env=None):
# Too many bugs dealing with environment variables and GIT:
# https://github.com/pre-commit/pre-commit/issues/300
# In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
# pre-commit hooks
# In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE
# while running pre-commit hooks in submodules.
# GIT_DIR: Causes git clone to clone wrong thing
# GIT_INDEX_FILE: Causes 'error invalid object ...' during commit
_env = _env if _env is not None else os.environ
return {
k: v
for k, v in _env.items()
if not k.startswith("GIT_")
or k in {"GIT_EXEC_PATH", "GIT_SSH", "GIT_SSH_COMMAND", "GIT_SSL_CAINFO"}
}
|
def no_git_env(_env=None):
# Too many bugs dealing with environment variables and GIT:
# https://github.com/pre-commit/pre-commit/issues/300
# In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
# pre-commit hooks
# In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE
# while running pre-commit hooks in submodules.
# GIT_DIR: Causes git clone to clone wrong thing
# GIT_INDEX_FILE: Causes 'error invalid object ...' during commit
_env = _env if _env is not None else os.environ
return {
k: v
for k, v in _env.items()
if not k.startswith("GIT_")
or k in {"GIT_EXEC_PATH", "GIT_SSH", "GIT_SSH_COMMAND"}
}
|
https://github.com/pre-commit/pre-commit/issues/1253
|
An unexpected error has occurred: CalledProcessError: Command: ('/home/igankevich/.guix-profile/bin/git', 'fetch', 'origin', '--tags')
Return code: 128
Expected return code: 0
Output: (none)
Errors:
fatal: unable to access 'https://github.com/pre-commit/pre-commit-hooks/': server certificate verification failed. CAfile: none CRLfile: none
Traceback (most recent call last):
File "/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/store.py", line 168, in clone_strategy
self._shallow_clone(ref, _git_cmd)
File "/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/store.py", line 150, in _shallow_clone
git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')
File "/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/store.py", line 165, in _git_cmd
cmd_output_b('git', *args, cwd=directory, env=env)
File "/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/util.py", line 147, in cmd_output_b
returncode, cmd, retcode, output=(stdout_b, stderr_b),
pre_commit.util.CalledProcessError: Command: ('/home/igankevich/.guix-profile/bin/git', '-c', 'protocol.version=2', 'fetch', 'origin', 'v2.4.0', '--depth=1')
Return code: 128
Expected return code: 0
Output: (none)
Errors:
fatal: unable to access 'https://github.com/pre-commit/pre-commit-hooks/': server certificate verification failed. CAfile: none CRLfile: none
|
pre_commit.util.CalledProcessError
|
def rmtree(path):
"""On windows, rmtree fails for readonly dirs."""
def handle_remove_readonly(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove, os.unlink) and excvalue.errno == errno.EACCES:
for p in (path, os.path.dirname(path)):
os.chmod(p, os.stat(p).st_mode | stat.S_IWUSR)
func(path)
else:
raise
shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
|
def rmtree(path):
"""On windows, rmtree fails for readonly dirs."""
def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)
excvalue = exc[1]
if func in (os.rmdir, os.remove, os.unlink) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
func(path)
else:
raise
shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
|
https://github.com/pre-commit/pre-commit/issues/1042
|
An unexpected error has occurred: PermissionError: [Errno 13] Permission denied: '/Users/detailyang/.cache/pre-commit/repo2ba1f3b5/golangenv-default/pkg/mod/github.com/!burnt!sushi/toml@v0.3.1/.gitignore'
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 447, in _rmtree_safe_fd
os.unlink(entry.name, dir_fd=topfd)
PermissionError: [Errno 13] Permission denied: '.gitignore'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/pre_commit/error_handler.py", line 46, in error_handler
yield
File "/usr/local/lib/python3.7/site-packages/pre_commit/main.py", line 294, in main
return run(args.config, store, args)
File "/usr/local/lib/python3.7/site-packages/pre_commit/commands/run.py", line 285, in run
install_hook_envs(hooks, store)
File "/usr/local/lib/python3.7/site-packages/pre_commit/repository.py", line 215, in install_hook_envs
hook.install()
File "/usr/local/lib/python3.7/site-packages/pre_commit/repository.py", line 90, in install
rmtree(self.prefix.path(venv))
File "/usr/local/lib/python3.7/site-packages/pre_commit/util.py", line 171, in rmtree
shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 491, in rmtree
_rmtree_safe_fd(fd, path, onerror)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 429, in _rmtree_safe_fd
_rmtree_safe_fd(dirfd, fullname, onerror)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 429, in _rmtree_safe_fd
_rmtree_safe_fd(dirfd, fullname, onerror)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 429, in _rmtree_safe_fd
_rmtree_safe_fd(dirfd, fullname, onerror)
[Previous line repeated 2 more times]
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 449, in _rmtree_safe_fd
onerror(os.unlink, fullname, sys.exc_info())
File "/usr/local/lib/python3.7/site-packages/pre_commit/util.py", line 168, in handle_remove_readonly
func(path)
PermissionError: [Errno 13] Permission denied: '/Users/detailyang/.cache/pre-commit/repo2ba1f3b5/golangenv-default/pkg/mod/github.com/!burnt!sushi/toml@v0.3.1/.gitignore'
|
PermissionError
|
def handle_remove_readonly(func, path, exc):
excvalue = exc[1]
if func in (os.rmdir, os.remove, os.unlink) and excvalue.errno == errno.EACCES:
for p in (path, os.path.dirname(path)):
os.chmod(p, os.stat(p).st_mode | stat.S_IWUSR)
func(path)
else:
raise
|
def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)
excvalue = exc[1]
if func in (os.rmdir, os.remove, os.unlink) and excvalue.errno == errno.EACCES:
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
func(path)
else:
raise
|
https://github.com/pre-commit/pre-commit/issues/1042
|
An unexpected error has occurred: PermissionError: [Errno 13] Permission denied: '/Users/detailyang/.cache/pre-commit/repo2ba1f3b5/golangenv-default/pkg/mod/github.com/!burnt!sushi/toml@v0.3.1/.gitignore'
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 447, in _rmtree_safe_fd
os.unlink(entry.name, dir_fd=topfd)
PermissionError: [Errno 13] Permission denied: '.gitignore'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/pre_commit/error_handler.py", line 46, in error_handler
yield
File "/usr/local/lib/python3.7/site-packages/pre_commit/main.py", line 294, in main
return run(args.config, store, args)
File "/usr/local/lib/python3.7/site-packages/pre_commit/commands/run.py", line 285, in run
install_hook_envs(hooks, store)
File "/usr/local/lib/python3.7/site-packages/pre_commit/repository.py", line 215, in install_hook_envs
hook.install()
File "/usr/local/lib/python3.7/site-packages/pre_commit/repository.py", line 90, in install
rmtree(self.prefix.path(venv))
File "/usr/local/lib/python3.7/site-packages/pre_commit/util.py", line 171, in rmtree
shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 491, in rmtree
_rmtree_safe_fd(fd, path, onerror)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 429, in _rmtree_safe_fd
_rmtree_safe_fd(dirfd, fullname, onerror)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 429, in _rmtree_safe_fd
_rmtree_safe_fd(dirfd, fullname, onerror)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 429, in _rmtree_safe_fd
_rmtree_safe_fd(dirfd, fullname, onerror)
[Previous line repeated 2 more times]
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 449, in _rmtree_safe_fd
onerror(os.unlink, fullname, sys.exc_info())
File "/usr/local/lib/python3.7/site-packages/pre_commit/util.py", line 168, in handle_remove_readonly
func(path)
PermissionError: [Errno 13] Permission denied: '/Users/detailyang/.cache/pre-commit/repo2ba1f3b5/golangenv-default/pkg/mod/github.com/!burnt!sushi/toml@v0.3.1/.gitignore'
|
PermissionError
|
def py_interface(_dir, _make_venv):
@contextlib.contextmanager
def in_env(prefix, language_version):
envdir = prefix.path(helpers.environment_dir(_dir, language_version))
with envcontext(get_env_patch(envdir)):
yield
def healthy(prefix, language_version):
with in_env(prefix, language_version):
retcode, _, _ = cmd_output(
"python",
"-c",
"import ctypes, datetime, io, os, ssl, weakref",
retcode=None,
encoding=None,
)
return retcode == 0
def run_hook(hook, file_args):
with in_env(hook.prefix, hook.language_version):
return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)
def install_environment(prefix, version, additional_dependencies):
additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(_dir, version)
env_dir = prefix.path(directory)
with clean_path_on_failure(env_dir):
if version != C.DEFAULT:
python = norm_version(version)
else:
python = os.path.realpath(sys.executable)
_make_venv(env_dir, python)
with in_env(prefix, version):
helpers.run_setup_cmd(
prefix,
("pip", "install", ".") + additional_dependencies,
)
return in_env, healthy, run_hook, install_environment
|
def py_interface(_dir, _make_venv):
@contextlib.contextmanager
def in_env(prefix, language_version):
envdir = prefix.path(helpers.environment_dir(_dir, language_version))
with envcontext(get_env_patch(envdir)):
yield
def healthy(prefix, language_version):
with in_env(prefix, language_version):
retcode, _, _ = cmd_output(
"python",
"-c",
"import ctypes, datetime, io, os, ssl, weakref",
retcode=None,
)
return retcode == 0
def run_hook(hook, file_args):
with in_env(hook.prefix, hook.language_version):
return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)
def install_environment(prefix, version, additional_dependencies):
additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(_dir, version)
env_dir = prefix.path(directory)
with clean_path_on_failure(env_dir):
if version != C.DEFAULT:
python = norm_version(version)
else:
python = os.path.realpath(sys.executable)
_make_venv(env_dir, python)
with in_env(prefix, version):
helpers.run_setup_cmd(
prefix,
("pip", "install", ".") + additional_dependencies,
)
return in_env, healthy, run_hook, install_environment
|
https://github.com/pre-commit/pre-commit/issues/1021
|
An unexpected error has occurred: UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe3 in position 282: invalid continuation byte
Traceback (most recent call last):
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\error_handler.py", line 46, in error_handler
yield
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\main.py", line 294, in main
return run(args.config, store, args)
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\commands\run.py", line 285, in run
install_hook_envs(hooks, store)
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\repository.py", line 210, in install_hook_envs
if not _need_installed():
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\repository.py", line 205, in _need_installed
if hook.install_key not in seen and not hook.installed():
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\repository.py", line 75, in installed
lang.healthy(self.prefix, self.language_version)
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\languages\python.py", line 139, in healthy
retcode, _, _ = cmd_output(
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\util.py", line 149, in cmd_output
stderr = stderr.decode(encoding)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe3 in position 282: invalid continuation byte
|
UnicodeDecodeError
|
def healthy(prefix, language_version):
with in_env(prefix, language_version):
retcode, _, _ = cmd_output(
"python",
"-c",
"import ctypes, datetime, io, os, ssl, weakref",
retcode=None,
encoding=None,
)
return retcode == 0
|
def healthy(prefix, language_version):
with in_env(prefix, language_version):
retcode, _, _ = cmd_output(
"python",
"-c",
"import ctypes, datetime, io, os, ssl, weakref",
retcode=None,
)
return retcode == 0
|
https://github.com/pre-commit/pre-commit/issues/1021
|
An unexpected error has occurred: UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe3 in position 282: invalid continuation byte
Traceback (most recent call last):
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\error_handler.py", line 46, in error_handler
yield
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\main.py", line 294, in main
return run(args.config, store, args)
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\commands\run.py", line 285, in run
install_hook_envs(hooks, store)
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\repository.py", line 210, in install_hook_envs
if not _need_installed():
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\repository.py", line 205, in _need_installed
if hook.install_key not in seen and not hook.installed():
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\repository.py", line 75, in installed
lang.healthy(self.prefix, self.language_version)
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\languages\python.py", line 139, in healthy
retcode, _, _ = cmd_output(
File "c:\pytest\.tox\linting\lib\site-packages\pre_commit\util.py", line 149, in cmd_output
stderr = stderr.decode(encoding)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe3 in position 282: invalid continuation byte
|
UnicodeDecodeError
|
def _repo_ref(tmpdir, repo, ref):
# if `ref` is explicitly passed, use it
if ref:
return repo, ref
ref = git.head_rev(repo)
# if it exists on disk, we'll try and clone it with the local changes
if os.path.exists(repo) and git.has_diff("HEAD", repo=repo):
logger.warning("Creating temporary repo with uncommitted changes...")
shadow = os.path.join(tmpdir, "shadow-repo")
cmd_output("git", "clone", repo, shadow)
cmd_output("git", "checkout", ref, "-b", "_pc_tmp", cwd=shadow)
idx = git.git_path("index", repo=shadow)
objs = git.git_path("objects", repo=shadow)
env = dict(os.environ, GIT_INDEX_FILE=idx, GIT_OBJECT_DIRECTORY=objs)
staged_files = git.get_staged_files(cwd=repo)
if staged_files:
xargs(("git", "add", "--"), staged_files, cwd=repo, env=env)
cmd_output("git", "add", "-u", cwd=repo, env=env)
git.commit(repo=shadow)
return shadow, git.head_rev(shadow)
else:
return repo, ref
|
def _repo_ref(tmpdir, repo, ref):
# if `ref` is explicitly passed, use it
if ref:
return repo, ref
ref = git.head_rev(repo)
# if it exists on disk, we'll try and clone it with the local changes
if os.path.exists(repo) and git.has_diff("HEAD", repo=repo):
logger.warning("Creating temporary repo with uncommitted changes...")
shadow = os.path.join(tmpdir, "shadow-repo")
cmd_output("git", "clone", repo, shadow)
cmd_output("git", "checkout", ref, "-b", "_pc_tmp", cwd=shadow)
idx = git.git_path("index", repo=shadow)
objs = git.git_path("objects", repo=shadow)
env = dict(os.environ, GIT_INDEX_FILE=idx, GIT_OBJECT_DIRECTORY=objs)
cmd_output("git", "add", "-u", cwd=repo, env=env)
git.commit(repo=shadow)
return shadow, git.head_rev(shadow)
else:
return repo, ref
|
https://github.com/pre-commit/pre-commit/issues/953
|
$ cat /home/asottile/.cache/pre-commit/pre-commit.log
An unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
Return code: 1
Expected return code: 0
Output:
On branch _pc_tmp
nothing to commit, working tree clean
Errors: (none)
Traceback (most recent call last):
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/error_handler.py", line 46, in error_handler
yield
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/main.py", line 295, in main
return try_repo(args)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/commands/try_repo.py", line 48, in try_repo
repo, ref = _repo_ref(tempdir, args.repo, args.ref)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/commands/try_repo.py", line 39, in _repo_ref
git.commit(repo=shadow)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/git.py", line 143, in commit
cmd_output(*cmd, cwd=repo, env=env)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/util.py", line 153, in cmd_output
returncode, cmd, retcode, output=(stdout, stderr),
pre_commit.util.CalledProcessError: Command: ('/usr/bin/git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
Return code: 1
Expected return code: 0
Output:
On branch _pc_tmp
nothing to commit, working tree clean
Errors: (none)
|
pre_commit.util.CalledProcessError
|
def get_staged_files(cwd=None):
return zsplit(
cmd_output(
"git",
"diff",
"--staged",
"--name-only",
"--no-ext-diff",
"-z",
# Everything except for D
"--diff-filter=ACMRTUXB",
cwd=cwd,
)[1]
)
|
def get_staged_files():
return zsplit(
cmd_output(
"git",
"diff",
"--staged",
"--name-only",
"--no-ext-diff",
"-z",
# Everything except for D
"--diff-filter=ACMRTUXB",
)[1]
)
|
https://github.com/pre-commit/pre-commit/issues/953
|
$ cat /home/asottile/.cache/pre-commit/pre-commit.log
An unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
Return code: 1
Expected return code: 0
Output:
On branch _pc_tmp
nothing to commit, working tree clean
Errors: (none)
Traceback (most recent call last):
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/error_handler.py", line 46, in error_handler
yield
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/main.py", line 295, in main
return try_repo(args)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/commands/try_repo.py", line 48, in try_repo
repo, ref = _repo_ref(tempdir, args.repo, args.ref)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/commands/try_repo.py", line 39, in _repo_ref
git.commit(repo=shadow)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/git.py", line 143, in commit
cmd_output(*cmd, cwd=repo, env=env)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/util.py", line 153, in cmd_output
returncode, cmd, retcode, output=(stdout, stderr),
pre_commit.util.CalledProcessError: Command: ('/usr/bin/git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
Return code: 1
Expected return code: 0
Output:
On branch _pc_tmp
nothing to commit, working tree clean
Errors: (none)
|
pre_commit.util.CalledProcessError
|
def xargs(cmd, varargs, **kwargs):
"""A simplified implementation of xargs.
negate: Make nonzero successful and zero a failure
target_concurrency: Target number of partitions to run concurrently
"""
negate = kwargs.pop("negate", False)
target_concurrency = kwargs.pop("target_concurrency", 1)
max_length = kwargs.pop("_max_length", _get_platform_max_length())
retcode = 0
stdout = b""
stderr = b""
try:
cmd = parse_shebang.normalize_cmd(cmd)
except parse_shebang.ExecutableNotFoundError as e:
return e.to_output()
partitions = partition(cmd, varargs, target_concurrency, max_length)
def run_cmd_partition(run_cmd):
return cmd_output(*run_cmd, encoding=None, retcode=None, **kwargs)
threads = min(len(partitions), target_concurrency)
with _thread_mapper(threads) as thread_map:
results = thread_map(run_cmd_partition, partitions)
for proc_retcode, proc_out, proc_err in results:
# This is *slightly* too clever so I'll explain it.
# First the xor boolean table:
# T | F |
# +-------+
# T | F | T |
# --+-------+
# F | T | F |
# --+-------+
# When negate is True, it has the effect of flipping the return
# code. Otherwise, the returncode is unchanged.
retcode |= bool(proc_retcode) ^ negate
stdout += proc_out
stderr += proc_err
return retcode, stdout, stderr
|
def xargs(cmd, varargs, **kwargs):
"""A simplified implementation of xargs.
negate: Make nonzero successful and zero a failure
target_concurrency: Target number of partitions to run concurrently
"""
negate = kwargs.pop("negate", False)
target_concurrency = kwargs.pop("target_concurrency", 1)
retcode = 0
stdout = b""
stderr = b""
try:
cmd = parse_shebang.normalize_cmd(cmd)
except parse_shebang.ExecutableNotFoundError as e:
return e.to_output()
partitions = partition(cmd, varargs, target_concurrency, **kwargs)
def run_cmd_partition(run_cmd):
return cmd_output(*run_cmd, encoding=None, retcode=None)
threads = min(len(partitions), target_concurrency)
with _thread_mapper(threads) as thread_map:
results = thread_map(run_cmd_partition, partitions)
for proc_retcode, proc_out, proc_err in results:
# This is *slightly* too clever so I'll explain it.
# First the xor boolean table:
# T | F |
# +-------+
# T | F | T |
# --+-------+
# F | T | F |
# --+-------+
# When negate is True, it has the effect of flipping the return
# code. Otherwise, the returncode is unchanged.
retcode |= bool(proc_retcode) ^ negate
stdout += proc_out
stderr += proc_err
return retcode, stdout, stderr
|
https://github.com/pre-commit/pre-commit/issues/953
|
$ cat /home/asottile/.cache/pre-commit/pre-commit.log
An unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
Return code: 1
Expected return code: 0
Output:
On branch _pc_tmp
nothing to commit, working tree clean
Errors: (none)
Traceback (most recent call last):
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/error_handler.py", line 46, in error_handler
yield
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/main.py", line 295, in main
return try_repo(args)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/commands/try_repo.py", line 48, in try_repo
repo, ref = _repo_ref(tempdir, args.repo, args.ref)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/commands/try_repo.py", line 39, in _repo_ref
git.commit(repo=shadow)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/git.py", line 143, in commit
cmd_output(*cmd, cwd=repo, env=env)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/util.py", line 153, in cmd_output
returncode, cmd, retcode, output=(stdout, stderr),
pre_commit.util.CalledProcessError: Command: ('/usr/bin/git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
Return code: 1
Expected return code: 0
Output:
On branch _pc_tmp
nothing to commit, working tree clean
Errors: (none)
|
pre_commit.util.CalledProcessError
|
def run_cmd_partition(run_cmd):
return cmd_output(*run_cmd, encoding=None, retcode=None, **kwargs)
|
def run_cmd_partition(run_cmd):
return cmd_output(*run_cmd, encoding=None, retcode=None)
|
https://github.com/pre-commit/pre-commit/issues/953
|
$ cat /home/asottile/.cache/pre-commit/pre-commit.log
An unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
Return code: 1
Expected return code: 0
Output:
On branch _pc_tmp
nothing to commit, working tree clean
Errors: (none)
Traceback (most recent call last):
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/error_handler.py", line 46, in error_handler
yield
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/main.py", line 295, in main
return try_repo(args)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/commands/try_repo.py", line 48, in try_repo
repo, ref = _repo_ref(tempdir, args.repo, args.ref)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/commands/try_repo.py", line 39, in _repo_ref
git.commit(repo=shadow)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/git.py", line 143, in commit
cmd_output(*cmd, cwd=repo, env=env)
File "/home/asottile/opt/venv/lib/python3.6/site-packages/pre_commit/util.py", line 153, in cmd_output
returncode, cmd, retcode, output=(stdout, stderr),
pre_commit.util.CalledProcessError: Command: ('/usr/bin/git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
Return code: 1
Expected return code: 0
Output:
On branch _pc_tmp
nothing to commit, working tree clean
Errors: (none)
|
pre_commit.util.CalledProcessError
|
def normexe(orig):
def _error(msg):
raise ExecutableNotFoundError("Executable `{}` {}".format(orig, msg))
if os.sep not in orig and (not os.altsep or os.altsep not in orig):
exe = find_executable(orig)
if exe is None:
_error("not found")
return exe
elif not os.access(orig, os.X_OK):
_error("not found")
elif os.path.isdir(orig):
_error("is a directory")
else:
return orig
|
def normexe(orig_exe):
if os.sep not in orig_exe:
exe = find_executable(orig_exe)
if exe is None:
raise ExecutableNotFoundError(
"Executable `{}` not found".format(orig_exe),
)
return exe
else:
return orig_exe
|
https://github.com/pre-commit/pre-commit/issues/782
|
Bashate..................................................................An unexpected error has occurred: OSError: [Errno 2] No such file or directory
Check the log at /Users/ssbarnea/.cache/pre-commit/pre-commit.log
An unexpected error has occurred: OSError: [Errno 2] No such file or directory
Traceback (most recent call last):
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/error_handler.py", line 47, in error_handler
yield
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/main.py", line 258, in main
return run(runner, args)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/commands/run.py", line 270, in run
return _run_hooks(runner.config, repo_hooks, args, environ)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/commands/run.py", line 199, in _run_hooks
retval |= _run_single_hook(filenames, hook, repo, args, skips, cols)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/commands/run.py", line 110, in _run_single_hook
hook, tuple(filenames) if hook['pass_filenames'] else (),
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/repository.py", line 207, in run_hook
return languages[language_name].run_hook(prefix, hook, file_args)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/languages/script.py", line 16, in run_hook
return xargs(cmd, file_args)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/xargs.py", line 63, in xargs
*run_cmd, encoding=None, retcode=None
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/site-packages/pre_commit/util.py", line 167, in cmd_output
proc = subprocess.Popen(cmd, **popen_kwargs)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/subprocess.py", line 390, in __init__
errread, errwrite)
File "/Users/ssbarnea/.pyenv/versions/2.7.14/lib/python2.7/subprocess.py", line 1025, in _execute_child
raise child_exception
OSError: [Errno 2] No such file or directory
|
OSError
|
def make_local(self, deps):
def make_local_strategy(directory):
copy_tree_to_path(resource_filename("empty_template"), directory)
env = no_git_env()
name, email = "pre-commit", "asottile+pre-commit@umich.edu"
env["GIT_AUTHOR_NAME"] = env["GIT_COMMITTER_NAME"] = name
env["GIT_AUTHOR_EMAIL"] = env["GIT_COMMITTER_EMAIL"] = email
# initialize the git repository so it looks more like cloned repos
def _git_cmd(*args):
cmd_output("git", "-C", directory, *args, env=env)
_git_cmd("init", ".")
_git_cmd("config", "remote.origin.url", "<<unknown>>")
_git_cmd("add", ".")
_git_cmd("commit", "--no-edit", "--no-gpg-sign", "-n", "-minit")
return self._new_repo(
"local:{}".format(",".join(sorted(deps))),
C.LOCAL_REPO_VERSION,
make_local_strategy,
)
|
def make_local(self, deps):
def make_local_strategy(directory):
copy_tree_to_path(resource_filename("empty_template"), directory)
return self._new_repo(
"local:{}".format(",".join(sorted(deps))),
C.LOCAL_REPO_VERSION,
make_local_strategy,
)
|
https://github.com/pre-commit/pre-commit/issues/679
|
An unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/git', 'config', 'remote.origin.url')
Return code: 1
Expected return code: 0
Output: (none)
Errors: (none)
Traceback (most recent call last):
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 47, in error_handler
yield
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/main.py", line 259, in main
return run(runner, args)
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 256, in run
repo.require_installed()
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/repository.py", line 202, in require_installed
_install_all(self._venvs, self.repo_config['repo'], self.store)
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/repository.py", line 102, in _install_all
language.install_environment(cmd_runner, version, deps)
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/languages/golang.py", line 60, in install_environment
remote = git.get_remote_url(repo_cmd_runner.path())
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/git.py", line 41, in get_remote_url
ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/util.py", line 188, in cmd_output
returncode, cmd, retcode, output=(stdout, stderr),
CalledProcessError: Command: ('/usr/bin/git', 'config', 'remote.origin.url')
Return code: 1
Expected return code: 0
Output: (none)
Errors: (none)
|
CalledProcessError
|
def make_local_strategy(directory):
copy_tree_to_path(resource_filename("empty_template"), directory)
env = no_git_env()
name, email = "pre-commit", "asottile+pre-commit@umich.edu"
env["GIT_AUTHOR_NAME"] = env["GIT_COMMITTER_NAME"] = name
env["GIT_AUTHOR_EMAIL"] = env["GIT_COMMITTER_EMAIL"] = email
# initialize the git repository so it looks more like cloned repos
def _git_cmd(*args):
cmd_output("git", "-C", directory, *args, env=env)
_git_cmd("init", ".")
_git_cmd("config", "remote.origin.url", "<<unknown>>")
_git_cmd("add", ".")
_git_cmd("commit", "--no-edit", "--no-gpg-sign", "-n", "-minit")
|
def make_local_strategy(directory):
copy_tree_to_path(resource_filename("empty_template"), directory)
|
https://github.com/pre-commit/pre-commit/issues/679
|
An unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/git', 'config', 'remote.origin.url')
Return code: 1
Expected return code: 0
Output: (none)
Errors: (none)
Traceback (most recent call last):
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 47, in error_handler
yield
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/main.py", line 259, in main
return run(runner, args)
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 256, in run
repo.require_installed()
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/repository.py", line 202, in require_installed
_install_all(self._venvs, self.repo_config['repo'], self.store)
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/repository.py", line 102, in _install_all
language.install_environment(cmd_runner, version, deps)
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/languages/golang.py", line 60, in install_environment
remote = git.get_remote_url(repo_cmd_runner.path())
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/git.py", line 41, in get_remote_url
ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]
File "/tmp/wat/venv/local/lib/python2.7/site-packages/pre_commit/util.py", line 188, in cmd_output
returncode, cmd, retcode, output=(stdout, stderr),
CalledProcessError: Command: ('/usr/bin/git', 'config', 'remote.origin.url')
Return code: 1
Expected return code: 0
Output: (none)
Errors: (none)
|
CalledProcessError
|
def staged_files_only(patch_dir):
"""Clear any unstaged changes from the git working directory inside this
context.
"""
# Determine if there are unstaged files
tree = cmd_output("git", "write-tree")[1].strip()
retcode, diff_stdout_binary, _ = cmd_output(
"git",
"diff-index",
"--ignore-submodules",
"--binary",
"--exit-code",
"--no-color",
"--no-ext-diff",
tree,
"--",
retcode=None,
encoding=None,
)
if retcode and diff_stdout_binary.strip():
patch_filename = "patch{}".format(int(time.time()))
patch_filename = os.path.join(patch_dir, patch_filename)
logger.warning("Unstaged files detected.")
logger.info(
"Stashing unstaged files to {}.".format(patch_filename),
)
# Save the current unstaged changes as a patch
mkdirp(patch_dir)
with io.open(patch_filename, "wb") as patch_file:
patch_file.write(diff_stdout_binary)
# Clear the working directory of unstaged changes
cmd_output("git", "checkout", "--", ".")
try:
yield
finally:
# Try to apply the patch we saved
try:
_git_apply(patch_filename)
except CalledProcessError:
logger.warning(
"Stashed changes conflicted with hook auto-fixes... "
"Rolling back fixes...",
)
# We failed to apply the patch, presumably due to fixes made
# by hooks.
# Roll back the changes made by hooks.
cmd_output("git", "checkout", "--", ".")
_git_apply(patch_filename)
logger.info("Restored changes from {}.".format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
# special
yield
|
def staged_files_only(patch_dir):
"""Clear any unstaged changes from the git working directory inside this
context.
"""
# Determine if there are unstaged files
tree = cmd_output("git", "write-tree")[1].strip()
retcode, diff_stdout_binary, _ = cmd_output(
"git",
"diff-index",
"--ignore-submodules",
"--binary",
"--exit-code",
"--no-color",
"--no-ext-diff",
tree,
"--",
retcode=None,
encoding=None,
)
if retcode and diff_stdout_binary.strip():
patch_filename = "patch{}".format(int(time.time()))
patch_filename = os.path.join(patch_dir, patch_filename)
logger.warning("Unstaged files detected.")
logger.info(
"Stashing unstaged files to {}.".format(patch_filename),
)
# Save the current unstaged changes as a patch
with io.open(patch_filename, "wb") as patch_file:
patch_file.write(diff_stdout_binary)
# Clear the working directory of unstaged changes
cmd_output("git", "checkout", "--", ".")
try:
yield
finally:
# Try to apply the patch we saved
try:
_git_apply(patch_filename)
except CalledProcessError:
logger.warning(
"Stashed changes conflicted with hook auto-fixes... "
"Rolling back fixes...",
)
# We failed to apply the patch, presumably due to fixes made
# by hooks.
# Roll back the changes made by hooks.
cmd_output("git", "checkout", "--", ".")
_git_apply(patch_filename)
logger.info("Restored changes from {}.".format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
# special
yield
|
https://github.com/pre-commit/pre-commit/issues/621
|
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 44, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 231, in main
return run(runner, args)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 249, in run
with ctx:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/home/asottile/workspace/pre-commit/pre_commit/staged_files_only.py", line 46, in staged_files_only
with io.open(patch_filename, 'wb') as patch_file:
IOError: [Errno 2] No such file or directory: '/home/asottile/.cache/pre-commit/patch1505686307'
|
IOError
|
def staged_files_only(cmd_runner):
"""Clear any unstaged changes from the git working directory inside this
context.
Args:
cmd_runner - PrefixedCommandRunner
"""
# Determine if there are unstaged files
tree = cmd_runner.run(("git", "write-tree"))[1].strip()
retcode, diff_stdout_binary, _ = cmd_runner.run(
(
"git",
"diff-index",
"--ignore-submodules",
"--binary",
"--exit-code",
"--no-color",
"--no-ext-diff",
tree,
"--",
),
retcode=None,
encoding=None,
)
if retcode and diff_stdout_binary.strip():
patch_filename = cmd_runner.path("patch{}".format(int(time.time())))
logger.warning("Unstaged files detected.")
logger.info(
"Stashing unstaged files to {}.".format(patch_filename),
)
# Save the current unstaged changes as a patch
with io.open(patch_filename, "wb") as patch_file:
patch_file.write(diff_stdout_binary)
# Clear the working directory of unstaged changes
cmd_runner.run(("git", "checkout", "--", "."))
try:
yield
finally:
# Try to apply the patch we saved
try:
_git_apply(cmd_runner, patch_filename)
except CalledProcessError:
logger.warning(
"Stashed changes conflicted with hook auto-fixes... "
"Rolling back fixes...",
)
# We failed to apply the patch, presumably due to fixes made
# by hooks.
# Roll back the changes made by hooks.
cmd_runner.run(("git", "checkout", "--", "."))
_git_apply(cmd_runner, patch_filename)
logger.info("Restored changes from {}.".format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
# special
yield
|
def staged_files_only(cmd_runner):
"""Clear any unstaged changes from the git working directory inside this
context.
Args:
cmd_runner - PrefixedCommandRunner
"""
# Determine if there are unstaged files
tree = cmd_runner.run(("git", "write-tree"))[1].strip()
retcode, diff_stdout_binary, _ = cmd_runner.run(
(
"git",
"diff-index",
"--ignore-submodules",
"--binary",
"--exit-code",
"--no-color",
"--no-ext-diff",
tree,
"--",
),
retcode=None,
encoding=None,
)
if retcode and diff_stdout_binary.strip():
patch_filename = cmd_runner.path("patch{}".format(int(time.time())))
logger.warning("Unstaged files detected.")
logger.info(
"Stashing unstaged files to {}.".format(patch_filename),
)
# Save the current unstaged changes as a patch
with io.open(patch_filename, "wb") as patch_file:
patch_file.write(diff_stdout_binary)
# Clear the working directory of unstaged changes
cmd_runner.run(("git", "checkout", "--", "."))
try:
yield
finally:
# Try to apply the patch we saved
try:
cmd_runner.run(
("git", "apply", "--whitespace=nowarn", patch_filename),
encoding=None,
)
except CalledProcessError:
logger.warning(
"Stashed changes conflicted with hook auto-fixes... "
"Rolling back fixes...",
)
# We failed to apply the patch, presumably due to fixes made
# by hooks.
# Roll back the changes made by hooks.
cmd_runner.run(("git", "checkout", "--", "."))
cmd_runner.run(
("git", "apply", patch_filename, "--whitespace=nowarn"),
encoding=None,
)
logger.info("Restored changes from {}.".format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
# special
yield
|
https://github.com/pre-commit/pre-commit/issues/570
|
An unexpected error has occurred: CalledProcessError: Command: ('C:\\Program Files\\Git\\mingw64\\libexec\\git-core\\git.exe', 'apply', 'C:\\Users\\56929\\.pre-commit\\patch1501483011')
Return code: 1
Expected return code: 0
Output: (none)
Errors:
error: patch failed: svnchecker_stylelint_support/checks/Stylelint.py:20
error: svnchecker_stylelint_support/checks/Stylelint.py: patch does not apply
Traceback (most recent call last):
File "c:\python27\lib\site-packages\pre_commit\error_handler.py", line 48, in error_handler
yield
File "c:\python27\lib\site-packages\pre_commit\main.py", line 231, in main
return run(runner, args)
File "c:\python27\lib\site-packages\pre_commit\commands\run.py", line 273, in run
return _run_hooks(repo_hooks, args, environ)
File "c:\python27\lib\contextlib.py", line 24, in __exit__
self.gen.next()
File "c:\python27\lib\site-packages\pre_commit\staged_files_only.py", line 58, in staged_files_only
cmd_runner.run(('git', 'apply', patch_filename), encoding=None)
File "c:\python27\lib\site-packages\pre_commit\prefixed_command_runner.py", line 38, in run
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
File "c:\python27\lib\site-packages\pre_commit\util.py", line 189, in cmd_output
returncode, cmd, retcode, output=(stdout, stderr),
CalledProcessError: Command: ('C:\\Program Files\\Git\\mingw64\\libexec\\git-core\\git.exe', 'apply', 'C:\\Users\\56929\\.pre-commit\\patch1501483011')
Return code: 1
Expected return code: 0
Output: (none)
Errors:
error: patch failed: svnchecker_stylelint_support/checks/Stylelint.py:20
error: svnchecker_stylelint_support/checks/Stylelint.py: patch does not apply
|
CalledProcessError
|
def _run_single_hook(hook, repo, args, skips, cols):
filenames = get_filenames(args, hook.get("files", ""), hook["exclude"])
if hook["id"] in skips:
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
end_msg=SKIPPED,
end_color=color.YELLOW,
use_color=args.color,
cols=cols,
)
)
return 0
elif not filenames and not hook["always_run"]:
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
postfix=NO_FILES,
end_msg=SKIPPED,
end_color=color.TURQUOISE,
use_color=args.color,
cols=cols,
)
)
return 0
# Print the hook and the dots first in case the hook takes hella long to
# run.
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
end_len=6,
cols=cols,
)
)
sys.stdout.flush()
diff_before = cmd_output("git", "diff", retcode=None, encoding=None)
retcode, stdout, stderr = repo.run_hook(
hook,
tuple(filenames) if hook["pass_filenames"] else (),
)
diff_after = cmd_output("git", "diff", retcode=None, encoding=None)
file_modifications = diff_before != diff_after
# If the hook makes changes, fail the commit
if file_modifications:
retcode = 1
if retcode:
retcode = 1
print_color = color.RED
pass_fail = "Failed"
else:
retcode = 0
print_color = color.GREEN
pass_fail = "Passed"
output.write_line(color.format_color(pass_fail, print_color, args.color))
if (stdout or stderr or file_modifications) and (retcode or args.verbose):
output.write_line("hookid: {}\n".format(hook["id"]))
# Print a message if failing due to file modifications
if file_modifications:
output.write("Files were modified by this hook.")
if stdout or stderr:
output.write_line(" Additional output:")
output.write_line()
for out in (stdout, stderr):
assert type(out) is bytes, type(out)
if out.strip():
output.write_line(out.strip(), logfile_name=hook["log_file"])
output.write_line()
return retcode
|
def _run_single_hook(hook, repo, args, skips, cols):
filenames = get_filenames(args, hook["files"], hook["exclude"])
if hook["id"] in skips:
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
end_msg=SKIPPED,
end_color=color.YELLOW,
use_color=args.color,
cols=cols,
)
)
return 0
elif not filenames and not hook["always_run"]:
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
postfix=NO_FILES,
end_msg=SKIPPED,
end_color=color.TURQUOISE,
use_color=args.color,
cols=cols,
)
)
return 0
# Print the hook and the dots first in case the hook takes hella long to
# run.
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
end_len=6,
cols=cols,
)
)
sys.stdout.flush()
diff_before = cmd_output("git", "diff", retcode=None, encoding=None)
retcode, stdout, stderr = repo.run_hook(
hook,
tuple(filenames) if hook["pass_filenames"] else (),
)
diff_after = cmd_output("git", "diff", retcode=None, encoding=None)
file_modifications = diff_before != diff_after
# If the hook makes changes, fail the commit
if file_modifications:
retcode = 1
if retcode:
retcode = 1
print_color = color.RED
pass_fail = "Failed"
else:
retcode = 0
print_color = color.GREEN
pass_fail = "Passed"
output.write_line(color.format_color(pass_fail, print_color, args.color))
if (stdout or stderr or file_modifications) and (retcode or args.verbose):
output.write_line("hookid: {}\n".format(hook["id"]))
# Print a message if failing due to file modifications
if file_modifications:
output.write("Files were modified by this hook.")
if stdout or stderr:
output.write_line(" Additional output:")
output.write_line()
for out in (stdout, stderr):
assert type(out) is bytes, type(out)
if out.strip():
output.write_line(out.strip(), logfile_name=hook["log_file"])
output.write_line()
return retcode
|
https://github.com/pre-commit/pre-commit/issues/533
|
$ cat ~/.pre-commit/pre-commit.log
An unexpected error has occurred: KeyError: u'files'
Traceback (most recent call last):
File "/tmp/foo/pre-commit/pre_commit/error_handler.py", line 48, in error_handler
yield
File "/tmp/foo/pre-commit/pre_commit/main.py", line 226, in main
return run(runner, args)
File "/tmp/foo/pre-commit/pre_commit/commands/run.py", line 235, in run
return _run_hooks(repo_hooks, args, environ)
File "/tmp/foo/pre-commit/pre_commit/commands/run.py", line 155, in _run_hooks
retval |= _run_single_hook(hook, repo, args, skips, cols)
File "/tmp/foo/pre-commit/pre_commit/commands/run.py", line 60, in _run_single_hook
filenames = get_filenames(args, hook['files'], hook['exclude'])
KeyError: u'files'
|
KeyError
|
def _run_single_hook(hook, repo, args, skips, cols):
filenames = get_filenames(args, hook.get("files", "^$"), hook["exclude"])
if hook["id"] in skips:
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
end_msg=SKIPPED,
end_color=color.YELLOW,
use_color=args.color,
cols=cols,
)
)
return 0
elif not filenames and not hook["always_run"]:
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
postfix=NO_FILES,
end_msg=SKIPPED,
end_color=color.TURQUOISE,
use_color=args.color,
cols=cols,
)
)
return 0
# Print the hook and the dots first in case the hook takes hella long to
# run.
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
end_len=6,
cols=cols,
)
)
sys.stdout.flush()
diff_before = cmd_output("git", "diff", retcode=None, encoding=None)
retcode, stdout, stderr = repo.run_hook(
hook,
tuple(filenames) if hook["pass_filenames"] else (),
)
diff_after = cmd_output("git", "diff", retcode=None, encoding=None)
file_modifications = diff_before != diff_after
# If the hook makes changes, fail the commit
if file_modifications:
retcode = 1
if retcode:
retcode = 1
print_color = color.RED
pass_fail = "Failed"
else:
retcode = 0
print_color = color.GREEN
pass_fail = "Passed"
output.write_line(color.format_color(pass_fail, print_color, args.color))
if (stdout or stderr or file_modifications) and (retcode or args.verbose):
output.write_line("hookid: {}\n".format(hook["id"]))
# Print a message if failing due to file modifications
if file_modifications:
output.write("Files were modified by this hook.")
if stdout or stderr:
output.write_line(" Additional output:")
output.write_line()
for out in (stdout, stderr):
assert type(out) is bytes, type(out)
if out.strip():
output.write_line(out.strip(), logfile_name=hook["log_file"])
output.write_line()
return retcode
|
def _run_single_hook(hook, repo, args, skips, cols):
filenames = get_filenames(args, hook.get("files", ""), hook["exclude"])
if hook["id"] in skips:
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
end_msg=SKIPPED,
end_color=color.YELLOW,
use_color=args.color,
cols=cols,
)
)
return 0
elif not filenames and not hook["always_run"]:
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
postfix=NO_FILES,
end_msg=SKIPPED,
end_color=color.TURQUOISE,
use_color=args.color,
cols=cols,
)
)
return 0
# Print the hook and the dots first in case the hook takes hella long to
# run.
output.write(
get_hook_message(
_hook_msg_start(hook, args.verbose),
end_len=6,
cols=cols,
)
)
sys.stdout.flush()
diff_before = cmd_output("git", "diff", retcode=None, encoding=None)
retcode, stdout, stderr = repo.run_hook(
hook,
tuple(filenames) if hook["pass_filenames"] else (),
)
diff_after = cmd_output("git", "diff", retcode=None, encoding=None)
file_modifications = diff_before != diff_after
# If the hook makes changes, fail the commit
if file_modifications:
retcode = 1
if retcode:
retcode = 1
print_color = color.RED
pass_fail = "Failed"
else:
retcode = 0
print_color = color.GREEN
pass_fail = "Passed"
output.write_line(color.format_color(pass_fail, print_color, args.color))
if (stdout or stderr or file_modifications) and (retcode or args.verbose):
output.write_line("hookid: {}\n".format(hook["id"]))
# Print a message if failing due to file modifications
if file_modifications:
output.write("Files were modified by this hook.")
if stdout or stderr:
output.write_line(" Additional output:")
output.write_line()
for out in (stdout, stderr):
assert type(out) is bytes, type(out)
if out.strip():
output.write_line(out.strip(), logfile_name=hook["log_file"])
output.write_line()
return retcode
|
https://github.com/pre-commit/pre-commit/issues/533
|
$ cat ~/.pre-commit/pre-commit.log
An unexpected error has occurred: KeyError: u'files'
Traceback (most recent call last):
File "/tmp/foo/pre-commit/pre_commit/error_handler.py", line 48, in error_handler
yield
File "/tmp/foo/pre-commit/pre_commit/main.py", line 226, in main
return run(runner, args)
File "/tmp/foo/pre-commit/pre_commit/commands/run.py", line 235, in run
return _run_hooks(repo_hooks, args, environ)
File "/tmp/foo/pre-commit/pre_commit/commands/run.py", line 155, in _run_hooks
retval |= _run_single_hook(hook, repo, args, skips, cols)
File "/tmp/foo/pre-commit/pre_commit/commands/run.py", line 60, in _run_single_hook
filenames = get_filenames(args, hook['files'], hook['exclude'])
KeyError: u'files'
|
KeyError
|
def install_environment(
repo_cmd_runner,
version="default",
additional_dependencies=(),
):
additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
# Install a virtualenv
with clean_path_on_failure(repo_cmd_runner.path(directory)):
venv_cmd = [
sys.executable,
"-m",
"virtualenv",
"{{prefix}}{}".format(directory),
]
if version != "default":
venv_cmd.extend(["-p", norm_version(version)])
else:
venv_cmd.extend(["-p", os.path.realpath(sys.executable)])
repo_cmd_runner.run(venv_cmd)
with in_env(repo_cmd_runner, version):
helpers.run_setup_cmd(
repo_cmd_runner,
("pip", "install", ".") + additional_dependencies,
)
|
def install_environment(
repo_cmd_runner,
version="default",
additional_dependencies=(),
):
additional_dependencies = tuple(additional_dependencies)
directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
# Install a virtualenv
with clean_path_on_failure(repo_cmd_runner.path(directory)):
venv_cmd = [
sys.executable,
"-m",
"virtualenv",
"{{prefix}}{}".format(directory),
]
if version != "default":
venv_cmd.extend(["-p", norm_version(version)])
repo_cmd_runner.run(venv_cmd)
with in_env(repo_cmd_runner, version):
helpers.run_setup_cmd(
repo_cmd_runner,
("pip", "install", ".") + additional_dependencies,
)
|
https://github.com/pre-commit/pre-commit/issues/419
|
An unexpected error has occurred: CalledProcessError: Command: ('/Users/amcgregor/Projects/marrow/.venv/bin/python3', '-m', 'virtualenv', '/Users/amcgregor/.pre-commit/repofu57ylaa/py_env-default')
Return code: 100
Expected return code: 0
Output:
Using base prefix '/usr/local/bin/../../../Library/Frameworks/Python.framework/Versions/3.5'
New python executable in /Users/amcgregor/.pre-commit/repofu57ylaa/py_env-default/bin/python3
Also creating executable in /Users/amcgregor/.pre-commit/repofu57ylaa/py_env-default/bin/python
ERROR: The executable /Users/amcgregor/.pre-commit/repofu57ylaa/py_env-default/bin/python3 is not functioning
ERROR: It thinks sys.prefix is '/Library/Frameworks/Python.framework/Versions/3.5' (should be '/Users/amcgregor/.pre-commit/repofu57ylaa/py_env-default')
ERROR: virtualenv is not compatible with this system or executable
Errors: (none)
Traceback (most recent call last):
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/error_handler.py", line 47, in error_handler
yield
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/main.py", line 157, in main
return run(runner, args)
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/commands/run.py", line 212, in run
return _run_hooks(repo_hooks, args, write, environ)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/contextlib.py", line 77, in __exit__
self.gen.throw(type, value, traceback)
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/staged_files_only.py", line 63, in staged_files_only
yield
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/commands/run.py", line 195, in run
repo_hooks = list(get_repo_hooks(runner))
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/commands/run.py", line 141, in get_repo_hooks
for repo in runner.repositories:
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/cached_property.py", line 26, in __get__
value = obj.__dict__[self.func.__name__] = self.func(obj)
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/runner.py", line 47, in repositories
repository.require_installed()
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/repository.py", line 117, in require_installed
self.install()
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/repository.py", line 187, in install
self.additional_dependencies[language_name][language_version],
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/languages/python.py", line 78, in install_environment
('pip', 'install', '.') + additional_dependencies,
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/contextlib.py", line 77, in __exit__
self.gen.throw(type, value, traceback)
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/util.py", line 58, in clean_path_on_failure
yield
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/languages/python.py", line 74, in install_environment
repo_cmd_runner.run(venv_cmd)
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/prefixed_command_runner.py", line 39, in run
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
File "/Users/amcgregor/Projects/marrow/.venv/lib/python3.5/site-packages/pre_commit/util.py", line 189, in cmd_output
returncode, cmd, retcode, output=(stdout, stderr),
pre_commit.util.CalledProcessError: Command: ('/Users/amcgregor/Projects/marrow/.venv/bin/python3', '-m', 'virtualenv', '/Users/amcgregor/.pre-commit/repofu57ylaa/py_env-default')
Return code: 100
Expected return code: 0
Output:
Using base prefix '/usr/local/bin/../../../Library/Frameworks/Python.framework/Versions/3.5'
New python executable in /Users/amcgregor/.pre-commit/repofu57ylaa/py_env-default/bin/python3
Also creating executable in /Users/amcgregor/.pre-commit/repofu57ylaa/py_env-default/bin/python
ERROR: The executable /Users/amcgregor/.pre-commit/repofu57ylaa/py_env-default/bin/python3 is not functioning
ERROR: It thinks sys.prefix is '/Library/Frameworks/Python.framework/Versions/3.5' (should be '/Users/amcgregor/.pre-commit/repofu57ylaa/py_env-default')
ERROR: virtualenv is not compatible with this system or executable
Errors: (none)
|
pre_commit.util.CalledProcessError
|
def run(self, cmd, **kwargs):
self._create_path_if_not_exists()
replaced_cmd = [part.replace("{prefix}", self.prefix_dir) for part in cmd]
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
|
def run(self, cmd, **kwargs):
self._create_path_if_not_exists()
replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
|
https://github.com/pre-commit/pre-commit/issues/314
|
An unexpected error has occurred: IndexError: tuple index out of range
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 36, in error_handler
yield
File "/usr/local/lib/python2.7/site-packages/pre_commit/main.py", line 150, in main
return run(runner, args)
File "/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 212, in run
return _run_hooks(repo_hooks, args, write, environ)
File "/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 136, in _run_hooks
retval |= _run_single_hook(hook, repo, args, write, skips)
File "/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 89, in _run_single_hook
retcode, stdout, stderr = repo.run_hook(hook, filenames)
File "/usr/local/lib/python2.7/site-packages/pre_commit/repository.py", line 145, in run_hook
self.cmd_runner, hook, file_args,
File "/usr/local/lib/python2.7/site-packages/pre_commit/languages/script.py", line 23, in run_hook
encoding=None,
File "/usr/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 40, in run
replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)
File "/usr/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 11, in _replace_cmd
return [part.format(**kwargs) for part in cmd]
IndexError: tuple index out of range
|
IndexError
|
def _run_single_hook(hook, repo, args, write, skips=frozenset()):
filenames = get_filenames(args, hook["files"], hook["exclude"])
if hook["id"] in skips:
_print_user_skipped(hook, write, args)
return 0
elif not filenames:
_print_no_files_skipped(hook, write, args)
return 0
# Print the hook and the dots first in case the hook takes hella long to
# run.
write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))
sys.stdout.flush()
retcode, stdout, stderr = repo.run_hook(hook, filenames)
if retcode != hook["expected_return_value"]:
retcode = 1
print_color = color.RED
pass_fail = "Failed"
else:
retcode = 0
print_color = color.GREEN
pass_fail = "Passed"
write(color.format_color(pass_fail, print_color, args.color) + "\n")
if (stdout or stderr) and (retcode or args.verbose):
write("hookid: {0}\n".format(hook["id"]))
write("\n")
for output in (stdout, stderr):
assert type(output) is bytes, type(output)
if output.strip():
write(output.strip() + b"\n")
write("\n")
return retcode
|
def _run_single_hook(hook, repo, args, write, skips=frozenset()):
filenames = get_filenames(args, hook["files"], hook["exclude"])
if hook["id"] in skips:
_print_user_skipped(hook, write, args)
return 0
elif not filenames:
_print_no_files_skipped(hook, write, args)
return 0
# Print the hook and the dots first in case the hook takes hella long to
# run.
write(get_hook_message(_hook_msg_start(hook, args.verbose), end_len=6))
sys.stdout.flush()
retcode, stdout, stderr = repo.run_hook(hook, filenames)
if retcode != hook["expected_return_value"]:
retcode = 1
print_color = color.RED
pass_fail = "Failed"
else:
retcode = 0
print_color = color.GREEN
pass_fail = "Passed"
write(color.format_color(pass_fail, print_color, args.color) + "\n")
if (stdout or stderr) and (retcode or args.verbose):
write("hookid: {0}\n".format(hook["id"]))
write("\n")
for output in (stdout, stderr):
if output.strip():
write(output.strip() + "\n")
write("\n")
return retcode
|
https://github.com/pre-commit/pre-commit/issues/245
|
An unexpected error has occurred: UnicodeDecodeError: 'utf8' codec can't decode byte 0xbb in position 23: invalid start byte
Traceback (most recent call last):
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/main.py", line 144, in main
return run(runner, args)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 185, in run
return _run_hooks(repo_hooks, args, write, environ)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 117, in _run_hooks
retval |= _run_single_hook(hook, repo, args, write, skips)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 88, in _run_single_hook
retcode, stdout, stderr = repo.run_hook(hook, filenames)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/repository.py", line 123, in run_hook
self.cmd_runner, hook, file_args,
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/languages/script.py", line 19, in run_hook
retcode=None,
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 41, in run
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/util.py", line 153, in cmd_output
stdout = stdout.decode(encoding)
File "/usr/lib/python2.7/encodings/utf_8.py", line 16, in decode
return codecs.utf_8_decode(input, errors, True)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xbb in position 23: invalid start byte
|
UnicodeDecodeError
|
def run_hook(env, hook, file_args):
quoted_args = [pipes.quote(arg) for arg in hook["args"]]
return env.run(
# Use -s 4000 (slightly less than posix mandated minimum)
# This is to prevent "xargs: ... Bad file number" on windows
" ".join(["xargs", "-0", "-s4000", hook["entry"]] + quoted_args),
stdin=file_args_to_stdin(file_args),
retcode=None,
encoding=None,
)
|
def run_hook(env, hook, file_args):
quoted_args = [pipes.quote(arg) for arg in hook["args"]]
return env.run(
# Use -s 4000 (slightly less than posix mandated minimum)
# This is to prevent "xargs: ... Bad file number" on windows
" ".join(["xargs", "-0", "-s4000", hook["entry"]] + quoted_args),
stdin=file_args_to_stdin(file_args),
retcode=None,
)
|
https://github.com/pre-commit/pre-commit/issues/245
|
An unexpected error has occurred: UnicodeDecodeError: 'utf8' codec can't decode byte 0xbb in position 23: invalid start byte
Traceback (most recent call last):
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/main.py", line 144, in main
return run(runner, args)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 185, in run
return _run_hooks(repo_hooks, args, write, environ)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 117, in _run_hooks
retval |= _run_single_hook(hook, repo, args, write, skips)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 88, in _run_single_hook
retcode, stdout, stderr = repo.run_hook(hook, filenames)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/repository.py", line 123, in run_hook
self.cmd_runner, hook, file_args,
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/languages/script.py", line 19, in run_hook
retcode=None,
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 41, in run
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/util.py", line 153, in cmd_output
stdout = stdout.decode(encoding)
File "/usr/lib/python2.7/encodings/utf_8.py", line 16, in decode
return codecs.utf_8_decode(input, errors, True)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xbb in position 23: invalid start byte
|
UnicodeDecodeError
|
def run_hook(repo_cmd_runner, hook, file_args):
# For PCRE the entry is the regular expression to match
return repo_cmd_runner.run(
[
"xargs",
"-0",
"sh",
"-c",
# Grep usually returns 0 for matches, and nonzero for non-matches
# so we flip it here.
"! grep -H -n -P {0} $@".format(shell_escape(hook["entry"])),
"--",
],
stdin=file_args_to_stdin(file_args),
retcode=None,
encoding=None,
)
|
def run_hook(repo_cmd_runner, hook, file_args):
# For PCRE the entry is the regular expression to match
return repo_cmd_runner.run(
[
"xargs",
"-0",
"sh",
"-c",
# Grep usually returns 0 for matches, and nonzero for non-matches
# so we flip it here.
"! grep -H -n -P {0} $@".format(shell_escape(hook["entry"])),
"--",
],
stdin=file_args_to_stdin(file_args),
retcode=None,
)
|
https://github.com/pre-commit/pre-commit/issues/245
|
An unexpected error has occurred: UnicodeDecodeError: 'utf8' codec can't decode byte 0xbb in position 23: invalid start byte
Traceback (most recent call last):
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/main.py", line 144, in main
return run(runner, args)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 185, in run
return _run_hooks(repo_hooks, args, write, environ)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 117, in _run_hooks
retval |= _run_single_hook(hook, repo, args, write, skips)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 88, in _run_single_hook
retcode, stdout, stderr = repo.run_hook(hook, filenames)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/repository.py", line 123, in run_hook
self.cmd_runner, hook, file_args,
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/languages/script.py", line 19, in run_hook
retcode=None,
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 41, in run
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/util.py", line 153, in cmd_output
stdout = stdout.decode(encoding)
File "/usr/lib/python2.7/encodings/utf_8.py", line 16, in decode
return codecs.utf_8_decode(input, errors, True)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xbb in position 23: invalid start byte
|
UnicodeDecodeError
|
def run_hook(repo_cmd_runner, hook, file_args):
return repo_cmd_runner.run(
["xargs", "-0", "{{prefix}}{0}".format(hook["entry"])] + hook["args"],
# TODO: this is duplicated in pre_commit/languages/helpers.py
stdin=file_args_to_stdin(file_args),
retcode=None,
encoding=None,
)
|
def run_hook(repo_cmd_runner, hook, file_args):
return repo_cmd_runner.run(
["xargs", "-0", "{{prefix}}{0}".format(hook["entry"])] + hook["args"],
# TODO: this is duplicated in pre_commit/languages/helpers.py
stdin=file_args_to_stdin(file_args),
retcode=None,
)
|
https://github.com/pre-commit/pre-commit/issues/245
|
An unexpected error has occurred: UnicodeDecodeError: 'utf8' codec can't decode byte 0xbb in position 23: invalid start byte
Traceback (most recent call last):
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/main.py", line 144, in main
return run(runner, args)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 185, in run
return _run_hooks(repo_hooks, args, write, environ)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 117, in _run_hooks
retval |= _run_single_hook(hook, repo, args, write, skips)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 88, in _run_single_hook
retcode, stdout, stderr = repo.run_hook(hook, filenames)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/repository.py", line 123, in run_hook
self.cmd_runner, hook, file_args,
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/languages/script.py", line 19, in run_hook
retcode=None,
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 41, in run
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/util.py", line 153, in cmd_output
stdout = stdout.decode(encoding)
File "/usr/lib/python2.7/encodings/utf_8.py", line 16, in decode
return codecs.utf_8_decode(input, errors, True)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xbb in position 23: invalid start byte
|
UnicodeDecodeError
|
def run_hook(repo_cmd_runner, hook, file_args):
return repo_cmd_runner.run(
["xargs", "-0"] + shlex.split(hook["entry"]) + hook["args"],
stdin=file_args_to_stdin(file_args),
retcode=None,
encoding=None,
)
|
def run_hook(repo_cmd_runner, hook, file_args):
return repo_cmd_runner.run(
["xargs", "-0"] + shlex.split(hook["entry"]) + hook["args"],
stdin=file_args_to_stdin(file_args),
retcode=None,
)
|
https://github.com/pre-commit/pre-commit/issues/245
|
An unexpected error has occurred: UnicodeDecodeError: 'utf8' codec can't decode byte 0xbb in position 23: invalid start byte
Traceback (most recent call last):
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/main.py", line 144, in main
return run(runner, args)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 185, in run
return _run_hooks(repo_hooks, args, write, environ)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 117, in _run_hooks
retval |= _run_single_hook(hook, repo, args, write, skips)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 88, in _run_single_hook
retcode, stdout, stderr = repo.run_hook(hook, filenames)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/repository.py", line 123, in run_hook
self.cmd_runner, hook, file_args,
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/languages/script.py", line 19, in run_hook
retcode=None,
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 41, in run
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
File "/home/lucas/.local/lib/python2.7/site-packages/pre_commit/util.py", line 153, in cmd_output
stdout = stdout.decode(encoding)
File "/usr/lib/python2.7/encodings/utf_8.py", line 16, in decode
return codecs.utf_8_decode(input, errors, True)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xbb in position 23: invalid start byte
|
UnicodeDecodeError
|
def autoupdate(runner):
"""Auto-update the pre-commit config to the latest versions of repos."""
retv = 0
output_configs = []
changed = False
input_configs = load_config(
runner.config_file_path,
load_strategy=ordered_load,
)
for repo_config in input_configs:
if is_local_hooks(repo_config):
continue
sys.stdout.write("Updating {0}...".format(repo_config["repo"]))
sys.stdout.flush()
try:
new_repo_config = _update_repository(repo_config, runner)
except RepositoryCannotBeUpdatedError as error:
print(error.args[0])
output_configs.append(repo_config)
retv = 1
continue
if new_repo_config["sha"] != repo_config["sha"]:
changed = True
print(
"updating {0} -> {1}.".format(
repo_config["sha"],
new_repo_config["sha"],
)
)
output_configs.append(new_repo_config)
else:
print("already up to date.")
output_configs.append(repo_config)
if changed:
with open(runner.config_file_path, "w") as config_file:
config_file.write(
ordered_dump(
remove_defaults(output_configs, CONFIG_JSON_SCHEMA),
**C.YAML_DUMP_KWARGS,
)
)
return retv
|
def autoupdate(runner):
"""Auto-update the pre-commit config to the latest versions of repos."""
retv = 0
output_configs = []
changed = False
input_configs = load_config(
runner.config_file_path,
load_strategy=ordered_load,
)
for repo_config in input_configs:
sys.stdout.write("Updating {0}...".format(repo_config["repo"]))
sys.stdout.flush()
try:
new_repo_config = _update_repository(repo_config, runner)
except RepositoryCannotBeUpdatedError as error:
print(error.args[0])
output_configs.append(repo_config)
retv = 1
continue
if new_repo_config["sha"] != repo_config["sha"]:
changed = True
print(
"updating {0} -> {1}.".format(
repo_config["sha"],
new_repo_config["sha"],
)
)
output_configs.append(new_repo_config)
else:
print("already up to date.")
output_configs.append(repo_config)
if changed:
with open(runner.config_file_path, "w") as config_file:
config_file.write(
ordered_dump(
remove_defaults(output_configs, CONFIG_JSON_SCHEMA),
**C.YAML_DUMP_KWARGS,
)
)
return retv
|
https://github.com/pre-commit/pre-commit/issues/238
|
$ pre-commit autoupdate
Updating git@github.com:pre-commit/pre-commit-hooks...updating 9ce45609a92f648c87b42207410386fd69a5d1e5 -> cf550fcab3f12015f8676b8278b30e1a5bc10e70.
Updating git@github.com:pre-commit/pre-commit...updating 4352d45451296934bc17494073b82bcacca3205c -> 1c46446427ab0dfa6293221426b855420533ef8d.
Updating git@github.com:asottile/reorder_python_imports...updating aeda21eb7df6af8c9f6cd990abb086375c71c953 -> 3d86483455ab5bd06cc1069fdd5ac57be5463f10.
Updating local...An unexpected error has occurred: AttributeError: 'NoneType' object has no attribute 'repo_path'
Check the log at ~/.pre-commit/pre-commit.log
(venv-pre_commit)asottile@work:~/workspace/pre-commit$ cat ~/.pre-commit/pre-commit.log
An unexpected error has occurred: AttributeError: 'NoneType' object has no attribute 'repo_path'
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 142, in main
return autoupdate(runner)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/autoupdate.py", line 73, in autoupdate
new_repo_config = _update_repository(repo_config, runner)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/autoupdate.py", line 33, in _update_repository
with cwd(repo.repo_path_getter.repo_path):
AttributeError: 'NoneType' object has no attribute 'repo_path'
(venv-pre_commit)asottile@work:~/workspace/pre-commit$ git diff
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 397ee72..20393a7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,3 +20,10 @@
sha: aeda21eb7df6af8c9f6cd990abb086375c71c953
hooks:
- id: reorder-python-imports
+- repo: local
+ hooks:
+ - id: herp
+ name: Herp
+ entry: echo derp
+ language: system
+ files: ^$
|
AttributeError
|
def __init__(self, repo_config):
super(LocalRepository, self).__init__(repo_config, None)
|
def __init__(self, repo_config, repo_path_getter=None):
repo_path_getter = None
super(LocalRepository, self).__init__(repo_config, repo_path_getter)
|
https://github.com/pre-commit/pre-commit/issues/238
|
$ pre-commit autoupdate
Updating git@github.com:pre-commit/pre-commit-hooks...updating 9ce45609a92f648c87b42207410386fd69a5d1e5 -> cf550fcab3f12015f8676b8278b30e1a5bc10e70.
Updating git@github.com:pre-commit/pre-commit...updating 4352d45451296934bc17494073b82bcacca3205c -> 1c46446427ab0dfa6293221426b855420533ef8d.
Updating git@github.com:asottile/reorder_python_imports...updating aeda21eb7df6af8c9f6cd990abb086375c71c953 -> 3d86483455ab5bd06cc1069fdd5ac57be5463f10.
Updating local...An unexpected error has occurred: AttributeError: 'NoneType' object has no attribute 'repo_path'
Check the log at ~/.pre-commit/pre-commit.log
(venv-pre_commit)asottile@work:~/workspace/pre-commit$ cat ~/.pre-commit/pre-commit.log
An unexpected error has occurred: AttributeError: 'NoneType' object has no attribute 'repo_path'
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 142, in main
return autoupdate(runner)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/autoupdate.py", line 73, in autoupdate
new_repo_config = _update_repository(repo_config, runner)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/autoupdate.py", line 33, in _update_repository
with cwd(repo.repo_path_getter.repo_path):
AttributeError: 'NoneType' object has no attribute 'repo_path'
(venv-pre_commit)asottile@work:~/workspace/pre-commit$ git diff
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 397ee72..20393a7 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -20,3 +20,10 @@
sha: aeda21eb7df6af8c9f6cd990abb086375c71c953
hooks:
- id: reorder-python-imports
+- repo: local
+ hooks:
+ - id: herp
+ name: Herp
+ entry: echo derp
+ language: system
+ files: ^$
|
AttributeError
|
def main(argv=None):
argv = argv if argv is not None else sys.argv[1:]
argv = [five.to_text(arg) for arg in argv]
parser = argparse.ArgumentParser()
# http://stackoverflow.com/a/8521644/812183
parser.add_argument(
"-V",
"--version",
action="version",
version="%(prog)s {0}".format(
pkg_resources.get_distribution("pre-commit").version
),
)
subparsers = parser.add_subparsers(dest="command")
install_parser = subparsers.add_parser(
"install",
help="Install the pre-commit script.",
)
install_parser.add_argument(
"-f",
"--overwrite",
action="store_true",
help="Overwrite existing hooks / remove migration mode.",
)
install_parser.add_argument(
"--install-hooks",
action="store_true",
help=(
"Whether to install hook environments for all environments "
"in the config file."
),
)
install_parser.add_argument(
"-t",
"--hook-type",
choices=("pre-commit", "pre-push"),
default="pre-commit",
)
uninstall_parser = subparsers.add_parser(
"uninstall",
help="Uninstall the pre-commit script.",
)
uninstall_parser.add_argument(
"-t",
"--hook-type",
choices=("pre-commit", "pre-push"),
default="pre-commit",
)
subparsers.add_parser("clean", help="Clean out pre-commit files.")
subparsers.add_parser(
"autoupdate",
help="Auto-update pre-commit config to the latest repos' versions.",
)
run_parser = subparsers.add_parser("run", help="Run hooks.")
run_parser.add_argument("hook", nargs="?", help="A single hook-id to run")
run_parser.add_argument(
"--color",
default="auto",
type=color.use_color,
help="Whether to use color in output. Defaults to `auto`",
)
run_parser.add_argument(
"--no-stash",
default=False,
action="store_true",
help="Use this option to prevent auto stashing of unstaged files.",
)
run_parser.add_argument(
"--verbose",
"-v",
action="store_true",
default=False,
)
run_parser.add_argument(
"--origin",
"-o",
help='The origin branch"s commit_id when using `git push`',
)
run_parser.add_argument(
"--source",
"-s",
help='The remote branch"s commit_id when using `git push`',
)
run_parser.add_argument(
"--allow-unstaged-config",
default=False,
action="store_true",
help="Allow an unstaged config to be present. Note that this will"
"be stashed before parsing unless --no-stash is specified",
)
run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)
run_mutex_group.add_argument(
"--all-files",
"-a",
action="store_true",
default=False,
help="Run on all the files in the repo. Implies --no-stash.",
)
run_mutex_group.add_argument(
"--files",
nargs="*",
help="Specific filenames to run hooks on.",
)
help = subparsers.add_parser("help", help="Show help for a specific command.")
help.add_argument("help_cmd", nargs="?", help="Command to show help for.")
# Argparse doesn't really provide a way to use a `default` subparser
if len(argv) == 0:
argv = ["run"]
args = parser.parse_args(argv)
if args.command == "help":
if args.help_cmd:
parser.parse_args([args.help_cmd, "--help"])
else:
parser.parse_args(["--help"])
with error_handler():
runner = Runner.create()
if args.command == "install":
return install(
runner,
overwrite=args.overwrite,
hooks=args.install_hooks,
hook_type=args.hook_type,
)
elif args.command == "uninstall":
return uninstall(runner, hook_type=args.hook_type)
elif args.command == "clean":
return clean(runner)
elif args.command == "autoupdate":
return autoupdate(runner)
elif args.command == "run":
return run(runner, args)
else:
raise NotImplementedError(
"Command {0} not implemented.".format(args.command)
)
raise AssertionError(
"Command {0} failed to exit with a returncode".format(args.command)
)
|
def main(argv=None):
argv = argv if argv is not None else sys.argv[1:]
parser = argparse.ArgumentParser()
# http://stackoverflow.com/a/8521644/812183
parser.add_argument(
"-V",
"--version",
action="version",
version="%(prog)s {0}".format(
pkg_resources.get_distribution("pre-commit").version
),
)
subparsers = parser.add_subparsers(dest="command")
install_parser = subparsers.add_parser(
"install",
help="Install the pre-commit script.",
)
install_parser.add_argument(
"-f",
"--overwrite",
action="store_true",
help="Overwrite existing hooks / remove migration mode.",
)
install_parser.add_argument(
"--install-hooks",
action="store_true",
help=(
"Whether to install hook environments for all environments "
"in the config file."
),
)
install_parser.add_argument(
"-t",
"--hook-type",
choices=("pre-commit", "pre-push"),
default="pre-commit",
)
uninstall_parser = subparsers.add_parser(
"uninstall",
help="Uninstall the pre-commit script.",
)
uninstall_parser.add_argument(
"-t",
"--hook-type",
choices=("pre-commit", "pre-push"),
default="pre-commit",
)
subparsers.add_parser("clean", help="Clean out pre-commit files.")
subparsers.add_parser(
"autoupdate",
help="Auto-update pre-commit config to the latest repos' versions.",
)
run_parser = subparsers.add_parser("run", help="Run hooks.")
run_parser.add_argument("hook", nargs="?", help="A single hook-id to run")
run_parser.add_argument(
"--color",
default="auto",
type=color.use_color,
help="Whether to use color in output. Defaults to `auto`",
)
run_parser.add_argument(
"--no-stash",
default=False,
action="store_true",
help="Use this option to prevent auto stashing of unstaged files.",
)
run_parser.add_argument(
"--verbose",
"-v",
action="store_true",
default=False,
)
run_parser.add_argument(
"--origin",
"-o",
help='The origin branch"s commit_id when using `git push`',
)
run_parser.add_argument(
"--source",
"-s",
help='The remote branch"s commit_id when using `git push`',
)
run_parser.add_argument(
"--allow-unstaged-config",
default=False,
action="store_true",
help="Allow an unstaged config to be present. Note that this will"
"be stashed before parsing unless --no-stash is specified",
)
run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)
run_mutex_group.add_argument(
"--all-files",
"-a",
action="store_true",
default=False,
help="Run on all the files in the repo. Implies --no-stash.",
)
run_mutex_group.add_argument(
"--files",
nargs="*",
help="Specific filenames to run hooks on.",
)
help = subparsers.add_parser("help", help="Show help for a specific command.")
help.add_argument("help_cmd", nargs="?", help="Command to show help for.")
# Argparse doesn't really provide a way to use a `default` subparser
if len(argv) == 0:
argv = ["run"]
args = parser.parse_args(argv)
if args.command == "help":
if args.help_cmd:
parser.parse_args([args.help_cmd, "--help"])
else:
parser.parse_args(["--help"])
with error_handler():
runner = Runner.create()
if args.command == "install":
return install(
runner,
overwrite=args.overwrite,
hooks=args.install_hooks,
hook_type=args.hook_type,
)
elif args.command == "uninstall":
return uninstall(runner, hook_type=args.hook_type)
elif args.command == "clean":
return clean(runner)
elif args.command == "autoupdate":
return autoupdate(runner)
elif args.command == "run":
return run(runner, args)
else:
raise NotImplementedError(
"Command {0} not implemented.".format(args.command)
)
raise AssertionError(
"Command {0} failed to exit with a returncode".format(args.command)
)
|
https://github.com/pre-commit/pre-commit/issues/207
|
$ pre-commit run ☃
An unexpected error has occurred: UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)
Check the log at ~/.pre-commit/pre-commit.log
$ cat ~/.pre-commit/pre-commit.log
An unexpected error has occurred: UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 129, in main
return run(runner, args)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 163, in run
return _run_hook(runner, args, write=write)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 133, in _run_hook
write('No hook with id `{0}`\n'.format(hook_id))
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)
|
UnicodeDecodeError
|
def sys_stdout_write_wrapper(s, stream=stdout_byte_stream):
stream.write(five.to_bytes(s))
|
def sys_stdout_write_wrapper(s, stream=sys.stdout):
"""Python 2.6 chokes on unicode being passed to sys.stdout.write.
This is an adapter because PY2 is ok with bytes and PY3 requires text.
"""
assert type(s) is five.text
if five.PY2: # pragma: no cover (PY2)
s = s.encode("UTF-8")
stream.write(s)
|
https://github.com/pre-commit/pre-commit/issues/207
|
$ pre-commit run ☃
An unexpected error has occurred: UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)
Check the log at ~/.pre-commit/pre-commit.log
$ cat ~/.pre-commit/pre-commit.log
An unexpected error has occurred: UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 129, in main
return run(runner, args)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 163, in run
return _run_hook(runner, args, write=write)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 133, in _run_hook
write('No hook with id `{0}`\n'.format(hook_id))
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)
|
UnicodeDecodeError
|
def run(runner, args, write=sys_stdout_write_wrapper, environ=os.environ):
# Set up our logging handler
logger.addHandler(LoggingHandler(args.color, write=write))
logger.setLevel(logging.INFO)
# Check if we have unresolved merge conflict files and fail fast.
if _has_unmerged_paths(runner):
logger.error("Unmerged files. Resolve before committing.")
return 1
if args.no_stash or args.all_files:
ctx = noop_context()
else:
ctx = staged_files_only(runner.cmd_runner)
with ctx:
if args.hook:
return _run_hook(runner, args, write=write)
else:
return _run_hooks(runner, args, write=write, environ=environ)
|
def run(runner, args, write=sys.stdout.write, environ=os.environ):
# Set up our logging handler
logger.addHandler(LoggingHandler(args.color, write=write))
logger.setLevel(logging.INFO)
# Check if we have unresolved merge conflict files and fail fast.
if _has_unmerged_paths(runner):
logger.error("Unmerged files. Resolve before committing.")
return 1
if args.no_stash or args.all_files:
ctx = noop_context()
else:
ctx = staged_files_only(runner.cmd_runner)
with ctx:
if args.hook:
return _run_hook(runner, args, write=write)
else:
return _run_hooks(runner, args, write=write, environ=environ)
|
https://github.com/pre-commit/pre-commit/issues/161
|
$ pre-commit run fixmyjs
fixmyjs............................................................................................................................................................................................Failed
hookid: fixmyjs
Traceback (most recent call last):
File "virtualenv_run/bin/pre-commit", line 14, in <module>
sys.exit(main())
File "virtualenv_run/lib/python2.6/site-packages/pre_commit/util.py", line 41, in wrapper
return func(argv)
File "virtualenv_run/lib/python2.6/site-packages/pre_commit/main.py", line 99, in main
return run(runner, args)
File "virtualenv_run/lib/python2.6/site-packages/pre_commit/commands/run.py", line 144, in run
return _run_hook(runner, args, write=write)
File "virtualenv_run/lib/python2.6/site-packages/pre_commit/commands/run.py", line 116, in _run_hook
return _run_single_hook(runner, repo, hook_id, args, write=write)
File "virtualenv_run/lib/python2.6/site-packages/pre_commit/commands/run.py", line 91, in _run_single_hook
write(output.strip() + '\n')
UnicodeEncodeError: 'ascii' codec can't encode character u'\u2713' in position 0: ordinal not in range(128)
|
UnicodeEncodeError
|
def staged_files_only(cmd_runner):
"""Clear any unstaged changes from the git working directory inside this
context.
Args:
cmd_runner - PrefixedCommandRunner
"""
# Determine if there are unstaged files
retcode, diff_stdout, _ = cmd_runner.run(
["git", "diff", "--ignore-submodules", "--binary", "--exit-code"],
retcode=None,
)
if retcode and diff_stdout.strip():
patch_filename = cmd_runner.path("patch{0}".format(int(time.time())))
logger.warning("Unstaged files detected.")
logger.info(
"Stashing unstaged files to {0}.".format(patch_filename),
)
# Save the current unstaged changes as a patch
with io.open(patch_filename, "w", encoding="utf-8") as patch_file:
patch_file.write(diff_stdout)
# Clear the working directory of unstaged changes
cmd_runner.run(["git", "checkout", "--", "."])
try:
yield
finally:
# Try to apply the patch we saved
try:
cmd_runner.run(["git", "apply", patch_filename])
except CalledProcessError:
logger.warning(
"Stashed changes conflicted with hook auto-fixes... "
"Rolling back fixes..."
)
# We failed to apply the patch, presumably due to fixes made
# by hooks.
# Roll back the changes made by hooks.
cmd_runner.run(["git", "checkout", "--", "."])
cmd_runner.run(["git", "apply", patch_filename])
logger.info("Restored changes from {0}.".format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
# special
yield
|
def staged_files_only(cmd_runner):
"""Clear any unstaged changes from the git working directory inside this
context.
Args:
cmd_runner - PrefixedCommandRunner
"""
# Determine if there are unstaged files
retcode, diff_stdout, _ = cmd_runner.run(
["git", "diff", "--ignore-submodules", "--binary", "--exit-code"],
retcode=None,
)
if retcode and diff_stdout.strip():
patch_filename = cmd_runner.path("patch{0}".format(int(time.time())))
logger.warning("Unstaged files detected.")
logger.info(
"Stashing unstaged files to {0}.".format(patch_filename),
)
# Save the current unstaged changes as a patch
with open(patch_filename, "w") as patch_file:
patch_file.write(diff_stdout)
# Clear the working directory of unstaged changes
cmd_runner.run(["git", "checkout", "--", "."])
try:
yield
finally:
# Try to apply the patch we saved
try:
cmd_runner.run(["git", "apply", patch_filename])
except CalledProcessError:
logger.warning(
"Stashed changes conflicted with hook auto-fixes... "
"Rolling back fixes..."
)
# We failed to apply the patch, presumably due to fixes made
# by hooks.
# Roll back the changes made by hooks.
cmd_runner.run(["git", "checkout", "--", "."])
cmd_runner.run(["git", "apply", patch_filename])
logger.info("Restored changes from {0}.".format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
# special
yield
|
https://github.com/pre-commit/pre-commit/issues/85
|
$ pre-commit
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to .../.pre-commit-files/patch1397853050.
Traceback (most recent call last):
File ".../bin/pre-commit", line 9, in <module>
load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()
File ".../lib/python2.6/site-packages/pre_commit/util.py", line 52, in wrapper
return func(argv)
File ".../lib/python2.6/site-packages/pre_commit/run.py", line 59, in run
return commands.run(runner, args)
File ".../lib/python2.6/site-packages/pre_commit/commands.py", line 254, in run
with ctx:
File "/usr/lib64/python2.6/contextlib.py", line 16, in __enter__
return self.gen.next()
File ".../lib/python2.6/site-packages/pre_commit/staged_files_only.py", line 32, in staged_files_only
patch_file.write(diff_stdout)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xfc' in position 3795: ordinal not in range(128)
|
UnicodeEncodeError
|
def run(runner, args, write=sys.stdout.write):
# Set up our logging handler
logger.addHandler(LoggingHandler(args.color, write=write))
logger.setLevel(logging.INFO)
# Check if we have unresolved merge conflict files and fail fast.
if _has_unmerged_paths(runner):
logger.error("Unmerged files. Resolve before committing.")
return 1
if args.no_stash or args.all_files:
ctx = noop_context()
else:
ctx = staged_files_only(runner.cmd_runner)
with ctx:
if args.hook:
return _run_hook(runner, args.hook, args, write=write)
else:
return _run_hooks(runner, args, write=write)
|
def run(runner, args, write=sys.stdout.write):
# Set up our logging handler
logger.addHandler(LoggingHandler(args.color, write=write))
logger.setLevel(logging.INFO)
if args.no_stash or args.all_files:
ctx = noop_context()
else:
ctx = staged_files_only(runner.cmd_runner)
with ctx:
if args.hook:
return _run_hook(runner, args.hook, args, write=write)
else:
return _run_hooks(runner, args, write=write)
|
https://github.com/pre-commit/pre-commit/issues/82
|
$ git diff --exit-code
diff --cc foo.txt
index 8ff26e7,c148433..0000000
--- a/foo.txt
+++ b/foo.txt
@@@ -1,4 -1,5 +1,11 @@@
asdf
++<<<<<<< HEAD
+fdsa
+yeah
+yeah
++=======
+ asdf
+ asdf
+ asdf
+
++>>>>>>> derp
diff --git a/git_code_debt/generate.py b/git_code_debt/generate.py
index 12ceec6..967506e 100644
--- a/git_code_debt/generate.py
+++ b/git_code_debt/generate.py
@@ -12,6 +12,7 @@ from git_code_debt.logic import get_previous_sha
from git_code_debt.logic import insert_metric_values
from git_code_debt.repo_parser import RepoParser
+
def get_metrics(diff, metric_parsers):
def get_all_metrics(file_diff_stats):
for metric_parser_cls in metric_parsers:
(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ echo $?
1
(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$ pre-commit
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to /tmp/git-code-debt/.pre-commit-files/patch1397455577.
Traceback (most recent call last):
File "/tmp/git-code-debt/py_env/bin/pre-commit", line 9, in <module>
load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()
File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/util.py", line 52, in wrapper
return func(argv)
File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/run.py", line 59, in run
return commands.run(runner, args)
File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/commands.py", line 242, in run
with ctx:
File "/usr/lib/python2.7/contextlib.py", line 17, in __enter__
return self.gen.next()
File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/staged_files_only.py", line 35, in staged_files_only
cmd_runner.run(['git', 'checkout', '--', '.'])
File "/tmp/git-code-debt/py_env/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 77, in run
returncode, replaced_cmd, retcode, output=(stdout, stderr),
pre_commit.prefixed_command_runner.CalledProcessError: Command: ['git', 'checkout', '--', '.']
Return code: 1
Expected return code: 0
Output: (u'', u"error: path 'foo.txt' is unmerged\n")
(py_env)[anthony@anthony-VirtualBox git-code-debt (herp|MERGING)]$
|
pre_commit.prefixed_command_runner.CalledProcessError
|
def staged_files_only(cmd_runner):
"""Clear any unstaged changes from the git working directory inside this
context.
Args:
cmd_runner - PrefixedCommandRunner
"""
# Determine if there are unstaged files
retcode, diff_stdout, _ = cmd_runner.run(
["git", "diff", "--ignore-submodules", "--binary", "--exit-code"],
retcode=None,
)
if retcode and diff_stdout.strip():
patch_filename = cmd_runner.path("patch{0}".format(int(time.time())))
logger.warning("Unstaged files detected.")
logger.info(
"Stashing unstaged files to {0}.".format(patch_filename),
)
# Save the current unstaged changes as a patch
with open(patch_filename, "w") as patch_file:
patch_file.write(diff_stdout)
# Clear the working directory of unstaged changes
cmd_runner.run(["git", "checkout", "--", "."])
try:
yield
finally:
# Try to apply the patch we saved
try:
cmd_runner.run(["git", "apply", patch_filename])
except CalledProcessError:
logger.warning(
"Stashed changes conflicted with hook auto-fixes... "
"Rolling back fixes..."
)
# We failed to apply the patch, presumably due to fixes made
# by hooks.
# Roll back the changes made by hooks.
cmd_runner.run(["git", "checkout", "--", "."])
cmd_runner.run(["git", "apply", patch_filename])
logger.info("Restored changes from {0}.".format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
# special
yield
|
def staged_files_only(cmd_runner):
"""Clear any unstaged changes from the git working directory inside this
context.
Args:
cmd_runner - PrefixedCommandRunner
"""
# Determine if there are unstaged files
retcode, _, _ = cmd_runner.run(
["git", "diff-files", "--quiet"],
retcode=None,
)
if retcode:
patch_filename = cmd_runner.path("patch{0}".format(int(time.time())))
logger.warning("Unstaged files detected.")
logger.info(
"Stashing unstaged files to {0}.".format(patch_filename),
)
# Save the current unstaged changes as a patch
with open(patch_filename, "w") as patch_file:
cmd_runner.run(["git", "diff", "--binary"], stdout=patch_file)
# Clear the working directory of unstaged changes
cmd_runner.run(["git", "checkout", "--", "."])
try:
yield
finally:
# Try to apply the patch we saved
try:
cmd_runner.run(["git", "apply", patch_filename])
except CalledProcessError:
logger.warning(
"Stashed changes conflicted with hook auto-fixes... "
"Rolling back fixes..."
)
# We failed to apply the patch, presumably due to fixes made
# by hooks.
# Roll back the changes made by hooks.
cmd_runner.run(["git", "checkout", "--", "."])
cmd_runner.run(["git", "apply", patch_filename])
logger.info("Restored changes from {0}.".format(patch_filename))
else:
# There weren't any staged files so we don't need to do anything
# special
yield
|
https://github.com/pre-commit/pre-commit/issues/76
|
$ pre-commit
[WARNING] Unstaged files detected.
Stashing unstaged files to /home/anthony/workspace/pre-commit/.pre-commit-files/patch1397370090.
Trim Trailing Whitespace............................................Passed
Fix End of Files....................................................Passed
Check Yaml..........................................................Passed
Debug Statements (Python)...........................................Passed
Tests should end in _test.py........................................Passed
Pyflakes............................................................Passed
Validate Pre-Commit Config..........................................Passed
Validate Pre-Commit Manifest........................................Passed
[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...
Traceback (most recent call last):
File "/home/anthony/workspace/pre-commit/venv-pre_commit/bin/pre-commit", line 9, in <module>
load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()
File "/home/anthony/workspace/pre-commit/pre_commit/util.py", line 52, in wrapper
return func(argv)
File "/home/anthony/workspace/pre-commit/pre_commit/run.py", line 143, in run
return _run(runner, args)
File "/home/anthony/workspace/pre-commit/pre_commit/run.py", line 95, in _run
return run_hooks(runner, args)
File "/usr/lib/python2.7/contextlib.py", line 24, in __exit__
self.gen.next()
File "/home/anthony/workspace/pre-commit/pre_commit/staged_files_only.py", line 51, in staged_files_only
cmd_runner.run(['git', 'apply', patch_filename])
File "/home/anthony/workspace/pre-commit/pre_commit/prefixed_command_runner.py", line 67, in run
returncode, replaced_cmd, retcode, output=(stdout, stderr),
pre_commit.prefixed_command_runner.CalledProcessError: Command: ['git', 'apply', '/home/anthony/workspace/pre-commit/.pre-commit-files/patch1397370090']
Return code: 128
Expected return code: 0
Output: ('', 'fatal: unrecognized input\n')
$ git status
# On branch rebuild_venv
# Changes to be committed:
# (use "git reset HEAD <file>..." to unstage)
#
# modified: .gitignore
# modified: Makefile
#
|
pre_commit.prefixed_command_runner.CalledProcessError
|
def thumbnail(self, size, resample=BICUBIC, reducing_gap=2.0):
"""
Make this image into a thumbnail. This method modifies the
image to contain a thumbnail version of itself, no larger than
the given size. This method calculates an appropriate thumbnail
size to preserve the aspect of the image, calls the
:py:meth:`~PIL.Image.Image.draft` method to configure the file reader
(where applicable), and finally resizes the image.
Note that this function modifies the :py:class:`~PIL.Image.Image`
object in place. If you need to use the full resolution image as well,
apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
image.
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
:py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.LANCZOS`.
If omitted, it defaults to :py:attr:`PIL.Image.BICUBIC`.
(was :py:attr:`PIL.Image.NEAREST` prior to version 2.5.0).
See: :ref:`concept-filters`.
:param reducing_gap: Apply optimization by resizing the image
in two steps. First, reducing the image by integer times
using :py:meth:`~PIL.Image.Image.reduce` or
:py:meth:`~PIL.Image.Image.draft` for JPEG images.
Second, resizing using regular resampling. The last step
changes size no less than by ``reducing_gap`` times.
``reducing_gap`` may be None (no first step is performed)
or should be greater than 1.0. The bigger ``reducing_gap``,
the closer the result to the fair resampling.
The smaller ``reducing_gap``, the faster resizing.
With ``reducing_gap`` greater or equal to 3.0, the result is
indistinguishable from fair resampling in most cases.
The default value is 2.0 (very close to fair resampling
while still being faster in many cases).
:returns: None
"""
x, y = map(math.floor, size)
if x >= self.width and y >= self.height:
return
def round_aspect(number, key):
return max(min(math.floor(number), math.ceil(number), key=key), 1)
# preserve aspect ratio
aspect = self.width / self.height
if x / y >= aspect:
x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
else:
y = round_aspect(x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n))
size = (x, y)
box = None
if reducing_gap is not None:
res = self.draft(None, (size[0] * reducing_gap, size[1] * reducing_gap))
if res is not None:
box = res[1]
if self.size != size:
im = self.resize(size, resample, box=box, reducing_gap=reducing_gap)
self.im = im.im
self._size = size
self.mode = self.im.mode
self.readonly = 0
self.pyaccess = None
|
def thumbnail(self, size, resample=BICUBIC, reducing_gap=2.0):
"""
Make this image into a thumbnail. This method modifies the
image to contain a thumbnail version of itself, no larger than
the given size. This method calculates an appropriate thumbnail
size to preserve the aspect of the image, calls the
:py:meth:`~PIL.Image.Image.draft` method to configure the file reader
(where applicable), and finally resizes the image.
Note that this function modifies the :py:class:`~PIL.Image.Image`
object in place. If you need to use the full resolution image as well,
apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
image.
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
:py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.LANCZOS`.
If omitted, it defaults to :py:attr:`PIL.Image.BICUBIC`.
(was :py:attr:`PIL.Image.NEAREST` prior to version 2.5.0).
See: :ref:`concept-filters`.
:param reducing_gap: Apply optimization by resizing the image
in two steps. First, reducing the image by integer times
using :py:meth:`~PIL.Image.Image.reduce` or
:py:meth:`~PIL.Image.Image.draft` for JPEG images.
Second, resizing using regular resampling. The last step
changes size no less than by ``reducing_gap`` times.
``reducing_gap`` may be None (no first step is performed)
or should be greater than 1.0. The bigger ``reducing_gap``,
the closer the result to the fair resampling.
The smaller ``reducing_gap``, the faster resizing.
With ``reducing_gap`` greater or equal to 3.0, the result is
indistinguishable from fair resampling in most cases.
The default value is 2.0 (very close to fair resampling
while still being faster in many cases).
:returns: None
"""
x, y = map(math.floor, size)
if x >= self.width and y >= self.height:
return
def round_aspect(number, key):
return max(min(math.floor(number), math.ceil(number), key=key), 1)
# preserve aspect ratio
aspect = self.width / self.height
if x / y >= aspect:
x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y))
else:
y = round_aspect(x / aspect, key=lambda n: abs(aspect - x / n))
size = (x, y)
box = None
if reducing_gap is not None:
res = self.draft(None, (size[0] * reducing_gap, size[1] * reducing_gap))
if res is not None:
box = res[1]
if self.size != size:
im = self.resize(size, resample, box=box, reducing_gap=reducing_gap)
self.im = im.im
self._size = size
self.mode = self.im.mode
self.readonly = 0
self.pyaccess = None
|
https://github.com/python-pillow/Pillow/issues/4624
|
Traceback (most recent call last):
File "<interactive input>", line 1, in <module>
File "C:\Python38\lib\site-packages\PIL\Image.py", line 2279, in thumbnail
y = round_aspect(x / aspect, key=lambda n: abs(aspect - x / n))
File "C:\Python38\lib\site-packages\PIL\Image.py", line 2272, in round_aspect
return max(min(math.floor(number), math.ceil(number), key=key), 1)
File "C:\Python38\lib\site-packages\PIL\Image.py", line 2279, in <lambda>
y = round_aspect(x / aspect, key=lambda n: abs(aspect - x / n))
ZeroDivisionError: division by zero
|
ZeroDivisionError
|
def _open(self):
if self.fp.read(8) != _MAGIC:
raise SyntaxError("not a PNG file")
self.__fp = self.fp
self.__frame = 0
#
# Parse headers up to the first IDAT or fDAT chunk
self.png = PngStream(self.fp)
while True:
#
# get next chunk
cid, pos, length = self.png.read()
try:
s = self.png.call(cid, pos, length)
except EOFError:
break
except AttributeError:
logger.debug("%r %s %s (unknown)", cid, pos, length)
s = ImageFile._safe_read(self.fp, length)
self.png.crc(cid, s)
#
# Copy relevant attributes from the PngStream. An alternative
# would be to let the PngStream class modify these attributes
# directly, but that introduces circular references which are
# difficult to break if things go wrong in the decoder...
# (believe me, I've tried ;-)
self.mode = self.png.im_mode
self._size = self.png.im_size
self.info = self.png.im_info
self._text = None
self.tile = self.png.im_tile
self.custom_mimetype = self.png.im_custom_mimetype
self.n_frames = self.png.im_n_frames or 1
self.default_image = self.info.get("default_image", False)
if self.png.im_palette:
rawmode, data = self.png.im_palette
self.palette = ImagePalette.raw(rawmode, data)
if cid == b"fdAT":
self.__prepare_idat = length - 4
else:
self.__prepare_idat = length # used by load_prepare()
if self.png.im_n_frames is not None:
self._close_exclusive_fp_after_loading = False
self.png.save_rewind()
self.__rewind_idat = self.__prepare_idat
self.__rewind = self.__fp.tell()
if self.default_image:
# IDAT chunk contains default image and not first animation frame
self.n_frames += 1
self._seek(0)
self.is_animated = self.n_frames > 1
|
def _open(self):
if self.fp.read(8) != _MAGIC:
raise SyntaxError("not a PNG file")
self.__fp = self.fp
self.__frame = 0
#
# Parse headers up to the first IDAT or fDAT chunk
self.png = PngStream(self.fp)
while True:
#
# get next chunk
cid, pos, length = self.png.read()
try:
s = self.png.call(cid, pos, length)
except EOFError:
break
except AttributeError:
logger.debug("%r %s %s (unknown)", cid, pos, length)
s = ImageFile._safe_read(self.fp, length)
self.png.crc(cid, s)
#
# Copy relevant attributes from the PngStream. An alternative
# would be to let the PngStream class modify these attributes
# directly, but that introduces circular references which are
# difficult to break if things go wrong in the decoder...
# (believe me, I've tried ;-)
self.mode = self.png.im_mode
self._size = self.png.im_size
self.info = self.png.im_info
self._text = None
self.tile = self.png.im_tile
self.custom_mimetype = self.png.im_custom_mimetype
self._n_frames = self.png.im_n_frames
self.default_image = self.info.get("default_image", False)
if self.png.im_palette:
rawmode, data = self.png.im_palette
self.palette = ImagePalette.raw(rawmode, data)
if cid == b"fdAT":
self.__prepare_idat = length - 4
else:
self.__prepare_idat = length # used by load_prepare()
if self._n_frames is not None:
self._close_exclusive_fp_after_loading = False
self.png.save_rewind()
self.__rewind_idat = self.__prepare_idat
self.__rewind = self.__fp.tell()
if self.default_image:
# IDAT chunk contains default image and not first animation frame
self._n_frames += 1
self._seek(0)
|
https://github.com/python-pillow/Pillow/issues/4518
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/miniconda/envs/sunpy/lib/python3.8/site-packages/skimage/data/__init__.py", line 109, in camera
return _load("camera.png")
File "/opt/miniconda/envs/sunpy/lib/python3.8/site-packages/skimage/data/__init__.py", line 96, in _load
return imread(_os.path.join(data_dir, f), plugin='pil', as_gray=as_gray)
File "/opt/miniconda/envs/sunpy/lib/python3.8/site-packages/skimage/io/_io.py", line 48, in imread
img = call_plugin('imread', fname, plugin=plugin, **plugin_args)
File "/opt/miniconda/envs/sunpy/lib/python3.8/site-packages/skimage/io/manage_plugins.py", line 210, in call_plugin
return func(*args, **kwargs)
File "/opt/miniconda/envs/sunpy/lib/python3.8/site-packages/skimage/io/_plugins/pil_plugin.py", line 36, in imread
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
File "/opt/miniconda/envs/sunpy/lib/python3.8/site-packages/skimage/io/_plugins/pil_plugin.py", line 66, in pil_to_ndarray
image.seek(i)
File "/opt/miniconda/envs/sunpy/lib/python3.8/site-packages/PIL/PngImagePlugin.py", line 748, in seek
self._seek(f)
File "/opt/miniconda/envs/sunpy/lib/python3.8/site-packages/PIL/PngImagePlugin.py", line 791, in _seek
cid, pos, length = self.png.read()
AttributeError: 'NoneType' object has no attribute 'read'
|
AttributeError
|
def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)):
"""
Returns a sized and cropped version of the image, cropped to the
requested aspect ratio and size.
This function was contributed by Kevin Cazabon.
:param image: The image to size and crop.
:param size: The requested output size in pixels, given as a
(width, height) tuple.
:param method: What resampling method to use. Default is
:py:attr:`PIL.Image.NEAREST`.
:param bleed: Remove a border around the outside of the image from all
four edges. The value is a decimal percentage (use 0.01 for
one percent). The default value is 0 (no border).
Cannot be greater than or equal to 0.5.
:param centering: Control the cropping position. Use (0.5, 0.5) for
center cropping (e.g. if cropping the width, take 50% off
of the left side, and therefore 50% off the right side).
(0.0, 0.0) will crop from the top left corner (i.e. if
cropping the width, take all of the crop off of the right
side, and if cropping the height, take all of it off the
bottom). (1.0, 0.0) will crop from the bottom left
corner, etc. (i.e. if cropping the width, take all of the
crop off the left side, and if cropping the height take
none from the top, and therefore all off the bottom).
:return: An image.
"""
# by Kevin Cazabon, Feb 17/2000
# kevin@cazabon.com
# http://www.cazabon.com
# ensure centering is mutable
centering = list(centering)
if not 0.0 <= centering[0] <= 1.0:
centering[0] = 0.5
if not 0.0 <= centering[1] <= 1.0:
centering[1] = 0.5
if not 0.0 <= bleed < 0.5:
bleed = 0.0
# calculate the area to use for resizing and cropping, subtracting
# the 'bleed' around the edges
# number of pixels to trim off on Top and Bottom, Left and Right
bleed_pixels = (bleed * image.size[0], bleed * image.size[1])
live_size = (
image.size[0] - bleed_pixels[0] * 2,
image.size[1] - bleed_pixels[1] * 2,
)
# calculate the aspect ratio of the live_size
live_size_ratio = float(live_size[0]) / live_size[1]
# calculate the aspect ratio of the output image
output_ratio = float(size[0]) / size[1]
# figure out if the sides or top/bottom will be cropped off
if live_size_ratio == output_ratio:
# live_size is already the needed ratio
crop_width = live_size[0]
crop_height = live_size[1]
elif live_size_ratio >= output_ratio:
# live_size is wider than what's needed, crop the sides
crop_width = output_ratio * live_size[1]
crop_height = live_size[1]
else:
# live_size is taller than what's needed, crop the top and bottom
crop_width = live_size[0]
crop_height = live_size[0] / output_ratio
# make the crop
crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering[0]
crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering[1]
crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height)
# resize the image and return it
return image.resize(size, method, box=crop)
|
def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)):
"""
Returns a sized and cropped version of the image, cropped to the
requested aspect ratio and size.
This function was contributed by Kevin Cazabon.
:param image: The image to size and crop.
:param size: The requested output size in pixels, given as a
(width, height) tuple.
:param method: What resampling method to use. Default is
:py:attr:`PIL.Image.NEAREST`.
:param bleed: Remove a border around the outside of the image from all
four edges. The value is a decimal percentage (use 0.01 for
one percent). The default value is 0 (no border).
Cannot be greater than or equal to 0.5.
:param centering: Control the cropping position. Use (0.5, 0.5) for
center cropping (e.g. if cropping the width, take 50% off
of the left side, and therefore 50% off the right side).
(0.0, 0.0) will crop from the top left corner (i.e. if
cropping the width, take all of the crop off of the right
side, and if cropping the height, take all of it off the
bottom). (1.0, 0.0) will crop from the bottom left
corner, etc. (i.e. if cropping the width, take all of the
crop off the left side, and if cropping the height take
none from the top, and therefore all off the bottom).
:return: An image.
"""
# by Kevin Cazabon, Feb 17/2000
# kevin@cazabon.com
# http://www.cazabon.com
# ensure centering is mutable
centering = list(centering)
if not 0.0 <= centering[0] <= 1.0:
centering[0] = 0.5
if not 0.0 <= centering[1] <= 1.0:
centering[1] = 0.5
if not 0.0 <= bleed < 0.5:
bleed = 0.0
# calculate the area to use for resizing and cropping, subtracting
# the 'bleed' around the edges
# number of pixels to trim off on Top and Bottom, Left and Right
bleed_pixels = (bleed * image.size[0], bleed * image.size[1])
live_size = (
image.size[0] - bleed_pixels[0] * 2,
image.size[1] - bleed_pixels[1] * 2,
)
# calculate the aspect ratio of the live_size
live_size_ratio = float(live_size[0]) / live_size[1]
# calculate the aspect ratio of the output image
output_ratio = float(size[0]) / size[1]
# figure out if the sides or top/bottom will be cropped off
if live_size_ratio >= output_ratio:
# live_size is wider than what's needed, crop the sides
crop_width = output_ratio * live_size[1]
crop_height = live_size[1]
else:
# live_size is taller than what's needed, crop the top and bottom
crop_width = live_size[0]
crop_height = live_size[0] / output_ratio
# make the crop
crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering[0]
crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering[1]
crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height)
# resize the image and return it
return image.resize(size, method, box=crop)
|
https://github.com/python-pillow/Pillow/issues/4087
|
Traceback (most recent call last):
File "example.py", line 6, in <module>
PIL.ImageOps.fit(img, (600, 453), PIL.Image.ANTIALIAS)
File ".../lib/python3.7/site-packages/PIL/ImageOps.py", line 445, in fit
return image.resize(size, method, box=crop)
File ".../lib/python3.7/site-packages/PIL/Image.py", line 1892, in resize
return self._new(self.im.resize(size, resample, box))
ValueError: box offset can't be negative
|
ValueError
|
def _load_libtiff(self):
"""Overload method triggered when we detect a compressed tiff
Calls out to libtiff"""
pixel = Image.Image.load(self)
if self.tile is None:
raise IOError("cannot load this image")
if not self.tile:
return pixel
self.load_prepare()
if not len(self.tile) == 1:
raise IOError("Not exactly one tile")
# (self._compression, (extents tuple),
# 0, (rawmode, self._compression, fp))
extents = self.tile[0][1]
args = list(self.tile[0][3])
# To be nice on memory footprint, if there's a
# file descriptor, use that instead of reading
# into a string in python.
# libtiff closes the file descriptor, so pass in a dup.
try:
fp = hasattr(self.fp, "fileno") and os.dup(self.fp.fileno())
# flush the file descriptor, prevents error on pypy 2.4+
# should also eliminate the need for fp.tell for py3
# in _seek
if hasattr(self.fp, "flush"):
self.fp.flush()
except IOError:
# io.BytesIO have a fileno, but returns an IOError if
# it doesn't use a file descriptor.
fp = False
if fp:
args[2] = fp
decoder = Image._getdecoder(self.mode, "libtiff", tuple(args), self.decoderconfig)
try:
decoder.setimage(self.im, extents)
except ValueError:
raise IOError("Couldn't set the image")
if hasattr(self.fp, "getvalue"):
# We've got a stringio like thing passed in. Yay for all in memory.
# The decoder needs the entire file in one shot, so there's not
# a lot we can do here other than give it the entire file.
# unless we could do something like get the address of the
# underlying string for stringio.
#
# Rearranging for supporting byteio items, since they have a fileno
# that returns an IOError if there's no underlying fp. Easier to
# deal with here by reordering.
if DEBUG:
print("have getvalue. just sending in a string from getvalue")
n, err = decoder.decode(self.fp.getvalue())
elif hasattr(self.fp, "fileno"):
# we've got a actual file on disk, pass in the fp.
if DEBUG:
print("have fileno, calling fileno version of the decoder.")
self.fp.seek(0)
# 4 bytes, otherwise the trace might error out
n, err = decoder.decode(b"fpfp")
else:
# we have something else.
if DEBUG:
print("don't have fileno or getvalue. just reading")
# UNDONE -- so much for that buffer size thing.
n, err = decoder.decode(self.fp.read())
self.tile = []
self.readonly = 0
# libtiff closed the fp in a, we need to close self.fp, if possible
if self._exclusive_fp and not self._is_animated:
self.fp.close()
self.fp = None # might be shared
if err < 0:
raise IOError(err)
return Image.Image.load(self)
|
def _load_libtiff(self):
"""Overload method triggered when we detect a compressed tiff
Calls out to libtiff"""
pixel = Image.Image.load(self)
if self.tile is None:
raise IOError("cannot load this image")
if not self.tile:
return pixel
self.load_prepare()
if not len(self.tile) == 1:
raise IOError("Not exactly one tile")
# (self._compression, (extents tuple),
# 0, (rawmode, self._compression, fp))
extents = self.tile[0][1]
args = list(self.tile[0][3]) + [self.tag_v2.offset]
# To be nice on memory footprint, if there's a
# file descriptor, use that instead of reading
# into a string in python.
# libtiff closes the file descriptor, so pass in a dup.
try:
fp = hasattr(self.fp, "fileno") and os.dup(self.fp.fileno())
# flush the file descriptor, prevents error on pypy 2.4+
# should also eliminate the need for fp.tell for py3
# in _seek
if hasattr(self.fp, "flush"):
self.fp.flush()
except IOError:
# io.BytesIO have a fileno, but returns an IOError if
# it doesn't use a file descriptor.
fp = False
if fp:
args[2] = fp
decoder = Image._getdecoder(self.mode, "libtiff", tuple(args), self.decoderconfig)
try:
decoder.setimage(self.im, extents)
except ValueError:
raise IOError("Couldn't set the image")
if hasattr(self.fp, "getvalue"):
# We've got a stringio like thing passed in. Yay for all in memory.
# The decoder needs the entire file in one shot, so there's not
# a lot we can do here other than give it the entire file.
# unless we could do something like get the address of the
# underlying string for stringio.
#
# Rearranging for supporting byteio items, since they have a fileno
# that returns an IOError if there's no underlying fp. Easier to
# deal with here by reordering.
if DEBUG:
print("have getvalue. just sending in a string from getvalue")
n, err = decoder.decode(self.fp.getvalue())
elif hasattr(self.fp, "fileno"):
# we've got a actual file on disk, pass in the fp.
if DEBUG:
print("have fileno, calling fileno version of the decoder.")
self.fp.seek(0)
# 4 bytes, otherwise the trace might error out
n, err = decoder.decode(b"fpfp")
else:
# we have something else.
if DEBUG:
print("don't have fileno or getvalue. just reading")
# UNDONE -- so much for that buffer size thing.
n, err = decoder.decode(self.fp.read())
self.tile = []
self.readonly = 0
# libtiff closed the fp in a, we need to close self.fp, if possible
if self._exclusive_fp and not self._is_animated:
self.fp.close()
self.fp = None # might be shared
if err < 0:
raise IOError(err)
return Image.Image.load(self)
|
https://github.com/python-pillow/Pillow/issues/3756
|
['DEFAULT_STRATEGY', 'FILTERED', 'FIXED', 'HAVE_LIBJPEGTURBO', 'HUFFMAN_ONLY', 'PILLOW_VERSION', 'RLE', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'alpha_composite', 'bcn_decoder', 'bit_decoder', 'blend', 'clear_cache', 'convert', 'draw', 'effect_mandelbrot', 'effect_noise', 'eps_encoder', 'fill', 'fli_decoder', 'font', 'get_alignment', 'get_block_size', 'get_blocks_max', 'get_stats', 'getcodecstatus', 'gif_decoder', 'gif_encoder', 'hex_decoder', 'hex_encoder', 'jp2klib_version', 'jpeg2k_decoder', 'jpeg2k_encoder', 'jpeg_decoder', 'jpeg_encoder', 'jpeglib_version', 'libtiff_decoder', 'libtiff_encoder', 'libtiff_version', 'linear_gradient', 'map_buffer', 'merge', 'new', 'outline', 'packbits_decoder', 'path', 'pcd_decoder', 'pcx_decoder', 'pcx_encoder', 'radial_gradient', 'raw_decoder', 'raw_encoder', 'reset_stats', 'set_alignment', 'set_block_size', 'set_blocks_max', 'sgi_rle_decoder', 'sun_rle_decoder', 'tga_rle_decoder', 'tga_rle_encoder', 'wedge', 'xbm_decoder', 'xbm_encoder', 'zip_decoder', 'zip_encoder', 'zlib_version']
5.4.1
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/PIL/Image.py", line 455, in _getdecoder
decoder = getattr(core, decoder_name + "_decoder")
AttributeError: module 'PIL._imaging' has no attribute 'group4_decoder'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "bug.py", line 36, in <module>
print(dimensionsFromBlobImage(blob))
File "bug.py", line 24, in dimensionsFromBlobImage
ImPar.feed(chunk)
File "/usr/local/lib/python3.7/dist-packages/PIL/ImageFile.py", line 416, in feed
im.mode, d, a, im.decoderconfig
File "/usr/local/lib/python3.7/dist-packages/PIL/Image.py", line 458, in _getdecoder
raise IOError("decoder %s not available" % decoder_name)
OSError: decoder group4 not available
|
AttributeError
|
def _setup(self):
"""Setup this image object based on current tags"""
if 0xBC01 in self.tag_v2:
raise IOError("Windows Media Photo files not yet supported")
# extract relevant tags
self._compression = COMPRESSION_INFO[self.tag_v2.get(COMPRESSION, 1)]
self._planar_configuration = self.tag_v2.get(PLANAR_CONFIGURATION, 1)
# photometric is a required tag, but not everyone is reading
# the specification
photo = self.tag_v2.get(PHOTOMETRIC_INTERPRETATION, 0)
# old style jpeg compression images most certainly are YCbCr
if self._compression == "tiff_jpeg":
photo = 6
fillorder = self.tag_v2.get(FILLORDER, 1)
if DEBUG:
print("*** Summary ***")
print("- compression:", self._compression)
print("- photometric_interpretation:", photo)
print("- planar_configuration:", self._planar_configuration)
print("- fill_order:", fillorder)
print("- YCbCr subsampling:", self.tag.get(530))
# size
xsize = self.tag_v2.get(IMAGEWIDTH)
ysize = self.tag_v2.get(IMAGELENGTH)
self._size = xsize, ysize
if DEBUG:
print("- size:", self.size)
sampleFormat = self.tag_v2.get(SAMPLEFORMAT, (1,))
if len(sampleFormat) > 1 and max(sampleFormat) == min(sampleFormat) == 1:
# SAMPLEFORMAT is properly per band, so an RGB image will
# be (1,1,1). But, we don't support per band pixel types,
# and anything more than one band is a uint8. So, just
# take the first element. Revisit this if adding support
# for more exotic images.
sampleFormat = (1,)
bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,))
extra_tuple = self.tag_v2.get(EXTRASAMPLES, ())
if photo in (2, 6, 8): # RGB, YCbCr, LAB
bps_count = 3
elif photo == 5: # CMYK
bps_count = 4
else:
bps_count = 1
bps_count += len(extra_tuple)
# Some files have only one value in bps_tuple,
# while should have more. Fix it
if bps_count > len(bps_tuple) and len(bps_tuple) == 1:
bps_tuple = bps_tuple * bps_count
# mode: check photometric interpretation and bits per pixel
key = (
self.tag_v2.prefix,
photo,
sampleFormat,
fillorder,
bps_tuple,
extra_tuple,
)
if DEBUG:
print("format key:", key)
try:
self.mode, rawmode = OPEN_INFO[key]
except KeyError:
if DEBUG:
print("- unsupported format")
raise SyntaxError("unknown pixel mode")
if DEBUG:
print("- raw mode:", rawmode)
print("- pil mode:", self.mode)
self.info["compression"] = self._compression
xres = self.tag_v2.get(X_RESOLUTION, 1)
yres = self.tag_v2.get(Y_RESOLUTION, 1)
if xres and yres:
resunit = self.tag_v2.get(RESOLUTION_UNIT)
if resunit == 2: # dots per inch
self.info["dpi"] = int(xres + 0.5), int(yres + 0.5)
elif resunit == 3: # dots per centimeter. convert to dpi
self.info["dpi"] = int(xres * 2.54 + 0.5), int(yres * 2.54 + 0.5)
elif resunit is None: # used to default to 1, but now 2)
self.info["dpi"] = int(xres + 0.5), int(yres + 0.5)
# For backward compatibility,
# we also preserve the old behavior
self.info["resolution"] = xres, yres
else: # No absolute unit of measurement
self.info["resolution"] = xres, yres
# build tile descriptors
x = y = layer = 0
self.tile = []
self.use_load_libtiff = READ_LIBTIFF or self._compression != "raw"
if self.use_load_libtiff:
# Decoder expects entire file as one tile.
# There's a buffer size limit in load (64k)
# so large g4 images will fail if we use that
# function.
#
# Setup the one tile for the whole image, then
# use the _load_libtiff function.
# libtiff handles the fillmode for us, so 1;IR should
# actually be 1;I. Including the R double reverses the
# bits, so stripes of the image are reversed. See
# https://github.com/python-pillow/Pillow/issues/279
if fillorder == 2:
# Replace fillorder with fillorder=1
key = key[:3] + (1,) + key[4:]
if DEBUG:
print("format key:", key)
# this should always work, since all the
# fillorder==2 modes have a corresponding
# fillorder=1 mode
self.mode, rawmode = OPEN_INFO[key]
# libtiff always returns the bytes in native order.
# we're expecting image byte order. So, if the rawmode
# contains I;16, we need to convert from native to image
# byte order.
if rawmode == "I;16":
rawmode = "I;16N"
if ";16B" in rawmode:
rawmode = rawmode.replace(";16B", ";16N")
if ";16L" in rawmode:
rawmode = rawmode.replace(";16L", ";16N")
# Offset in the tile tuple is 0, we go from 0,0 to
# w,h, and we only do this once -- eds
a = (rawmode, self._compression, False, self.tag_v2.offset)
self.tile.append(("libtiff", (0, 0, xsize, ysize), 0, a))
elif STRIPOFFSETS in self.tag_v2 or TILEOFFSETS in self.tag_v2:
# striped image
if STRIPOFFSETS in self.tag_v2:
offsets = self.tag_v2[STRIPOFFSETS]
h = self.tag_v2.get(ROWSPERSTRIP, ysize)
w = self.size[0]
else:
# tiled image
offsets = self.tag_v2[TILEOFFSETS]
w = self.tag_v2.get(322)
h = self.tag_v2.get(323)
for offset in offsets:
if x + w > xsize:
stride = w * sum(bps_tuple) / 8 # bytes per line
else:
stride = 0
tile_rawmode = rawmode
if self._planar_configuration == 2:
# each band on it's own layer
tile_rawmode = rawmode[layer]
# adjust stride width accordingly
stride /= bps_count
a = (tile_rawmode, int(stride), 1)
self.tile.append(
(
self._compression,
(x, y, min(x + w, xsize), min(y + h, ysize)),
offset,
a,
)
)
x = x + w
if x >= self.size[0]:
x, y = 0, y + h
if y >= self.size[1]:
x = y = 0
layer += 1
else:
if DEBUG:
print("- unsupported data organization")
raise SyntaxError("unknown data organization")
# Fix up info.
if ICCPROFILE in self.tag_v2:
self.info["icc_profile"] = self.tag_v2[ICCPROFILE]
# fixup palette descriptor
if self.mode in ["P", "PA"]:
palette = [o8(b // 256) for b in self.tag_v2[COLORMAP]]
self.palette = ImagePalette.raw("RGB;L", b"".join(palette))
|
def _setup(self):
"""Setup this image object based on current tags"""
if 0xBC01 in self.tag_v2:
raise IOError("Windows Media Photo files not yet supported")
# extract relevant tags
self._compression = COMPRESSION_INFO[self.tag_v2.get(COMPRESSION, 1)]
self._planar_configuration = self.tag_v2.get(PLANAR_CONFIGURATION, 1)
# photometric is a required tag, but not everyone is reading
# the specification
photo = self.tag_v2.get(PHOTOMETRIC_INTERPRETATION, 0)
# old style jpeg compression images most certainly are YCbCr
if self._compression == "tiff_jpeg":
photo = 6
fillorder = self.tag_v2.get(FILLORDER, 1)
if DEBUG:
print("*** Summary ***")
print("- compression:", self._compression)
print("- photometric_interpretation:", photo)
print("- planar_configuration:", self._planar_configuration)
print("- fill_order:", fillorder)
print("- YCbCr subsampling:", self.tag.get(530))
# size
xsize = self.tag_v2.get(IMAGEWIDTH)
ysize = self.tag_v2.get(IMAGELENGTH)
self._size = xsize, ysize
if DEBUG:
print("- size:", self.size)
sampleFormat = self.tag_v2.get(SAMPLEFORMAT, (1,))
if len(sampleFormat) > 1 and max(sampleFormat) == min(sampleFormat) == 1:
# SAMPLEFORMAT is properly per band, so an RGB image will
# be (1,1,1). But, we don't support per band pixel types,
# and anything more than one band is a uint8. So, just
# take the first element. Revisit this if adding support
# for more exotic images.
sampleFormat = (1,)
bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,))
extra_tuple = self.tag_v2.get(EXTRASAMPLES, ())
if photo in (2, 6, 8): # RGB, YCbCr, LAB
bps_count = 3
elif photo == 5: # CMYK
bps_count = 4
else:
bps_count = 1
bps_count += len(extra_tuple)
# Some files have only one value in bps_tuple,
# while should have more. Fix it
if bps_count > len(bps_tuple) and len(bps_tuple) == 1:
bps_tuple = bps_tuple * bps_count
# mode: check photometric interpretation and bits per pixel
key = (
self.tag_v2.prefix,
photo,
sampleFormat,
fillorder,
bps_tuple,
extra_tuple,
)
if DEBUG:
print("format key:", key)
try:
self.mode, rawmode = OPEN_INFO[key]
except KeyError:
if DEBUG:
print("- unsupported format")
raise SyntaxError("unknown pixel mode")
if DEBUG:
print("- raw mode:", rawmode)
print("- pil mode:", self.mode)
self.info["compression"] = self._compression
xres = self.tag_v2.get(X_RESOLUTION, 1)
yres = self.tag_v2.get(Y_RESOLUTION, 1)
if xres and yres:
resunit = self.tag_v2.get(RESOLUTION_UNIT)
if resunit == 2: # dots per inch
self.info["dpi"] = int(xres + 0.5), int(yres + 0.5)
elif resunit == 3: # dots per centimeter. convert to dpi
self.info["dpi"] = int(xres * 2.54 + 0.5), int(yres * 2.54 + 0.5)
elif resunit is None: # used to default to 1, but now 2)
self.info["dpi"] = int(xres + 0.5), int(yres + 0.5)
# For backward compatibility,
# we also preserve the old behavior
self.info["resolution"] = xres, yres
else: # No absolute unit of measurement
self.info["resolution"] = xres, yres
# build tile descriptors
x = y = layer = 0
self.tile = []
self.use_load_libtiff = READ_LIBTIFF or self._compression != "raw"
if self.use_load_libtiff:
# Decoder expects entire file as one tile.
# There's a buffer size limit in load (64k)
# so large g4 images will fail if we use that
# function.
#
# Setup the one tile for the whole image, then
# use the _load_libtiff function.
# libtiff handles the fillmode for us, so 1;IR should
# actually be 1;I. Including the R double reverses the
# bits, so stripes of the image are reversed. See
# https://github.com/python-pillow/Pillow/issues/279
if fillorder == 2:
# Replace fillorder with fillorder=1
key = key[:3] + (1,) + key[4:]
if DEBUG:
print("format key:", key)
# this should always work, since all the
# fillorder==2 modes have a corresponding
# fillorder=1 mode
self.mode, rawmode = OPEN_INFO[key]
# libtiff always returns the bytes in native order.
# we're expecting image byte order. So, if the rawmode
# contains I;16, we need to convert from native to image
# byte order.
if rawmode == "I;16":
rawmode = "I;16N"
if ";16B" in rawmode:
rawmode = rawmode.replace(";16B", ";16N")
if ";16L" in rawmode:
rawmode = rawmode.replace(";16L", ";16N")
# Offset in the tile tuple is 0, we go from 0,0 to
# w,h, and we only do this once -- eds
a = (rawmode, self._compression, False)
self.tile.append((self._compression, (0, 0, xsize, ysize), 0, a))
elif STRIPOFFSETS in self.tag_v2 or TILEOFFSETS in self.tag_v2:
# striped image
if STRIPOFFSETS in self.tag_v2:
offsets = self.tag_v2[STRIPOFFSETS]
h = self.tag_v2.get(ROWSPERSTRIP, ysize)
w = self.size[0]
else:
# tiled image
offsets = self.tag_v2[TILEOFFSETS]
w = self.tag_v2.get(322)
h = self.tag_v2.get(323)
for offset in offsets:
if x + w > xsize:
stride = w * sum(bps_tuple) / 8 # bytes per line
else:
stride = 0
tile_rawmode = rawmode
if self._planar_configuration == 2:
# each band on it's own layer
tile_rawmode = rawmode[layer]
# adjust stride width accordingly
stride /= bps_count
a = (tile_rawmode, int(stride), 1)
self.tile.append(
(
self._compression,
(x, y, min(x + w, xsize), min(y + h, ysize)),
offset,
a,
)
)
x = x + w
if x >= self.size[0]:
x, y = 0, y + h
if y >= self.size[1]:
x = y = 0
layer += 1
else:
if DEBUG:
print("- unsupported data organization")
raise SyntaxError("unknown data organization")
# Fix up info.
if ICCPROFILE in self.tag_v2:
self.info["icc_profile"] = self.tag_v2[ICCPROFILE]
# fixup palette descriptor
if self.mode in ["P", "PA"]:
palette = [o8(b // 256) for b in self.tag_v2[COLORMAP]]
self.palette = ImagePalette.raw("RGB;L", b"".join(palette))
|
https://github.com/python-pillow/Pillow/issues/3756
|
['DEFAULT_STRATEGY', 'FILTERED', 'FIXED', 'HAVE_LIBJPEGTURBO', 'HUFFMAN_ONLY', 'PILLOW_VERSION', 'RLE', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'alpha_composite', 'bcn_decoder', 'bit_decoder', 'blend', 'clear_cache', 'convert', 'draw', 'effect_mandelbrot', 'effect_noise', 'eps_encoder', 'fill', 'fli_decoder', 'font', 'get_alignment', 'get_block_size', 'get_blocks_max', 'get_stats', 'getcodecstatus', 'gif_decoder', 'gif_encoder', 'hex_decoder', 'hex_encoder', 'jp2klib_version', 'jpeg2k_decoder', 'jpeg2k_encoder', 'jpeg_decoder', 'jpeg_encoder', 'jpeglib_version', 'libtiff_decoder', 'libtiff_encoder', 'libtiff_version', 'linear_gradient', 'map_buffer', 'merge', 'new', 'outline', 'packbits_decoder', 'path', 'pcd_decoder', 'pcx_decoder', 'pcx_encoder', 'radial_gradient', 'raw_decoder', 'raw_encoder', 'reset_stats', 'set_alignment', 'set_block_size', 'set_blocks_max', 'sgi_rle_decoder', 'sun_rle_decoder', 'tga_rle_decoder', 'tga_rle_encoder', 'wedge', 'xbm_decoder', 'xbm_encoder', 'zip_decoder', 'zip_encoder', 'zlib_version']
5.4.1
Traceback (most recent call last):
File "/usr/local/lib/python3.7/dist-packages/PIL/Image.py", line 455, in _getdecoder
decoder = getattr(core, decoder_name + "_decoder")
AttributeError: module 'PIL._imaging' has no attribute 'group4_decoder'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "bug.py", line 36, in <module>
print(dimensionsFromBlobImage(blob))
File "bug.py", line 24, in dimensionsFromBlobImage
ImPar.feed(chunk)
File "/usr/local/lib/python3.7/dist-packages/PIL/ImageFile.py", line 416, in feed
im.mode, d, a, im.decoderconfig
File "/usr/local/lib/python3.7/dist-packages/PIL/Image.py", line 458, in _getdecoder
raise IOError("decoder %s not available" % decoder_name)
OSError: decoder group4 not available
|
AttributeError
|
def _seek(self, frame):
if frame == 0:
# rewind
self.__offset = 0
self.dispose = None
self.dispose_extent = [0, 0, 0, 0] # x0, y0, x1, y1
self.__frame = -1
self.__fp.seek(self.__rewind)
self._prev_im = None
self.disposal_method = 0
else:
# ensure that the previous frame was loaded
if not self.im:
self.load()
if frame != self.__frame + 1:
raise ValueError("cannot seek to frame %d" % frame)
self.__frame = frame
self.tile = []
self.fp = self.__fp
if self.__offset:
# backup to last frame
self.fp.seek(self.__offset)
while self.data():
pass
self.__offset = 0
if self.dispose:
self.im.paste(self.dispose, self.dispose_extent)
from copy import copy
self.palette = copy(self.global_palette)
info = {}
while True:
s = self.fp.read(1)
if not s or s == b";":
break
elif s == b"!":
#
# extensions
#
s = self.fp.read(1)
block = self.data()
if i8(s) == 249:
#
# graphic control extension
#
flags = i8(block[0])
if flags & 1:
info["transparency"] = i8(block[3])
info["duration"] = i16(block[1:3]) * 10
# disposal method - find the value of bits 4 - 6
dispose_bits = 0b00011100 & flags
dispose_bits = dispose_bits >> 2
if dispose_bits:
# only set the dispose if it is not
# unspecified. I'm not sure if this is
# correct, but it seems to prevent the last
# frame from looking odd for some animations
self.disposal_method = dispose_bits
elif i8(s) == 254:
#
# comment extension
#
while block:
if "comment" in info:
info["comment"] += block
else:
info["comment"] = block
block = self.data()
continue
elif i8(s) == 255:
#
# application extension
#
info["extension"] = block, self.fp.tell()
if block[:11] == b"NETSCAPE2.0":
block = self.data()
if len(block) >= 3 and i8(block[0]) == 1:
info["loop"] = i16(block[1:3])
while self.data():
pass
elif s == b",":
#
# local image
#
s = self.fp.read(9)
# extent
x0, y0 = i16(s[0:]), i16(s[2:])
x1, y1 = x0 + i16(s[4:]), y0 + i16(s[6:])
if x1 > self.size[0] or y1 > self.size[1]:
self._size = max(x1, self.size[0]), max(y1, self.size[1])
self.dispose_extent = x0, y0, x1, y1
flags = i8(s[8])
interlace = (flags & 64) != 0
if flags & 128:
bits = (flags & 7) + 1
self.palette = ImagePalette.raw("RGB", self.fp.read(3 << bits))
# image data
bits = i8(self.fp.read(1))
self.__offset = self.fp.tell()
self.tile = [("gif", (x0, y0, x1, y1), self.__offset, (bits, interlace))]
break
else:
pass
# raise IOError, "illegal GIF tag `%x`" % i8(s)
try:
if self.disposal_method < 2:
# do not dispose or none specified
self.dispose = None
elif self.disposal_method == 2:
# replace with background colour
self.dispose = Image.core.fill("P", self.size, self.info["background"])
else:
# replace with previous contents
if self.im:
self.dispose = self.im.copy()
# only dispose the extent in this frame
if self.dispose:
self.dispose = self._crop(self.dispose, self.dispose_extent)
except (AttributeError, KeyError):
pass
if not self.tile:
# self.__fp = None
raise EOFError
for k in ["transparency", "duration", "comment", "extension", "loop"]:
if k in info:
self.info[k] = info[k]
elif k in self.info:
del self.info[k]
self.mode = "L"
if self.palette:
self.mode = "P"
|
def _seek(self, frame):
if frame == 0:
# rewind
self.__offset = 0
self.dispose = None
self.dispose_extent = [0, 0, 0, 0] # x0, y0, x1, y1
self.__frame = -1
self.__fp.seek(self.__rewind)
self._prev_im = None
self.disposal_method = 0
else:
# ensure that the previous frame was loaded
if not self.im:
self.load()
if frame != self.__frame + 1:
raise ValueError("cannot seek to frame %d" % frame)
self.__frame = frame
self.tile = []
self.fp = self.__fp
if self.__offset:
# backup to last frame
self.fp.seek(self.__offset)
while self.data():
pass
self.__offset = 0
if self.dispose:
self.im.paste(self.dispose, self.dispose_extent)
from copy import copy
self.palette = copy(self.global_palette)
info = {}
while True:
s = self.fp.read(1)
if not s or s == b";":
break
elif s == b"!":
#
# extensions
#
s = self.fp.read(1)
block = self.data()
if i8(s) == 249:
#
# graphic control extension
#
flags = i8(block[0])
if flags & 1:
info["transparency"] = i8(block[3])
info["duration"] = i16(block[1:3]) * 10
# disposal method - find the value of bits 4 - 6
dispose_bits = 0b00011100 & flags
dispose_bits = dispose_bits >> 2
if dispose_bits:
# only set the dispose if it is not
# unspecified. I'm not sure if this is
# correct, but it seems to prevent the last
# frame from looking odd for some animations
self.disposal_method = dispose_bits
elif i8(s) == 254:
#
# comment extension
#
while block:
if "comment" in info:
info["comment"] += block
else:
info["comment"] = block
block = self.data()
continue
elif i8(s) == 255:
#
# application extension
#
info["extension"] = block, self.fp.tell()
if block[:11] == b"NETSCAPE2.0":
block = self.data()
if len(block) >= 3 and i8(block[0]) == 1:
info["loop"] = i16(block[1:3])
while self.data():
pass
elif s == b",":
#
# local image
#
s = self.fp.read(9)
# extent
x0, y0 = i16(s[0:]), i16(s[2:])
x1, y1 = x0 + i16(s[4:]), y0 + i16(s[6:])
self.dispose_extent = x0, y0, x1, y1
flags = i8(s[8])
interlace = (flags & 64) != 0
if flags & 128:
bits = (flags & 7) + 1
self.palette = ImagePalette.raw("RGB", self.fp.read(3 << bits))
# image data
bits = i8(self.fp.read(1))
self.__offset = self.fp.tell()
self.tile = [("gif", (x0, y0, x1, y1), self.__offset, (bits, interlace))]
break
else:
pass
# raise IOError, "illegal GIF tag `%x`" % i8(s)
try:
if self.disposal_method < 2:
# do not dispose or none specified
self.dispose = None
elif self.disposal_method == 2:
# replace with background colour
self.dispose = Image.core.fill("P", self.size, self.info["background"])
else:
# replace with previous contents
if self.im:
self.dispose = self.im.copy()
# only dispose the extent in this frame
if self.dispose:
self.dispose = self._crop(self.dispose, self.dispose_extent)
except (AttributeError, KeyError):
pass
if not self.tile:
# self.__fp = None
raise EOFError
for k in ["transparency", "duration", "comment", "extension", "loop"]:
if k in info:
self.info[k] = info[k]
elif k in self.info:
del self.info[k]
self.mode = "L"
if self.palette:
self.mode = "P"
|
https://github.com/python-pillow/Pillow/issues/2383
|
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/discord/ext/commands/core.py", line 50, in wrapped
ret = yield from coro(*args, **kwargs)
File "/root/discord/mods/Fun.py", line 313, in gmagik
for image in glob.glob(gif_dir+"*_{0}.png".format(rand)):
File "/usr/local/lib/python3.6/concurrent/futures/thread.py", line 55, in run
result = self.fn(*self.args, **self.kwargs)
File "/root/discord/mods/Fun.py", line 232, in do_gmagik
try:
File "/usr/local/lib/python3.6/site-packages/PIL/Image.py", line 1698, in save
self.load()
File "/usr/local/lib/python3.6/site-packages/PIL/ImageFile.py", line 242, in load
if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
UnboundLocalError: local variable 'err_code' referenced before assignment
|
UnboundLocalError
|
def __new__(cls, text, lang=None, tkey=None):
"""
:param cls: the class to use when creating the instance
:param text: value for this key
:param lang: language code
:param tkey: UTF-8 version of the key name
"""
self = str.__new__(cls, text)
self.lang = lang
self.tkey = tkey
return self
|
def __new__(cls, text, lang, tkey):
"""
:param cls: the class to use when creating the instance
:param text: value for this key
:param lang: language code
:param tkey: UTF-8 version of the key name
"""
self = str.__new__(cls, text)
self.lang = lang
self.tkey = tkey
return self
|
https://github.com/python-pillow/Pillow/issues/1434
|
Traceback (most recent call last):
File "example.py", line 6, in <module>
new_im = pickle.loads(p)
TypeError: __new__() missing 2 required positional arguments: 'lang' and 'tkey'
|
TypeError
|
def load_end(self):
"internal: finished reading image data"
while True:
self.fp.read(4) # CRC
try:
cid, pos, length = self.png.read()
except (struct.error, SyntaxError):
break
if cid == b"IEND":
break
try:
self.png.call(cid, pos, length)
except UnicodeDecodeError:
break
except EOFError:
ImageFile._safe_read(self.fp, length)
self._text = self.png.im_text
self.png.close()
self.png = None
|
def load_end(self):
"internal: finished reading image data"
while True:
self.fp.read(4) # CRC
try:
cid, pos, length = self.png.read()
except (struct.error, SyntaxError):
break
if cid == b"IEND":
break
try:
self.png.call(cid, pos, length)
except UnicodeDecodeError:
break
self._text = self.png.im_text
self.png.close()
self.png = None
|
https://github.com/python-pillow/Pillow/issues/3527
|
EOFError Traceback (most recent call last)
<ipython-input-11-d946298b95dd> in <module>
----> 1 image.show()
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/Image.py in show(self, title, command)
2039 """
2040
-> 2041 _show(self, title=title, command=command)
2042
2043 def split(self):
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/Image.py in _show(image, **options)
2904 def _show(image, **options):
2905 # override me, as necessary
-> 2906 _showxv(image, **options)
2907
2908
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/Image.py in _showxv(image, title, **options)
2909 def _showxv(image, title=None, **options):
2910 from . import ImageShow
-> 2911 ImageShow.show(image, title, **options)
2912
2913
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/ImageShow.py in show(image, title, **options)
51 """
52 for viewer in _viewers:
---> 53 if viewer.show(image, title=title, **options):
54 return 1
55 return 0
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/ImageShow.py in show(self, image, **options)
75 image = image.convert(base)
76
---> 77 return self.show_image(image, **options)
78
79 # hook methods
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/ImageShow.py in show_image(self, image, **options)
95 def show_image(self, image, **options):
96 """Display given image"""
---> 97 return self.show_file(self.save_image(image), **options)
98
99 def show_file(self, file, **options):
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/ImageShow.py in save_image(self, image)
91 def save_image(self, image):
92 """Save to temporary file, and return filename"""
---> 93 return image._dump(format=self.get_format(image), **self.options)
94
95 def show_image(self, image, **options):
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/Image.py in _dump(self, file, format, **options)
646 filename = filename + suffix
647
--> 648 self.load()
649
650 if not format or format == "PPM":
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/ImageFile.py in load(self)
248 self.readonly = readonly
249
--> 250 self.load_end()
251
252 if self._exclusive_fp and self._close_exclusive_fp_after_loading:
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/PngImagePlugin.py in load_end(self)
675
676 try:
--> 677 self.png.call(cid, pos, length)
678 except UnicodeDecodeError:
679 break
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/PngImagePlugin.py in call(self, cid, pos, length)
138
139 logger.debug("STREAM %r %s %s", cid, pos, length)
--> 140 return getattr(self, "chunk_" + cid.decode('ascii'))(pos, length)
141
142 def crc(self, cid, data):
~/py/ocean/ocean_ai/env/lib/python3.6/site-packages/PIL/PngImagePlugin.py in chunk_IDAT(self, pos, length)
354 self.im_tile = [("zip", (0, 0)+self.im_size, pos, self.im_rawmode)]
355 self.im_idat = length
--> 356 raise EOFError
357
358 def chunk_IEND(self, pos, length):
EOFError:
|
EOFError
|
def convert(self, mode=None, matrix=None, dither=None, palette=WEB, colors=256):
"""
Returns a converted copy of this image. For the "P" mode, this
method translates pixels through the palette. If mode is
omitted, a mode is chosen so that all information in the image
and the palette can be represented without a palette.
The current version supports all possible conversions between
"L", "RGB" and "CMYK." The **matrix** argument only supports "L"
and "RGB".
When translating a color image to black and white (mode "L"),
the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
The default method of converting a greyscale ("L") or "RGB"
image into a bilevel (mode "1") image uses Floyd-Steinberg
dither to approximate the original image luminosity levels. If
dither is NONE, all non-zero values are set to 255 (white). To
use other thresholds, use the :py:meth:`~PIL.Image.Image.point`
method.
:param mode: The requested mode. See: :ref:`concept-modes`.
:param matrix: An optional conversion matrix. If given, this
should be 4- or 12-tuple containing floating point values.
:param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are NONE or FLOYDSTEINBERG (default).
:param palette: Palette to use when converting from mode "RGB"
to "P". Available palettes are WEB or ADAPTIVE.
:param colors: Number of colors to use for the ADAPTIVE palette.
Defaults to 256.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if not mode and self.mode == "P":
# determine default mode
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
if not mode or (mode == self.mode and not matrix):
return self.copy()
has_transparency = self.info.get("transparency") is not None
if matrix:
# matrix conversion
if mode not in ("L", "RGB"):
raise ValueError("illegal conversion")
im = self.im.convert_matrix(mode, matrix)
new = self._new(im)
if has_transparency and self.im.bands == 3:
transparency = new.info["transparency"]
def convert_transparency(m, v):
v = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3] * 0.5
return max(0, min(255, int(v)))
if mode == "L":
transparency = convert_transparency(matrix, transparency)
elif len(mode) == 3:
transparency = tuple(
[
convert_transparency(matrix[i * 4 : i * 4 + 4], transparency)
for i in range(0, len(transparency))
]
)
new.info["transparency"] = transparency
return new
if mode == "P" and self.mode == "RGBA":
return self.quantize(colors)
trns = None
delete_trns = False
# transparency handling
if has_transparency:
if self.mode in ("L", "RGB") and mode == "RGBA":
# Use transparent conversion to promote from transparent
# color to an alpha channel.
new_im = self._new(
self.im.convert_transparent(mode, self.info["transparency"])
)
del new_im.info["transparency"]
return new_im
elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"):
t = self.info["transparency"]
if isinstance(t, bytes):
# Dragons. This can't be represented by a single color
warnings.warn(
"Palette images with Transparency "
+ " expressed in bytes should be converted "
+ "to RGBA images"
)
delete_trns = True
else:
# get the new transparency color.
# use existing conversions
trns_im = Image()._new(core.new(self.mode, (1, 1)))
if self.mode == "P":
trns_im.putpalette(self.palette)
if isinstance(t, tuple):
try:
t = trns_im.palette.getcolor(t)
except:
raise ValueError(
"Couldn't allocate a palette color for transparency"
)
trns_im.putpixel((0, 0), t)
if mode in ("L", "RGB"):
trns_im = trns_im.convert(mode)
else:
# can't just retrieve the palette number, got to do it
# after quantization.
trns_im = trns_im.convert("RGB")
trns = trns_im.getpixel((0, 0))
elif self.mode == "P" and mode == "RGBA":
t = self.info["transparency"]
delete_trns = True
if isinstance(t, bytes):
self.im.putpalettealphas(t)
elif isinstance(t, int):
self.im.putpalettealpha(t, 0)
else:
raise ValueError("Transparency for P mode should" + " be bytes or int")
if mode == "P" and palette == ADAPTIVE:
im = self.im.quantize(colors)
new = self._new(im)
from . import ImagePalette
new.palette = ImagePalette.raw("RGB", new.im.getpalette("RGB"))
if delete_trns:
# This could possibly happen if we requantize to fewer colors.
# The transparency would be totally off in that case.
del new.info["transparency"]
if trns is not None:
try:
new.info["transparency"] = new.palette.getcolor(trns)
except:
# if we can't make a transparent color, don't leave the old
# transparency hanging around to mess us up.
del new.info["transparency"]
warnings.warn("Couldn't allocate palette entry " + "for transparency")
return new
# colorspace conversion
if dither is None:
dither = FLOYDSTEINBERG
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
im = self.im.convert(getmodebase(self.mode))
im = im.convert(mode, dither)
except KeyError:
raise ValueError("illegal conversion")
new_im = self._new(im)
if delete_trns:
# crash fail if we leave a bytes transparency in an rgb/l mode.
del new_im.info["transparency"]
if trns is not None:
if new_im.mode == "P":
try:
new_im.info["transparency"] = new_im.palette.getcolor(trns)
except:
del new_im.info["transparency"]
warnings.warn("Couldn't allocate palette entry " + "for transparency")
else:
new_im.info["transparency"] = trns
return new_im
|
def convert(self, mode=None, matrix=None, dither=None, palette=WEB, colors=256):
"""
Returns a converted copy of this image. For the "P" mode, this
method translates pixels through the palette. If mode is
omitted, a mode is chosen so that all information in the image
and the palette can be represented without a palette.
The current version supports all possible conversions between
"L", "RGB" and "CMYK." The **matrix** argument only supports "L"
and "RGB".
When translating a color image to black and white (mode "L"),
the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
The default method of converting a greyscale ("L") or "RGB"
image into a bilevel (mode "1") image uses Floyd-Steinberg
dither to approximate the original image luminosity levels. If
dither is NONE, all non-zero values are set to 255 (white). To
use other thresholds, use the :py:meth:`~PIL.Image.Image.point`
method.
:param mode: The requested mode. See: :ref:`concept-modes`.
:param matrix: An optional conversion matrix. If given, this
should be 4- or 12-tuple containing floating point values.
:param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are NONE or FLOYDSTEINBERG (default).
:param palette: Palette to use when converting from mode "RGB"
to "P". Available palettes are WEB or ADAPTIVE.
:param colors: Number of colors to use for the ADAPTIVE palette.
Defaults to 256.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if not mode and self.mode == "P":
# determine default mode
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
if not mode or (mode == self.mode and not matrix):
return self.copy()
if matrix:
# matrix conversion
if mode not in ("L", "RGB"):
raise ValueError("illegal conversion")
im = self.im.convert_matrix(mode, matrix)
return self._new(im)
if mode == "P" and self.mode == "RGBA":
return self.quantize(colors)
trns = None
delete_trns = False
# transparency handling
if "transparency" in self.info and self.info["transparency"] is not None:
if self.mode in ("L", "RGB") and mode == "RGBA":
# Use transparent conversion to promote from transparent
# color to an alpha channel.
new_im = self._new(
self.im.convert_transparent(mode, self.info["transparency"])
)
del new_im.info["transparency"]
return new_im
elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"):
t = self.info["transparency"]
if isinstance(t, bytes):
# Dragons. This can't be represented by a single color
warnings.warn(
"Palette images with Transparency "
+ " expressed in bytes should be converted "
+ "to RGBA images"
)
delete_trns = True
else:
# get the new transparency color.
# use existing conversions
trns_im = Image()._new(core.new(self.mode, (1, 1)))
if self.mode == "P":
trns_im.putpalette(self.palette)
if isinstance(t, tuple):
try:
t = trns_im.palette.getcolor(t)
except:
raise ValueError(
"Couldn't allocate a palette color for transparency"
)
trns_im.putpixel((0, 0), t)
if mode in ("L", "RGB"):
trns_im = trns_im.convert(mode)
else:
# can't just retrieve the palette number, got to do it
# after quantization.
trns_im = trns_im.convert("RGB")
trns = trns_im.getpixel((0, 0))
elif self.mode == "P" and mode == "RGBA":
t = self.info["transparency"]
delete_trns = True
if isinstance(t, bytes):
self.im.putpalettealphas(t)
elif isinstance(t, int):
self.im.putpalettealpha(t, 0)
else:
raise ValueError("Transparency for P mode should" + " be bytes or int")
if mode == "P" and palette == ADAPTIVE:
im = self.im.quantize(colors)
new = self._new(im)
from . import ImagePalette
new.palette = ImagePalette.raw("RGB", new.im.getpalette("RGB"))
if delete_trns:
# This could possibly happen if we requantize to fewer colors.
# The transparency would be totally off in that case.
del new.info["transparency"]
if trns is not None:
try:
new.info["transparency"] = new.palette.getcolor(trns)
except:
# if we can't make a transparent color, don't leave the old
# transparency hanging around to mess us up.
del new.info["transparency"]
warnings.warn("Couldn't allocate palette entry " + "for transparency")
return new
# colorspace conversion
if dither is None:
dither = FLOYDSTEINBERG
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
im = self.im.convert(getmodebase(self.mode))
im = im.convert(mode, dither)
except KeyError:
raise ValueError("illegal conversion")
new_im = self._new(im)
if delete_trns:
# crash fail if we leave a bytes transparency in an rgb/l mode.
del new_im.info["transparency"]
if trns is not None:
if new_im.mode == "P":
try:
new_im.info["transparency"] = new_im.palette.getcolor(trns)
except:
del new_im.info["transparency"]
warnings.warn("Couldn't allocate palette entry " + "for transparency")
else:
new_im.info["transparency"] = trns
return new_im
|
https://github.com/python-pillow/Pillow/issues/3150
|
Traceback (most recent call last):
File "C:/base2_nl/py-grayscale/minimalExampleBug.py", line 5, in <module>
im.save('tbbn2c16-out.png')
File "C:\Python36\lib\site-packages\PIL\Image.py", line 1935, in save
save_handler(self, fp, filename)
File "C:\Python36\lib\site-packages\PIL\PngImagePlugin.py", line 790, in _save
transparency = max(0, min(65535, transparency))
TypeError: '<' not supported between instances of 'tuple' and 'int'
|
TypeError
|
def multiline_text(
self,
xy,
text,
fill=None,
font=None,
anchor=None,
spacing=4,
align="left",
direction=None,
features=None,
):
widths = []
max_width = 0
lines = self._multiline_split(text)
line_spacing = self.textsize("A", font=font)[1] + spacing
for line in lines:
line_width, line_height = self.textsize(line, font)
widths.append(line_width)
max_width = max(max_width, line_width)
left, top = xy
for idx, line in enumerate(lines):
if align == "left":
pass # left = x
elif align == "center":
left += (max_width - widths[idx]) / 2.0
elif align == "right":
left += max_width - widths[idx]
else:
raise ValueError('align must be "left", "center" or "right"')
self.text(
(left, top),
line,
fill,
font,
anchor,
direction=direction,
features=features,
)
top += line_spacing
left = xy[0]
|
def multiline_text(
self,
xy,
text,
fill=None,
font=None,
anchor=None,
spacing=4,
align="left",
direction=None,
features=None,
):
widths = []
max_width = 0
lines = self._multiline_split(text)
line_spacing = self.textsize("A", font=font)[1] + spacing
for line in lines:
line_width, line_height = self.textsize(line, font)
widths.append(line_width)
max_width = max(max_width, line_width)
left, top = xy
for idx, line in enumerate(lines):
if align == "left":
pass # left = x
elif align == "center":
left += (max_width - widths[idx]) / 2.0
elif align == "right":
left += max_width - widths[idx]
else:
assert False, 'align must be "left", "center" or "right"'
self.text(
(left, top),
line,
fill,
font,
anchor,
direction=direction,
features=features,
)
top += line_spacing
left = xy[0]
|
https://github.com/python-pillow/Pillow/issues/3231
|
Traceback (most recent call last):
File "test.py", line 1, in <module>
import PIL
File "/usr/lib/python3.6/site-packages/PIL/__init__.py", line 27, in <module>
__doc__ = __doc__.format(__version__) # include version in docstring
AttributeError: 'NoneType' object has no attribute 'format'
|
AttributeError
|
def paste(self, im, box=None):
"""
Paste a PIL image into the photo image. Note that this can
be very slow if the photo image is displayed.
:param im: A PIL image. The size must match the target region. If the
mode does not match, the image is converted to the mode of
the bitmap image.
:param box: A 4-tuple defining the left, upper, right, and lower pixel
coordinate. See :ref:`coordinate-system`. If None is given
instead of a tuple, all of the image is assumed.
"""
# convert to blittable
im.load()
image = im.im
if image.isblock() and im.mode == self.__mode:
block = image
else:
block = image.new_block(self.__mode, im.size)
image.convert2(block, image) # convert directly between buffers
tk = self.__photo.tk
try:
tk.call("PyImagingPhoto", self.__photo, block.id)
except tkinter.TclError:
# activate Tkinter hook
try:
from . import _imagingtk
try:
if hasattr(tk, "interp"):
# Required for PyPy, which always has CFFI installed
from cffi import FFI
ffi = FFI()
# PyPy is using an FFI CDATA element
# (Pdb) self.tk.interp
# <cdata 'Tcl_Interp *' 0x3061b50>
_imagingtk.tkinit(int(ffi.cast("uintptr_t", tk.interp)), 1)
else:
_imagingtk.tkinit(tk.interpaddr(), 1)
except AttributeError:
_imagingtk.tkinit(id(tk), 0)
tk.call("PyImagingPhoto", self.__photo, block.id)
except (ImportError, AttributeError, tkinter.TclError):
raise # configuration problem; cannot attach to Tkinter
|
def paste(self, im, box=None):
"""
Paste a PIL image into the photo image. Note that this can
be very slow if the photo image is displayed.
:param im: A PIL image. The size must match the target region. If the
mode does not match, the image is converted to the mode of
the bitmap image.
:param box: A 4-tuple defining the left, upper, right, and lower pixel
coordinate. See :ref:`coordinate-system`. If None is given
instead of a tuple, all of the image is assumed.
"""
# convert to blittable
im.load()
image = im.im
if image.isblock() and im.mode == self.__mode:
block = image
else:
block = image.new_block(self.__mode, im.size)
image.convert2(block, image) # convert directly between buffers
tk = self.__photo.tk
try:
tk.call("PyImagingPhoto", self.__photo, block.id)
except tkinter.TclError:
# activate Tkinter hook
try:
from . import _imagingtk
try:
if hasattr(tk, "interp"):
# Pypy is using a ffi cdata element
# (Pdb) self.tk.interp
# <cdata 'Tcl_Interp *' 0x3061b50>
_imagingtk.tkinit(int(ffi.cast("uintptr_t", tk.interp)), 1)
else:
_imagingtk.tkinit(tk.interpaddr(), 1)
except AttributeError:
_imagingtk.tkinit(id(tk), 0)
tk.call("PyImagingPhoto", self.__photo, block.id)
except (ImportError, AttributeError, tkinter.TclError):
raise # configuration problem; cannot attach to Tkinter
|
https://github.com/python-pillow/Pillow/issues/3231
|
Traceback (most recent call last):
File "test.py", line 1, in <module>
import PIL
File "/usr/lib/python3.6/site-packages/PIL/__init__.py", line 27, in <module>
__doc__ = __doc__.format(__version__) # include version in docstring
AttributeError: 'NoneType' object has no attribute 'format'
|
AttributeError
|
def APP(self, marker):
#
# Application marker. Store these in the APP dictionary.
# Also look for well-known application markers.
n = i16(self.fp.read(2)) - 2
s = ImageFile._safe_read(self.fp, n)
app = "APP%d" % (marker & 15)
self.app[app] = s # compatibility
self.applist.append((app, s))
if marker == 0xFFE0 and s[:4] == b"JFIF":
# extract JFIF information
self.info["jfif"] = version = i16(s, 5) # version
self.info["jfif_version"] = divmod(version, 256)
# extract JFIF properties
try:
jfif_unit = i8(s[7])
jfif_density = i16(s, 8), i16(s, 10)
except:
pass
else:
if jfif_unit == 1:
self.info["dpi"] = jfif_density
self.info["jfif_unit"] = jfif_unit
self.info["jfif_density"] = jfif_density
elif marker == 0xFFE1 and s[:5] == b"Exif\0":
# extract Exif information (incomplete)
self.info["exif"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
# extract FlashPix information (incomplete)
self.info["flashpix"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
# Since an ICC profile can be larger than the maximum size of
# a JPEG marker (64K), we need provisions to split it into
# multiple markers. The format defined by the ICC specifies
# one or more APP2 markers containing the following data:
# Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
# Marker sequence number 1, 2, etc (1 byte)
# Number of markers Total of APP2's used (1 byte)
# Profile data (remainder of APP2 data)
# Decoders should use the marker sequence numbers to
# reassemble the profile, rather than assuming that the APP2
# markers appear in the correct sequence.
self.icclist.append(s)
elif marker == 0xFFEE and s[:5] == b"Adobe":
self.info["adobe"] = i16(s, 5)
# extract Adobe custom properties
try:
adobe_transform = i8(s[1])
except:
pass
else:
self.info["adobe_transform"] = adobe_transform
elif marker == 0xFFE2 and s[:4] == b"MPF\0":
# extract MPO information
self.info["mp"] = s[4:]
# offset is current location minus buffer size
# plus constant header size
self.info["mpoffset"] = self.fp.tell() - n + 4
# If DPI isn't in JPEG header, fetch from EXIF
if "dpi" not in self.info and "exif" in self.info:
exif = self._getexif()
try:
resolution_unit = exif[0x0128]
x_resolution = exif[0x011A]
try:
dpi = x_resolution[0] / x_resolution[1]
except TypeError:
dpi = x_resolution
if resolution_unit == 3: # cm
# 1 dpcm = 2.54 dpi
dpi *= 2.54
self.info["dpi"] = dpi, dpi
except KeyError:
self.info["dpi"] = 72, 72
|
def APP(self, marker):
#
# Application marker. Store these in the APP dictionary.
# Also look for well-known application markers.
n = i16(self.fp.read(2)) - 2
s = ImageFile._safe_read(self.fp, n)
app = "APP%d" % (marker & 15)
self.app[app] = s # compatibility
self.applist.append((app, s))
if marker == 0xFFE0 and s[:4] == b"JFIF":
# extract JFIF information
self.info["jfif"] = version = i16(s, 5) # version
self.info["jfif_version"] = divmod(version, 256)
# extract JFIF properties
try:
jfif_unit = i8(s[7])
jfif_density = i16(s, 8), i16(s, 10)
except:
pass
else:
if jfif_unit == 1:
self.info["dpi"] = jfif_density
self.info["jfif_unit"] = jfif_unit
self.info["jfif_density"] = jfif_density
elif marker == 0xFFE1 and s[:5] == b"Exif\0":
# extract Exif information (incomplete)
self.info["exif"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
# extract FlashPix information (incomplete)
self.info["flashpix"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
# Since an ICC profile can be larger than the maximum size of
# a JPEG marker (64K), we need provisions to split it into
# multiple markers. The format defined by the ICC specifies
# one or more APP2 markers containing the following data:
# Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
# Marker sequence number 1, 2, etc (1 byte)
# Number of markers Total of APP2's used (1 byte)
# Profile data (remainder of APP2 data)
# Decoders should use the marker sequence numbers to
# reassemble the profile, rather than assuming that the APP2
# markers appear in the correct sequence.
self.icclist.append(s)
elif marker == 0xFFEE and s[:5] == b"Adobe":
self.info["adobe"] = i16(s, 5)
# extract Adobe custom properties
try:
adobe_transform = i8(s[1])
except:
pass
else:
self.info["adobe_transform"] = adobe_transform
elif marker == 0xFFE2 and s[:4] == b"MPF\0":
# extract MPO information
self.info["mp"] = s[4:]
# offset is current location minus buffer size
# plus constant header size
self.info["mpoffset"] = self.fp.tell() - n + 4
# If DPI isn't in JPEG header, fetch from EXIF
if "dpi" not in self.info and "exif" in self.info:
exif = self._getexif()
try:
resolution_unit = exif[0x0128]
x_resolution = exif[0x011A]
dpi = x_resolution[0] / x_resolution[1]
if resolution_unit == 3: # cm
# 1 dpcm = 2.54 dpi
dpi *= 2.54
self.info["dpi"] = dpi, dpi
except KeyError:
self.info["dpi"] = 72, 72
|
https://github.com/python-pillow/Pillow/issues/2481
|
In [1]: import PIL
In [2]: PIL.PILLOW_VERSION
Out[2]: '4.1.0'
In [3]: from PIL import Image
In [4]: Image.open('tests/sample/pictures/dir2/exo20101028-b-full.jpg')
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
<ipython-input-4-8014aa965ecb> in <module>()
----> 1 Image.open('tests/sample/pictures/dir2/exo20101028-b-full.jpg')
/home/simon/miniconda3/envs/sigal/lib/python3.6/site-packages/PIL/Image.py in open(fp, mode)
2450 fp.close()
2451 raise IOError("cannot identify image file %r"
-> 2452 % (filename if filename else fp))
2453
2454 #
OSError: cannot identify image file 'tests/sample/pictures/dir2/exo20101028-b-full.jpg'
|
OSError
|
def load(self):
"Load image data based on tile list"
pixel = Image.Image.load(self)
if self.tile is None:
raise IOError("cannot load this image")
if not self.tile:
return pixel
self.map = None
use_mmap = self.filename and len(self.tile) == 1
# As of pypy 2.1.0, memory mapping was failing here.
use_mmap = use_mmap and not hasattr(sys, "pypy_version_info")
readonly = 0
# look for read/seek overrides
try:
read = self.load_read
# don't use mmap if there are custom read/seek functions
use_mmap = False
except AttributeError:
read = self.fp.read
try:
seek = self.load_seek
use_mmap = False
except AttributeError:
seek = self.fp.seek
if use_mmap:
# try memory mapping
decoder_name, extents, offset, args = self.tile[0]
if (
decoder_name == "raw"
and len(args) >= 3
and args[0] == self.mode
and args[0] in Image._MAPMODES
):
try:
if hasattr(Image.core, "map"):
# use built-in mapper WIN32 only
self.map = Image.core.map(self.filename)
self.map.seek(offset)
self.im = self.map.readimage(self.mode, self.size, args[1], args[2])
else:
# use mmap, if possible
import mmap
fp = open(self.filename, "r")
size = os.path.getsize(self.filename)
self.map = mmap.mmap(fp.fileno(), size, access=mmap.ACCESS_READ)
self.im = Image.core.map_buffer(
self.map, self.size, decoder_name, extents, offset, args
)
readonly = 1
# After trashing self.im, we might need to reload the palette data.
if self.palette:
self.palette.dirty = 1
except (AttributeError, EnvironmentError, ImportError):
self.map = None
self.load_prepare()
if not self.map:
# sort tiles in file order
self.tile.sort(key=_tilesort)
try:
# FIXME: This is a hack to handle TIFF's JpegTables tag.
prefix = self.tile_prefix
except AttributeError:
prefix = b""
for decoder_name, extents, offset, args in self.tile:
decoder = Image._getdecoder(
self.mode, decoder_name, args, self.decoderconfig
)
seek(offset)
try:
decoder.setimage(self.im, extents)
except ValueError:
continue
if decoder.pulls_fd:
decoder.setfd(self.fp)
status, err_code = decoder.decode(b"")
else:
b = prefix
while True:
try:
s = read(self.decodermaxblock)
except (IndexError, struct.error): # truncated png/gif
if LOAD_TRUNCATED_IMAGES:
break
else:
raise IOError("image file is truncated")
if not s and not decoder.handles_eof: # truncated jpeg
self.tile = []
# JpegDecode needs to clean things up here either way
# If we don't destroy the decompressor,
# we have a memory leak.
decoder.cleanup()
if LOAD_TRUNCATED_IMAGES:
break
else:
raise IOError(
"image file is truncated "
"(%d bytes not processed)" % len(b)
)
b = b + s
n, err_code = decoder.decode(b)
if n < 0:
break
b = b[n:]
# Need to cleanup here to prevent leaks in PyPy
decoder.cleanup()
self.tile = []
self.readonly = readonly
self.fp = None # might be shared
if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
# still raised if decoder fails to return anything
raise_ioerror(err_code)
# post processing
if hasattr(self, "tile_post_rotate"):
# FIXME: This is a hack to handle rotated PCD's
self.im = self.im.rotate(self.tile_post_rotate)
self.size = self.im.size
self.load_end()
return Image.Image.load(self)
|
def load(self):
"Load image data based on tile list"
pixel = Image.Image.load(self)
if self.tile is None:
raise IOError("cannot load this image")
if not self.tile:
return pixel
self.map = None
use_mmap = self.filename and len(self.tile) == 1
# As of pypy 2.1.0, memory mapping was failing here.
use_mmap = use_mmap and not hasattr(sys, "pypy_version_info")
readonly = 0
# look for read/seek overrides
try:
read = self.load_read
# don't use mmap if there are custom read/seek functions
use_mmap = False
except AttributeError:
read = self.fp.read
try:
seek = self.load_seek
use_mmap = False
except AttributeError:
seek = self.fp.seek
if use_mmap:
# try memory mapping
d, e, o, a = self.tile[0]
if d == "raw" and a[0] == self.mode and a[0] in Image._MAPMODES:
try:
if hasattr(Image.core, "map"):
# use built-in mapper WIN32 only
self.map = Image.core.map(self.filename)
self.map.seek(o)
self.im = self.map.readimage(self.mode, self.size, a[1], a[2])
else:
# use mmap, if possible
import mmap
fp = open(self.filename, "r")
size = os.path.getsize(self.filename)
self.map = mmap.mmap(fp.fileno(), size, access=mmap.ACCESS_READ)
self.im = Image.core.map_buffer(self.map, self.size, d, e, o, a)
readonly = 1
# After trashing self.im, we might need to reload the palette data.
if self.palette:
self.palette.dirty = 1
except (AttributeError, EnvironmentError, ImportError):
self.map = None
self.load_prepare()
if not self.map:
# sort tiles in file order
self.tile.sort(key=_tilesort)
try:
# FIXME: This is a hack to handle TIFF's JpegTables tag.
prefix = self.tile_prefix
except AttributeError:
prefix = b""
for decoder_name, extents, offset, args in self.tile:
decoder = Image._getdecoder(
self.mode, decoder_name, args, self.decoderconfig
)
seek(offset)
try:
decoder.setimage(self.im, extents)
except ValueError:
continue
if decoder.pulls_fd:
decoder.setfd(self.fp)
status, err_code = decoder.decode(b"")
else:
b = prefix
while True:
try:
s = read(self.decodermaxblock)
except (IndexError, struct.error): # truncated png/gif
if LOAD_TRUNCATED_IMAGES:
break
else:
raise IOError("image file is truncated")
if not s and not decoder.handles_eof: # truncated jpeg
self.tile = []
# JpegDecode needs to clean things up here either way
# If we don't destroy the decompressor,
# we have a memory leak.
decoder.cleanup()
if LOAD_TRUNCATED_IMAGES:
break
else:
raise IOError(
"image file is truncated "
"(%d bytes not processed)" % len(b)
)
b = b + s
n, err_code = decoder.decode(b)
if n < 0:
break
b = b[n:]
# Need to cleanup here to prevent leaks in PyPy
decoder.cleanup()
self.tile = []
self.readonly = readonly
self.fp = None # might be shared
if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
# still raised if decoder fails to return anything
raise_ioerror(err_code)
# post processing
if hasattr(self, "tile_post_rotate"):
# FIXME: This is a hack to handle rotated PCD's
self.im = self.im.rotate(self.tile_post_rotate)
self.size = self.im.size
self.load_end()
return Image.Image.load(self)
|
https://github.com/python-pillow/Pillow/issues/2231
|
from PIL import Image
im = Image.open('sunraster.im1')
print im.format,im.size, im.mode
SUN (640, 400) 1
im.load()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/rh/python27/root/usr/lib/python2.7/site-packages/PIL/ImageFile.py", line 240, in load
raise_ioerror(err_code)
File "/opt/rh/python27/root/usr/lib/python2.7/site-packages/PIL/ImageFile.py", line 59, in raise_ioerror
raise IOError(message + " when reading image file")
IOError: buffer overrun when reading image file
|
IOError
|
def _open(self):
# The Sun Raster file header is 32 bytes in length and has the following format:
# typedef struct _SunRaster
# {
# DWORD MagicNumber; /* Magic (identification) number */
# DWORD Width; /* Width of image in pixels */
# DWORD Height; /* Height of image in pixels */
# DWORD Depth; /* Number of bits per pixel */
# DWORD Length; /* Size of image data in bytes */
# DWORD Type; /* Type of raster file */
# DWORD ColorMapType; /* Type of color map */
# DWORD ColorMapLength; /* Size of the color map in bytes */
# } SUNRASTER;
# HEAD
s = self.fp.read(32)
if i32(s) != 0x59A66A95:
raise SyntaxError("not an SUN raster file")
offset = 32
self.size = i32(s[4:8]), i32(s[8:12])
depth = i32(s[12:16])
data_length = i32(s[16:20]) # unreliable, ignore.
file_type = i32(s[20:24])
palette_type = i32(s[24:28]) # 0: None, 1: RGB, 2: Raw/arbitrary
palette_length = i32(s[28:32])
if depth == 1:
self.mode, rawmode = "1", "1;I"
elif depth == 4:
self.mode, rawmode = "L", "L;4"
elif depth == 8:
self.mode = rawmode = "L"
elif depth == 24:
if file_type == 3:
self.mode, rawmode = "RGB", "RGB"
else:
self.mode, rawmode = "RGB", "BGR"
elif depth == 32:
if file_type == 3:
self.mode, rawmode = "RGB", "RGBX"
else:
self.mode, rawmode = "RGB", "BGRX"
else:
raise SyntaxError("Unsupported Mode/Bit Depth")
if palette_length:
if palette_length > 1024:
raise SyntaxError("Unsupported Color Palette Length")
if palette_type != 1:
raise SyntaxError("Unsupported Palette Type")
offset = offset + palette_length
self.palette = ImagePalette.raw("RGB;L", self.fp.read(palette_length))
if self.mode == "L":
self.mode = "P"
rawmode = rawmode.replace("L", "P")
# 16 bit boundaries on stride
stride = ((self.size[0] * depth + 15) // 16) * 2
# file type: Type is the version (or flavor) of the bitmap
# file. The following values are typically found in the Type
# field:
# 0000h Old
# 0001h Standard
# 0002h Byte-encoded
# 0003h RGB format
# 0004h TIFF format
# 0005h IFF format
# FFFFh Experimental
# Old and standard are the same, except for the length tag.
# byte-encoded is run-length-encoded
# RGB looks similar to standard, but RGB byte order
# TIFF and IFF mean that they were converted from T/IFF
# Experimental means that it's something else.
# (http://www.fileformat.info/format/sunraster/egff.htm)
if file_type in (0, 1, 3, 4, 5):
self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride))]
elif file_type == 2:
self.tile = [("sun_rle", (0, 0) + self.size, offset, rawmode)]
else:
raise SyntaxError("Unsupported Sun Raster file type")
|
def _open(self):
# HEAD
s = self.fp.read(32)
if i32(s) != 0x59A66A95:
raise SyntaxError("not an SUN raster file")
offset = 32
self.size = i32(s[4:8]), i32(s[8:12])
depth = i32(s[12:16])
if depth == 1:
self.mode, rawmode = "1", "1;I"
elif depth == 8:
self.mode = rawmode = "L"
elif depth == 24:
self.mode, rawmode = "RGB", "BGR"
else:
raise SyntaxError("unsupported mode")
compression = i32(s[20:24])
if i32(s[24:28]) != 0:
length = i32(s[28:32])
offset = offset + length
self.palette = ImagePalette.raw("RGB;L", self.fp.read(length))
if self.mode == "L":
self.mode = rawmode = "P"
stride = (((self.size[0] * depth + 7) // 8) + 3) & (~3)
if compression == 1:
self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride))]
elif compression == 2:
self.tile = [("sun_rle", (0, 0) + self.size, offset, rawmode)]
|
https://github.com/python-pillow/Pillow/issues/2231
|
from PIL import Image
im = Image.open('sunraster.im1')
print im.format,im.size, im.mode
SUN (640, 400) 1
im.load()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/rh/python27/root/usr/lib/python2.7/site-packages/PIL/ImageFile.py", line 240, in load
raise_ioerror(err_code)
File "/opt/rh/python27/root/usr/lib/python2.7/site-packages/PIL/ImageFile.py", line 59, in raise_ioerror
raise IOError(message + " when reading image file")
IOError: buffer overrun when reading image file
|
IOError
|
def _bitmap(self, header=0, offset=0):
"""Read relevant info about the BMP"""
read, seek = self.fp.read, self.fp.seek
if header:
seek(header)
file_info = dict()
file_info["header_size"] = i32(
read(4)
) # read bmp header size @offset 14 (this is part of the header size)
file_info["direction"] = -1
# --------------------- If requested, read header at a specific position
header_data = ImageFile._safe_read(
self.fp, file_info["header_size"] - 4
) # read the rest of the bmp header, without its size
# --------------------------------------------------- IBM OS/2 Bitmap v1
# ------ This format has different offsets because of width/height types
if file_info["header_size"] == 12:
file_info["width"] = i16(header_data[0:2])
file_info["height"] = i16(header_data[2:4])
file_info["planes"] = i16(header_data[4:6])
file_info["bits"] = i16(header_data[6:8])
file_info["compression"] = self.RAW
file_info["palette_padding"] = 3
# ---------------------------------------------- Windows Bitmap v2 to v5
elif file_info["header_size"] in (40, 64, 108, 124): # v3, OS/2 v2, v4, v5
if file_info["header_size"] >= 40: # v3 and OS/2
file_info["y_flip"] = i8(header_data[7]) == 0xFF
file_info["direction"] = 1 if file_info["y_flip"] else -1
file_info["width"] = i32(header_data[0:4])
file_info["height"] = (
i32(header_data[4:8])
if not file_info["y_flip"]
else 2**32 - i32(header_data[4:8])
)
file_info["planes"] = i16(header_data[8:10])
file_info["bits"] = i16(header_data[10:12])
file_info["compression"] = i32(header_data[12:16])
file_info["data_size"] = i32(header_data[16:20]) # byte size of pixel data
file_info["pixels_per_meter"] = (
i32(header_data[20:24]),
i32(header_data[24:28]),
)
file_info["colors"] = i32(header_data[28:32])
file_info["palette_padding"] = 4
self.info["dpi"] = tuple(
map(
lambda x: int(math.ceil(x / 39.3701)), file_info["pixels_per_meter"]
)
)
if file_info["compression"] == self.BITFIELDS:
if len(header_data) >= 52:
for idx, mask in enumerate(
["r_mask", "g_mask", "b_mask", "a_mask"]
):
file_info[mask] = i32(header_data[36 + idx * 4 : 40 + idx * 4])
else:
# 40 byte headers only have the three components in the bitfields masks,
# ref: https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx
# See also https://github.com/python-pillow/Pillow/issues/1293
file_info["a_mask"] = 0xFF000000
for mask in ["r_mask", "g_mask", "b_mask"]:
file_info[mask] = i32(read(4))
file_info["rgb_mask"] = (
file_info["r_mask"],
file_info["g_mask"],
file_info["b_mask"],
)
file_info["rgba_mask"] = (
file_info["r_mask"],
file_info["g_mask"],
file_info["b_mask"],
file_info["a_mask"],
)
else:
raise IOError("Unsupported BMP header type (%d)" % file_info["header_size"])
# ------------------ Special case : header is reported 40, which
# ---------------------- is shorter than real size for bpp >= 16
self.size = file_info["width"], file_info["height"]
# -------- If color count was not found in the header, compute from bits
file_info["colors"] = (
file_info["colors"] if file_info.get("colors", 0) else (1 << file_info["bits"])
)
# -------------------------------- Check abnormal values for DOS attacks
if file_info["width"] * file_info["height"] > 2**31:
raise IOError("Unsupported BMP Size: (%dx%d)" % self.size)
# ----------------------- Check bit depth for unusual unsupported values
self.mode, raw_mode = BIT2MODE.get(file_info["bits"], (None, None))
if self.mode is None:
raise IOError("Unsupported BMP pixel depth (%d)" % file_info["bits"])
# ----------------- Process BMP with Bitfields compression (not palette)
if file_info["compression"] == self.BITFIELDS:
SUPPORTED = {
32: [
(0xFF0000, 0xFF00, 0xFF, 0x0),
(0xFF0000, 0xFF00, 0xFF, 0xFF000000),
(0x0, 0x0, 0x0, 0x0),
],
24: [(0xFF0000, 0xFF00, 0xFF)],
16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],
}
MASK_MODES = {
(32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX",
(32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA",
(32, (0x0, 0x0, 0x0, 0x0)): "BGRA",
(24, (0xFF0000, 0xFF00, 0xFF)): "BGR",
(16, (0xF800, 0x7E0, 0x1F)): "BGR;16",
(16, (0x7C00, 0x3E0, 0x1F)): "BGR;15",
}
if file_info["bits"] in SUPPORTED:
if (
file_info["bits"] == 32
and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]]
):
raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])]
self.mode = "RGBA" if raw_mode in ("BGRA",) else self.mode
elif (
file_info["bits"] in (24, 16)
and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]]
):
raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])]
else:
raise IOError("Unsupported BMP bitfields layout")
else:
raise IOError("Unsupported BMP bitfields layout")
elif file_info["compression"] == self.RAW:
if file_info["bits"] == 32 and header == 22: # 32-bit .cur offset
raw_mode, self.mode = "BGRA", "RGBA"
else:
raise IOError("Unsupported BMP compression (%d)" % file_info["compression"])
# ---------------- Once the header is processed, process the palette/LUT
if self.mode == "P": # Paletted for 1, 4 and 8 bit images
# ----------------------------------------------------- 1-bit images
if not (0 < file_info["colors"] <= 65536):
raise IOError("Unsupported BMP Palette size (%d)" % file_info["colors"])
else:
padding = file_info["palette_padding"]
palette = read(padding * file_info["colors"])
greyscale = True
indices = (
(0, 255)
if file_info["colors"] == 2
else list(range(file_info["colors"]))
)
# ------------------ Check if greyscale and ignore palette if so
for ind, val in enumerate(indices):
rgb = palette[ind * padding : ind * padding + 3]
if rgb != o8(val) * 3:
greyscale = False
# -------- If all colors are grey, white or black, ditch palette
if greyscale:
self.mode = "1" if file_info["colors"] == 2 else "L"
raw_mode = self.mode
else:
self.mode = "P"
self.palette = ImagePalette.raw(
"BGRX" if padding == 4 else "BGR", palette
)
# ----------------------------- Finally set the tile data for the plugin
self.info["compression"] = file_info["compression"]
self.tile = [
(
"raw",
(0, 0, file_info["width"], file_info["height"]),
offset or self.fp.tell(),
(
raw_mode,
((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3),
file_info["direction"],
),
)
]
|
def _bitmap(self, header=0, offset=0):
"""Read relevant info about the BMP"""
read, seek = self.fp.read, self.fp.seek
if header:
seek(header)
file_info = dict()
file_info["header_size"] = i32(
read(4)
) # read bmp header size @offset 14 (this is part of the header size)
file_info["direction"] = -1
# --------------------- If requested, read header at a specific position
header_data = ImageFile._safe_read(
self.fp, file_info["header_size"] - 4
) # read the rest of the bmp header, without its size
# --------------------------------------------------- IBM OS/2 Bitmap v1
# ------ This format has different offsets because of width/height types
if file_info["header_size"] == 12:
file_info["width"] = i16(header_data[0:2])
file_info["height"] = i16(header_data[2:4])
file_info["planes"] = i16(header_data[4:6])
file_info["bits"] = i16(header_data[6:8])
file_info["compression"] = self.RAW
file_info["palette_padding"] = 3
# ---------------------------------------------- Windows Bitmap v2 to v5
elif file_info["header_size"] in (40, 64, 108, 124): # v3, OS/2 v2, v4, v5
if file_info["header_size"] >= 40: # v3 and OS/2
file_info["y_flip"] = i8(header_data[7]) == 0xFF
file_info["direction"] = 1 if file_info["y_flip"] else -1
file_info["width"] = i32(header_data[0:4])
file_info["height"] = (
i32(header_data[4:8])
if not file_info["y_flip"]
else 2**32 - i32(header_data[4:8])
)
file_info["planes"] = i16(header_data[8:10])
file_info["bits"] = i16(header_data[10:12])
file_info["compression"] = i32(header_data[12:16])
file_info["data_size"] = i32(header_data[16:20]) # byte size of pixel data
file_info["pixels_per_meter"] = (
i32(header_data[20:24]),
i32(header_data[24:28]),
)
file_info["colors"] = i32(header_data[28:32])
file_info["palette_padding"] = 4
self.info["dpi"] = tuple(
map(
lambda x: int(math.ceil(x / 39.3701)), file_info["pixels_per_meter"]
)
)
if file_info["compression"] == self.BITFIELDS:
if len(header_data) >= 52:
for idx, mask in enumerate(
["r_mask", "g_mask", "b_mask", "a_mask"]
):
file_info[mask] = i32(header_data[36 + idx * 4 : 40 + idx * 4])
else:
for mask in ["r_mask", "g_mask", "b_mask", "a_mask"]:
file_info[mask] = i32(read(4))
file_info["rgb_mask"] = (
file_info["r_mask"],
file_info["g_mask"],
file_info["b_mask"],
)
file_info["rgba_mask"] = (
file_info["r_mask"],
file_info["g_mask"],
file_info["b_mask"],
file_info["a_mask"],
)
else:
raise IOError("Unsupported BMP header type (%d)" % file_info["header_size"])
# ------------------ Special case : header is reported 40, which
# ---------------------- is shorter than real size for bpp >= 16
self.size = file_info["width"], file_info["height"]
# -------- If color count was not found in the header, compute from bits
file_info["colors"] = (
file_info["colors"] if file_info.get("colors", 0) else (1 << file_info["bits"])
)
# -------------------------------- Check abnormal values for DOS attacks
if file_info["width"] * file_info["height"] > 2**31:
raise IOError("Unsupported BMP Size: (%dx%d)" % self.size)
# ----------------------- Check bit depth for unusual unsupported values
self.mode, raw_mode = BIT2MODE.get(file_info["bits"], (None, None))
if self.mode is None:
raise IOError("Unsupported BMP pixel depth (%d)" % file_info["bits"])
# ----------------- Process BMP with Bitfields compression (not palette)
if file_info["compression"] == self.BITFIELDS:
SUPPORTED = {
32: [
(0xFF0000, 0xFF00, 0xFF, 0x0),
(0xFF0000, 0xFF00, 0xFF, 0xFF000000),
(0x0, 0x0, 0x0, 0x0),
],
24: [(0xFF0000, 0xFF00, 0xFF)],
16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)],
}
MASK_MODES = {
(32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX",
(32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA",
(32, (0x0, 0x0, 0x0, 0x0)): "BGRA",
(24, (0xFF0000, 0xFF00, 0xFF)): "BGR",
(16, (0xF800, 0x7E0, 0x1F)): "BGR;16",
(16, (0x7C00, 0x3E0, 0x1F)): "BGR;15",
}
if file_info["bits"] in SUPPORTED:
if (
file_info["bits"] == 32
and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]]
):
raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])]
self.mode = "RGBA" if raw_mode in ("BGRA",) else self.mode
elif (
file_info["bits"] in (24, 16)
and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]]
):
raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])]
else:
raise IOError("Unsupported BMP bitfields layout")
else:
raise IOError("Unsupported BMP bitfields layout")
elif file_info["compression"] == self.RAW:
if file_info["bits"] == 32 and header == 22: # 32-bit .cur offset
raw_mode, self.mode = "BGRA", "RGBA"
else:
raise IOError("Unsupported BMP compression (%d)" % file_info["compression"])
# ---------------- Once the header is processed, process the palette/LUT
if self.mode == "P": # Paletted for 1, 4 and 8 bit images
# ----------------------------------------------------- 1-bit images
if not (0 < file_info["colors"] <= 65536):
raise IOError("Unsupported BMP Palette size (%d)" % file_info["colors"])
else:
padding = file_info["palette_padding"]
palette = read(padding * file_info["colors"])
greyscale = True
indices = (
(0, 255)
if file_info["colors"] == 2
else list(range(file_info["colors"]))
)
# ------------------ Check if greyscale and ignore palette if so
for ind, val in enumerate(indices):
rgb = palette[ind * padding : ind * padding + 3]
if rgb != o8(val) * 3:
greyscale = False
# -------- If all colors are grey, white or black, ditch palette
if greyscale:
self.mode = "1" if file_info["colors"] == 2 else "L"
raw_mode = self.mode
else:
self.mode = "P"
self.palette = ImagePalette.raw(
"BGRX" if padding == 4 else "BGR", palette
)
# ----------------------------- Finally set the tile data for the plugin
self.info["compression"] = file_info["compression"]
self.tile = [
(
"raw",
(0, 0, file_info["width"], file_info["height"]),
offset or self.fp.tell(),
(
raw_mode,
((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3),
file_info["direction"],
),
)
]
|
https://github.com/python-pillow/Pillow/issues/1293
|
ImageGrab.grabclipboard()
---------------------------------------------------------------------------
IOError Traceback (most recent call last)
<ipython-input-2-c8274e888e6c> in <module>()
1 from PIL import ImageGrab
2
----> 3 ImageGrab.grabclipboard()
C:\Anaconda\lib\site-packages\PIL\ImageGrab.pyc in grabclipboard()
49 from PIL import BmpImagePlugin
50 import io
---> 51 return BmpImagePlugin.DibImageFile(io.BytesIO(data))
52 return data
C:\Anaconda\lib\site-packages\PIL\ImageFile.pyc in __init__(self, fp, filename)
95
96 try:
---> 97 self._open()
98 except IndexError as v: # end of data
99 if Image.DEBUG > 1:
C:\Anaconda\lib\site-packages\PIL\BmpImagePlugin.pyc in _open(self)
204
205 def _open(self):
--> 206 self._bitmap()
207
208 #
C:\Anaconda\lib\site-packages\PIL\BmpImagePlugin.pyc in _bitmap(self, header, offset)
145 raw_mode = MASK_MODES[(file_info['bits'], file_info['rgb_mask'])]
146 else:
--> 147 raise IOError("Unsupported BMP bitfields layout")
148 else:
149 raise IOError("Unsupported BMP bitfields layout")
IOError: Unsupported BMP bitfields layout
|
IOError
|
def _setitem(self, tag, value, legacy_api):
basetypes = (Number, bytes, str)
if bytes is str:
basetypes += (unicode,)
info = TiffTags.lookup(tag)
values = [value] if isinstance(value, basetypes) else value
if tag not in self.tagtype:
if info.type:
self.tagtype[tag] = info.type
else:
self.tagtype[tag] = 7
if all(isinstance(v, IFDRational) for v in values):
self.tagtype[tag] = 5
elif all(isinstance(v, int) for v in values):
if all(v < 2**16 for v in values):
self.tagtype[tag] = 3
else:
self.tagtype[tag] = 4
elif all(isinstance(v, float) for v in values):
self.tagtype[tag] = 12
else:
if bytes is str:
# Never treat data as binary by default on Python 2.
self.tagtype[tag] = 2
else:
if all(isinstance(v, str) for v in values):
self.tagtype[tag] = 2
if self.tagtype[tag] == 7 and bytes is not str:
values = [value.encode("ascii", "replace") if isinstance(value, str) else value]
values = tuple(info.cvt_enum(value) for value in values)
dest = self._tags_v1 if legacy_api else self._tags_v2
if info.length == 1:
if legacy_api and self.tagtype[tag] in [5, 10]:
values = (values,)
(dest[tag],) = values
else:
dest[tag] = values
|
def _setitem(self, tag, value, legacy_api):
basetypes = (Number, bytes, str)
if bytes is str:
basetypes += (unicode,)
info = TiffTags.lookup(tag)
values = [value] if isinstance(value, basetypes) else value
if tag not in self.tagtype:
if info.type:
self.tagtype[tag] = info.type
else:
self.tagtype[tag] = 7
if all(isinstance(v, IFDRational) for v in values):
self.tagtype[tag] = 5
elif all(isinstance(v, int) for v in values):
if all(v < 2**16 for v in values):
self.tagtype[tag] = 3
else:
self.tagtype[tag] = 4
elif all(isinstance(v, float) for v in values):
self.tagtype[tag] = 12
else:
if bytes is str:
# Never treat data as binary by default on Python 2.
self.tagtype[tag] = 2
else:
if all(isinstance(v, str) for v in values):
self.tagtype[tag] = 2
if self.tagtype[tag] == 7 and bytes is not str:
values = [
value.encode("ascii", "replace") if isinstance(value, str) else value
for value in values
]
values = tuple(info.cvt_enum(value) for value in values)
dest = self._tags_v1 if legacy_api else self._tags_v2
if info.length == 1:
if legacy_api and self.tagtype[tag] in [5, 10]:
values = (values,)
(dest[tag],) = values
else:
dest[tag] = values
|
https://github.com/python-pillow/Pillow/issues/1462
|
Traceback (most recent call last):
File "pyi_lib_PIL_img_conversion.py", line 17, in <module>
im.save(os.path.join(basedir, "tinysample.png"))
File "c:\Users\Rio\Documents\src\pyinst1328\.env27\lib\site-packages\PIL\Image.py", line 1665, in save
save_handler(self, fp, filename)
File "c:\Users\Rio\Documents\src\pyinst1328\.env27\lib\site-packages\PIL\PngImagePlugin.py", line 757, in _save
data = name + b"\0\0" + zlib.compress(im.info["icc_profile"])
TypeError: must be string or read-only buffer, not tuple
|
TypeError
|
def load_byte(self, data, legacy_api=True):
return data
|
def load_byte(self, data, legacy_api=True):
return data if legacy_api else tuple(map(ord, data) if bytes is str else data)
|
https://github.com/python-pillow/Pillow/issues/1462
|
Traceback (most recent call last):
File "pyi_lib_PIL_img_conversion.py", line 17, in <module>
im.save(os.path.join(basedir, "tinysample.png"))
File "c:\Users\Rio\Documents\src\pyinst1328\.env27\lib\site-packages\PIL\Image.py", line 1665, in save
save_handler(self, fp, filename)
File "c:\Users\Rio\Documents\src\pyinst1328\.env27\lib\site-packages\PIL\PngImagePlugin.py", line 757, in _save
data = name + b"\0\0" + zlib.compress(im.info["icc_profile"])
TypeError: must be string or read-only buffer, not tuple
|
TypeError
|
def get_sampling(im):
# There's no subsampling when image have only 1 layer
# (grayscale images) or when they are CMYK (4 layers),
# so set subsampling to default value.
#
# NOTE: currently Pillow can't encode JPEG to YCCK format.
# If YCCK support is added in the future, subsampling code will have
# to be updated (here and in JpegEncode.c) to deal with 4 layers.
if not hasattr(im, "layers") or im.layers in (1, 4):
return -1
sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
return samplings.get(sampling, -1)
|
def get_sampling(im):
sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
return samplings.get(sampling, -1)
|
https://github.com/python-pillow/Pillow/issues/857
|
$ ipython
Python 2.7.6 (default, Apr 3 2014, 19:58:06)
Type "copyright", "credits" or "license" for more information.
IPython 2.1.0 -- An enhanced Interactive Python.
? -> Introduction and overview of IPython's features.
%quickref -> Quick reference.
help -> Python's own help system.
object? -> Details about 'object', use 'object??' for extra details.
In [1]: import PIL
In [2]: PIL.PILLOW_VERSION, PIL.VERSION
Out[2]: ('2.5.1', '1.1.7')
In [3]: from PIL import Image
In [4]: ll /tmp/72.jpg
-rw-r--r-- 1 ywang wheel 43990 Mar 13 12:37 /tmp/72.jpg
In [5]: im = Image.open('/tmp/72.jpg')
In [6]: im.layer
Out[6]: [('\x01', 1, 1, 0)]
In [7]: im
Out[7]: <PIL.JpegImagePlugin.JpegImageFile image mode=L size=600x339 at 0x10AE8F1B8>
In [8]: im.format
Out[8]: 'JPEG'
In [9]: im.save('/tmp/72-keep.jpg', 'JPEG', quality='keep')
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-9-dfd9f8d20942> in <module>()
----> 1 im.save('/tmp/72-keep.jpg', 'JPEG', quality='keep')
/Users/ywang/.envs/dbi/lib/python2.7/site-packages/PIL/Image.pyc in save(self, fp, format, **params)
1661
1662 try:
-> 1663 save_handler(self, fp, filename)
1664 finally:
1665 # do what we can to clean up
/Users/ywang/.envs/dbi/lib/python2.7/site-packages/PIL/JpegImagePlugin.pyc in _save(im, fp, filename)
512 if im.format != "JPEG":
513 raise ValueError("Cannot use 'keep' when original image is not a JPEG")
--> 514 subsampling = get_sampling(im)
515
516 def validate_qtables(qtables):
/Users/ywang/.envs/dbi/lib/python2.7/site-packages/PIL/JpegImagePlugin.pyc in get_sampling(im)
467
468 def get_sampling(im):
--> 469 sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
470 return samplings.get(sampling, -1)
471
IndexError: list index out of range
In [10]: ll /tmp/72-keep.jpg
-rw-r--r-- 1 ywang wheel 0 Aug 19 12:18 /tmp/72-keep.jpg
In [11]: im.save('/tmp/72-q50.jpg', 'JPEG', quality=50)
In [12]: ll /tmp/72-q50.jpg
-rw-r--r-- 1 ywang wheel 33036 Aug 19 12:22 /tmp/72-q50.jpg
In [13]: im.save('/tmp/72-no-quality-specified.jpg', 'JPEG')
In [14]: ll /tmp/72-no-quality-specified.jpg
-rw-r--r-- 1 ywang wheel 45262 Aug 19 12:26 /tmp/72-no-quality-specified.jpg
In [15]: exit()
$ jhead /tmp/72*.jpg
Not JPEG: /tmp/72-keep.jpg
File name : /tmp/72-no-quality-specified.jpg
File size : 45262 bytes
File date : 2014:08:19 12:26:26
Resolution : 500 x 282
Color/bw : Black and white
JPEG Quality : 75
File name : /tmp/72-q50.jpg
File size : 33036 bytes
File date : 2014:08:19 12:22:28
Resolution : 500 x 282
Color/bw : Black and white
JPEG Quality : 50
File name : /tmp/72.jpg
File size : 43990 bytes
File date : 2013:03:13 12:37:40
Resolution : 600 x 339
Color/bw : Black and white
JPEG Quality : 50
|
IndexError
|
def openid():
oidc_configuration, jwt_key_set = get_oidc_configuration(current_app)
token_endpoint = oidc_configuration["token_endpoint"]
userinfo_endpoint = oidc_configuration["userinfo_endpoint"]
data = {
"grant_type": "authorization_code",
"code": request.json["code"],
"redirect_uri": request.json["redirectUri"],
"client_id": request.json["clientId"],
"client_secret": current_app.config["OAUTH2_CLIENT_SECRET"],
}
r = requests.post(token_endpoint, data)
token = r.json()
if "error" in token:
error_text = token.get("error_description") or token["error"]
raise ApiError(error_text)
try:
if current_app.config["OIDC_VERIFY_TOKEN"]:
jwt_header = jwt.get_unverified_header(token["id_token"])
public_key = jwt_key_set[jwt_header["kid"]]
id_token = jwt.decode(
token["id_token"], key=public_key, algorithms=jwt_header["alg"]
)
else:
id_token = jwt.decode(token["id_token"], verify=False)
except Exception:
current_app.logger.warning("No ID token in OpenID Connect token response.")
id_token = {}
try:
headers = {
"Authorization": "{} {}".format(
token.get("token_type", "Bearer"), token["access_token"]
)
}
r = requests.get(userinfo_endpoint, headers=headers)
userinfo = r.json()
except Exception:
raise ApiError("No access token in OpenID Connect token response.")
subject = userinfo["sub"]
name = userinfo.get("name") or id_token.get("name")
username = userinfo.get("preferred_username") or id_token.get("preferred_username")
nickname = userinfo.get("nickname") or id_token.get("nickname")
email = userinfo.get("email") or id_token.get("email")
email_verified = userinfo.get(
"email_verified", id_token.get("email_verified", bool(email))
)
email_verified = (
True if email_verified == "true" else email_verified
) # Cognito returns string boolean
picture = userinfo.get("picture") or id_token.get("picture")
role_claim = current_app.config["OIDC_ROLE_CLAIM"]
group_claim = current_app.config["OIDC_GROUP_CLAIM"]
custom_claims = {
role_claim: userinfo.get(role_claim) or id_token.get(role_claim, []),
group_claim: userinfo.get(group_claim) or id_token.get(group_claim, []),
}
login = username or nickname or email
if not login:
raise ApiError(
"Must support one of the following OpenID claims: 'preferred_username', 'nickname' or 'email'",
400,
)
if current_app.config["OIDC_LINK_USER_EMAIL"] and email and email_verified:
user = User.find_by_email(email=email)
else:
user = User.find_by_id(id=subject)
if not user:
user = User(
id=subject,
name=name,
login=login,
password="",
email=email,
roles=current_app.config["USER_ROLES"],
text="",
email_verified=email_verified,
)
user.create()
else:
user.update(login=login, email=email, email_verified=email_verified)
roles = custom_claims[role_claim] + user.roles
groups = custom_claims[group_claim]
if user.id != subject:
custom_claims["oid"] = (
user.id
) # if subject differs store the original subject as "oid" claim
if user.status != "active":
raise ApiError("User {} is not active".format(login), 403)
if not_authorized("ALLOWED_OIDC_ROLES", roles) or not_authorized(
"ALLOWED_EMAIL_DOMAINS", groups=[user.domain]
):
raise ApiError("User {} is not authorized".format(login), 403)
user.update_last_login()
scopes = Permission.lookup(login, roles=roles)
customers = get_customers(login, groups=[user.domain] + groups)
auth_audit_trail.send(
current_app._get_current_object(),
event="openid-login",
message="user login via OpenID Connect",
user=login,
customers=customers,
scopes=scopes,
**custom_claims,
resource_id=subject,
type="user",
request=request,
)
token = create_token(
user_id=subject,
name=name,
login=login,
provider=current_app.config["AUTH_PROVIDER"],
customers=customers,
scopes=scopes,
**custom_claims,
email=email,
email_verified=email_verified,
picture=picture,
)
return jsonify(token=token.tokenize)
|
def openid():
oidc_configuration, jwt_key_set = get_oidc_configuration(current_app)
token_endpoint = oidc_configuration["token_endpoint"]
userinfo_endpoint = oidc_configuration["userinfo_endpoint"]
data = {
"grant_type": "authorization_code",
"code": request.json["code"],
"redirect_uri": request.json["redirectUri"],
"client_id": request.json["clientId"],
"client_secret": current_app.config["OAUTH2_CLIENT_SECRET"],
}
r = requests.post(token_endpoint, data)
token = r.json()
if "error" in token:
error_text = token.get("error_description") or token["error"]
raise ApiError(error_text)
try:
if current_app.config["OIDC_VERIFY_TOKEN"]:
jwt_header = jwt.get_unverified_header(token["id_token"])
public_key = jwt_key_set[jwt_header["kid"]]
id_token = jwt.decode(
token["id_token"], key=public_key, algorithms=jwt_header["alg"]
)
else:
id_token = jwt.decode(token["id_token"], verify=False)
except Exception:
current_app.logger.warning("No ID token in OpenID Connect token response.")
id_token = {}
try:
headers = {
"Authorization": "{} {}".format(
token.get("token_type", "Bearer"), token["access_token"]
)
}
r = requests.get(userinfo_endpoint, headers=headers)
userinfo = r.json()
except Exception:
raise ApiError("No access token in OpenID Connect token response.")
subject = userinfo["sub"]
name = userinfo.get("name") or id_token.get("name")
username = userinfo.get("preferred_username") or id_token.get("preferred_username")
nickname = userinfo.get("nickname") or id_token.get("nickname")
email = userinfo.get("email") or id_token.get("email")
email_verified = userinfo.get(
"email_verified", id_token.get("email_verified", bool(email))
)
email_verified = (
True if email_verified == "true" else email_verified
) # Cognito returns string boolean
picture = userinfo.get("picture") or id_token.get("picture")
role_claim = current_app.config["OIDC_ROLE_CLAIM"]
group_claim = current_app.config["OIDC_GROUP_CLAIM"]
custom_claims = {
role_claim: userinfo.get(role_claim) or id_token.get(role_claim, []),
group_claim: userinfo.get(group_claim) or id_token.get(group_claim, []),
}
login = username or nickname or email
if not login:
raise ApiError(
"Must support one of the following OpenID claims: 'preferred_username', 'nickname' or 'email'",
400,
)
user = User.find_by_id(id=subject)
if not user:
user = User(
id=subject,
name=name,
login=login,
password="",
email=email,
roles=current_app.config["USER_ROLES"],
text="",
email_verified=email_verified,
)
user.create()
else:
user.update(login=login, email=email)
roles = custom_claims[role_claim] + user.roles
groups = custom_claims[group_claim]
if user.status != "active":
raise ApiError("User {} is not active".format(login), 403)
if not_authorized("ALLOWED_OIDC_ROLES", roles) or not_authorized(
"ALLOWED_EMAIL_DOMAINS", groups=[user.domain]
):
raise ApiError("User {} is not authorized".format(login), 403)
user.update_last_login()
scopes = Permission.lookup(login, roles=roles)
customers = get_customers(login, groups=[user.domain] + groups)
auth_audit_trail.send(
current_app._get_current_object(),
event="openid-login",
message="user login via OpenID Connect",
user=login,
customers=customers,
scopes=scopes,
**custom_claims,
resource_id=subject,
type="user",
request=request,
)
token = create_token(
user_id=subject,
name=name,
login=login,
provider=current_app.config["AUTH_PROVIDER"],
customers=customers,
scopes=scopes,
**custom_claims,
email=email,
email_verified=email_verified,
picture=picture,
)
return jsonify(token=token.tokenize)
|
https://github.com/alerta/alerta/issues/1336
|
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,542 alerta.app[28919]: [ERROR] duplicate key value violates unique constraint "users_email_key"
DETAIL: Key (email)=(xxxxxx@xxxxx) already exists.
request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=80.120.197.114
Traceback (most recent call last):
File "/opt/alerta/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/alerta/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/alerta/lib/python3.7/site-packages/flask_cors/decorator.py", line 128, in wrapped_function
resp = make_response(f(*args, **kwargs))
File "/opt/alerta/lib/python3.7/site-packages/alerta/auth/oidc.py", line 135, in openid
user.create()
File "/opt/alerta/lib/python3.7/site-packages/alerta/models/user.py", line 140, in create
return User.from_db(db.create_user(self))
File "/opt/alerta/lib/python3.7/site-packages/alerta/database/backends/postgres/base.py", line 910, in create_user
return self._insert(insert, vars(user))
File "/opt/alerta/lib/python3.7/site-packages/alerta/database/backends/postgres/base.py", line 1389, in _insert
cursor.execute(query, vars)
File "/opt/alerta/lib/python3.7/site-packages/psycopg2/extras.py", line 327, in execute
return super(NamedTupleCursor, self).execute(query, vars)
psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "users_email_key"
DETAIL: Key (email)=(xxxxxx@xxxxxxxx) already exists.
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,541 alerta.app[28919]: [DEBUG] ****************************************
INSERT INTO users (id, name, login, password, email, status, roles, attributes,
create_time, last_login, text, update_time, email_verified)
VALUES ('Cil1aWQ9c2hlZGVuaWdnLG91PVBlb3BsZSxkYz1pbnZlbml1bSxkYz1pbxIEbGRhcA', 'xxxxxxx', 'xxxxxxxxx', '', 'xxxxxxxxxx', 'active', ARRAY['admin']
NULL, '', '2020-10-15T10:45:04.541Z', true)
RETURNING *
**************************************** request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=xxxxx
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,540 alerta.app[28919]: [DEBUG] ****************************************
SELECT * FROM users WHERE id='Cil1aWQ9c2hlZGVuaWdnLG91PVBlb3BsZSxkYz1pbnZlbml1bSxkYz1pbxIEbGRhcA'
**************************************** request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=xxxxx
|
psycopg2.errors.UniqueViolation
|
def __init__(
self,
iss: str,
typ: str,
sub: str,
aud: str,
exp: dt,
nbf: dt,
iat: dt,
jti: str = None,
**kwargs,
) -> None:
self.issuer = iss
self.type = typ
self.subject = sub
self.audience = aud
self.expiration = exp
self.not_before = nbf
self.issued_at = iat
self.jwt_id = jti
self.name = kwargs.get("name")
self.preferred_username = kwargs.get("preferred_username")
self.email = kwargs.get("email")
self.provider = kwargs.get("provider")
self.orgs = kwargs.get("orgs", list())
self.groups = kwargs.get("groups", list())
self.roles = kwargs.get("roles", list())
self.scopes = kwargs.get("scopes", list())
self.email_verified = kwargs.get("email_verified")
self.picture = kwargs.get("picture")
self.customers = kwargs.get("customers")
self.oid = kwargs.get("oid")
|
def __init__(
self,
iss: str,
typ: str,
sub: str,
aud: str,
exp: dt,
nbf: dt,
iat: dt,
jti: str = None,
**kwargs,
) -> None:
self.issuer = iss
self.type = typ
self.subject = sub
self.audience = aud
self.expiration = exp
self.not_before = nbf
self.issued_at = iat
self.jwt_id = jti
self.name = kwargs.get("name")
self.preferred_username = kwargs.get("preferred_username")
self.email = kwargs.get("email")
self.provider = kwargs.get("provider")
self.orgs = kwargs.get("orgs", list())
self.groups = kwargs.get("groups", list())
self.roles = kwargs.get("roles", list())
self.scopes = kwargs.get("scopes", list())
self.email_verified = kwargs.get("email_verified")
self.picture = kwargs.get("picture")
self.customers = kwargs.get("customers")
|
https://github.com/alerta/alerta/issues/1336
|
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,542 alerta.app[28919]: [ERROR] duplicate key value violates unique constraint "users_email_key"
DETAIL: Key (email)=(xxxxxx@xxxxx) already exists.
request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=80.120.197.114
Traceback (most recent call last):
File "/opt/alerta/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/alerta/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/alerta/lib/python3.7/site-packages/flask_cors/decorator.py", line 128, in wrapped_function
resp = make_response(f(*args, **kwargs))
File "/opt/alerta/lib/python3.7/site-packages/alerta/auth/oidc.py", line 135, in openid
user.create()
File "/opt/alerta/lib/python3.7/site-packages/alerta/models/user.py", line 140, in create
return User.from_db(db.create_user(self))
File "/opt/alerta/lib/python3.7/site-packages/alerta/database/backends/postgres/base.py", line 910, in create_user
return self._insert(insert, vars(user))
File "/opt/alerta/lib/python3.7/site-packages/alerta/database/backends/postgres/base.py", line 1389, in _insert
cursor.execute(query, vars)
File "/opt/alerta/lib/python3.7/site-packages/psycopg2/extras.py", line 327, in execute
return super(NamedTupleCursor, self).execute(query, vars)
psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "users_email_key"
DETAIL: Key (email)=(xxxxxx@xxxxxxxx) already exists.
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,541 alerta.app[28919]: [DEBUG] ****************************************
INSERT INTO users (id, name, login, password, email, status, roles, attributes,
create_time, last_login, text, update_time, email_verified)
VALUES ('Cil1aWQ9c2hlZGVuaWdnLG91PVBlb3BsZSxkYz1pbnZlbml1bSxkYz1pbxIEbGRhcA', 'xxxxxxx', 'xxxxxxxxx', '', 'xxxxxxxxxx', 'active', ARRAY['admin']
NULL, '', '2020-10-15T10:45:04.541Z', true)
RETURNING *
**************************************** request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=xxxxx
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,540 alerta.app[28919]: [DEBUG] ****************************************
SELECT * FROM users WHERE id='Cil1aWQ9c2hlZGVuaWdnLG91PVBlb3BsZSxkYz1pbnZlbml1bSxkYz1pbxIEbGRhcA'
**************************************** request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=xxxxx
|
psycopg2.errors.UniqueViolation
|
def parse(
cls, token: str, key: str = None, verify: bool = True, algorithm: str = "HS256"
) -> "Jwt":
try:
json = jwt.decode(
token,
key=key or current_app.config["SECRET_KEY"],
verify=verify,
algorithms=algorithm,
audience=current_app.config["OAUTH2_CLIENT_ID"]
or current_app.config["SAML2_ENTITY_ID"]
or absolute_url(),
)
except (DecodeError, ExpiredSignature, InvalidAudience):
raise
return Jwt(
iss=json.get("iss", None),
typ=json.get("typ", None),
sub=json.get("sub", None),
aud=json.get("aud", None),
exp=json.get("exp", None),
nbf=json.get("nbf", None),
iat=json.get("iat", None),
jti=json.get("jti", None),
name=json.get("name", None),
preferred_username=json.get("preferred_username", None),
email=json.get("email", None),
provider=json.get("provider", None),
orgs=json.get("orgs", list()),
groups=json.get("groups", list()),
roles=json.get("roles", list()),
scopes=json.get("scope", "").split(
" "
), # eg. scope='read write' => scopes=['read', 'write']
email_verified=json.get("email_verified", None),
picture=json.get("picture", None),
customers=[json["customer"]]
if "customer" in json
else json.get("customers", list()),
oid=json.get("oid"),
)
|
def parse(
cls, token: str, key: str = None, verify: bool = True, algorithm: str = "HS256"
) -> "Jwt":
try:
json = jwt.decode(
token,
key=key or current_app.config["SECRET_KEY"],
verify=verify,
algorithms=algorithm,
audience=current_app.config["OAUTH2_CLIENT_ID"]
or current_app.config["SAML2_ENTITY_ID"]
or absolute_url(),
)
except (DecodeError, ExpiredSignature, InvalidAudience):
raise
return Jwt(
iss=json.get("iss", None),
typ=json.get("typ", None),
sub=json.get("sub", None),
aud=json.get("aud", None),
exp=json.get("exp", None),
nbf=json.get("nbf", None),
iat=json.get("iat", None),
jti=json.get("jti", None),
name=json.get("name", None),
preferred_username=json.get("preferred_username", None),
email=json.get("email", None),
provider=json.get("provider", None),
orgs=json.get("orgs", list()),
groups=json.get("groups", list()),
roles=json.get("roles", list()),
scopes=json.get("scope", "").split(
" "
), # eg. scope='read write' => scopes=['read', 'write']
email_verified=json.get("email_verified", None),
picture=json.get("picture", None),
customers=[json["customer"]]
if "customer" in json
else json.get("customers", list()),
)
|
https://github.com/alerta/alerta/issues/1336
|
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,542 alerta.app[28919]: [ERROR] duplicate key value violates unique constraint "users_email_key"
DETAIL: Key (email)=(xxxxxx@xxxxx) already exists.
request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=80.120.197.114
Traceback (most recent call last):
File "/opt/alerta/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/alerta/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/alerta/lib/python3.7/site-packages/flask_cors/decorator.py", line 128, in wrapped_function
resp = make_response(f(*args, **kwargs))
File "/opt/alerta/lib/python3.7/site-packages/alerta/auth/oidc.py", line 135, in openid
user.create()
File "/opt/alerta/lib/python3.7/site-packages/alerta/models/user.py", line 140, in create
return User.from_db(db.create_user(self))
File "/opt/alerta/lib/python3.7/site-packages/alerta/database/backends/postgres/base.py", line 910, in create_user
return self._insert(insert, vars(user))
File "/opt/alerta/lib/python3.7/site-packages/alerta/database/backends/postgres/base.py", line 1389, in _insert
cursor.execute(query, vars)
File "/opt/alerta/lib/python3.7/site-packages/psycopg2/extras.py", line 327, in execute
return super(NamedTupleCursor, self).execute(query, vars)
psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "users_email_key"
DETAIL: Key (email)=(xxxxxx@xxxxxxxx) already exists.
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,541 alerta.app[28919]: [DEBUG] ****************************************
INSERT INTO users (id, name, login, password, email, status, roles, attributes,
create_time, last_login, text, update_time, email_verified)
VALUES ('Cil1aWQ9c2hlZGVuaWdnLG91PVBlb3BsZSxkYz1pbnZlbml1bSxkYz1pbxIEbGRhcA', 'xxxxxxx', 'xxxxxxxxx', '', 'xxxxxxxxxx', 'active', ARRAY['admin']
NULL, '', '2020-10-15T10:45:04.541Z', true)
RETURNING *
**************************************** request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=xxxxx
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,540 alerta.app[28919]: [DEBUG] ****************************************
SELECT * FROM users WHERE id='Cil1aWQ9c2hlZGVuaWdnLG91PVBlb3BsZSxkYz1pbnZlbml1bSxkYz1pbxIEbGRhcA'
**************************************** request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=xxxxx
|
psycopg2.errors.UniqueViolation
|
def serialize(self) -> Dict[str, Any]:
data = {
"iss": self.issuer,
"typ": self.type,
"sub": self.subject,
"aud": self.audience,
"exp": self.expiration,
"nbf": self.not_before,
"iat": self.issued_at,
"jti": self.jwt_id,
}
if self.name:
data["name"] = self.name
if self.preferred_username:
data["preferred_username"] = self.preferred_username
if self.email:
data["email"] = self.email
if self.provider:
data["provider"] = self.provider
if self.orgs:
data["orgs"] = self.orgs
if self.groups:
data["groups"] = self.groups
if self.roles:
data["roles"] = self.roles
if self.scopes:
data["scope"] = " ".join(self.scopes)
if self.email_verified is not None:
data["email_verified"] = self.email_verified
if self.picture is not None:
data["picture"] = self.picture
if current_app.config["CUSTOMER_VIEWS"]:
data["customers"] = self.customers
if self.oid:
data["oid"] = self.oid
return data
|
def serialize(self) -> Dict[str, Any]:
data = {
"iss": self.issuer,
"typ": self.type,
"sub": self.subject,
"aud": self.audience,
"exp": self.expiration,
"nbf": self.not_before,
"iat": self.issued_at,
"jti": self.jwt_id,
}
if self.name:
data["name"] = self.name
if self.preferred_username:
data["preferred_username"] = self.preferred_username
if self.email:
data["email"] = self.email
if self.provider:
data["provider"] = self.provider
if self.orgs:
data["orgs"] = self.orgs
if self.groups:
data["groups"] = self.groups
if self.roles:
data["roles"] = self.roles
if self.scopes:
data["scope"] = " ".join(self.scopes)
if self.email_verified is not None:
data["email_verified"] = self.email_verified
if self.picture is not None:
data["picture"] = self.picture
if current_app.config["CUSTOMER_VIEWS"]:
data["customers"] = self.customers
return data
|
https://github.com/alerta/alerta/issues/1336
|
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,542 alerta.app[28919]: [ERROR] duplicate key value violates unique constraint "users_email_key"
DETAIL: Key (email)=(xxxxxx@xxxxx) already exists.
request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=80.120.197.114
Traceback (most recent call last):
File "/opt/alerta/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/alerta/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/alerta/lib/python3.7/site-packages/flask_cors/decorator.py", line 128, in wrapped_function
resp = make_response(f(*args, **kwargs))
File "/opt/alerta/lib/python3.7/site-packages/alerta/auth/oidc.py", line 135, in openid
user.create()
File "/opt/alerta/lib/python3.7/site-packages/alerta/models/user.py", line 140, in create
return User.from_db(db.create_user(self))
File "/opt/alerta/lib/python3.7/site-packages/alerta/database/backends/postgres/base.py", line 910, in create_user
return self._insert(insert, vars(user))
File "/opt/alerta/lib/python3.7/site-packages/alerta/database/backends/postgres/base.py", line 1389, in _insert
cursor.execute(query, vars)
File "/opt/alerta/lib/python3.7/site-packages/psycopg2/extras.py", line 327, in execute
return super(NamedTupleCursor, self).execute(query, vars)
psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "users_email_key"
DETAIL: Key (email)=(xxxxxx@xxxxxxxx) already exists.
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,541 alerta.app[28919]: [DEBUG] ****************************************
INSERT INTO users (id, name, login, password, email, status, roles, attributes,
create_time, last_login, text, update_time, email_verified)
VALUES ('Cil1aWQ9c2hlZGVuaWdnLG91PVBlb3BsZSxkYz1pbnZlbml1bSxkYz1pbxIEbGRhcA', 'xxxxxxx', 'xxxxxxxxx', '', 'xxxxxxxxxx', 'active', ARRAY['admin']
NULL, '', '2020-10-15T10:45:04.541Z', true)
RETURNING *
**************************************** request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=xxxxx
Oct 15 10:45:04 influx alertad[28913]: 2020-10-15 10:45:04,540 alerta.app[28919]: [DEBUG] ****************************************
SELECT * FROM users WHERE id='Cil1aWQ9c2hlZGVuaWdnLG91PVBlb3BsZSxkYz1pbnZlbml1bSxkYz1pbxIEbGRhcA'
**************************************** request_id=7f8eab74-9c03-4d36-a92f-c5c0d82b3c6f ip=xxxxx
|
psycopg2.errors.UniqueViolation
|
def housekeeping():
expired_threshold = request.args.get(
"expired", default=current_app.config["DEFAULT_EXPIRED_DELETE_HRS"], type=int
)
info_threshold = request.args.get(
"info", default=current_app.config["DEFAULT_INFO_DELETE_HRS"], type=int
)
has_expired, has_timedout = Alert.housekeeping(expired_threshold, info_threshold)
errors = []
for alert in has_expired:
try:
alert, _, text, timeout = process_action(
alert,
action="expired",
text="",
timeout=current_app.config["ALERT_TIMEOUT"],
)
alert = alert.from_expired(text, timeout)
except RejectException as e:
write_audit_trail.send(
current_app._get_current_object(),
event="alert-expire-rejected",
message=alert.text,
user=g.login,
customers=g.customers,
scopes=g.scopes,
resource_id=alert.id,
type="alert",
request=request,
)
errors.append(str(e))
continue
except Exception as e:
raise ApiError(str(e), 500)
write_audit_trail.send(
current_app._get_current_object(),
event="alert-expired",
message=text,
user=g.login,
customers=g.customers,
scopes=g.scopes,
resource_id=alert.id,
type="alert",
request=request,
)
for alert in has_timedout:
try:
alert, _, text, timeout = process_action(
alert,
action="timeout",
text="",
timeout=current_app.config["ALERT_TIMEOUT"],
)
alert = alert.from_timeout(text, timeout)
except RejectException as e:
write_audit_trail.send(
current_app._get_current_object(),
event="alert-timeout-rejected",
message=alert.text,
user=g.login,
customers=g.customers,
scopes=g.scopes,
resource_id=alert.id,
type="alert",
request=request,
)
errors.append(str(e))
continue
except Exception as e:
raise ApiError(str(e), 500)
write_audit_trail.send(
current_app._get_current_object(),
event="alert-timeout",
message=text,
user=g.login,
customers=g.customers,
scopes=g.scopes,
resource_id=alert.id,
type="alert",
request=request,
)
if errors:
raise ApiError("housekeeping failed", 500, errors=errors)
else:
return jsonify(
status="ok",
expired=[a.id for a in has_expired],
timedout=[a.id for a in has_timedout],
count=len(has_expired) + len(has_timedout),
)
|
def housekeeping():
expired_threshold = request.args.get(
"expired", current_app.config["DEFAULT_EXPIRED_DELETE_HRS"], type="int"
)
info_threshold = request.args.get(
"info", current_app.config["DEFAULT_INFO_DELETE_HRS"], type="int"
)
has_expired, has_timedout = Alert.housekeeping(expired_threshold, info_threshold)
errors = []
for alert in has_expired:
try:
alert, _, text, timeout = process_action(
alert,
action="expired",
text="",
timeout=current_app.config["ALERT_TIMEOUT"],
)
alert = alert.from_expired(text, timeout)
except RejectException as e:
write_audit_trail.send(
current_app._get_current_object(),
event="alert-expire-rejected",
message=alert.text,
user=g.login,
customers=g.customers,
scopes=g.scopes,
resource_id=alert.id,
type="alert",
request=request,
)
errors.append(str(e))
continue
except Exception as e:
raise ApiError(str(e), 500)
write_audit_trail.send(
current_app._get_current_object(),
event="alert-expired",
message=text,
user=g.login,
customers=g.customers,
scopes=g.scopes,
resource_id=alert.id,
type="alert",
request=request,
)
for alert in has_timedout:
try:
alert, _, text, timeout = process_action(
alert,
action="timeout",
text="",
timeout=current_app.config["ALERT_TIMEOUT"],
)
alert = alert.from_timeout(text, timeout)
except RejectException as e:
write_audit_trail.send(
current_app._get_current_object(),
event="alert-timeout-rejected",
message=alert.text,
user=g.login,
customers=g.customers,
scopes=g.scopes,
resource_id=alert.id,
type="alert",
request=request,
)
errors.append(str(e))
continue
except Exception as e:
raise ApiError(str(e), 500)
write_audit_trail.send(
current_app._get_current_object(),
event="alert-timeout",
message=text,
user=g.login,
customers=g.customers,
scopes=g.scopes,
resource_id=alert.id,
type="alert",
request=request,
)
if errors:
raise ApiError("housekeeping failed", 500, errors=errors)
else:
return jsonify(
status="ok",
expired=[a.id for a in has_expired],
timedout=[a.id for a in has_timedout],
count=len(has_expired) + len(has_timedout),
)
|
https://github.com/alerta/alerta/issues/1260
|
$ /opt/alerta/bin/alerta housekeeping --expired 72 --info 72
Error: {"code":500,"errors":["Traceback (most recent call last):\n File \"/opt/alerta/lib64/python3.6/site-packages/flask/app.py\", line 1950, in full_dispatch_request\n rv = self.dispatch_request()\n File \"/opt/alerta/lib64/python3.6/site-packages/flask/app.py\", line 1936, in dispatch_request\n return self.view_functions[rule.endpoint](**req.view_args)\n File \"/opt/alerta/lib64/python3.6/site-packages/flask_cors/decorator.py\", line 128, in wrapped_function\n resp = make_response(f(*args, **kwargs))\n File \"/opt/alerta/lib64/python3.6/site-packages/alerta/auth/decorators.py\", line 53, in wrapped\n return f(*args, **kwargs)\n File \"/opt/alerta/lib64/python3.6/site-packages/alerta/management/views.py\", line 148, in housekeeping\n expired_threshold = request.args.get('expired', current_app.config['DEFAULT_EXPIRED_DELETE_HRS'], type='int')\n File \"/opt/alerta/lib64/python3.6/site-packages/werkzeug/datastructures.py\", line 319, in get\n rv = type(rv)\nTypeError: 'str' object is not callable\n"],"message":"'str' object is not callable","requestId":"1b4b77c0-cb58-4b9d-a064-e0c548d0cbc3","status":"error"}
|
nTypeError
|
def __init__(self, match: str, scopes: List[Scope], **kwargs) -> None:
for s in scopes:
if s not in list(Scope):
raise ValueError("invalid scope: {}".format(s))
self.id = kwargs.get("id", str(uuid4()))
self.match = match
self.scopes = scopes or list()
|
def __init__(self, match: str, scopes: List[Scope], **kwargs) -> None:
self.id = kwargs.get("id", str(uuid4()))
self.match = match
self.scopes = scopes or list()
|
https://github.com/alerta/alerta/issues/1075
|
{
"code": 500,
"errors": [
"ValueError: 'read:prodalerts' is not a valid Scope\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/app/.heroku/python/lib/python3.7/site-packages/flask/app.py\", line 1813, in full_dispatch_request\n rv = self.dispatch_request()\n File \"/app/.heroku/python/lib/python3.7/site-packages/flask/app.py\", line 1799, in dispatch_request\n return self.view_functions[rule.endpoint](**req.view_args)\n File \"/app/.heroku/python/lib/python3.7/site-packages/flask_cors/decorator.py\", line 128, in wrapped_function\n resp = make_response(f(*args, **kwargs))\n File \"/app/.heroku/python/lib/python3.7/site-packages/alerta/auth/decorators.py\", line 112, in wrapped\n return f(*args, **kwargs)\n File \"/app/.heroku/python/lib/python3.7/site-packages/alerta/utils/response.py\", line 19, in decorated\n return func(*args, **kwargs)\n File \"/app/.heroku/python/lib/python3.7/site-packages/alerta/views/permissions.py\", line 67, in list_perms\n perms = Permission.find_all(query)\n File \"/app/.heroku/python/lib/python3.7/site-packages/alerta/models/permission.py\", line 75, in find_all\n return [Permission.from_db(perm) for perm in db.get_perms(query)]\n File \"/app/.heroku/python/lib/python3.7/site-packages/alerta/models/permission.py\", line 75, in <listcomp>\n return [Permission.from_db(perm) for perm in db.get_perms(query)]\n File \"/app/.heroku/python/lib/python3.7/site-packages/alerta/models/permission.py\", line 62, in from_db\n return cls.from_document(r)\n File \"/app/.heroku/python/lib/python3.7/site-packages/alerta/models/permission.py\", line 48, in from_document\n scopes=[Scope(s) for s in doc.get('scopes', list())]\n File \"/app/.heroku/python/lib/python3.7/site-packages/alerta/models/permission.py\", line 48, in <listcomp>\n scopes=[Scope(s) for s in doc.get('scopes', list())]\n File \"/app/.heroku/python/lib/python3.7/enum.py\", line 310, in __call__\n return cls.__new__(cls, value)\n File \"/app/.heroku/python/lib/python3.7/enum.py\", line 564, in __new__\n raise exc\n File \"/app/.heroku/python/lib/python3.7/enum.py\", line 548, in __new__\n result = cls._missing_(value)\n File \"/app/.heroku/python/lib/python3.7/enum.py\", line 577, in _missing_\n raise ValueError(\"%r is not a valid %s\" % (value, cls.__name__))\nValueError: 'read:prodalerts' is not a valid Scope\n"
],
"message": "'read:prodalerts' is not a valid Scope",
"requestId": "109e9b17-1626-4816-9390-53021164ab4c",
"status": "error"
}
|
ValueError
|
def __init__(self, resource: str, event: str, **kwargs) -> None:
if not resource:
raise ValueError('Missing mandatory value for "resource"')
if not event:
raise ValueError('Missing mandatory value for "event"')
if any(["." in key for key in kwargs.get("attributes", dict()).keys()]) or any(
["$" in key for key in kwargs.get("attributes", dict()).keys()]
):
raise ValueError('Attribute keys must not contain "." or "$"')
if isinstance(kwargs.get("value", None), int):
kwargs["value"] = str(kwargs["value"])
for attr in ["create_time", "receive_time", "last_receive_time"]:
if not isinstance(kwargs.get(attr), (datetime, NoneType)): # type: ignore
raise ValueError("Attribute '{}' must be datetime type".format(attr))
timeout = (
kwargs.get("timeout")
if kwargs.get("timeout") is not None
else current_app.config["ALERT_TIMEOUT"]
)
try:
timeout = int(timeout) # type: ignore
except ValueError:
raise ValueError(
"Could not convert 'timeout' value of '{}' to an integer".format(timeout)
)
if timeout < 0:
raise ValueError("Invalid negative 'timeout' value ({})".format(timeout))
self.id = kwargs.get("id", None) or str(uuid4())
self.resource = resource
self.event = event
self.environment = kwargs.get("environment", None) or ""
self.severity = kwargs.get("severity", None) or alarm_model.DEFAULT_NORMAL_SEVERITY
self.correlate = kwargs.get("correlate", None) or list()
if self.correlate and event not in self.correlate:
self.correlate.append(event)
self.status = kwargs.get("status", None) or alarm_model.DEFAULT_STATUS
self.service = kwargs.get("service", None) or list()
self.group = kwargs.get("group", None) or "Misc"
self.value = kwargs.get("value", None)
self.text = kwargs.get("text", None) or ""
self.tags = kwargs.get("tags", None) or list()
self.attributes = kwargs.get("attributes", None) or dict()
self.origin = kwargs.get("origin", None) or "{}/{}".format(
os.path.basename(sys.argv[0]), platform.uname()[1]
)
self.event_type = (
kwargs.get("event_type", kwargs.get("type", None)) or "exceptionAlert"
)
self.create_time = kwargs.get("create_time", None) or datetime.utcnow()
self.timeout = timeout
self.raw_data = kwargs.get("raw_data", None)
self.customer = kwargs.get("customer", None)
self.duplicate_count = kwargs.get("duplicate_count", None)
self.repeat = kwargs.get("repeat", None)
self.previous_severity = kwargs.get("previous_severity", None)
self.trend_indication = kwargs.get("trend_indication", None)
self.receive_time = kwargs.get("receive_time", None) or datetime.utcnow()
self.last_receive_id = kwargs.get("last_receive_id", None)
self.last_receive_time = kwargs.get("last_receive_time", None)
self.update_time = kwargs.get("update_time", None)
self.history = kwargs.get("history", None) or list()
|
def __init__(self, resource: str, event: str, **kwargs) -> None:
if not resource:
raise ValueError('Missing mandatory value for "resource"')
if not event:
raise ValueError('Missing mandatory value for "event"')
if any(["." in key for key in kwargs.get("attributes", dict()).keys()]) or any(
["$" in key for key in kwargs.get("attributes", dict()).keys()]
):
raise ValueError('Attribute keys must not contain "." or "$"')
if isinstance(kwargs.get("value", None), int):
kwargs["value"] = str(kwargs["value"])
for attr in ["create_time", "receive_time", "last_receive_time"]:
if not isinstance(kwargs.get(attr), (datetime, NoneType)): # type: ignore
raise ValueError("Attribute '{}' must be datetime type".format(attr))
self.id = kwargs.get("id", None) or str(uuid4())
self.resource = resource
self.event = event
self.environment = kwargs.get("environment", None) or ""
self.severity = kwargs.get("severity", None) or alarm_model.DEFAULT_NORMAL_SEVERITY
self.correlate = kwargs.get("correlate", None) or list()
if self.correlate and event not in self.correlate:
self.correlate.append(event)
self.status = kwargs.get("status", None) or alarm_model.DEFAULT_STATUS
self.service = kwargs.get("service", None) or list()
self.group = kwargs.get("group", None) or "Misc"
self.value = kwargs.get("value", None)
self.text = kwargs.get("text", None) or ""
self.tags = kwargs.get("tags", None) or list()
self.attributes = kwargs.get("attributes", None) or dict()
self.origin = kwargs.get("origin", None) or "{}/{}".format(
os.path.basename(sys.argv[0]), platform.uname()[1]
)
self.event_type = (
kwargs.get("event_type", kwargs.get("type", None)) or "exceptionAlert"
)
self.create_time = kwargs.get("create_time", None) or datetime.utcnow()
timeout = kwargs.get("timeout")
self.timeout = (
timeout if timeout is not None else current_app.config["ALERT_TIMEOUT"]
)
self.raw_data = kwargs.get("raw_data", None)
self.customer = kwargs.get("customer", None)
self.duplicate_count = kwargs.get("duplicate_count", None)
self.repeat = kwargs.get("repeat", None)
self.previous_severity = kwargs.get("previous_severity", None)
self.trend_indication = kwargs.get("trend_indication", None)
self.receive_time = kwargs.get("receive_time", None) or datetime.utcnow()
self.last_receive_id = kwargs.get("last_receive_id", None)
self.last_receive_time = kwargs.get("last_receive_time", None)
self.update_time = kwargs.get("update_time", None)
self.history = kwargs.get("history", None) or list()
|
https://github.com/alerta/alerta/issues/911
|
result = aclient.get_alert("5645884f-f486-4db3-8058-46e17260fb95")
Traceback (most recent call last):
File "/Users/thomasjongerius/Library/Preferences/PyCharmCE2017.3/scratches/scratch_36.py", line 13, in <module>
result = aclient.get_alert("5645884f-f486-4db3-8058-46e17260fb95")
File "/Users/thomasjongerius/PycharmProjects/videns/venv37/lib/python3.7/site-packages/alertaclient/api.py", line 65, in get_alert
return Alert.parse(self.http.get('/alert/%s' % id)['alert'])
File "/Users/thomasjongerius/PycharmProjects/videns/venv37/lib/python3.7/site-packages/alertaclient/models/alert.py", line 64, in parse
raise ValueError('timeout must be an integer')
ValueError: timeout must be an integer
|
ValueError
|
def __init__(
self,
origin: str = None,
tags: List[str] = None,
create_time: datetime = None,
timeout: int = None,
customer: str = None,
**kwargs,
) -> None:
timeout = (
timeout if timeout is not None else current_app.config["HEARTBEAT_TIMEOUT"]
)
try:
timeout = int(timeout)
except ValueError:
raise ValueError(
"Could not convert 'timeout' value of '{}' to an integer".format(timeout)
)
if timeout < 0:
raise ValueError("Invalid negative 'timeout' value ({})".format(timeout))
self.id = kwargs.get("id", str(uuid4()))
self.origin = origin or "{}/{}".format(
os.path.basename(sys.argv[0]), platform.uname()[1]
)
self.tags = tags or list()
self.event_type = kwargs.get("event_type", kwargs.get("type", None)) or "Heartbeat"
self.create_time = create_time or datetime.utcnow()
self.timeout = timeout
self.receive_time = kwargs.get("receive_time", None) or datetime.utcnow()
self.customer = customer
|
def __init__(
self,
origin: str = None,
tags: List[str] = None,
create_time: datetime = None,
timeout: int = None,
customer: str = None,
**kwargs,
) -> None:
self.id = kwargs.get("id", str(uuid4()))
self.origin = origin or "{}/{}".format(
os.path.basename(sys.argv[0]), platform.uname()[1]
)
self.tags = tags or list()
self.event_type = kwargs.get("event_type", kwargs.get("type", None)) or "Heartbeat"
self.create_time = create_time or datetime.utcnow()
self.timeout = (
timeout if timeout is not None else current_app.config["HEARTBEAT_TIMEOUT"]
)
self.receive_time = kwargs.get("receive_time", None) or datetime.utcnow()
self.customer = customer
|
https://github.com/alerta/alerta/issues/911
|
result = aclient.get_alert("5645884f-f486-4db3-8058-46e17260fb95")
Traceback (most recent call last):
File "/Users/thomasjongerius/Library/Preferences/PyCharmCE2017.3/scratches/scratch_36.py", line 13, in <module>
result = aclient.get_alert("5645884f-f486-4db3-8058-46e17260fb95")
File "/Users/thomasjongerius/PycharmProjects/videns/venv37/lib/python3.7/site-packages/alertaclient/api.py", line 65, in get_alert
return Alert.parse(self.http.get('/alert/%s' % id)['alert'])
File "/Users/thomasjongerius/PycharmProjects/videns/venv37/lib/python3.7/site-packages/alertaclient/models/alert.py", line 64, in parse
raise ValueError('timeout must be an integer')
ValueError: timeout must be an integer
|
ValueError
|
def parse_grafana(
alert: JSON, match: Dict[str, Any], args: ImmutableMultiDict
) -> Alert:
alerting_severity = args.get("severity", "major")
if alert["state"] == "alerting":
severity = alerting_severity
elif alert["state"] == "ok":
severity = "normal"
else:
severity = "indeterminate"
environment = args.get("environment", "Production") # TODO: verify at create?
event_type = args.get("event_type", "performanceAlert")
group = args.get("group", "Performance")
origin = args.get("origin", "Grafana")
service = args.get("service", "Grafana")
timeout = args.get("timeout", type=int)
attributes = match.get("tags", None) or dict()
attributes = {k.replace(".", "_"): v for (k, v) in attributes.items()}
attributes["ruleId"] = str(alert["ruleId"])
if "ruleUrl" in alert:
attributes["ruleUrl"] = (
'<a href="%s" target="_blank">Rule</a>' % alert["ruleUrl"]
)
if "imageUrl" in alert:
attributes["imageUrl"] = (
'<a href="%s" target="_blank">Image</a>' % alert["imageUrl"]
)
return Alert(
resource=match["metric"],
event=alert["ruleName"],
environment=environment,
severity=severity,
service=[service],
group=group,
value="%s" % match["value"],
text=alert.get("message", None) or alert.get("title", alert["state"]),
tags=list(),
attributes=attributes,
origin=origin,
event_type=event_type,
timeout=timeout,
raw_data=json.dumps(alert),
)
|
def parse_grafana(
alert: JSON, match: Dict[str, Any], args: ImmutableMultiDict
) -> Alert:
alerting_severity = args.get("severity", "major")
if alert["state"] == "alerting":
severity = alerting_severity
elif alert["state"] == "ok":
severity = "normal"
else:
severity = "indeterminate"
environment = args.get("environment", "Production") # TODO: verify at create?
event_type = args.get("event_type", "performanceAlert")
group = args.get("group", "Performance")
origin = args.get("origin", "Grafana")
service = args.get("service", "Grafana")
timeout = args.get("timeout", current_app.config["ALERT_TIMEOUT"])
attributes = match.get("tags", None) or dict()
attributes = {k.replace(".", "_"): v for (k, v) in attributes.items()}
attributes["ruleId"] = str(alert["ruleId"])
if "ruleUrl" in alert:
attributes["ruleUrl"] = (
'<a href="%s" target="_blank">Rule</a>' % alert["ruleUrl"]
)
if "imageUrl" in alert:
attributes["imageUrl"] = (
'<a href="%s" target="_blank">Image</a>' % alert["imageUrl"]
)
return Alert(
resource=match["metric"],
event=alert["ruleName"],
environment=environment,
severity=severity,
service=[service],
group=group,
value="%s" % match["value"],
text=alert.get("message", None) or alert.get("title", alert["state"]),
tags=list(),
attributes=attributes,
origin=origin,
event_type=event_type,
timeout=timeout,
raw_data=json.dumps(alert),
)
|
https://github.com/alerta/alerta/issues/911
|
result = aclient.get_alert("5645884f-f486-4db3-8058-46e17260fb95")
Traceback (most recent call last):
File "/Users/thomasjongerius/Library/Preferences/PyCharmCE2017.3/scratches/scratch_36.py", line 13, in <module>
result = aclient.get_alert("5645884f-f486-4db3-8058-46e17260fb95")
File "/Users/thomasjongerius/PycharmProjects/videns/venv37/lib/python3.7/site-packages/alertaclient/api.py", line 65, in get_alert
return Alert.parse(self.http.get('/alert/%s' % id)['alert'])
File "/Users/thomasjongerius/PycharmProjects/videns/venv37/lib/python3.7/site-packages/alertaclient/models/alert.py", line 64, in parse
raise ValueError('timeout must be an integer')
ValueError: timeout must be an integer
|
ValueError
|
def setup_logging(app):
del app.logger.handlers[:]
# for key in logging.Logger.manager.loggerDict:
# print(key)
loggers = [
app.logger,
logging.getLogger("alerta"), # ??
# logging.getLogger('flask'), # ??
logging.getLogger("flask_compress"), # ??
# logging.getLogger('flask_cors'), # ??
logging.getLogger("pymongo"), # ??
logging.getLogger("raven"), # ??
logging.getLogger("requests"), # ??
logging.getLogger("sentry"), # ??
logging.getLogger("urllib3"), # ??
logging.getLogger("werkzeug"), # ??
]
if app.debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
if app.config["LOG_FILE"]:
from logging.handlers import RotatingFileHandler
handler = RotatingFileHandler(
filename=app.config["LOG_FILE"],
maxBytes=app.config["LOG_MAX_BYTES"],
backupCount=app.config["LOG_BACKUP_COUNT"],
encoding="utf-8",
)
handler.setLevel(log_level)
handler.setFormatter(logging.Formatter(app.config["LOG_FORMAT"]))
else:
handler = logging.StreamHandler()
handler.setLevel(log_level)
handler.setFormatter(logging.Formatter(app.config["LOG_FORMAT"]))
for logger in loggers:
logger.addHandler(handler)
logger.setLevel(log_level)
logger.propagate = True
|
def setup_logging(app):
del app.logger.handlers[:]
# for key in logging.Logger.manager.loggerDict:
# print(key)
loggers = [
app.logger,
logging.getLogger("alerta"), # ??
# logging.getLogger('flask'), # ??
logging.getLogger("flask_compress"), # ??
# logging.getLogger('flask_cors'), # ??
logging.getLogger("pymongo"), # ??
logging.getLogger("raven"), # ??
logging.getLogger("requests"), # ??
logging.getLogger("sentry"), # ??
logging.getLogger("urllib3"), # ??
logging.getLogger("werkzeug"), # ??
]
if app.debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
if app.config["LOG_FILE"]:
from logging.handlers import RotatingFileHandler
handler = RotatingFileHandler(
filename=app.config["LOG_FILE"],
maxBytes=app.config["LOG_MAX_BYTES"],
backupCount=app.config["LOG_BACKUP_COUNT"],
)
handler.setLevel(log_level)
handler.setFormatter(logging.Formatter(app.config["LOG_FORMAT"]))
else:
handler = logging.StreamHandler()
handler.setLevel(log_level)
handler.setFormatter(logging.Formatter(app.config["LOG_FORMAT"]))
for logger in loggers:
logger.addHandler(handler)
logger.setLevel(log_level)
logger.propagate = True
|
https://github.com/alerta/alerta/issues/583
|
--- Logging error ---
Traceback (most recent call last):
File "/usr/lib64/python3.5/logging/__init__.py", line 988, in emit
stream.write(msg)
UnicodeEncodeError: 'ascii' codec can't encode character '\\u5e74' in position 1710: ordinal not in range(128)
Call stack:
File "/usr/lib/python3.5/site-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/usr/lib/python3.5/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python3.5/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python3.5/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python3.5/site-packages/flask_cors/decorator.py", line 128, in wrapped_function
resp = make_response(f(*args, **kwargs))
File "/usr/lib/python3.5/site-packages/alerta/auth/utils.py", line 123, in wrapped
return f(*args, **kwargs)
File "/usr/lib/python3.5/site-packages/alerta/models/metrics.py", line 259, in wrapped
response = f(*args, **kwargs)
File "/usr/lib/python3.5/site-packages/alerta/utils/api.py", line 32, in decorated
return func(*args, **kwargs)
File "/usr/lib/python3.5/site-packages/alerta/views/alerts.py", line 42, in receive
alert = process_alert(incomingAlert)
File "/usr/lib/python3.5/site-packages/alerta/utils/api.py", line 103, in process_alert
updated = plugin.post_receive(alert)
File "/usr/lib/python3.5/site-packages/alerta_amqp.py", line 52, in post_receive
LOG.debug('Message: %s', body)
Message: 'Message: %s'
Arguments: {'createTime': '2018-07-26T06:39:04.653Z', 'duplicateCount': 2, 'type': 'zabbixAlert', 'repeat': True, 'severity': 'major', 'correlate': [], 'text': 'PROBLEM: log trigger zbx.log', 'history': [{'text': 'PROBLEM: log trigger zbx.log', 'type': 'severity', 'status': None, 'value': 'hogehoge', 'event': 'log[/tmp/zbx.log]', 'severity': 'major', 'updateTime': datetime.datetime(2018, 7, 26, 6, 39, 4, 653000), 'href': 'http://dhcp-175-92/alert/3013a7bb-1213-47fa-9135-8845908abd1a', 'id': '3013a7bb-1213-47fa-9135-8845908abd1a'}, {'text': 'new alert status change', 'type': 'status', 'status': 'open', 'value': None, 'event': 'log[/tmp/zbx.log]', 'severity': None, 'updateTime': datetime.datetime(2018, 7, 26, 6, 39, 4, 676000), 'href': 'http://dhcp-175-92/alert/3013a7bb-1213-47fa-9135-8845908abd1a', 'id': '3013a7bb-1213-47fa-9135-8845908abd1a'}, {'text': 'duplicate alert with value change', 'type': 'value', 'status': None, 'value': 'hogehoge1', 'event': 'log[/tmp/zbx.log]', 'severity': None, 'updateTime': datetime.datetime(2018, 7, 26, 6, 39, 54, 592000), 'href': 'http://dhcp-175-92/alert/5376a8d9-29ee-4313-9cab-cb6b9b5998c5', 'id': '5376a8d9-29ee-4313-9cab-cb6b9b5998c5'}, {'text': 'status change via console by Yoshiharu Mori (acknowledged in Zabbix)', 'type': 'action', 'status': 'ack', 'value': None, 'event': 'log[/tmp/zbx.log]', 'severity': 'major', 'updateTime': datetime.datetime(2018, 7, 26, 6, 44, 36, 456000), 'href': 'http://dhcp-175-92/alert/3013a7bb-1213-47fa-9135-8845908abd1a', 'id': '3013a7bb-1213-47fa-9135-8845908abd1a'}, {'text': 'duplicate alert with value change', 'type': 'value', 'status': None, 'value': '2018\xe5\xb9\xb4 7\xe6\x9c\x88 27\xe6\x97\xa5 \xe9\x87\x91\xe6\x9b\x9c\xe6\x97\xa5 16:48:49 JST hogehoge', 'event': 'log[/tmp/zbx.log]', 'severity': None, 'updateTime': datetime.datetime(2018, 7, 27, 7, 49, 10, 102000), 'href': 'http://dhcp-175-92/alert/8a5fdc71-90ab-4439-967a-ca34ede2bafb', 'id': '8a5fdc71-90ab-4439-967a-ca34ede2bafb'}], 'customer': None, 'status': 'ack', 'value': '2018\xe5\xb9\xb4 7\xe6\x9c\x88 27\xe6\x97\xa5 \xe9\x87\x91\xe6\x9b\x9c\xe6\x97\xa5 16:48:49 JST hogehoge', 'event': 'log[/tmp/zbx.log]', 'attributes': {'triggerID': '13563', 'eventId': '981', 'thresholdInfo': '*UNKNOWN*: {Zabbix server:log[/tmp/zbx.log].strlen()}>0', 'ip': '133.137.175.143'}, 'previousSeverity': 'indeterminate', 'timeout': 86400, 'resource': 'Zabbix server', 'lastReceiveId': '8a5fdc71-90ab-4439-967a-ca34ede2bafb', 'trendIndication': 'moreSevere', 'tags': ['{EVENT.TAGS}'], 'receiveTime': '2018-07-26T06:39:04.676Z', 'group': 'Zabbix', 'id': '3013a7bb-1213-47fa-9135-8845908abd1a', 'service': ['Zabbix servers'], 'href': 'http://dhcp-175-92/alert/3013a7bb-1213-47fa-9135-8845908abd1a', 'lastReceiveTime': '2018-07-27T07:49:10.102Z', 'environment': 'Zabbix 1st', 'rawData': None, 'origin': 'zabbix/dhcp-175-143'}
|
UnicodeEncodeError
|
def parse_prometheus(alert, external_url):
status = alert.get("status", "firing")
labels = copy(alert["labels"])
annotations = copy(alert["annotations"])
starts_at = parse_date(alert["startsAt"])
if alert["endsAt"] == "0001-01-01T00:00:00Z":
ends_at = None
else:
ends_at = parse_date(alert["endsAt"])
if status == "firing":
severity = labels.pop("severity", "warning")
create_time = starts_at
elif status == "resolved":
severity = "normal"
create_time = ends_at
else:
severity = "unknown"
create_time = ends_at or starts_at
# get labels
resource = labels.pop("exported_instance", None) or labels.pop("instance", "n/a")
event = labels.pop("alertname")
environment = labels.pop("environment", "Production")
# get annotations
correlate = (
annotations.pop("correlate").split(",") if "correlate" in annotations else None
)
service = annotations.pop("service", "").split(",")
group = annotations.pop("job", "Prometheus")
value = annotations.pop("value", None)
# build alert text
summary = annotations.pop("summary", None)
description = annotations.pop("description", None)
text = (
description
or summary
or "{}: {} is {}".format(severity.upper(), resource, event)
)
try:
timeout = int(labels.pop("timeout", 0)) or None
except ValueError:
timeout = None
if external_url:
annotations["externalUrl"] = external_url
if "generatorURL" in alert:
annotations["moreInfo"] = (
'<a href="%s" target="_blank">Prometheus Graph</a>' % alert["generatorURL"]
)
return Alert(
resource=resource,
event=event,
environment=environment,
severity=severity,
correlate=correlate,
service=service,
group=group,
value=value,
text=text,
attributes=annotations,
origin="prometheus/" + labels.pop("monitor", "-"),
event_type="prometheusAlert",
create_time=create_time.astimezone(tz=pytz.UTC).replace(tzinfo=None),
timeout=timeout,
raw_data=alert,
tags=["%s=%s" % t for t in labels.items()], # any labels left are used for tags
)
|
def parse_prometheus(alert, external_url):
status = alert.get("status", "firing")
labels = copy(alert["labels"])
annotations = copy(alert["annotations"])
starts_at = parse_date(alert["startsAt"])
if alert["endsAt"] == "0001-01-01T00:00:00Z":
ends_at = None
else:
ends_at = parse_date(alert["endsAt"])
if status == "firing":
severity = labels.pop("severity", "warning")
create_time = starts_at
elif status == "resolved":
severity = "normal"
create_time = ends_at
else:
severity = "unknown"
create_time = ends_at or starts_at
# get labels
resource = labels.pop("exported_instance", None) or labels.pop("instance", "n/a")
event = labels.pop("alertname")
environment = labels.pop("environment", "Production")
# get annotations
correlate = (
annotations.pop("correlate").split(",") if "correlate" in annotations else None
)
service = annotations.pop("service", "").split(",")
group = annotations.pop("job", "Prometheus")
value = annotations.pop("value", None)
# build alert text
summary = annotations.pop("summary", None)
description = annotations.pop("description", None)
text = (
description
or summary
or "%s: %s on %s" % (labels["job"], labels["alertname"], labels["instance"])
)
try:
timeout = int(labels.pop("timeout", 0)) or None
except ValueError:
timeout = None
if external_url:
annotations["externalUrl"] = external_url
if "generatorURL" in alert:
annotations["moreInfo"] = (
'<a href="%s" target="_blank">Prometheus Graph</a>' % alert["generatorURL"]
)
return Alert(
resource=resource,
event=event,
environment=environment,
severity=severity,
correlate=correlate,
service=service,
group=group,
value=value,
text=text,
attributes=annotations,
origin="prometheus/" + labels.pop("monitor", "-"),
event_type="prometheusAlert",
create_time=create_time.astimezone(tz=pytz.UTC).replace(tzinfo=None),
timeout=timeout,
raw_data=alert,
tags=["%s=%s" % t for t in labels.items()], # any labels left are used for tags
)
|
https://github.com/alerta/alerta/issues/596
|
alerta_1 | 2018-08-05 21:56:22,585 DEBG 'uwsgi' stdout output:
alerta_1 | [pid: 71|app: 0|req: 218/1482] 172.30.0.2 () {38 vars in 518 bytes} [Sun Aug 5 21:56:22 2018] POST /api/webhooks/prometheus => generated 1088 bytes in 329 msecs (HTTP/1.1 500) 4 headers in 153 bytes (1 switches on core 0)
alerta_1 |
alerta_1 | 2018-08-05 21:56:22,585 DEBG 'nginx' stdout output:
alerta_1 | ip=\- [\05/Aug/2018:21:56:22 +0000] "\POST /api/webhooks/prometheus HTTP/1.1" \500 \1088 "\-" "\Alertmanager/0.15.1"
alerta_1 | 2018/08/05 21:56:22 [info] 66#66: *2730 client 172.30.0.2 closed keepalive connection
alerta_1 |
alerta_1 | 2018-08-05 21:56:22,699 DEBG 'uwsgi' stdout output:
alerta_1 | 2018-08-05 21:56:22,698 - flask.app[73]: ERROR - 'alertname' [in /venv/lib/python3.7/site-packages/alerta/exceptions.py:95]
alerta_1 | Traceback (most recent call last):
alerta_1 | File "/venv/lib/python3.7/site-packages/flask/app.py", line 1813, in full_dispatch_request
alerta_1 | rv = self.dispatch_request()
alerta_1 | File "/venv/lib/python3.7/site-packages/flask/app.py", line 1799, in dispatch_request
alerta_1 | return self.view_functions[rule.endpoint](**req.view_args)
alerta_1 | File "/venv/lib/python3.7/site-packages/flask_cors/decorator.py", line 128, in wrapped_function
alerta_1 | resp = make_response(f(*args, **kwargs))
alerta_1 | File "/venv/lib/python3.7/site-packages/alerta/auth/decorators.py", line 88, in wrapped
alerta_1 | return f(*args, **kwargs)
alerta_1 | File "/venv/lib/python3.7/site-packages/alerta/webhooks/prometheus.py", line 94, in prometheus
alerta_1 | incomingAlert = parse_prometheus(alert, external_url)
alerta_1 | File "/venv/lib/python3.7/site-packages/alerta/webhooks/prometheus.py", line 52, in parse_prometheus
alerta_1 | text = description or summary or '%s: %s on %s' % (labels['job'], labels['alertname'], labels['instance'])
alerta_1 | KeyError: 'alertname'
alerta_1 | 2018-08-05 21:56:22,700 DEBG 'uwsgi' stdout output:
alerta_1 |
alerta_1 |
db_1 | 2018-08-05T21:56:22.703+0000 I NETWORK [conn2971] end connection 172.31.0.3:52282 (1 connection now open)
db_1 | 2018-08-05T21:56:22.704+0000 I NETWORK [conn2972] end connection 172.31.0.3:52284 (0 connections now open)
alerta_1 | 2018-08-05 21:56:22,707 DEBG 'nginx' stdout output:
alerta_1 | ip=\- [\05/Aug/2018:21:56:22 +0000] "\POST /api/webhooks/prometheus HTTP/1.1" \500 \1088 "\-" "\Alertmanager/0.15.1"
|
KeyError
|
def parse_notification(notification):
if notification["Type"] == "SubscriptionConfirmation":
return Alert(
resource=notification["TopicArn"],
event=notification["Type"],
environment="Production",
severity="informational",
service=["Unknown"],
group="AWS/CloudWatch",
text='%s <a href="%s" target="_blank">SubscribeURL</a>'
% (notification["Message"], notification["SubscribeURL"]),
origin=notification["TopicArn"],
event_type="cloudwatchAlarm",
create_time=datetime.strptime(
notification["Timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ"
),
raw_data=notification,
)
elif notification["Type"] == "Notification":
alarm = json.loads(notification["Message"])
if "Trigger" not in alarm:
raise ValueError("SNS message is not a Cloudwatch notification")
return Alert(
resource="%s:%s"
% (
alarm["Trigger"]["Dimensions"][0]["name"],
alarm["Trigger"]["Dimensions"][0]["value"],
),
event=alarm["AlarmName"],
environment="Production",
severity=cw_state_to_severity(alarm["NewStateValue"]),
service=[alarm["AWSAccountId"]],
group=alarm["Trigger"]["Namespace"],
value=alarm["NewStateValue"],
text=alarm["AlarmDescription"],
tags=[alarm["Region"]],
attributes={
"incidentKey": alarm["AlarmName"],
"thresholdInfo": alarm["Trigger"],
},
origin=notification["TopicArn"],
event_type="cloudwatchAlarm",
create_time=datetime.strptime(
notification["Timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ"
),
raw_data=alarm,
)
|
def parse_notification(notification):
notification = json.loads(notification)
if notification["Type"] == "SubscriptionConfirmation":
return Alert(
resource=notification["TopicArn"],
event=notification["Type"],
environment="Production",
severity="informational",
service=["Unknown"],
group="AWS/CloudWatch",
text='%s <a href="%s" target="_blank">SubscribeURL</a>'
% (notification["Message"], notification["SubscribeURL"]),
origin=notification["TopicArn"],
event_type="cloudwatchAlarm",
create_time=datetime.strptime(
notification["Timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ"
),
raw_data=notification,
)
elif notification["Type"] == "Notification":
alarm = json.loads(notification["Message"])
if "Trigger" not in alarm:
raise ValueError("SNS message is not a Cloudwatch notification")
return Alert(
resource="%s:%s"
% (
alarm["Trigger"]["Dimensions"][0]["name"],
alarm["Trigger"]["Dimensions"][0]["value"],
),
event=alarm["AlarmName"],
environment="Production",
severity=cw_state_to_severity(alarm["NewStateValue"]),
service=[alarm["AWSAccountId"]],
group=alarm["Trigger"]["Namespace"],
value=alarm["NewStateValue"],
text=alarm["AlarmDescription"],
tags=[alarm["Region"]],
attributes={
"incidentKey": alarm["AlarmName"],
"thresholdInfo": alarm["Trigger"],
},
origin=notification["TopicArn"],
event_type="cloudwatchAlarm",
create_time=datetime.strptime(
notification["Timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ"
),
raw_data=alarm,
)
|
https://github.com/alerta/alerta/issues/565
|
Traceback (most recent call last):
File "/opt/alerta/venv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/alerta/venv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/alerta/venv/lib/python3.5/site-packages/flask_cors/decorator.py", line 128, in wrapped_function
resp = make_response(f(*args, **kwargs))
File "/opt/alerta/venv/lib/python3.5/site-packages/alerta/auth/utils.py", line 95, in wrapped
return f(*args, **kwargs)
File "/opt/alerta/venv/lib/python3.5/site-packages/alerta/webhooks/cloudwatch.py", line 81, in cloudwatch
incomingAlert = parse_notification(request.data)
File "/opt/alerta/venv/lib/python3.5/site-packages/alerta/webhooks/cloudwatch.py", line 29, in parse_notification
notification = json.loads(notification)
File "/usr/lib/python3.5/json/__init__.py", line 312, in loads
s.__class__.__name__))
TypeError: the JSON object must be str, not 'bytes'
|
TypeError
|
def cloudwatch():
try:
incomingAlert = parse_notification(request.json)
except ValueError as e:
raise ApiError(str(e), 400)
incomingAlert.customer = assign_customer(wanted=incomingAlert.customer)
add_remote_ip(request, incomingAlert)
try:
alert = process_alert(incomingAlert)
except RejectException as e:
raise ApiError(str(e), 403)
except Exception as e:
raise ApiError(str(e), 500)
if alert:
return jsonify(status="ok", id=alert.id, alert=alert.serialize), 201
else:
raise ApiError("insert or update of cloudwatch alarm failed", 500)
|
def cloudwatch():
try:
incomingAlert = parse_notification(request.data)
except ValueError as e:
raise ApiError(str(e), 400)
incomingAlert.customer = assign_customer(wanted=incomingAlert.customer)
add_remote_ip(request, incomingAlert)
try:
alert = process_alert(incomingAlert)
except RejectException as e:
raise ApiError(str(e), 403)
except Exception as e:
raise ApiError(str(e), 500)
if alert:
return jsonify(status="ok", id=alert.id, alert=alert.serialize), 201
else:
raise ApiError("insert or update of cloudwatch alarm failed", 500)
|
https://github.com/alerta/alerta/issues/565
|
Traceback (most recent call last):
File "/opt/alerta/venv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/alerta/venv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/alerta/venv/lib/python3.5/site-packages/flask_cors/decorator.py", line 128, in wrapped_function
resp = make_response(f(*args, **kwargs))
File "/opt/alerta/venv/lib/python3.5/site-packages/alerta/auth/utils.py", line 95, in wrapped
return f(*args, **kwargs)
File "/opt/alerta/venv/lib/python3.5/site-packages/alerta/webhooks/cloudwatch.py", line 81, in cloudwatch
incomingAlert = parse_notification(request.data)
File "/opt/alerta/venv/lib/python3.5/site-packages/alerta/webhooks/cloudwatch.py", line 29, in parse_notification
notification = json.loads(notification)
File "/usr/lib/python3.5/json/__init__.py", line 312, in loads
s.__class__.__name__))
TypeError: the JSON object must be str, not 'bytes'
|
TypeError
|
def cloudwatch():
try:
incomingAlert = parse_notification(request.get_json(force=True))
except ValueError as e:
raise ApiError(str(e), 400)
incomingAlert.customer = assign_customer(wanted=incomingAlert.customer)
add_remote_ip(request, incomingAlert)
try:
alert = process_alert(incomingAlert)
except RejectException as e:
raise ApiError(str(e), 403)
except Exception as e:
raise ApiError(str(e), 500)
if alert:
return jsonify(status="ok", id=alert.id, alert=alert.serialize), 201
else:
raise ApiError("insert or update of cloudwatch alarm failed", 500)
|
def cloudwatch():
try:
incomingAlert = parse_notification(request.json)
except ValueError as e:
raise ApiError(str(e), 400)
incomingAlert.customer = assign_customer(wanted=incomingAlert.customer)
add_remote_ip(request, incomingAlert)
try:
alert = process_alert(incomingAlert)
except RejectException as e:
raise ApiError(str(e), 403)
except Exception as e:
raise ApiError(str(e), 500)
if alert:
return jsonify(status="ok", id=alert.id, alert=alert.serialize), 201
else:
raise ApiError("insert or update of cloudwatch alarm failed", 500)
|
https://github.com/alerta/alerta/issues/565
|
Traceback (most recent call last):
File "/opt/alerta/venv/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/alerta/venv/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/alerta/venv/lib/python3.5/site-packages/flask_cors/decorator.py", line 128, in wrapped_function
resp = make_response(f(*args, **kwargs))
File "/opt/alerta/venv/lib/python3.5/site-packages/alerta/auth/utils.py", line 95, in wrapped
return f(*args, **kwargs)
File "/opt/alerta/venv/lib/python3.5/site-packages/alerta/webhooks/cloudwatch.py", line 81, in cloudwatch
incomingAlert = parse_notification(request.data)
File "/opt/alerta/venv/lib/python3.5/site-packages/alerta/webhooks/cloudwatch.py", line 29, in parse_notification
notification = json.loads(notification)
File "/usr/lib/python3.5/json/__init__.py", line 312, in loads
s.__class__.__name__))
TypeError: the JSON object must be str, not 'bytes'
|
TypeError
|
def housekeeping(self, expired_threshold, info_threshold):
# delete 'closed' or 'expired' alerts older than "expired_threshold" hours
# and 'informational' alerts older than "info_threshold" hours
expired_hours_ago = datetime.utcnow() - timedelta(hours=expired_threshold)
g.db.alerts.remove(
{
"status": {"$in": ["closed", "expired"]},
"lastReceiveTime": {"$lt": expired_hours_ago},
}
)
info_hours_ago = datetime.utcnow() - timedelta(hours=info_threshold)
g.db.alerts.remove(
{"severity": "informational", "lastReceiveTime": {"$lt": info_hours_ago}}
)
# get list of alerts to be newly expired
pipeline = [
{
"$project": {
"event": 1,
"status": 1,
"lastReceiveId": 1,
"timeout": 1,
"expireTime": {
"$add": ["$lastReceiveTime", {"$multiply": ["$timeout", 1000]}]
},
}
},
{
"$match": {
"status": {"$nin": ["expired", "shelved"]},
"expireTime": {"$lt": datetime.utcnow()},
"timeout": {"$ne": 0},
}
},
]
expired = [
(r["_id"], r["event"], r["lastReceiveId"])
for r in g.db.alerts.aggregate(pipeline)
]
# get list of alerts to be unshelved
pipeline = [
{"$match": {"status": "shelved"}},
{"$unwind": "$history"},
{"$match": {"history.type": "action", "history.status": "shelved"}},
{"$sort": {"history.updateTime": -1}},
{
"$group": {
"_id": "$_id",
"event": {"$first": "$event"},
"lastReceiveId": {"$first": "$lastReceiveId"},
"updateTime": {"$first": "$history.updateTime"},
"timeout": {"$first": "$timeout"},
}
},
{
"$project": {
"event": 1,
"lastReceiveId": 1,
"expireTime": {
"$add": ["$updateTime", {"$multiply": ["$timeout", 1000]}]
},
}
},
{"$match": {"expireTime": {"$lt": datetime.utcnow()}, "timeout": {"$ne": 0}}},
]
unshelved = [
(r["_id"], r["event"], r["lastReceiveId"])
for r in g.db.alerts.aggregate(pipeline)
]
return (expired, unshelved)
|
def housekeeping(self, expired_threshold, info_threshold):
# delete 'closed' or 'expired' alerts older than "expired_threshold" hours
# and 'informational' alerts older than "info_threshold" hours
expired_hours_ago = datetime.utcnow() - timedelta(hours=expired_threshold)
g.db.alerts.remove(
{
"status": {"$in": ["closed", "expired"]},
"lastReceiveTime": {"$lt": expired_hours_ago},
}
)
info_hours_ago = datetime.utcnow() - timedelta(hours=info_threshold)
g.db.alerts.remove(
{"severity": "informational", "lastReceiveTime": {"$lt": info_hours_ago}}
)
# get list of alerts to be newly expired
pipeline = [
{
"$project": {
"event": 1,
"status": 1,
"lastReceiveId": 1,
"timeout": 1,
"expireTime": {
"$add": ["$lastReceiveTime", {"$multiply": ["$timeout", 1000]}]
},
}
},
{
"$match": {
"status": {"$nin": ["expired", "shelved"]},
"expireTime": {"$lt": datetime.utcnow()},
"timeout": {"$ne": 0},
}
},
]
expired = [
(r["_id"], r["event"], r["lastReceiveId"])
for r in g.db.alerts.aggregate(pipeline)
]
# get list of alerts to be unshelved
pipeline = [
{
"$project": {
"event": 1,
"status": 1,
"lastReceiveId": 1,
"timeout": 1,
"expireTime": {
"$add": ["$lastReceiveTime", {"$multiply": ["$timeout", 1000]}]
},
}
},
{
"$match": {
"status": "shelved",
"expireTime": {"$lt": datetime.utcnow()},
"timeout": {"$ne": 0},
}
},
]
unshelved = [
(r["_id"], r["event"], r["lastReceiveId"])
for r in g.db.alerts.aggregate(pipeline)
]
return (expired, unshelved)
|
https://github.com/alerta/alerta/issues/528
|
2018-04-28 00:06:43,862 - alerta[18702]: ERROR - HOUSEKEEPING FAILED: Type names and field names can only contain alphanumeric characters and underscores: '?column?' [in /usr/lib/python2.7/site-packages/alerta_server-5.2.0_-py2.7.egg/alerta/exceptions.py:67]
Traceback (most recent call last):
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/site-packages/flask_cors/decorator.py", line 128, in wrapped_function
resp = make_response(f(*args, **kwargs))
File "/usr/lib/python2.7/site-packages/alerta_server-5.2.0_-py2.7.egg/alerta/auth/utils.py", line 95, in wrapped
return f(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/alerta_server-5.2.0_-py2.7.egg/alerta/management/views.py", line 148, in housekeeping
raise ApiError('HOUSEKEEPING FAILED: %s' % e, 503)
ApiError: HOUSEKEEPING FAILED: Type names and field names can only contain alphanumeric characters and underscores: '?column?'
|
ApiError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.