after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
async def reset(self, *, timeout=None):
self._check_open()
self._listeners.clear()
self._log_listeners.clear()
reset_query = self._get_reset_query()
if reset_query:
await self.execute(reset_query, timeout=timeout)
|
async def reset(self):
self._check_open()
self._listeners.clear()
self._log_listeners.clear()
reset_query = self._get_reset_query()
if reset_query:
await self.execute(reset_query)
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
def _cancel_current_command(self, waiter):
self._cancellations.add(self._loop.create_task(self._cancel(waiter)))
|
def _cancel_current_command(self, waiter):
async def cancel():
try:
# Open new connection to the server
r, w = await connect_utils._open_connection(
loop=self._loop, addr=self._addr, params=self._params
)
except Exception as ex:
waiter.set_exception(ex)
return
try:
# Pack CancelRequest message
msg = struct.pack(
"!llll",
16,
80877102,
self._protocol.backend_pid,
self._protocol.backend_secret,
)
w.write(msg)
await r.read() # Wait until EOF
except ConnectionResetError:
# On some systems Postgres will reset the connection
# after processing the cancellation command.
pass
except Exception as ex:
waiter.set_exception(ex)
finally:
if not waiter.done(): # Ensure set_exception wasn't called.
waiter.set_result(None)
w.close()
self._loop.create_task(cancel())
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
def __init__(
self,
pool,
*,
connect_args,
connect_kwargs,
max_queries,
setup,
init,
max_inactive_time,
):
self._pool = pool
self._con = None
self._connect_args = connect_args
self._connect_kwargs = connect_kwargs
self._max_queries = max_queries
self._max_inactive_time = max_inactive_time
self._setup = setup
self._init = init
self._inactive_callback = None
self._in_use = False
self._timeout = None
|
def __init__(
self,
pool,
*,
connect_args,
connect_kwargs,
max_queries,
setup,
init,
max_inactive_time,
):
self._pool = pool
self._con = None
self._connect_args = connect_args
self._connect_kwargs = connect_kwargs
self._max_queries = max_queries
self._max_inactive_time = max_inactive_time
self._setup = setup
self._init = init
self._inactive_callback = None
self._in_use = False
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
async def release(self, timeout):
assert self._in_use
self._in_use = False
self._timeout = None
self._con._on_release()
if self._con.is_closed():
self._con = None
elif self._con._protocol.queries_count >= self._max_queries:
try:
await self._con.close(timeout=timeout)
finally:
self._con = None
else:
try:
budget = timeout
if self._con._protocol._is_cancelling():
# If the connection is in cancellation state,
# wait for the cancellation
started = time.monotonic()
await asyncio.wait_for(
self._con._protocol._wait_for_cancellation(),
budget,
loop=self._pool._loop,
)
if budget is not None:
budget -= time.monotonic() - started
await self._con.reset(timeout=budget)
except Exception as ex:
# If the `reset` call failed, terminate the connection.
# A new one will be created when `acquire` is called
# again.
try:
# An exception in `reset` is most likely caused by
# an IO error, so terminate the connection.
self._con.terminate()
finally:
self._con = None
raise ex
assert self._inactive_callback is None
if self._max_inactive_time and self._con is not None:
self._inactive_callback = self._pool._loop.call_later(
self._max_inactive_time, self._deactivate_connection
)
|
async def release(self):
assert self._in_use
self._in_use = False
self._con._on_release()
if self._con.is_closed():
self._con = None
elif self._con._protocol.queries_count >= self._max_queries:
try:
await self._con.close()
finally:
self._con = None
else:
try:
await self._con.reset()
except Exception as ex:
# If the `reset` call failed, terminate the connection.
# A new one will be created when `acquire` is called
# again.
try:
# An exception in `reset` is most likely caused by
# an IO error, so terminate the connection.
self._con.terminate()
finally:
self._con = None
raise ex
assert self._inactive_callback is None
if self._max_inactive_time and self._con is not None:
self._inactive_callback = self._pool._loop.call_later(
self._max_inactive_time, self._deactivate_connection
)
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
async def _acquire(self, timeout):
async def _acquire_impl():
ch = await self._queue.get() # type: PoolConnectionHolder
try:
proxy = await ch.acquire() # type: PoolConnectionProxy
except Exception:
self._queue.put_nowait(ch)
raise
else:
# Record the timeout, as we will apply it by default
# in release().
ch._timeout = timeout
return proxy
self._check_init()
if timeout is None:
return await _acquire_impl()
else:
return await asyncio.wait_for(_acquire_impl(), timeout=timeout, loop=self._loop)
|
async def _acquire(self, timeout):
async def _acquire_impl():
ch = await self._queue.get() # type: PoolConnectionHolder
try:
proxy = await ch.acquire() # type: PoolConnectionProxy
except Exception:
self._queue.put_nowait(ch)
raise
else:
return proxy
self._check_init()
if timeout is None:
return await _acquire_impl()
else:
return await asyncio.wait_for(_acquire_impl(), timeout=timeout, loop=self._loop)
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
async def _acquire_impl():
ch = await self._queue.get() # type: PoolConnectionHolder
try:
proxy = await ch.acquire() # type: PoolConnectionProxy
except Exception:
self._queue.put_nowait(ch)
raise
else:
# Record the timeout, as we will apply it by default
# in release().
ch._timeout = timeout
return proxy
|
async def _acquire_impl():
ch = await self._queue.get() # type: PoolConnectionHolder
try:
proxy = await ch.acquire() # type: PoolConnectionProxy
except Exception:
self._queue.put_nowait(ch)
raise
else:
return proxy
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
async def release(self, connection, *, timeout=None):
"""Release a database connection back to the pool.
:param Connection connection:
A :class:`~asyncpg.connection.Connection` object to release.
:param float timeout:
A timeout for releasing the connection. If not specified, defaults
to the timeout provided in the corresponding call to the
:meth:`Pool.acquire() <asyncpg.pool.Pool.acquire>` method.
.. versionchanged:: 0.14.0
Added the *timeout* parameter.
"""
async def _release_impl(ch: PoolConnectionHolder, timeout: float):
try:
await ch.release(timeout)
finally:
self._queue.put_nowait(ch)
self._check_init()
if (
type(connection) is not PoolConnectionProxy
or connection._holder._pool is not self
):
raise exceptions.InterfaceError(
"Pool.release() received invalid connection: "
"{connection!r} is not a member of this pool".format(connection=connection)
)
if connection._con is None:
# Already released, do nothing.
return
connection._detach()
if timeout is None:
timeout = connection._holder._timeout
# Use asyncio.shield() to guarantee that task cancellation
# does not prevent the connection from being returned to the
# pool properly.
return await asyncio.shield(
_release_impl(connection._holder, timeout), loop=self._loop
)
|
async def release(self, connection):
"""Release a database connection back to the pool."""
async def _release_impl(ch: PoolConnectionHolder):
try:
await ch.release()
finally:
self._queue.put_nowait(ch)
self._check_init()
if (
type(connection) is not PoolConnectionProxy
or connection._holder._pool is not self
):
raise exceptions.InterfaceError(
"Pool.release() received invalid connection: "
"{connection!r} is not a member of this pool".format(connection=connection)
)
if connection._con is None:
# Already released, do nothing.
return
connection._detach()
# Use asyncio.shield() to guarantee that task cancellation
# does not prevent the connection from being returned to the
# pool properly.
return await asyncio.shield(_release_impl(connection._holder), loop=self._loop)
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
async def _release_impl(ch: PoolConnectionHolder, timeout: float):
try:
await ch.release(timeout)
finally:
self._queue.put_nowait(ch)
|
async def _release_impl(ch: PoolConnectionHolder):
try:
await ch.release()
finally:
self._queue.put_nowait(ch)
|
https://github.com/MagicStack/asyncpg/issues/220
|
[2017-11-01 00:09:25,800] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f109d68>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,804] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<Pool.release.<locals>._release_impl() running at /usr/local/lib/python3.6/site-packages/asyncpg/pool.py:465> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f0891f8>()]> cb=[shield.<locals>._done_callback() at /usr/local/lib/python3.6/asyncio/tasks.py:672]>
[2017-11-01 00:09:25,808] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb588>
transport: <_SelectorSocketTransport fd=9>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197678>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,813] (ERROR) asyncio: Fatal write error on socket transport
protocol: <asyncpg.protocol.protocol.Protocol object at 0x7f830f6bb6d8>
transport: <_SelectorSocketTransport fd=10>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
await self._con.reset()
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
await self.execute(reset_query)
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
return await self._protocol.query(query, timeout)
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Exception ignored in: <coroutine object Pool.release.<locals>._release_impl at 0x7f830f197990>
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 465, in _release_impl
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 203, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/pool.py", line 192, in release
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 986, in reset
File "/usr/local/lib/python3.6/site-packages/asyncpg/connection.py", line 238, in execute
File "asyncpg/protocol/protocol.pyx", line 296, in query
AttributeError: 'weakref' object has no attribute 'cline_in_traceback'
[2017-11-01 00:09:25,817] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f1093a8>()]>>
[2017-11-01 00:09:25,821] (ERROR) asyncio: Task was destroyed but it is pending!
task: <Task pending coro=<DefaultModule.run() running at ./src/models/module.py:52> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7f830f089198>()]>>
[2017-11-01 00:09:25,825] (ERROR) DatabaseController: Traceback (most recent call last):
File "./src/controllers/database.py", line 102, in fetch
_logger.info("Conflict with recovery, retrying")
GeneratorExit
|
AttributeError
|
def __init__(
self,
protocol,
transport,
loop,
addr: (str, int) or str,
config: connect_utils._ClientConfiguration,
params: connect_utils._ConnectionParameters,
):
self._protocol = protocol
self._transport = transport
self._loop = loop
self._top_xact = None
self._uid = 0
self._aborted = False
# Incremented very time the connection is released back to a pool.
# Used to catch invalid references to connection-related resources
# post-release (e.g. explicit prepared statements).
self._pool_release_ctr = 0
self._addr = addr
self._config = config
self._params = params
self._stmt_cache = _StatementCache(
loop=loop,
max_size=config.statement_cache_size,
on_remove=self._maybe_gc_stmt,
max_lifetime=config.max_cached_statement_lifetime,
)
self._stmts_to_close = set()
self._listeners = {}
self._log_listeners = set()
settings = self._protocol.get_settings()
ver_string = settings.server_version
self._server_version = serverversion.split_server_version_string(ver_string)
self._server_caps = _detect_server_capabilities(self._server_version, settings)
self._intro_query = introspection.INTRO_LOOKUP_TYPES
self._reset_query = None
self._proxy = None
# Used to serialize operations that might involve anonymous
# statements. Specifically, we want to make the following
# operation atomic:
# ("prepare an anonymous statement", "use the statement")
#
# Used for `con.fetchval()`, `con.fetch()`, `con.fetchrow()`,
# `con.execute()`, and `con.executemany()`.
self._stmt_exclusive_section = _Atomic()
|
def __init__(
self,
protocol,
transport,
loop,
addr: (str, int) or str,
config: connect_utils._ClientConfiguration,
params: connect_utils._ConnectionParameters,
):
self._protocol = protocol
self._transport = transport
self._loop = loop
self._types_stmt = None
self._type_by_name_stmt = None
self._top_xact = None
self._uid = 0
self._aborted = False
# Incremented very time the connection is released back to a pool.
# Used to catch invalid references to connection-related resources
# post-release (e.g. explicit prepared statements).
self._pool_release_ctr = 0
self._addr = addr
self._config = config
self._params = params
self._stmt_cache = _StatementCache(
loop=loop,
max_size=config.statement_cache_size,
on_remove=self._maybe_gc_stmt,
max_lifetime=config.max_cached_statement_lifetime,
)
self._stmts_to_close = set()
self._listeners = {}
self._log_listeners = set()
settings = self._protocol.get_settings()
ver_string = settings.server_version
self._server_version = serverversion.split_server_version_string(ver_string)
self._server_caps = _detect_server_capabilities(self._server_version, settings)
self._intro_query = introspection.INTRO_LOOKUP_TYPES
self._reset_query = None
self._proxy = None
# Used to serialize operations that might involve anonymous
# statements. Specifically, we want to make the following
# operation atomic:
# ("prepare an anonymous statement", "use the statement")
#
# Used for `con.fetchval()`, `con.fetch()`, `con.fetchrow()`,
# `con.execute()`, and `con.executemany()`.
self._stmt_exclusive_section = _Atomic()
|
https://github.com/MagicStack/asyncpg/issues/198
|
In [1]: import os, asyncpg, asyncio
In [2]: loop = asyncio.get_event_loop()
In [3]: conn = loop.run_until_complete(asyncpg.connect(
...: host = 'pgaas.mail.yandex.net',
...: port = 12000,
...: user = 'statinfra_api',
...: database = 'statinfra_api_beta',
...: password=os.environ['DB_PASSWORD'],
...: ssl=True,
...: statement_cache_size=0
...: ))
In [4]: loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
---------------------------------------------------------------------------
InvalidSQLStatementNameError Traceback (most recent call last)
<ipython-input-5-8a44237acb87> in <module>()
----> 1 loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
/usr/lib/python3.6/asyncio/base_events.py in run_until_complete(self, future)
465 raise RuntimeError('Event loop stopped before Future completed.')
466
--> 467 return future.result()
468
469 def stop(self):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in fetch(self, query, timeout, *args)
339 """
340 self._check_open()
--> 341 return await self._execute(query, args, 0, timeout)
342
343 async def fetchval(self, query, *args, column=0, timeout=None):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _execute(self, query, args, limit, timeout, return_status)
1186 timeout = self._protocol._get_timeout(timeout)
1187 with self._stmt_exclusive_section:
-> 1188 return await self._do_execute(query, executor, timeout)
1189
1190 async def _executemany(self, query, args, timeout):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _do_execute(self, query, executor, timeout, retry)
1197 async def _do_execute(self, query, executor, timeout, retry=True):
1198 if timeout is None:
-> 1199 stmt = await self._get_statement(query, None)
1200 else:
1201 before = time.monotonic()
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _get_statement(self, query, timeout, named)
290 self._types_stmt = await self.prepare(self._intro_query)
291
--> 292 types = await self._types_stmt.fetch(list(ready))
293 self._protocol.get_settings().register_data_types(types)
294
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in fetch(self, timeout, *args)
155 :return: A list of :class:`Record` instances.
156 """
--> 157 data = await self.__bind_execute(args, 0, timeout)
158 return data
159
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in __bind_execute(self, args, limit, timeout)
194 protocol = self._connection._protocol
195 data, status, _ = await protocol.bind_execute(
--> 196 self._state, args, '', limit, True, timeout)
197 self._last_status = status
198 return data
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/protocol/protocol.pyx in bind_execute (asyncpg/protocol/protocol.c:66799)()
InvalidSQLStatementNameError: prepared statement "__asyncpg_stmt_1__" does not exist
|
InvalidSQLStatementNameError
|
async def _get_statement(self, query, timeout, *, named: bool = False):
statement = self._stmt_cache.get(query)
if statement is not None:
return statement
# Only use the cache when:
# * `statement_cache_size` is greater than 0;
# * query size is less than `max_cacheable_statement_size`.
use_cache = self._stmt_cache.get_max_size() > 0
if (
use_cache
and self._config.max_cacheable_statement_size
and len(query) > self._config.max_cacheable_statement_size
):
use_cache = False
if use_cache or named:
stmt_name = self._get_unique_id("stmt")
else:
stmt_name = ""
statement = await self._protocol.prepare(stmt_name, query, timeout)
ready = statement._init_types()
if ready is not True:
types, intro_stmt = await self.__execute(
self._intro_query, (list(ready),), 0, timeout
)
self._protocol.get_settings().register_data_types(types)
if not intro_stmt.name and not statement.name:
# The introspection query has used an anonymous statement,
# which has blown away the anonymous statement we've prepared
# for the query, so we need to re-prepare it.
statement = await self._protocol.prepare(stmt_name, query, timeout)
if use_cache:
self._stmt_cache.put(query, statement)
# If we've just created a new statement object, check if there
# are any statements for GC.
if self._stmts_to_close:
await self._cleanup_stmts()
return statement
|
async def _get_statement(self, query, timeout, *, named: bool = False):
statement = self._stmt_cache.get(query)
if statement is not None:
return statement
# Only use the cache when:
# * `statement_cache_size` is greater than 0;
# * query size is less than `max_cacheable_statement_size`.
use_cache = self._stmt_cache.get_max_size() > 0
if (
use_cache
and self._config.max_cacheable_statement_size
and len(query) > self._config.max_cacheable_statement_size
):
use_cache = False
if use_cache or named:
stmt_name = self._get_unique_id("stmt")
else:
stmt_name = ""
statement = await self._protocol.prepare(stmt_name, query, timeout)
ready = statement._init_types()
if ready is not True:
if self._types_stmt is None:
self._types_stmt = await self.prepare(self._intro_query)
types = await self._types_stmt.fetch(list(ready))
self._protocol.get_settings().register_data_types(types)
if use_cache:
self._stmt_cache.put(query, statement)
# If we've just created a new statement object, check if there
# are any statements for GC.
if self._stmts_to_close:
await self._cleanup_stmts()
return statement
|
https://github.com/MagicStack/asyncpg/issues/198
|
In [1]: import os, asyncpg, asyncio
In [2]: loop = asyncio.get_event_loop()
In [3]: conn = loop.run_until_complete(asyncpg.connect(
...: host = 'pgaas.mail.yandex.net',
...: port = 12000,
...: user = 'statinfra_api',
...: database = 'statinfra_api_beta',
...: password=os.environ['DB_PASSWORD'],
...: ssl=True,
...: statement_cache_size=0
...: ))
In [4]: loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
---------------------------------------------------------------------------
InvalidSQLStatementNameError Traceback (most recent call last)
<ipython-input-5-8a44237acb87> in <module>()
----> 1 loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
/usr/lib/python3.6/asyncio/base_events.py in run_until_complete(self, future)
465 raise RuntimeError('Event loop stopped before Future completed.')
466
--> 467 return future.result()
468
469 def stop(self):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in fetch(self, query, timeout, *args)
339 """
340 self._check_open()
--> 341 return await self._execute(query, args, 0, timeout)
342
343 async def fetchval(self, query, *args, column=0, timeout=None):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _execute(self, query, args, limit, timeout, return_status)
1186 timeout = self._protocol._get_timeout(timeout)
1187 with self._stmt_exclusive_section:
-> 1188 return await self._do_execute(query, executor, timeout)
1189
1190 async def _executemany(self, query, args, timeout):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _do_execute(self, query, executor, timeout, retry)
1197 async def _do_execute(self, query, executor, timeout, retry=True):
1198 if timeout is None:
-> 1199 stmt = await self._get_statement(query, None)
1200 else:
1201 before = time.monotonic()
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _get_statement(self, query, timeout, named)
290 self._types_stmt = await self.prepare(self._intro_query)
291
--> 292 types = await self._types_stmt.fetch(list(ready))
293 self._protocol.get_settings().register_data_types(types)
294
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in fetch(self, timeout, *args)
155 :return: A list of :class:`Record` instances.
156 """
--> 157 data = await self.__bind_execute(args, 0, timeout)
158 return data
159
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in __bind_execute(self, args, limit, timeout)
194 protocol = self._connection._protocol
195 data, status, _ = await protocol.bind_execute(
--> 196 self._state, args, '', limit, True, timeout)
197 self._last_status = status
198 return data
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/protocol/protocol.pyx in bind_execute (asyncpg/protocol/protocol.c:66799)()
InvalidSQLStatementNameError: prepared statement "__asyncpg_stmt_1__" does not exist
|
InvalidSQLStatementNameError
|
async def set_type_codec(
self, typename, *, schema="public", encoder, decoder, binary=None, format="text"
):
"""Set an encoder/decoder pair for the specified data type.
:param typename:
Name of the data type the codec is for.
:param schema:
Schema name of the data type the codec is for
(defaults to ``'public'``)
:param format:
The type of the argument received by the *decoder* callback,
and the type of the *encoder* callback return value.
If *format* is ``'text'`` (the default), the exchange datum is a
``str`` instance containing valid text representation of the
data type.
If *format* is ``'binary'``, the exchange datum is a ``bytes``
instance containing valid _binary_ representation of the
data type.
If *format* is ``'tuple'``, the exchange datum is a type-specific
``tuple`` of values. The table below lists supported data
types and their format for this mode.
+-----------------+---------------------------------------------+
| Type | Tuple layout |
+=================+=============================================+
| ``interval`` | (``months``, ``days``, ``microseconds``) |
+-----------------+---------------------------------------------+
| ``date`` | (``date ordinal relative to Jan 1 2000``,) |
| | ``-2^31`` for negative infinity timestamp |
| | ``2^31-1`` for positive infinity timestamp. |
+-----------------+---------------------------------------------+
| ``timestamp`` | (``microseconds relative to Jan 1 2000``,) |
| | ``-2^63`` for negative infinity timestamp |
| | ``2^63-1`` for positive infinity timestamp. |
+-----------------+---------------------------------------------+
| ``timestamp | (``microseconds relative to Jan 1 2000 |
| with time zone``| UTC``,) |
| | ``-2^63`` for negative infinity timestamp |
| | ``2^63-1`` for positive infinity timestamp. |
+-----------------+---------------------------------------------+
| ``time`` | (``microseconds``,) |
+-----------------+---------------------------------------------+
| ``time with | (``microseconds``, |
| time zone`` | ``time zone offset in seconds``) |
+-----------------+---------------------------------------------+
:param encoder:
Callable accepting a Python object as a single argument and
returning a value encoded according to *format*.
:param decoder:
Callable accepting a single argument encoded according to *format*
and returning a decoded Python object.
:param binary:
**Deprecated**. Use *format* instead.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> import datetime
>>> from dateutil.relativedelta import relativedelta
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... def encoder(delta):
... ndelta = delta.normalized()
... return (ndelta.years * 12 + ndelta.months,
... ndelta.days,
... ((ndelta.hours * 3600 +
... ndelta.minutes * 60 +
... ndelta.seconds) * 1000000 +
... ndelta.microseconds))
... def decoder(tup):
... return relativedelta(months=tup[0], days=tup[1],
... microseconds=tup[2])
... await con.set_type_codec(
... 'interval', schema='pg_catalog', encoder=encoder,
... decoder=decoder, format='tuple')
... result = await con.fetchval(
... "SELECT '2 years 3 mons 1 day'::interval")
... print(result)
... print(datetime.datetime(2002, 1, 1) + result)
>>> asyncio.get_event_loop().run_until_complete(run())
relativedelta(years=+2, months=+3, days=+1)
2004-04-02 00:00:00
.. versionadded:: 0.12.0
Added the ``format`` keyword argument and support for 'tuple'
format.
.. versionchanged:: 0.12.0
The ``binary`` keyword argument is deprecated in favor of
``format``.
"""
self._check_open()
if binary is not None:
format = "binary" if binary else "text"
warnings.warn(
"The `binary` keyword argument to "
"set_type_codec() is deprecated and will be removed in "
"asyncpg 0.13.0. Use the `format` keyword argument instead.",
DeprecationWarning,
stacklevel=2,
)
typeinfo = await self.fetchrow(introspection.TYPE_BY_NAME, typename, schema)
if not typeinfo:
raise ValueError("unknown type: {}.{}".format(schema, typename))
oid = typeinfo["oid"]
if typeinfo["kind"] != b"b" or typeinfo["elemtype"]:
raise ValueError(
"cannot use custom codec on non-scalar type {}.{}".format(schema, typename)
)
self._protocol.get_settings().add_python_codec(
oid, typename, schema, "scalar", encoder, decoder, format
)
# Statement cache is no longer valid due to codec changes.
self._drop_local_statement_cache()
|
async def set_type_codec(
self, typename, *, schema="public", encoder, decoder, binary=None, format="text"
):
"""Set an encoder/decoder pair for the specified data type.
:param typename:
Name of the data type the codec is for.
:param schema:
Schema name of the data type the codec is for
(defaults to ``'public'``)
:param format:
The type of the argument received by the *decoder* callback,
and the type of the *encoder* callback return value.
If *format* is ``'text'`` (the default), the exchange datum is a
``str`` instance containing valid text representation of the
data type.
If *format* is ``'binary'``, the exchange datum is a ``bytes``
instance containing valid _binary_ representation of the
data type.
If *format* is ``'tuple'``, the exchange datum is a type-specific
``tuple`` of values. The table below lists supported data
types and their format for this mode.
+-----------------+---------------------------------------------+
| Type | Tuple layout |
+=================+=============================================+
| ``interval`` | (``months``, ``days``, ``microseconds``) |
+-----------------+---------------------------------------------+
| ``date`` | (``date ordinal relative to Jan 1 2000``,) |
| | ``-2^31`` for negative infinity timestamp |
| | ``2^31-1`` for positive infinity timestamp. |
+-----------------+---------------------------------------------+
| ``timestamp`` | (``microseconds relative to Jan 1 2000``,) |
| | ``-2^63`` for negative infinity timestamp |
| | ``2^63-1`` for positive infinity timestamp. |
+-----------------+---------------------------------------------+
| ``timestamp | (``microseconds relative to Jan 1 2000 |
| with time zone``| UTC``,) |
| | ``-2^63`` for negative infinity timestamp |
| | ``2^63-1`` for positive infinity timestamp. |
+-----------------+---------------------------------------------+
| ``time`` | (``microseconds``,) |
+-----------------+---------------------------------------------+
| ``time with | (``microseconds``, |
| time zone`` | ``time zone offset in seconds``) |
+-----------------+---------------------------------------------+
:param encoder:
Callable accepting a Python object as a single argument and
returning a value encoded according to *format*.
:param decoder:
Callable accepting a single argument encoded according to *format*
and returning a decoded Python object.
:param binary:
**Deprecated**. Use *format* instead.
Example:
.. code-block:: pycon
>>> import asyncpg
>>> import asyncio
>>> import datetime
>>> from dateutil.relativedelta import relativedelta
>>> async def run():
... con = await asyncpg.connect(user='postgres')
... def encoder(delta):
... ndelta = delta.normalized()
... return (ndelta.years * 12 + ndelta.months,
... ndelta.days,
... ((ndelta.hours * 3600 +
... ndelta.minutes * 60 +
... ndelta.seconds) * 1000000 +
... ndelta.microseconds))
... def decoder(tup):
... return relativedelta(months=tup[0], days=tup[1],
... microseconds=tup[2])
... await con.set_type_codec(
... 'interval', schema='pg_catalog', encoder=encoder,
... decoder=decoder, format='tuple')
... result = await con.fetchval(
... "SELECT '2 years 3 mons 1 day'::interval")
... print(result)
... print(datetime.datetime(2002, 1, 1) + result)
>>> asyncio.get_event_loop().run_until_complete(run())
relativedelta(years=+2, months=+3, days=+1)
2004-04-02 00:00:00
.. versionadded:: 0.12.0
Added the ``format`` keyword argument and support for 'tuple'
format.
.. versionchanged:: 0.12.0
The ``binary`` keyword argument is deprecated in favor of
``format``.
"""
self._check_open()
if binary is not None:
format = "binary" if binary else "text"
warnings.warn(
"The `binary` keyword argument to "
"set_type_codec() is deprecated and will be removed in "
"asyncpg 0.13.0. Use the `format` keyword argument instead.",
DeprecationWarning,
stacklevel=2,
)
if self._type_by_name_stmt is None:
self._type_by_name_stmt = await self.prepare(introspection.TYPE_BY_NAME)
typeinfo = await self._type_by_name_stmt.fetchrow(typename, schema)
if not typeinfo:
raise ValueError("unknown type: {}.{}".format(schema, typename))
oid = typeinfo["oid"]
if typeinfo["kind"] != b"b" or typeinfo["elemtype"]:
raise ValueError(
"cannot use custom codec on non-scalar type {}.{}".format(schema, typename)
)
self._protocol.get_settings().add_python_codec(
oid, typename, schema, "scalar", encoder, decoder, format
)
# Statement cache is no longer valid due to codec changes.
self._drop_local_statement_cache()
|
https://github.com/MagicStack/asyncpg/issues/198
|
In [1]: import os, asyncpg, asyncio
In [2]: loop = asyncio.get_event_loop()
In [3]: conn = loop.run_until_complete(asyncpg.connect(
...: host = 'pgaas.mail.yandex.net',
...: port = 12000,
...: user = 'statinfra_api',
...: database = 'statinfra_api_beta',
...: password=os.environ['DB_PASSWORD'],
...: ssl=True,
...: statement_cache_size=0
...: ))
In [4]: loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
---------------------------------------------------------------------------
InvalidSQLStatementNameError Traceback (most recent call last)
<ipython-input-5-8a44237acb87> in <module>()
----> 1 loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
/usr/lib/python3.6/asyncio/base_events.py in run_until_complete(self, future)
465 raise RuntimeError('Event loop stopped before Future completed.')
466
--> 467 return future.result()
468
469 def stop(self):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in fetch(self, query, timeout, *args)
339 """
340 self._check_open()
--> 341 return await self._execute(query, args, 0, timeout)
342
343 async def fetchval(self, query, *args, column=0, timeout=None):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _execute(self, query, args, limit, timeout, return_status)
1186 timeout = self._protocol._get_timeout(timeout)
1187 with self._stmt_exclusive_section:
-> 1188 return await self._do_execute(query, executor, timeout)
1189
1190 async def _executemany(self, query, args, timeout):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _do_execute(self, query, executor, timeout, retry)
1197 async def _do_execute(self, query, executor, timeout, retry=True):
1198 if timeout is None:
-> 1199 stmt = await self._get_statement(query, None)
1200 else:
1201 before = time.monotonic()
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _get_statement(self, query, timeout, named)
290 self._types_stmt = await self.prepare(self._intro_query)
291
--> 292 types = await self._types_stmt.fetch(list(ready))
293 self._protocol.get_settings().register_data_types(types)
294
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in fetch(self, timeout, *args)
155 :return: A list of :class:`Record` instances.
156 """
--> 157 data = await self.__bind_execute(args, 0, timeout)
158 return data
159
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in __bind_execute(self, args, limit, timeout)
194 protocol = self._connection._protocol
195 data, status, _ = await protocol.bind_execute(
--> 196 self._state, args, '', limit, True, timeout)
197 self._last_status = status
198 return data
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/protocol/protocol.pyx in bind_execute (asyncpg/protocol/protocol.c:66799)()
InvalidSQLStatementNameError: prepared statement "__asyncpg_stmt_1__" does not exist
|
InvalidSQLStatementNameError
|
async def reset_type_codec(self, typename, *, schema="public"):
"""Reset *typename* codec to the default implementation.
:param typename:
Name of the data type the codec is for.
:param schema:
Schema name of the data type the codec is for
(defaults to ``'public'``)
.. versionadded:: 0.12.0
"""
typeinfo = await self.fetchrow(introspection.TYPE_BY_NAME, typename, schema)
if not typeinfo:
raise ValueError("unknown type: {}.{}".format(schema, typename))
oid = typeinfo["oid"]
self._protocol.get_settings().remove_python_codec(oid, typename, schema)
# Statement cache is no longer valid due to codec changes.
self._drop_local_statement_cache()
|
async def reset_type_codec(self, typename, *, schema="public"):
"""Reset *typename* codec to the default implementation.
:param typename:
Name of the data type the codec is for.
:param schema:
Schema name of the data type the codec is for
(defaults to ``'public'``)
.. versionadded:: 0.12.0
"""
if self._type_by_name_stmt is None:
self._type_by_name_stmt = await self.prepare(introspection.TYPE_BY_NAME)
typeinfo = await self._type_by_name_stmt.fetchrow(typename, schema)
if not typeinfo:
raise ValueError("unknown type: {}.{}".format(schema, typename))
oid = typeinfo["oid"]
self._protocol.get_settings().remove_python_codec(oid, typename, schema)
# Statement cache is no longer valid due to codec changes.
self._drop_local_statement_cache()
|
https://github.com/MagicStack/asyncpg/issues/198
|
In [1]: import os, asyncpg, asyncio
In [2]: loop = asyncio.get_event_loop()
In [3]: conn = loop.run_until_complete(asyncpg.connect(
...: host = 'pgaas.mail.yandex.net',
...: port = 12000,
...: user = 'statinfra_api',
...: database = 'statinfra_api_beta',
...: password=os.environ['DB_PASSWORD'],
...: ssl=True,
...: statement_cache_size=0
...: ))
In [4]: loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
---------------------------------------------------------------------------
InvalidSQLStatementNameError Traceback (most recent call last)
<ipython-input-5-8a44237acb87> in <module>()
----> 1 loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
/usr/lib/python3.6/asyncio/base_events.py in run_until_complete(self, future)
465 raise RuntimeError('Event loop stopped before Future completed.')
466
--> 467 return future.result()
468
469 def stop(self):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in fetch(self, query, timeout, *args)
339 """
340 self._check_open()
--> 341 return await self._execute(query, args, 0, timeout)
342
343 async def fetchval(self, query, *args, column=0, timeout=None):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _execute(self, query, args, limit, timeout, return_status)
1186 timeout = self._protocol._get_timeout(timeout)
1187 with self._stmt_exclusive_section:
-> 1188 return await self._do_execute(query, executor, timeout)
1189
1190 async def _executemany(self, query, args, timeout):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _do_execute(self, query, executor, timeout, retry)
1197 async def _do_execute(self, query, executor, timeout, retry=True):
1198 if timeout is None:
-> 1199 stmt = await self._get_statement(query, None)
1200 else:
1201 before = time.monotonic()
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _get_statement(self, query, timeout, named)
290 self._types_stmt = await self.prepare(self._intro_query)
291
--> 292 types = await self._types_stmt.fetch(list(ready))
293 self._protocol.get_settings().register_data_types(types)
294
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in fetch(self, timeout, *args)
155 :return: A list of :class:`Record` instances.
156 """
--> 157 data = await self.__bind_execute(args, 0, timeout)
158 return data
159
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in __bind_execute(self, args, limit, timeout)
194 protocol = self._connection._protocol
195 data, status, _ = await protocol.bind_execute(
--> 196 self._state, args, '', limit, True, timeout)
197 self._last_status = status
198 return data
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/protocol/protocol.pyx in bind_execute (asyncpg/protocol/protocol.c:66799)()
InvalidSQLStatementNameError: prepared statement "__asyncpg_stmt_1__" does not exist
|
InvalidSQLStatementNameError
|
async def set_builtin_type_codec(self, typename, *, schema="public", codec_name):
"""Set a builtin codec for the specified data type.
:param typename: Name of the data type the codec is for.
:param schema: Schema name of the data type the codec is for
(defaults to 'public')
:param codec_name: The name of the builtin codec.
"""
self._check_open()
typeinfo = await self.fetchrow(introspection.TYPE_BY_NAME, typename, schema)
if not typeinfo:
raise ValueError("unknown type: {}.{}".format(schema, typename))
oid = typeinfo["oid"]
if typeinfo["kind"] != b"b" or typeinfo["elemtype"]:
raise ValueError("cannot alias non-scalar type {}.{}".format(schema, typename))
self._protocol.get_settings().set_builtin_type_codec(
oid, typename, schema, "scalar", codec_name
)
# Statement cache is no longer valid due to codec changes.
self._drop_local_statement_cache()
|
async def set_builtin_type_codec(self, typename, *, schema="public", codec_name):
"""Set a builtin codec for the specified data type.
:param typename: Name of the data type the codec is for.
:param schema: Schema name of the data type the codec is for
(defaults to 'public')
:param codec_name: The name of the builtin codec.
"""
self._check_open()
if self._type_by_name_stmt is None:
self._type_by_name_stmt = await self.prepare(introspection.TYPE_BY_NAME)
typeinfo = await self._type_by_name_stmt.fetchrow(typename, schema)
if not typeinfo:
raise ValueError("unknown type: {}.{}".format(schema, typename))
oid = typeinfo["oid"]
if typeinfo["kind"] != b"b" or typeinfo["elemtype"]:
raise ValueError("cannot alias non-scalar type {}.{}".format(schema, typename))
self._protocol.get_settings().set_builtin_type_codec(
oid, typename, schema, "scalar", codec_name
)
# Statement cache is no longer valid due to codec changes.
self._drop_local_statement_cache()
|
https://github.com/MagicStack/asyncpg/issues/198
|
In [1]: import os, asyncpg, asyncio
In [2]: loop = asyncio.get_event_loop()
In [3]: conn = loop.run_until_complete(asyncpg.connect(
...: host = 'pgaas.mail.yandex.net',
...: port = 12000,
...: user = 'statinfra_api',
...: database = 'statinfra_api_beta',
...: password=os.environ['DB_PASSWORD'],
...: ssl=True,
...: statement_cache_size=0
...: ))
In [4]: loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
---------------------------------------------------------------------------
InvalidSQLStatementNameError Traceback (most recent call last)
<ipython-input-5-8a44237acb87> in <module>()
----> 1 loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
/usr/lib/python3.6/asyncio/base_events.py in run_until_complete(self, future)
465 raise RuntimeError('Event loop stopped before Future completed.')
466
--> 467 return future.result()
468
469 def stop(self):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in fetch(self, query, timeout, *args)
339 """
340 self._check_open()
--> 341 return await self._execute(query, args, 0, timeout)
342
343 async def fetchval(self, query, *args, column=0, timeout=None):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _execute(self, query, args, limit, timeout, return_status)
1186 timeout = self._protocol._get_timeout(timeout)
1187 with self._stmt_exclusive_section:
-> 1188 return await self._do_execute(query, executor, timeout)
1189
1190 async def _executemany(self, query, args, timeout):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _do_execute(self, query, executor, timeout, retry)
1197 async def _do_execute(self, query, executor, timeout, retry=True):
1198 if timeout is None:
-> 1199 stmt = await self._get_statement(query, None)
1200 else:
1201 before = time.monotonic()
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _get_statement(self, query, timeout, named)
290 self._types_stmt = await self.prepare(self._intro_query)
291
--> 292 types = await self._types_stmt.fetch(list(ready))
293 self._protocol.get_settings().register_data_types(types)
294
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in fetch(self, timeout, *args)
155 :return: A list of :class:`Record` instances.
156 """
--> 157 data = await self.__bind_execute(args, 0, timeout)
158 return data
159
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in __bind_execute(self, args, limit, timeout)
194 protocol = self._connection._protocol
195 data, status, _ = await protocol.bind_execute(
--> 196 self._state, args, '', limit, True, timeout)
197 self._last_status = status
198 return data
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/protocol/protocol.pyx in bind_execute (asyncpg/protocol/protocol.c:66799)()
InvalidSQLStatementNameError: prepared statement "__asyncpg_stmt_1__" does not exist
|
InvalidSQLStatementNameError
|
async def _execute(self, query, args, limit, timeout, return_status=False):
with self._stmt_exclusive_section:
result, _ = await self.__execute(
query, args, limit, timeout, return_status=return_status
)
return result
|
async def _execute(self, query, args, limit, timeout, return_status=False):
executor = lambda stmt, timeout: self._protocol.bind_execute(
stmt, args, "", limit, return_status, timeout
)
timeout = self._protocol._get_timeout(timeout)
with self._stmt_exclusive_section:
return await self._do_execute(query, executor, timeout)
|
https://github.com/MagicStack/asyncpg/issues/198
|
In [1]: import os, asyncpg, asyncio
In [2]: loop = asyncio.get_event_loop()
In [3]: conn = loop.run_until_complete(asyncpg.connect(
...: host = 'pgaas.mail.yandex.net',
...: port = 12000,
...: user = 'statinfra_api',
...: database = 'statinfra_api_beta',
...: password=os.environ['DB_PASSWORD'],
...: ssl=True,
...: statement_cache_size=0
...: ))
In [4]: loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
---------------------------------------------------------------------------
InvalidSQLStatementNameError Traceback (most recent call last)
<ipython-input-5-8a44237acb87> in <module>()
----> 1 loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
/usr/lib/python3.6/asyncio/base_events.py in run_until_complete(self, future)
465 raise RuntimeError('Event loop stopped before Future completed.')
466
--> 467 return future.result()
468
469 def stop(self):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in fetch(self, query, timeout, *args)
339 """
340 self._check_open()
--> 341 return await self._execute(query, args, 0, timeout)
342
343 async def fetchval(self, query, *args, column=0, timeout=None):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _execute(self, query, args, limit, timeout, return_status)
1186 timeout = self._protocol._get_timeout(timeout)
1187 with self._stmt_exclusive_section:
-> 1188 return await self._do_execute(query, executor, timeout)
1189
1190 async def _executemany(self, query, args, timeout):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _do_execute(self, query, executor, timeout, retry)
1197 async def _do_execute(self, query, executor, timeout, retry=True):
1198 if timeout is None:
-> 1199 stmt = await self._get_statement(query, None)
1200 else:
1201 before = time.monotonic()
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _get_statement(self, query, timeout, named)
290 self._types_stmt = await self.prepare(self._intro_query)
291
--> 292 types = await self._types_stmt.fetch(list(ready))
293 self._protocol.get_settings().register_data_types(types)
294
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in fetch(self, timeout, *args)
155 :return: A list of :class:`Record` instances.
156 """
--> 157 data = await self.__bind_execute(args, 0, timeout)
158 return data
159
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in __bind_execute(self, args, limit, timeout)
194 protocol = self._connection._protocol
195 data, status, _ = await protocol.bind_execute(
--> 196 self._state, args, '', limit, True, timeout)
197 self._last_status = status
198 return data
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/protocol/protocol.pyx in bind_execute (asyncpg/protocol/protocol.c:66799)()
InvalidSQLStatementNameError: prepared statement "__asyncpg_stmt_1__" does not exist
|
InvalidSQLStatementNameError
|
async def _executemany(self, query, args, timeout):
executor = lambda stmt, timeout: self._protocol.bind_execute_many(
stmt, args, "", timeout
)
timeout = self._protocol._get_timeout(timeout)
with self._stmt_exclusive_section:
result, _ = await self._do_execute(query, executor, timeout)
return result
|
async def _executemany(self, query, args, timeout):
executor = lambda stmt, timeout: self._protocol.bind_execute_many(
stmt, args, "", timeout
)
timeout = self._protocol._get_timeout(timeout)
with self._stmt_exclusive_section:
return await self._do_execute(query, executor, timeout)
|
https://github.com/MagicStack/asyncpg/issues/198
|
In [1]: import os, asyncpg, asyncio
In [2]: loop = asyncio.get_event_loop()
In [3]: conn = loop.run_until_complete(asyncpg.connect(
...: host = 'pgaas.mail.yandex.net',
...: port = 12000,
...: user = 'statinfra_api',
...: database = 'statinfra_api_beta',
...: password=os.environ['DB_PASSWORD'],
...: ssl=True,
...: statement_cache_size=0
...: ))
In [4]: loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
---------------------------------------------------------------------------
InvalidSQLStatementNameError Traceback (most recent call last)
<ipython-input-5-8a44237acb87> in <module>()
----> 1 loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
/usr/lib/python3.6/asyncio/base_events.py in run_until_complete(self, future)
465 raise RuntimeError('Event loop stopped before Future completed.')
466
--> 467 return future.result()
468
469 def stop(self):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in fetch(self, query, timeout, *args)
339 """
340 self._check_open()
--> 341 return await self._execute(query, args, 0, timeout)
342
343 async def fetchval(self, query, *args, column=0, timeout=None):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _execute(self, query, args, limit, timeout, return_status)
1186 timeout = self._protocol._get_timeout(timeout)
1187 with self._stmt_exclusive_section:
-> 1188 return await self._do_execute(query, executor, timeout)
1189
1190 async def _executemany(self, query, args, timeout):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _do_execute(self, query, executor, timeout, retry)
1197 async def _do_execute(self, query, executor, timeout, retry=True):
1198 if timeout is None:
-> 1199 stmt = await self._get_statement(query, None)
1200 else:
1201 before = time.monotonic()
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _get_statement(self, query, timeout, named)
290 self._types_stmt = await self.prepare(self._intro_query)
291
--> 292 types = await self._types_stmt.fetch(list(ready))
293 self._protocol.get_settings().register_data_types(types)
294
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in fetch(self, timeout, *args)
155 :return: A list of :class:`Record` instances.
156 """
--> 157 data = await self.__bind_execute(args, 0, timeout)
158 return data
159
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in __bind_execute(self, args, limit, timeout)
194 protocol = self._connection._protocol
195 data, status, _ = await protocol.bind_execute(
--> 196 self._state, args, '', limit, True, timeout)
197 self._last_status = status
198 return data
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/protocol/protocol.pyx in bind_execute (asyncpg/protocol/protocol.c:66799)()
InvalidSQLStatementNameError: prepared statement "__asyncpg_stmt_1__" does not exist
|
InvalidSQLStatementNameError
|
async def _do_execute(self, query, executor, timeout, retry=True):
if timeout is None:
stmt = await self._get_statement(query, None)
else:
before = time.monotonic()
stmt = await self._get_statement(query, timeout)
after = time.monotonic()
timeout -= after - before
before = after
try:
if timeout is None:
result = await executor(stmt, None)
else:
try:
result = await executor(stmt, timeout)
finally:
after = time.monotonic()
timeout -= after - before
except exceptions.InvalidCachedStatementError as e:
# PostgreSQL will raise an exception when it detects
# that the result type of the query has changed from
# when the statement was prepared. This may happen,
# for example, after an ALTER TABLE or SET search_path.
#
# When this happens, and there is no transaction running,
# we can simply re-prepare the statement and try once
# again. We deliberately retry only once as this is
# supposed to be a rare occurrence.
#
# If the transaction _is_ running, this error will put it
# into an error state, and we have no choice but to
# re-raise the exception.
#
# In either case we clear the statement cache for this
# connection and all other connections of the pool this
# connection belongs to (if any).
#
# See https://github.com/MagicStack/asyncpg/issues/72
# and https://github.com/MagicStack/asyncpg/issues/76
# for discussion.
#
self._drop_global_statement_cache()
if self._protocol.is_in_transaction() or not retry:
raise
else:
return await self._do_execute(query, executor, timeout, retry=False)
return result, stmt
|
async def _do_execute(self, query, executor, timeout, retry=True):
if timeout is None:
stmt = await self._get_statement(query, None)
else:
before = time.monotonic()
stmt = await self._get_statement(query, timeout)
after = time.monotonic()
timeout -= after - before
before = after
try:
if timeout is None:
result = await executor(stmt, None)
else:
try:
result = await executor(stmt, timeout)
finally:
after = time.monotonic()
timeout -= after - before
except exceptions.InvalidCachedStatementError as e:
# PostgreSQL will raise an exception when it detects
# that the result type of the query has changed from
# when the statement was prepared. This may happen,
# for example, after an ALTER TABLE or SET search_path.
#
# When this happens, and there is no transaction running,
# we can simply re-prepare the statement and try once
# again. We deliberately retry only once as this is
# supposed to be a rare occurrence.
#
# If the transaction _is_ running, this error will put it
# into an error state, and we have no choice but to
# re-raise the exception.
#
# In either case we clear the statement cache for this
# connection and all other connections of the pool this
# connection belongs to (if any).
#
# See https://github.com/MagicStack/asyncpg/issues/72
# and https://github.com/MagicStack/asyncpg/issues/76
# for discussion.
#
self._drop_global_statement_cache()
if self._protocol.is_in_transaction() or not retry:
raise
else:
result = await self._do_execute(query, executor, timeout, retry=False)
return result
|
https://github.com/MagicStack/asyncpg/issues/198
|
In [1]: import os, asyncpg, asyncio
In [2]: loop = asyncio.get_event_loop()
In [3]: conn = loop.run_until_complete(asyncpg.connect(
...: host = 'pgaas.mail.yandex.net',
...: port = 12000,
...: user = 'statinfra_api',
...: database = 'statinfra_api_beta',
...: password=os.environ['DB_PASSWORD'],
...: ssl=True,
...: statement_cache_size=0
...: ))
In [4]: loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
---------------------------------------------------------------------------
InvalidSQLStatementNameError Traceback (most recent call last)
<ipython-input-5-8a44237acb87> in <module>()
----> 1 loop.run_until_complete(conn.fetch('SELECT * FROM tasks'))
/usr/lib/python3.6/asyncio/base_events.py in run_until_complete(self, future)
465 raise RuntimeError('Event loop stopped before Future completed.')
466
--> 467 return future.result()
468
469 def stop(self):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in fetch(self, query, timeout, *args)
339 """
340 self._check_open()
--> 341 return await self._execute(query, args, 0, timeout)
342
343 async def fetchval(self, query, *args, column=0, timeout=None):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _execute(self, query, args, limit, timeout, return_status)
1186 timeout = self._protocol._get_timeout(timeout)
1187 with self._stmt_exclusive_section:
-> 1188 return await self._do_execute(query, executor, timeout)
1189
1190 async def _executemany(self, query, args, timeout):
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _do_execute(self, query, executor, timeout, retry)
1197 async def _do_execute(self, query, executor, timeout, retry=True):
1198 if timeout is None:
-> 1199 stmt = await self._get_statement(query, None)
1200 else:
1201 before = time.monotonic()
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/connection.py in _get_statement(self, query, timeout, named)
290 self._types_stmt = await self.prepare(self._intro_query)
291
--> 292 types = await self._types_stmt.fetch(list(ready))
293 self._protocol.get_settings().register_data_types(types)
294
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in fetch(self, timeout, *args)
155 :return: A list of :class:`Record` instances.
156 """
--> 157 data = await self.__bind_execute(args, 0, timeout)
158 return data
159
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/prepared_stmt.py in __bind_execute(self, args, limit, timeout)
194 protocol = self._connection._protocol
195 data, status, _ = await protocol.bind_execute(
--> 196 self._state, args, '', limit, True, timeout)
197 self._last_status = status
198 return data
~/.virtualenvs/statbox-abt-backend/lib/python3.6/site-packages/asyncpg/protocol/protocol.pyx in bind_execute (asyncpg/protocol/protocol.c:66799)()
InvalidSQLStatementNameError: prepared statement "__asyncpg_stmt_1__" does not exist
|
InvalidSQLStatementNameError
|
def get_mx_flags(build_ext, cpp_flags):
mx_include_dirs = [get_mx_include_dirs()]
mx_lib_dirs = get_mx_lib_dirs()
mx_libs = get_mx_libs(build_ext, mx_lib_dirs, cpp_flags)
compile_flags = []
has_mkldnn = is_mx_mkldnn()
for include_dir in mx_include_dirs:
compile_flags.append("-I%s" % include_dir)
if has_mkldnn:
mkldnn_include = os.path.join(include_dir, "mkldnn")
compile_flags.append("-I%s" % mkldnn_include)
link_flags = []
for lib_dir in mx_lib_dirs:
link_flags.append("-Wl,-rpath,%s" % lib_dir)
link_flags.append("-L%s" % lib_dir)
for lib in mx_libs:
link_flags.append("-l%s" % lib)
return compile_flags, link_flags
|
def get_mx_flags(build_ext, cpp_flags):
mx_include_dirs = [get_mx_include_dirs()]
mx_lib_dirs = get_mx_lib_dirs()
mx_libs = get_mx_libs(build_ext, mx_lib_dirs, cpp_flags)
compile_flags = []
for include_dir in mx_include_dirs:
compile_flags.append("-I%s" % include_dir)
link_flags = []
for lib_dir in mx_lib_dirs:
link_flags.append("-Wl,-rpath,%s" % lib_dir)
link_flags.append("-L%s" % lib_dir)
for lib in mx_libs:
link_flags.append("-l%s" % lib)
return compile_flags, link_flags
|
https://github.com/bytedance/byteps/issues/222
|
(mx_byteps) ubuntu@ip-172-31-85-4:~$ bpslaunch python byteps/example/mxnet/train_imagenet_byteps.py --benchmark 1 --batch-size=32
BytePS launching worker
INFO:root:start with arguments Namespace(batch_size=32, benchmark=1, cpu_train=False, data_nthreads=4, data_train=None, data_train_idx='', data_val=None, data_val_idx='', disp_batches=20, dtype='float32', gc_threshold=0.5, gc_type='none', image_shape='3,224,224', initializer='default', kv_store='device', load_epoch=None, loss='', lr=0.1, lr_factor=0.1, lr_step_epochs='30,60', macrobatch_size=0, max_random_aspect_ratio=0.25, max_random_h=36, max_random_l=50, max_random_rotate_angle=10, max_random_s=50, max_random_scale=1, max_random_shear_ratio=0.1, min_random_scale=1, model_prefix=None, mom=0.9, monitor=0, network='resnet', num_classes=1000, num_epochs=80, num_examples=1281167, num_layers=50, optimizer='sgd', pad_size=0, random_crop=1, random_mirror=1, rgb_mean='123.68,116.779,103.939', test_io=0, top_k=0, warmup_epochs=5, warmup_strategy='linear', wd=0.0001)
INFO:root:start with arguments Namespace(batch_size=32, benchmark=1, cpu_train=False, data_nthreads=4, data_train=None, data_train_idx='', data_val=None, data_val_idx='', disp_batches=20, dtype='float32', gc_threshold=0.5, gc_type='none', image_shape='3,224,224', initializer='default', kv_store='device', load_epoch=None, loss='', lr=0.1, lr_factor=0.1, lr_step_epochs='30,60', macrobatch_size=0, max_random_aspect_ratio=0.25, max_random_h=36, max_random_l=50, max_random_rotate_angle=10, max_random_s=50, max_random_scale=1, max_random_shear_ratio=0.1, min_random_scale=1, model_prefix=None, mom=0.9, monitor=0, network='resnet', num_classes=1000, num_epochs=80, num_examples=1281167, num_layers=50, optimizer='sgd', pad_size=0, random_crop=1, random_mirror=1, rgb_mean='123.68,116.779,103.939', test_io=0, top_k=0, warmup_epochs=5, warmup_strategy='linear', wd=0.0001)
INFO:root:start with arguments Namespace(batch_size=32, benchmark=1, cpu_train=False, data_nthreads=4, data_train=None, data_train_idx='', data_val=None, data_val_idx='', disp_batches=20, dtype='float32', gc_threshold=0.5, gc_type='none', image_shape='3,224,224', initializer='default', kv_store='device', load_epoch=None, loss='', lr=0.1, lr_factor=0.1, lr_step_epochs='30,60', macrobatch_size=0, max_random_aspect_ratio=0.25, max_random_h=36, max_random_l=50, max_random_rotate_angle=10, max_random_s=50, max_random_scale=1, max_random_shear_ratio=0.1, min_random_scale=1, model_prefix=None, mom=0.9, monitor=0, network='resnet', num_classes=1000, num_epochs=80, num_examples=1281167, num_layers=50, optimizer='sgd', pad_size=0, random_crop=1, random_mirror=1, rgb_mean='123.68,116.779,103.939', test_io=0, top_k=0, warmup_epochs=5, warmup_strategy='linear', wd=0.0001)
INFO:root:start with arguments Namespace(batch_size=32, benchmark=1, cpu_train=False, data_nthreads=4, data_train=None, data_train_idx='', data_val=None, data_val_idx='', disp_batches=20, dtype='float32', gc_threshold=0.5, gc_type='none', image_shape='3,224,224', initializer='default', kv_store='device', load_epoch=None, loss='', lr=0.1, lr_factor=0.1, lr_step_epochs='30,60', macrobatch_size=0, max_random_aspect_ratio=0.25, max_random_h=36, max_random_l=50, max_random_rotate_angle=10, max_random_s=50, max_random_scale=1, max_random_shear_ratio=0.1, min_random_scale=1, model_prefix=None, mom=0.9, monitor=0, network='resnet', num_classes=1000, num_epochs=80, num_examples=1281167, num_layers=50, optimizer='sgd', pad_size=0, random_crop=1, random_mirror=1, rgb_mean='123.68,116.779,103.939', test_io=0, top_k=0, warmup_epochs=5, warmup_strategy='linear', wd=0.0001)
INFO:root:Launch BytePS process on GPU-2
learning rate from ``lr_scheduler`` has been overwritten by ``learning_rate`` in optimizer.
INFO:root:Launch BytePS process on GPU-0
INFO:root:Launch BytePS process on GPU-1
learning rate from ``lr_scheduler`` has been overwritten by ``learning_rate`` in optimizer.
learning rate from ``lr_scheduler`` has been overwritten by ``learning_rate`` in optimizer.
environ({'LESSOPEN': '| /usr/bin/lesspipe %s', 'CONDA_PROMPT_MODIFIER': '(mx_byteps) ', 'BYTEPS_LOCAL_RANK': '2', 'MAIL': '/var/mail/ubuntu', 'SSH_CLIENT': '76.126.245.87 59732 22', 'USER': 'ubuntu', 'LD_LIBRARY_PATH_WITH_DEFAULT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:', 'LD_LIBRARY_PATH': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/home/ubuntu/src/cntk/bindings/python/cntk/libs:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/efa/lib:/usr/local/cuda/lib:/opt/amazon/efa/lib:/usr/local/mpi/lib:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:', 'SHLVL': '1', 'CONDA_SHLVL': '1', 'HOME': '/home/ubuntu', 'SSH_TTY': '/dev/pts/1', 'DMLC_PS_ROOT_URI': '10.0.0.1', 'LC_TERMINAL_VERSION': '3.3.9', 'DMLC_NUM_SERVER': '1', 'LOGNAME': 'ubuntu', 'DMLC_PS_ROOT_PORT': '1234', '_': '/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch', 'BYTEPS_LOCAL_SIZE': '4', 'PKG_CONFIG_PATH': '/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:', 'XDG_SESSION_ID': '3', 'TERM': 'xterm-256color', 'DMLC_NUM_WORKER': '1', 'PATH': '/home/ubuntu/anaconda3/envs/mx_byteps/bin:/home/ubuntu/anaconda3/bin/:/home/ubuntu/bin:/home/ubuntu/.local/bin:/home/ubuntu/anaconda3/bin/:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/home/ubuntu/src/cntk/bin:/usr/local/mpi/bin:/opt/aws/neuron/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin', 'DMLC_ROLE': 'worker', 'XDG_RUNTIME_DIR': '/run/user/1000', 'LANG': 'en_US.UTF-8', 'LS_COLORS': 'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:', 'CONDA_PYTHON_EXE': '/home/ubuntu/anaconda3/bin/python', 'SHELL': '/bin/bash', 'CONDA_DEFAULT_ENV': 'mx_byteps', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'MODULE_VERSION': '3.2.10', 'LD_LIBRARY_PATH_WITHOUT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:', 'LC_TERMINAL': 'iTerm2', 'MODULE_VERSION_STACK': '3.2.10', 'PWD': '/home/ubuntu', 'LOADEDMODULES': '', 'CONDA_EXE': '/home/ubuntu/anaconda3/bin/conda', 'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop', 'SSH_CONNECTION': '76.126.245.87 59732 172.31.85.4 22', 'PYTHONPATH': '/home/ubuntu/src/cntk/bindings/python', 'DMLC_WORKER_ID': '0', 'NVIDIA_VISIBLE_DEVICES': '0,1,2,3', 'CONDA_PREFIX': '/home/ubuntu/anaconda3/envs/mx_byteps', 'MANPATH': '/opt/aws/neuron/share/man:', 'MODULEPATH': '/etc/environment-modules/modules:/usr/share/modules/versions:/usr/Modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles', 'MODULESHOME': '/usr/share/modules'})=============2
INFO:root:Launch BytePS process on GPU-3
environ({'LESSOPEN': '| /usr/bin/lesspipe %s', 'CONDA_PROMPT_MODIFIER': '(mx_byteps) ', 'BYTEPS_LOCAL_RANK': '1', 'MAIL': '/var/mail/ubuntu', 'SSH_CLIENT': '76.126.245.87 59732 22', 'USER': 'ubuntu', 'LD_LIBRARY_PATH_WITH_DEFAULT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:', 'LD_LIBRARY_PATH': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/home/ubuntu/src/cntk/bindings/python/cntk/libs:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/efa/lib:/usr/local/cuda/lib:/opt/amazon/efa/lib:/usr/local/mpi/lib:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:', 'SHLVL': '1', 'CONDA_SHLVL': '1', 'HOME': '/home/ubuntu', 'SSH_TTY': '/dev/pts/1', 'DMLC_PS_ROOT_URI': '10.0.0.1', 'LC_TERMINAL_VERSION': '3.3.9', 'DMLC_NUM_SERVER': '1', 'LOGNAME': 'ubuntu', 'DMLC_PS_ROOT_PORT': '1234', '_': '/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch', 'BYTEPS_LOCAL_SIZE': '4', 'PKG_CONFIG_PATH': '/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:', 'XDG_SESSION_ID': '3', 'TERM': 'xterm-256color', 'DMLC_NUM_WORKER': '1', 'PATH': '/home/ubuntu/anaconda3/envs/mx_byteps/bin:/home/ubuntu/anaconda3/bin/:/home/ubuntu/bin:/home/ubuntu/.local/bin:/home/ubuntu/anaconda3/bin/:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/home/ubuntu/src/cntk/bin:/usr/local/mpi/bin:/opt/aws/neuron/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin', 'DMLC_ROLE': 'worker', 'XDG_RUNTIME_DIR': '/run/user/1000', 'LANG': 'en_US.UTF-8', 'LS_COLORS': 'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:', 'CONDA_PYTHON_EXE': '/home/ubuntu/anaconda3/bin/python', 'SHELL': '/bin/bash', 'CONDA_DEFAULT_ENV': 'mx_byteps', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'MODULE_VERSION': '3.2.10', 'LD_LIBRARY_PATH_WITHOUT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:', 'LC_TERMINAL': 'iTerm2', 'MODULE_VERSION_STACK': '3.2.10', 'PWD': '/home/ubuntu', 'LOADEDMODULES': '', 'CONDA_EXE': '/home/ubuntu/anaconda3/bin/conda', 'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop', 'SSH_CONNECTION': '76.126.245.87 59732 172.31.85.4 22', 'PYTHONPATH': '/home/ubuntu/src/cntk/bindings/python', 'DMLC_WORKER_ID': '0', 'NVIDIA_VISIBLE_DEVICES': '0,1,2,3', 'CONDA_PREFIX': '/home/ubuntu/anaconda3/envs/mx_byteps', 'MANPATH': '/opt/aws/neuron/share/man:', 'MODULEPATH': '/etc/environment-modules/modules:/usr/share/modules/versions:/usr/Modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles', 'MODULESHOME': '/usr/share/modules'})=============1
learning rate from ``lr_scheduler`` has been overwritten by ``learning_rate`` in optimizer.
environ({'LESSOPEN': '| /usr/bin/lesspipe %s', 'CONDA_PROMPT_MODIFIER': '(mx_byteps) ', 'BYTEPS_LOCAL_RANK': '0', 'MAIL': '/var/mail/ubuntu', 'SSH_CLIENT': '76.126.245.87 59732 22', 'USER': 'ubuntu', 'LD_LIBRARY_PATH_WITH_DEFAULT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:', 'LD_LIBRARY_PATH': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/home/ubuntu/src/cntk/bindings/python/cntk/libs:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/efa/lib:/usr/local/cuda/lib:/opt/amazon/efa/lib:/usr/local/mpi/lib:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:', 'SHLVL': '1', 'CONDA_SHLVL': '1', 'HOME': '/home/ubuntu', 'SSH_TTY': '/dev/pts/1', 'DMLC_PS_ROOT_URI': '10.0.0.1', 'LC_TERMINAL_VERSION': '3.3.9', 'DMLC_NUM_SERVER': '1', 'LOGNAME': 'ubuntu', 'DMLC_PS_ROOT_PORT': '1234', '_': '/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch', 'BYTEPS_LOCAL_SIZE': '4', 'PKG_CONFIG_PATH': '/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:', 'XDG_SESSION_ID': '3', 'TERM': 'xterm-256color', 'DMLC_NUM_WORKER': '1', 'PATH': '/home/ubuntu/anaconda3/envs/mx_byteps/bin:/home/ubuntu/anaconda3/bin/:/home/ubuntu/bin:/home/ubuntu/.local/bin:/home/ubuntu/anaconda3/bin/:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/home/ubuntu/src/cntk/bin:/usr/local/mpi/bin:/opt/aws/neuron/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin', 'DMLC_ROLE': 'worker', 'XDG_RUNTIME_DIR': '/run/user/1000', 'LANG': 'en_US.UTF-8', 'LS_COLORS': 'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:', 'CONDA_PYTHON_EXE': '/home/ubuntu/anaconda3/bin/python', 'SHELL': '/bin/bash', 'CONDA_DEFAULT_ENV': 'mx_byteps', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'MODULE_VERSION': '3.2.10', 'LD_LIBRARY_PATH_WITHOUT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:', 'LC_TERMINAL': 'iTerm2', 'MODULE_VERSION_STACK': '3.2.10', 'PWD': '/home/ubuntu', 'LOADEDMODULES': '', 'CONDA_EXE': '/home/ubuntu/anaconda3/bin/conda', 'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop', 'SSH_CONNECTION': '76.126.245.87 59732 172.31.85.4 22', 'PYTHONPATH': '/home/ubuntu/src/cntk/bindings/python', 'DMLC_WORKER_ID': '0', 'NVIDIA_VISIBLE_DEVICES': '0,1,2,3', 'CONDA_PREFIX': '/home/ubuntu/anaconda3/envs/mx_byteps', 'MANPATH': '/opt/aws/neuron/share/man:', 'MODULEPATH': '/etc/environment-modules/modules:/usr/share/modules/versions:/usr/Modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles', 'MODULESHOME': '/usr/share/modules'})=============0
environ({'LESSOPEN': '| /usr/bin/lesspipe %s', 'CONDA_PROMPT_MODIFIER': '(mx_byteps) ', 'BYTEPS_LOCAL_RANK': '3', 'MAIL': '/var/mail/ubuntu', 'SSH_CLIENT': '76.126.245.87 59732 22', 'USER': 'ubuntu', 'LD_LIBRARY_PATH_WITH_DEFAULT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:', 'LD_LIBRARY_PATH': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/home/ubuntu/src/cntk/bindings/python/cntk/libs:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/efa/lib:/usr/local/cuda/lib:/opt/amazon/efa/lib:/usr/local/mpi/lib:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:', 'SHLVL': '1', 'CONDA_SHLVL': '1', 'HOME': '/home/ubuntu', 'SSH_TTY': '/dev/pts/1', 'DMLC_PS_ROOT_URI': '10.0.0.1', 'LC_TERMINAL_VERSION': '3.3.9', 'DMLC_NUM_SERVER': '1', 'LOGNAME': 'ubuntu', 'DMLC_PS_ROOT_PORT': '1234', '_': '/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch', 'BYTEPS_LOCAL_SIZE': '4', 'PKG_CONFIG_PATH': '/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:', 'XDG_SESSION_ID': '3', 'TERM': 'xterm-256color', 'DMLC_NUM_WORKER': '1', 'PATH': '/home/ubuntu/anaconda3/envs/mx_byteps/bin:/home/ubuntu/anaconda3/bin/:/home/ubuntu/bin:/home/ubuntu/.local/bin:/home/ubuntu/anaconda3/bin/:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/home/ubuntu/src/cntk/bin:/usr/local/mpi/bin:/opt/aws/neuron/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin', 'DMLC_ROLE': 'worker', 'XDG_RUNTIME_DIR': '/run/user/1000', 'LANG': 'en_US.UTF-8', 'LS_COLORS': 'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:', 'CONDA_PYTHON_EXE': '/home/ubuntu/anaconda3/bin/python', 'SHELL': '/bin/bash', 'CONDA_DEFAULT_ENV': 'mx_byteps', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'MODULE_VERSION': '3.2.10', 'LD_LIBRARY_PATH_WITHOUT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:', 'LC_TERMINAL': 'iTerm2', 'MODULE_VERSION_STACK': '3.2.10', 'PWD': '/home/ubuntu', 'LOADEDMODULES': '', 'CONDA_EXE': '/home/ubuntu/anaconda3/bin/conda', 'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop', 'SSH_CONNECTION': '76.126.245.87 59732 172.31.85.4 22', 'PYTHONPATH': '/home/ubuntu/src/cntk/bindings/python', 'DMLC_WORKER_ID': '0', 'NVIDIA_VISIBLE_DEVICES': '0,1,2,3', 'CONDA_PREFIX': '/home/ubuntu/anaconda3/envs/mx_byteps', 'MANPATH': '/opt/aws/neuron/share/man:', 'MODULEPATH': '/etc/environment-modules/modules:/usr/share/modules/versions:/usr/Modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles', 'MODULESHOME': '/usr/share/modules'})=============3
Segmentation fault: 11
Segmentation fault: 11
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7fe075c25100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7fe1021c34b0]
[bt] (2) /lib/x86_64-linux-gnu/libpthread.so.0(pthread_mutex_lock+0x4) [0x7fe102561d44]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389d737) [0x7fe07531a737]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0863) [0x7fe07531d863]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7fe075313551]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(MXEnginePushAsync+0x2f7) [0x7fe075279a67]
[bt] (7) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/site-packages/byteps/mxnet/c_lib.cpython-36m-x86_64-linux-gnu.so(byteps_mxnet_push_pull_async+0x150) [0x7fdff9762970]
[bt] (8) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/lib-dynload/../../libffi.so.6(ffi_call_unix64+0x4c) [0x7fe1012dfec0]
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7f3556934100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7f35e2ed24b0]
[bt] (2) /lib/x86_64-linux-gnu/libpthread.so.0(pthread_mutex_lock+0x4) [0x7f35e3270d44]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389d737) [0x7f3556029737]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0863) [0x7f355602c863]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7f3556022551]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(MXEnginePushAsync+0x2f7) [0x7f3555f88a67]
[bt] (7) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/site-packages/byteps/mxnet/c_lib.cpython-36m-x86_64-linux-gnu.so(byteps_mxnet_push_pull_async+0x150) [0x7f34e1762970]
[bt] (8) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/lib-dynload/../../libffi.so.6(ffi_call_unix64+0x4c) [0x7f35e1feeec0]
[2020-03-16 21:41:59*** Error in `.956268: F byteps/common/core_loops.cc:299] Check failed: r == ncclSuccess NCCL error: unhandled cuda error
Segmentation fault: 11
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7fefa3442100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7ff02f9e04b0]
[bt] (2) /lib/x86_64-linux-gnu/libpthread.so.0(pthread_mutex_lock+0x4) [0x7ff02fd7ed44]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389d737) [0x7fefa2b37737]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0863) [0x7fefa2b3a863]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7fefa2b30551]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(MXEnginePushAsync+0x2f7) [0x7fefa2a96a67]
[bt] (7) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/site-packages/byteps/mxnet/c_lib.cpython-36m-x86_64-linux-gnu.so(byteps_mxnet_push_pull_async+0x150) [0x7fef2d762970]
[bt] (8) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/lib-dynload/../../libffi.so.6(ffi_call_unix64+0x4c) [0x7ff02eafcec0]
Segmentation fault: 11
Segmentation fault: 11
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7f14f6979100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7f1582f174b0]
[bt] (2) /lib/x86_64-linux-gnu/libpthread.so.0(pthread_mutex_lock+0x4) [0x7f15832b5d44]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389d737) [0x7f14f606e737]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0863) [0x7f14f6071863]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7f14f6067551]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(MXEnginePushAsync+0x2f7) [0x7f14f5fcda67]
[bt] (7) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/site-packages/byteps/mxnet/c_lib.cpython-36m-x86_64-linux-gnu.so(byteps_mxnet_push_pull_async+0x150) [0x7f1481762970]
[bt] (8) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/lib-dynload/../../libffi.so.6(ffi_call_unix64+0x4c) [0x7f1582033ec0]
Segmentation fault: 11
Segmentation fault: 11
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7f14f6979100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7f1582f174b0]
[bt] (2) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389f261) [0x7f14f6070261]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0611) [0x7f14f6071611]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7f14f6067551]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38974a4) [0x7f14f60684a4]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(mxnet::NDArray::Chunk::~Chunk()+0x48a) [0x7f14f629056a]
[bt] (7) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x6d860a) [0x7f14f2ea960a]
[bt] (8) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3ab7101) [0x7f14f6288101]
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7f14f6979100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7f1582f174b0]
[bt] (2) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389f261) [0x7f14f6070261]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0611) [0x7f14f6071611]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7f14f6067551]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38974a4) [0x7f14f60684a4]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(mxnet::NDArray::Chunk::~Chunk()+0x48a) [0x7f14f629056a]
[bt] (7) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x6d860a) [0x7f14f2ea960a]
[bt] (8) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3ab7101) [0x7f14f6288101]
Aborted (core dumped)
Exception in thread Thread-4:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch", line 47, in worker
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/subprocess.py", line 311, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'python byteps/example/mxnet/train_imagenet_byteps.py --benchmark 1 --batch-size=32' returned non-zero exit status 134.
Segmentation fault (core dumped)
Exception in thread Thread-3:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch", line 47, in worker
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/subprocess.py", line 311, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'python byteps/example/mxnet/train_imagenet_byteps.py --benchmark 1 --batch-size=32' returned non-zero exit status 139.
Segmentation fault (core dumped)
Exception in thread Thread-1:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch", line 47, in worker
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/subprocess.py", line 311, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'python byteps/example/mxnet/train_imagenet_byteps.py --benchmark 1 --batch-size=32' returned non-zero exit status 139.
Segmentation fault (core dumped)
Exception in thread Thread-2:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch", line 47, in worker
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/subprocess.py", line 311, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'python byteps/example/mxnet/train_imagenet_byteps.py --benchmark 1 --batch-size=32' returned non-zero exit status 139.
|
subprocess.CalledProcessError
|
def build_mx_extension(build_ext, options):
# clear ROLE -- installation does not need this
os.environ.pop("DMLC_ROLE", None)
check_mx_version()
mx_compile_flags, mx_link_flags = get_mx_flags(build_ext, options["COMPILE_FLAGS"])
mx_have_cuda = is_mx_cuda()
macro_have_cuda = check_macro(options["MACROS"], "HAVE_CUDA")
if not mx_have_cuda and macro_have_cuda:
raise DistutilsPlatformError(
"BytePS build with GPU support was requested, but this MXNet "
"installation does not support CUDA."
)
# Update HAVE_CUDA to mean that MXNet supports CUDA.
if mx_have_cuda and not macro_have_cuda:
cuda_include_dirs, cuda_lib_dirs = get_cuda_dirs(
build_ext, options["COMPILE_FLAGS"]
)
options["MACROS"] += [("HAVE_CUDA", "1")]
options["INCLUDES"] += cuda_include_dirs
options["LIBRARY_DIRS"] += cuda_lib_dirs
options["LIBRARIES"] += ["cudart"]
mxnet_lib.define_macros = options["MACROS"]
if check_macro(options["MACROS"], "HAVE_CUDA"):
mxnet_lib.define_macros += [("MSHADOW_USE_CUDA", "1")]
else:
mxnet_lib.define_macros += [("MSHADOW_USE_CUDA", "0")]
if is_mx_mkldnn():
mxnet_lib.define_macros += [("MXNET_USE_MKLDNN", "1")]
else:
mxnet_lib.define_macros += [("MXNET_USE_MKLDNN", "0")]
mxnet_lib.define_macros += [("MSHADOW_USE_MKL", "0")]
# use MXNet's DMLC headers first instead of ps-lite's
options["INCLUDES"].insert(0, get_mx_include_dirs())
mxnet_lib.include_dirs = options["INCLUDES"]
mxnet_lib.sources = options["SOURCES"] + [
"byteps/mxnet/ops.cc",
"byteps/mxnet/ready_event.cc",
"byteps/mxnet/tensor_util.cc",
"byteps/mxnet/cuda_util.cc",
"byteps/mxnet/adapter.cc",
]
mxnet_lib.extra_compile_args = options["COMPILE_FLAGS"] + mx_compile_flags
mxnet_lib.extra_link_args = options["LINK_FLAGS"] + mx_link_flags
mxnet_lib.extra_objects = options["EXTRA_OBJECTS"]
mxnet_lib.library_dirs = options["LIBRARY_DIRS"]
mxnet_lib.libraries = options["LIBRARIES"]
build_ext.build_extension(mxnet_lib)
|
def build_mx_extension(build_ext, options):
# clear ROLE -- installation does not need this
os.environ.pop("DMLC_ROLE", None)
check_mx_version()
mx_compile_flags, mx_link_flags = get_mx_flags(build_ext, options["COMPILE_FLAGS"])
mx_have_cuda = is_mx_cuda()
macro_have_cuda = check_macro(options["MACROS"], "HAVE_CUDA")
if not mx_have_cuda and macro_have_cuda:
raise DistutilsPlatformError(
"BytePS build with GPU support was requested, but this MXNet "
"installation does not support CUDA."
)
# Update HAVE_CUDA to mean that MXNet supports CUDA.
if mx_have_cuda and not macro_have_cuda:
cuda_include_dirs, cuda_lib_dirs = get_cuda_dirs(
build_ext, options["COMPILE_FLAGS"]
)
options["MACROS"] += [("HAVE_CUDA", "1")]
options["INCLUDES"] += cuda_include_dirs
options["LIBRARY_DIRS"] += cuda_lib_dirs
options["LIBRARIES"] += ["cudart"]
mxnet_lib.define_macros = options["MACROS"]
if check_macro(options["MACROS"], "HAVE_CUDA"):
mxnet_lib.define_macros += [("MSHADOW_USE_CUDA", "1")]
else:
mxnet_lib.define_macros += [("MSHADOW_USE_CUDA", "0")]
mxnet_lib.define_macros += [("MSHADOW_USE_MKL", "0")]
# use MXNet's DMLC headers first instead of ps-lite's
options["INCLUDES"].insert(0, get_mx_include_dirs())
mxnet_lib.include_dirs = options["INCLUDES"]
mxnet_lib.sources = options["SOURCES"] + [
"byteps/mxnet/ops.cc",
"byteps/mxnet/ready_event.cc",
"byteps/mxnet/tensor_util.cc",
"byteps/mxnet/cuda_util.cc",
"byteps/mxnet/adapter.cc",
]
mxnet_lib.extra_compile_args = options["COMPILE_FLAGS"] + mx_compile_flags
mxnet_lib.extra_link_args = options["LINK_FLAGS"] + mx_link_flags
mxnet_lib.extra_objects = options["EXTRA_OBJECTS"]
mxnet_lib.library_dirs = options["LIBRARY_DIRS"]
mxnet_lib.libraries = options["LIBRARIES"]
build_ext.build_extension(mxnet_lib)
|
https://github.com/bytedance/byteps/issues/222
|
(mx_byteps) ubuntu@ip-172-31-85-4:~$ bpslaunch python byteps/example/mxnet/train_imagenet_byteps.py --benchmark 1 --batch-size=32
BytePS launching worker
INFO:root:start with arguments Namespace(batch_size=32, benchmark=1, cpu_train=False, data_nthreads=4, data_train=None, data_train_idx='', data_val=None, data_val_idx='', disp_batches=20, dtype='float32', gc_threshold=0.5, gc_type='none', image_shape='3,224,224', initializer='default', kv_store='device', load_epoch=None, loss='', lr=0.1, lr_factor=0.1, lr_step_epochs='30,60', macrobatch_size=0, max_random_aspect_ratio=0.25, max_random_h=36, max_random_l=50, max_random_rotate_angle=10, max_random_s=50, max_random_scale=1, max_random_shear_ratio=0.1, min_random_scale=1, model_prefix=None, mom=0.9, monitor=0, network='resnet', num_classes=1000, num_epochs=80, num_examples=1281167, num_layers=50, optimizer='sgd', pad_size=0, random_crop=1, random_mirror=1, rgb_mean='123.68,116.779,103.939', test_io=0, top_k=0, warmup_epochs=5, warmup_strategy='linear', wd=0.0001)
INFO:root:start with arguments Namespace(batch_size=32, benchmark=1, cpu_train=False, data_nthreads=4, data_train=None, data_train_idx='', data_val=None, data_val_idx='', disp_batches=20, dtype='float32', gc_threshold=0.5, gc_type='none', image_shape='3,224,224', initializer='default', kv_store='device', load_epoch=None, loss='', lr=0.1, lr_factor=0.1, lr_step_epochs='30,60', macrobatch_size=0, max_random_aspect_ratio=0.25, max_random_h=36, max_random_l=50, max_random_rotate_angle=10, max_random_s=50, max_random_scale=1, max_random_shear_ratio=0.1, min_random_scale=1, model_prefix=None, mom=0.9, monitor=0, network='resnet', num_classes=1000, num_epochs=80, num_examples=1281167, num_layers=50, optimizer='sgd', pad_size=0, random_crop=1, random_mirror=1, rgb_mean='123.68,116.779,103.939', test_io=0, top_k=0, warmup_epochs=5, warmup_strategy='linear', wd=0.0001)
INFO:root:start with arguments Namespace(batch_size=32, benchmark=1, cpu_train=False, data_nthreads=4, data_train=None, data_train_idx='', data_val=None, data_val_idx='', disp_batches=20, dtype='float32', gc_threshold=0.5, gc_type='none', image_shape='3,224,224', initializer='default', kv_store='device', load_epoch=None, loss='', lr=0.1, lr_factor=0.1, lr_step_epochs='30,60', macrobatch_size=0, max_random_aspect_ratio=0.25, max_random_h=36, max_random_l=50, max_random_rotate_angle=10, max_random_s=50, max_random_scale=1, max_random_shear_ratio=0.1, min_random_scale=1, model_prefix=None, mom=0.9, monitor=0, network='resnet', num_classes=1000, num_epochs=80, num_examples=1281167, num_layers=50, optimizer='sgd', pad_size=0, random_crop=1, random_mirror=1, rgb_mean='123.68,116.779,103.939', test_io=0, top_k=0, warmup_epochs=5, warmup_strategy='linear', wd=0.0001)
INFO:root:start with arguments Namespace(batch_size=32, benchmark=1, cpu_train=False, data_nthreads=4, data_train=None, data_train_idx='', data_val=None, data_val_idx='', disp_batches=20, dtype='float32', gc_threshold=0.5, gc_type='none', image_shape='3,224,224', initializer='default', kv_store='device', load_epoch=None, loss='', lr=0.1, lr_factor=0.1, lr_step_epochs='30,60', macrobatch_size=0, max_random_aspect_ratio=0.25, max_random_h=36, max_random_l=50, max_random_rotate_angle=10, max_random_s=50, max_random_scale=1, max_random_shear_ratio=0.1, min_random_scale=1, model_prefix=None, mom=0.9, monitor=0, network='resnet', num_classes=1000, num_epochs=80, num_examples=1281167, num_layers=50, optimizer='sgd', pad_size=0, random_crop=1, random_mirror=1, rgb_mean='123.68,116.779,103.939', test_io=0, top_k=0, warmup_epochs=5, warmup_strategy='linear', wd=0.0001)
INFO:root:Launch BytePS process on GPU-2
learning rate from ``lr_scheduler`` has been overwritten by ``learning_rate`` in optimizer.
INFO:root:Launch BytePS process on GPU-0
INFO:root:Launch BytePS process on GPU-1
learning rate from ``lr_scheduler`` has been overwritten by ``learning_rate`` in optimizer.
learning rate from ``lr_scheduler`` has been overwritten by ``learning_rate`` in optimizer.
environ({'LESSOPEN': '| /usr/bin/lesspipe %s', 'CONDA_PROMPT_MODIFIER': '(mx_byteps) ', 'BYTEPS_LOCAL_RANK': '2', 'MAIL': '/var/mail/ubuntu', 'SSH_CLIENT': '76.126.245.87 59732 22', 'USER': 'ubuntu', 'LD_LIBRARY_PATH_WITH_DEFAULT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:', 'LD_LIBRARY_PATH': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/home/ubuntu/src/cntk/bindings/python/cntk/libs:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/efa/lib:/usr/local/cuda/lib:/opt/amazon/efa/lib:/usr/local/mpi/lib:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:', 'SHLVL': '1', 'CONDA_SHLVL': '1', 'HOME': '/home/ubuntu', 'SSH_TTY': '/dev/pts/1', 'DMLC_PS_ROOT_URI': '10.0.0.1', 'LC_TERMINAL_VERSION': '3.3.9', 'DMLC_NUM_SERVER': '1', 'LOGNAME': 'ubuntu', 'DMLC_PS_ROOT_PORT': '1234', '_': '/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch', 'BYTEPS_LOCAL_SIZE': '4', 'PKG_CONFIG_PATH': '/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:', 'XDG_SESSION_ID': '3', 'TERM': 'xterm-256color', 'DMLC_NUM_WORKER': '1', 'PATH': '/home/ubuntu/anaconda3/envs/mx_byteps/bin:/home/ubuntu/anaconda3/bin/:/home/ubuntu/bin:/home/ubuntu/.local/bin:/home/ubuntu/anaconda3/bin/:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/home/ubuntu/src/cntk/bin:/usr/local/mpi/bin:/opt/aws/neuron/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin', 'DMLC_ROLE': 'worker', 'XDG_RUNTIME_DIR': '/run/user/1000', 'LANG': 'en_US.UTF-8', 'LS_COLORS': 'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:', 'CONDA_PYTHON_EXE': '/home/ubuntu/anaconda3/bin/python', 'SHELL': '/bin/bash', 'CONDA_DEFAULT_ENV': 'mx_byteps', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'MODULE_VERSION': '3.2.10', 'LD_LIBRARY_PATH_WITHOUT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:', 'LC_TERMINAL': 'iTerm2', 'MODULE_VERSION_STACK': '3.2.10', 'PWD': '/home/ubuntu', 'LOADEDMODULES': '', 'CONDA_EXE': '/home/ubuntu/anaconda3/bin/conda', 'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop', 'SSH_CONNECTION': '76.126.245.87 59732 172.31.85.4 22', 'PYTHONPATH': '/home/ubuntu/src/cntk/bindings/python', 'DMLC_WORKER_ID': '0', 'NVIDIA_VISIBLE_DEVICES': '0,1,2,3', 'CONDA_PREFIX': '/home/ubuntu/anaconda3/envs/mx_byteps', 'MANPATH': '/opt/aws/neuron/share/man:', 'MODULEPATH': '/etc/environment-modules/modules:/usr/share/modules/versions:/usr/Modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles', 'MODULESHOME': '/usr/share/modules'})=============2
INFO:root:Launch BytePS process on GPU-3
environ({'LESSOPEN': '| /usr/bin/lesspipe %s', 'CONDA_PROMPT_MODIFIER': '(mx_byteps) ', 'BYTEPS_LOCAL_RANK': '1', 'MAIL': '/var/mail/ubuntu', 'SSH_CLIENT': '76.126.245.87 59732 22', 'USER': 'ubuntu', 'LD_LIBRARY_PATH_WITH_DEFAULT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:', 'LD_LIBRARY_PATH': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/home/ubuntu/src/cntk/bindings/python/cntk/libs:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/efa/lib:/usr/local/cuda/lib:/opt/amazon/efa/lib:/usr/local/mpi/lib:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:', 'SHLVL': '1', 'CONDA_SHLVL': '1', 'HOME': '/home/ubuntu', 'SSH_TTY': '/dev/pts/1', 'DMLC_PS_ROOT_URI': '10.0.0.1', 'LC_TERMINAL_VERSION': '3.3.9', 'DMLC_NUM_SERVER': '1', 'LOGNAME': 'ubuntu', 'DMLC_PS_ROOT_PORT': '1234', '_': '/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch', 'BYTEPS_LOCAL_SIZE': '4', 'PKG_CONFIG_PATH': '/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:', 'XDG_SESSION_ID': '3', 'TERM': 'xterm-256color', 'DMLC_NUM_WORKER': '1', 'PATH': '/home/ubuntu/anaconda3/envs/mx_byteps/bin:/home/ubuntu/anaconda3/bin/:/home/ubuntu/bin:/home/ubuntu/.local/bin:/home/ubuntu/anaconda3/bin/:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/home/ubuntu/src/cntk/bin:/usr/local/mpi/bin:/opt/aws/neuron/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin', 'DMLC_ROLE': 'worker', 'XDG_RUNTIME_DIR': '/run/user/1000', 'LANG': 'en_US.UTF-8', 'LS_COLORS': 'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:', 'CONDA_PYTHON_EXE': '/home/ubuntu/anaconda3/bin/python', 'SHELL': '/bin/bash', 'CONDA_DEFAULT_ENV': 'mx_byteps', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'MODULE_VERSION': '3.2.10', 'LD_LIBRARY_PATH_WITHOUT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:', 'LC_TERMINAL': 'iTerm2', 'MODULE_VERSION_STACK': '3.2.10', 'PWD': '/home/ubuntu', 'LOADEDMODULES': '', 'CONDA_EXE': '/home/ubuntu/anaconda3/bin/conda', 'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop', 'SSH_CONNECTION': '76.126.245.87 59732 172.31.85.4 22', 'PYTHONPATH': '/home/ubuntu/src/cntk/bindings/python', 'DMLC_WORKER_ID': '0', 'NVIDIA_VISIBLE_DEVICES': '0,1,2,3', 'CONDA_PREFIX': '/home/ubuntu/anaconda3/envs/mx_byteps', 'MANPATH': '/opt/aws/neuron/share/man:', 'MODULEPATH': '/etc/environment-modules/modules:/usr/share/modules/versions:/usr/Modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles', 'MODULESHOME': '/usr/share/modules'})=============1
learning rate from ``lr_scheduler`` has been overwritten by ``learning_rate`` in optimizer.
environ({'LESSOPEN': '| /usr/bin/lesspipe %s', 'CONDA_PROMPT_MODIFIER': '(mx_byteps) ', 'BYTEPS_LOCAL_RANK': '0', 'MAIL': '/var/mail/ubuntu', 'SSH_CLIENT': '76.126.245.87 59732 22', 'USER': 'ubuntu', 'LD_LIBRARY_PATH_WITH_DEFAULT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:', 'LD_LIBRARY_PATH': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/home/ubuntu/src/cntk/bindings/python/cntk/libs:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/efa/lib:/usr/local/cuda/lib:/opt/amazon/efa/lib:/usr/local/mpi/lib:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:', 'SHLVL': '1', 'CONDA_SHLVL': '1', 'HOME': '/home/ubuntu', 'SSH_TTY': '/dev/pts/1', 'DMLC_PS_ROOT_URI': '10.0.0.1', 'LC_TERMINAL_VERSION': '3.3.9', 'DMLC_NUM_SERVER': '1', 'LOGNAME': 'ubuntu', 'DMLC_PS_ROOT_PORT': '1234', '_': '/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch', 'BYTEPS_LOCAL_SIZE': '4', 'PKG_CONFIG_PATH': '/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:', 'XDG_SESSION_ID': '3', 'TERM': 'xterm-256color', 'DMLC_NUM_WORKER': '1', 'PATH': '/home/ubuntu/anaconda3/envs/mx_byteps/bin:/home/ubuntu/anaconda3/bin/:/home/ubuntu/bin:/home/ubuntu/.local/bin:/home/ubuntu/anaconda3/bin/:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/home/ubuntu/src/cntk/bin:/usr/local/mpi/bin:/opt/aws/neuron/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin', 'DMLC_ROLE': 'worker', 'XDG_RUNTIME_DIR': '/run/user/1000', 'LANG': 'en_US.UTF-8', 'LS_COLORS': 'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:', 'CONDA_PYTHON_EXE': '/home/ubuntu/anaconda3/bin/python', 'SHELL': '/bin/bash', 'CONDA_DEFAULT_ENV': 'mx_byteps', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'MODULE_VERSION': '3.2.10', 'LD_LIBRARY_PATH_WITHOUT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:', 'LC_TERMINAL': 'iTerm2', 'MODULE_VERSION_STACK': '3.2.10', 'PWD': '/home/ubuntu', 'LOADEDMODULES': '', 'CONDA_EXE': '/home/ubuntu/anaconda3/bin/conda', 'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop', 'SSH_CONNECTION': '76.126.245.87 59732 172.31.85.4 22', 'PYTHONPATH': '/home/ubuntu/src/cntk/bindings/python', 'DMLC_WORKER_ID': '0', 'NVIDIA_VISIBLE_DEVICES': '0,1,2,3', 'CONDA_PREFIX': '/home/ubuntu/anaconda3/envs/mx_byteps', 'MANPATH': '/opt/aws/neuron/share/man:', 'MODULEPATH': '/etc/environment-modules/modules:/usr/share/modules/versions:/usr/Modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles', 'MODULESHOME': '/usr/share/modules'})=============0
environ({'LESSOPEN': '| /usr/bin/lesspipe %s', 'CONDA_PROMPT_MODIFIER': '(mx_byteps) ', 'BYTEPS_LOCAL_RANK': '3', 'MAIL': '/var/mail/ubuntu', 'SSH_CLIENT': '76.126.245.87 59732 22', 'USER': 'ubuntu', 'LD_LIBRARY_PATH_WITH_DEFAULT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/usr/local/cuda-9.0/lib/:', 'LD_LIBRARY_PATH': '/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:/home/ubuntu/src/cntk/bindings/python/cntk/libs:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/efa/lib:/usr/local/cuda/lib:/opt/amazon/efa/lib:/usr/local/mpi/lib:/usr/lib64/openmpi/lib/:/usr/local/cuda/lib64:/usr/local/lib:/usr/lib:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/mpi/lib:/lib/:', 'SHLVL': '1', 'CONDA_SHLVL': '1', 'HOME': '/home/ubuntu', 'SSH_TTY': '/dev/pts/1', 'DMLC_PS_ROOT_URI': '10.0.0.1', 'LC_TERMINAL_VERSION': '3.3.9', 'DMLC_NUM_SERVER': '1', 'LOGNAME': 'ubuntu', 'DMLC_PS_ROOT_PORT': '1234', '_': '/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch', 'BYTEPS_LOCAL_SIZE': '4', 'PKG_CONFIG_PATH': '/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:/usr/local/lib/pkgconfig:', 'XDG_SESSION_ID': '3', 'TERM': 'xterm-256color', 'DMLC_NUM_WORKER': '1', 'PATH': '/home/ubuntu/anaconda3/envs/mx_byteps/bin:/home/ubuntu/anaconda3/bin/:/home/ubuntu/bin:/home/ubuntu/.local/bin:/home/ubuntu/anaconda3/bin/:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/home/ubuntu/src/cntk/bin:/usr/local/mpi/bin:/opt/aws/neuron/bin:/usr/local/cuda/bin:/usr/local/bin:/opt/aws/bin:/usr/local/mpi/bin:/opt/amazon/openmpi/bin:/opt/amazon/efa/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin', 'DMLC_ROLE': 'worker', 'XDG_RUNTIME_DIR': '/run/user/1000', 'LANG': 'en_US.UTF-8', 'LS_COLORS': 'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:', 'CONDA_PYTHON_EXE': '/home/ubuntu/anaconda3/bin/python', 'SHELL': '/bin/bash', 'CONDA_DEFAULT_ENV': 'mx_byteps', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'MODULE_VERSION': '3.2.10', 'LD_LIBRARY_PATH_WITHOUT_CUDA': '/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:/usr/lib64/openmpi/lib/:/usr/local/lib:/usr/lib:/usr/local/mpi/lib:/lib/:', 'LC_TERMINAL': 'iTerm2', 'MODULE_VERSION_STACK': '3.2.10', 'PWD': '/home/ubuntu', 'LOADEDMODULES': '', 'CONDA_EXE': '/home/ubuntu/anaconda3/bin/conda', 'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop', 'SSH_CONNECTION': '76.126.245.87 59732 172.31.85.4 22', 'PYTHONPATH': '/home/ubuntu/src/cntk/bindings/python', 'DMLC_WORKER_ID': '0', 'NVIDIA_VISIBLE_DEVICES': '0,1,2,3', 'CONDA_PREFIX': '/home/ubuntu/anaconda3/envs/mx_byteps', 'MANPATH': '/opt/aws/neuron/share/man:', 'MODULEPATH': '/etc/environment-modules/modules:/usr/share/modules/versions:/usr/Modules/$MODULE_VERSION/modulefiles:/usr/share/modules/modulefiles', 'MODULESHOME': '/usr/share/modules'})=============3
Segmentation fault: 11
Segmentation fault: 11
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7fe075c25100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7fe1021c34b0]
[bt] (2) /lib/x86_64-linux-gnu/libpthread.so.0(pthread_mutex_lock+0x4) [0x7fe102561d44]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389d737) [0x7fe07531a737]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0863) [0x7fe07531d863]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7fe075313551]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(MXEnginePushAsync+0x2f7) [0x7fe075279a67]
[bt] (7) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/site-packages/byteps/mxnet/c_lib.cpython-36m-x86_64-linux-gnu.so(byteps_mxnet_push_pull_async+0x150) [0x7fdff9762970]
[bt] (8) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/lib-dynload/../../libffi.so.6(ffi_call_unix64+0x4c) [0x7fe1012dfec0]
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7f3556934100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7f35e2ed24b0]
[bt] (2) /lib/x86_64-linux-gnu/libpthread.so.0(pthread_mutex_lock+0x4) [0x7f35e3270d44]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389d737) [0x7f3556029737]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0863) [0x7f355602c863]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7f3556022551]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(MXEnginePushAsync+0x2f7) [0x7f3555f88a67]
[bt] (7) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/site-packages/byteps/mxnet/c_lib.cpython-36m-x86_64-linux-gnu.so(byteps_mxnet_push_pull_async+0x150) [0x7f34e1762970]
[bt] (8) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/lib-dynload/../../libffi.so.6(ffi_call_unix64+0x4c) [0x7f35e1feeec0]
[2020-03-16 21:41:59*** Error in `.956268: F byteps/common/core_loops.cc:299] Check failed: r == ncclSuccess NCCL error: unhandled cuda error
Segmentation fault: 11
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7fefa3442100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7ff02f9e04b0]
[bt] (2) /lib/x86_64-linux-gnu/libpthread.so.0(pthread_mutex_lock+0x4) [0x7ff02fd7ed44]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389d737) [0x7fefa2b37737]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0863) [0x7fefa2b3a863]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7fefa2b30551]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(MXEnginePushAsync+0x2f7) [0x7fefa2a96a67]
[bt] (7) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/site-packages/byteps/mxnet/c_lib.cpython-36m-x86_64-linux-gnu.so(byteps_mxnet_push_pull_async+0x150) [0x7fef2d762970]
[bt] (8) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/lib-dynload/../../libffi.so.6(ffi_call_unix64+0x4c) [0x7ff02eafcec0]
Segmentation fault: 11
Segmentation fault: 11
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7f14f6979100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7f1582f174b0]
[bt] (2) /lib/x86_64-linux-gnu/libpthread.so.0(pthread_mutex_lock+0x4) [0x7f15832b5d44]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389d737) [0x7f14f606e737]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0863) [0x7f14f6071863]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7f14f6067551]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(MXEnginePushAsync+0x2f7) [0x7f14f5fcda67]
[bt] (7) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/site-packages/byteps/mxnet/c_lib.cpython-36m-x86_64-linux-gnu.so(byteps_mxnet_push_pull_async+0x150) [0x7f1481762970]
[bt] (8) /home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/lib-dynload/../../libffi.so.6(ffi_call_unix64+0x4c) [0x7f1582033ec0]
Segmentation fault: 11
Segmentation fault: 11
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7f14f6979100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7f1582f174b0]
[bt] (2) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389f261) [0x7f14f6070261]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0611) [0x7f14f6071611]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7f14f6067551]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38974a4) [0x7f14f60684a4]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(mxnet::NDArray::Chunk::~Chunk()+0x48a) [0x7f14f629056a]
[bt] (7) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x6d860a) [0x7f14f2ea960a]
[bt] (8) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3ab7101) [0x7f14f6288101]
Stack trace:
[bt] (0) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x41a8100) [0x7f14f6979100]
[bt] (1) /lib/x86_64-linux-gnu/libc.so.6(+0x354b0) [0x7f1582f174b0]
[bt] (2) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x389f261) [0x7f14f6070261]
[bt] (3) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38a0611) [0x7f14f6071611]
[bt] (4) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3896551) [0x7f14f6067551]
[bt] (5) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x38974a4) [0x7f14f60684a4]
[bt] (6) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(mxnet::NDArray::Chunk::~Chunk()+0x48a) [0x7f14f629056a]
[bt] (7) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x6d860a) [0x7f14f2ea960a]
[bt] (8) /home/ubuntu/.local/lib/python3.6/site-packages/mxnet/libmxnet.so(+0x3ab7101) [0x7f14f6288101]
Aborted (core dumped)
Exception in thread Thread-4:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch", line 47, in worker
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/subprocess.py", line 311, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'python byteps/example/mxnet/train_imagenet_byteps.py --benchmark 1 --batch-size=32' returned non-zero exit status 134.
Segmentation fault (core dumped)
Exception in thread Thread-3:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch", line 47, in worker
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/subprocess.py", line 311, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'python byteps/example/mxnet/train_imagenet_byteps.py --benchmark 1 --batch-size=32' returned non-zero exit status 139.
Segmentation fault (core dumped)
Exception in thread Thread-1:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch", line 47, in worker
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/subprocess.py", line 311, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'python byteps/example/mxnet/train_imagenet_byteps.py --benchmark 1 --batch-size=32' returned non-zero exit status 139.
Segmentation fault (core dumped)
Exception in thread Thread-2:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/ubuntu/anaconda3/envs/mx_byteps/bin/bpslaunch", line 47, in worker
subprocess.check_call(command, env=my_env, stdout=sys.stdout, stderr=sys.stderr, shell=True)
File "/home/ubuntu/anaconda3/envs/mx_byteps/lib/python3.6/subprocess.py", line 311, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'python byteps/example/mxnet/train_imagenet_byteps.py --benchmark 1 --batch-size=32' returned non-zero exit status 139.
|
subprocess.CalledProcessError
|
def _push_pull_grad_async(self, p):
"""Call byteps API to push-pull gradient asynchronously
Arguments:
tensor: The tensor to push-pull.
name: The name of the tensor.
Returns:
an push-pull handle and context
"""
name = self._parameter_names.get(id(p))
tensor = p.grad
tensor_compressed, ctx = self._compression.compress(tensor)
self._locks[p].acquire()
handle = byteps_push_pull(tensor_compressed, average=True, name="Gradient." + name)
self._logger.debug(
"{} calls byteps_push_pull for {}".format(
self._desc, self._parameter_names[id(p)]
)
)
# Add to queue to poll completion
self._event_queue.put((p, handle, ctx))
return handle, ctx
|
def _push_pull_grad_async(self, p):
"""Call byteps API to push-pull gradient asynchronously
Arguments:
tensor: The tensor to push-pull.
name: The name of the tensor.
Returns:
an push-pull handle and context
"""
name = self._parameter_names.get(p)
tensor = p.grad
tensor_compressed, ctx = self._compression.compress(tensor)
self._locks[p].acquire()
handle = byteps_push_pull(tensor_compressed, average=True, name="Gradient." + name)
self._logger.debug(
"{} calls byteps_push_pull for {}".format(self._desc, self._parameter_names[p])
)
# Add to queue to poll completion
self._event_queue.put((p, handle, ctx))
return handle, ctx
|
https://github.com/bytedance/byteps/issues/143
|
Traceback (most recent call last):
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 134, in <module>
timeit.timeit(benchmark_step, number=args.num_warmup_batches)
File "/opt/anaconda/lib/python3.7/timeit.py", line 232, in timeit
return Timer(stmt, setup, timer, globals).timeit(number)
File "/opt/anaconda/lib/python3.7/timeit.py", line 176, in timeit
timing = self.inner(it, self.timer)
File "<timeit-src>", line 6, in inner
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 115, in benchmark_step
output = model(data)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/opt/anaconda/lib/python3.7/site-packages/torchvision/models/resnet.py", line 196, in forward
x = self.conv1(x)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 533, in __call__
result = hook(self, input)
File "/opt/anaconda/lib/python3.7/site-packages/byteps-0.1.0-py3.7-linux-x86_64.egg/byteps/bytescheduler/torch/optimizer.py", line 206, in pre_forward_hook
self._logger.debug("{} {} is ready.".format(self._desc, self._parameter_names[p]))
RuntimeError: bool value of Tensor with more than one value is ambiguous
|
RuntimeError
|
def _poll(self):
"""Poll the completion of the tensor's backward or push-pull from a FIFO event_queue"""
while True:
p, handle, ctx = self._event_queue.get()
if p is None:
self._logger.debug("poller exits.")
break
# Check whether the push-pull is finished. If so, start updating parameters.
if handle is not None and poll(handle):
output = synchronize(handle)
p.grad.set_(self._compression.decompress(output, ctx))
self._logger.debug(
"{} {} finished push-pull".format(
self._desc, self._parameter_names[id(p)]
)
)
self._push_pull_delay[p] = self.backward_passes_per_step
# So far ByteScheduler only supports SGD, Adam and RMSprop optimizers in torch
if isinstance(self._opt, torch.optim.SGD):
self._sgd(p)
elif isinstance(self._opt, torch.optim.Adam):
self._adam(p)
elif isinstance(self._opt, torch.optim.RMSprop):
self._rmsprop(p)
else:
raise ValueError(
"Invalid optimizer! ByteScheduler only supports SGD, Adam and RMSprop."
)
self._zero_one_grad(p)
# notify update completion and parameter is ready for forward propagation
if p in self._locks:
self._locks[p].release()
else:
self._event_queue.put((p, handle, ctx))
|
def _poll(self):
"""Poll the completion of the tensor's backward or push-pull from a FIFO event_queue"""
while True:
p, handle, ctx = self._event_queue.get()
if p is None:
self._logger.debug("poller exits.")
break
# Check whether the push-pull is finished. If so, start updating parameters.
if handle is not None and poll(handle):
output = synchronize(handle)
p.grad.set_(self._compression.decompress(output, ctx))
self._logger.debug(
"{} {} finished push-pull".format(self._desc, self._parameter_names[p])
)
self._push_pull_delay[p] = self.backward_passes_per_step
# So far ByteScheduler only supports SGD, Adam and RMSprop optimizers in torch
if isinstance(self._opt, torch.optim.SGD):
self._sgd(p)
elif isinstance(self._opt, torch.optim.Adam):
self._adam(p)
elif isinstance(self._opt, torch.optim.RMSprop):
self._rmsprop(p)
else:
raise ValueError(
"Invalid optimizer! ByteScheduler only supports SGD, Adam and RMSprop."
)
self._zero_one_grad(p)
# notify update completion and parameter is ready for forward propagation
if p in self._locks:
self._locks[p].release()
else:
self._event_queue.put((p, handle, ctx))
|
https://github.com/bytedance/byteps/issues/143
|
Traceback (most recent call last):
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 134, in <module>
timeit.timeit(benchmark_step, number=args.num_warmup_batches)
File "/opt/anaconda/lib/python3.7/timeit.py", line 232, in timeit
return Timer(stmt, setup, timer, globals).timeit(number)
File "/opt/anaconda/lib/python3.7/timeit.py", line 176, in timeit
timing = self.inner(it, self.timer)
File "<timeit-src>", line 6, in inner
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 115, in benchmark_step
output = model(data)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/opt/anaconda/lib/python3.7/site-packages/torchvision/models/resnet.py", line 196, in forward
x = self.conv1(x)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 533, in __call__
result = hook(self, input)
File "/opt/anaconda/lib/python3.7/site-packages/byteps-0.1.0-py3.7-linux-x86_64.egg/byteps/bytescheduler/torch/optimizer.py", line 206, in pre_forward_hook
self._logger.debug("{} {} is ready.".format(self._desc, self._parameter_names[p]))
RuntimeError: bool value of Tensor with more than one value is ambiguous
|
RuntimeError
|
def _register_forward_hooks(self):
"""Add hook before forward propagation of each layer to block forward computation until the push-pull and
parameter update is finished. The blocking is implemented using a lock."""
# Recursively find all submodules
submodules = []
q = queue.LifoQueue()
for mod in self._model.children():
q.put(mod)
while not q.empty():
mod = q.get()
if len(list(mod.children())) == 0:
submodules.append(mod)
else:
for m in mod.children():
q.put(m)
def pre_forward_hook(mod, input):
for p in mod.parameters():
if p in self._handles:
del self._handles[p]
if p not in self._locks:
continue
with self._locks[p]:
self._logger.debug(
"{} {} is ready.".format(self._desc, self._parameter_names[id(p)])
)
self._logger.debug("{} starts forward {}.".format(self._desc, mod))
def after_forward_hook(mod, input, result):
self._logger.debug("{} finished forward {}.".format(self._desc, mod))
# Register pre-hook and hook for each module
for mod in reversed(submodules):
self._logger.debug(
"{} registers forward hook on module {}".format(self._desc, mod)
)
mod.register_forward_pre_hook(pre_forward_hook)
mod.register_forward_hook(after_forward_hook)
|
def _register_forward_hooks(self):
"""Add hook before forward propagation of each layer to block forward computation until the push-pull and
parameter update is finished. The blocking is implemented using a lock."""
# Recursively find all submodules
submodules = []
q = queue.LifoQueue()
for mod in self._model.children():
q.put(mod)
while not q.empty():
mod = q.get()
if len(list(mod.children())) == 0:
submodules.append(mod)
else:
for m in mod.children():
q.put(m)
def pre_forward_hook(mod, input):
for p in mod.parameters():
if p in self._handles:
del self._handles[p]
if p not in self._locks:
continue
with self._locks[p]:
self._logger.debug(
"{} {} is ready.".format(self._desc, self._parameter_names[p])
)
self._logger.debug("{} starts forward {}.".format(self._desc, mod))
def after_forward_hook(mod, input, result):
self._logger.debug("{} finished forward {}.".format(self._desc, mod))
# Register pre-hook and hook for each module
for mod in reversed(submodules):
self._logger.debug(
"{} registers forward hook on module {}".format(self._desc, mod)
)
mod.register_forward_pre_hook(pre_forward_hook)
mod.register_forward_hook(after_forward_hook)
|
https://github.com/bytedance/byteps/issues/143
|
Traceback (most recent call last):
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 134, in <module>
timeit.timeit(benchmark_step, number=args.num_warmup_batches)
File "/opt/anaconda/lib/python3.7/timeit.py", line 232, in timeit
return Timer(stmt, setup, timer, globals).timeit(number)
File "/opt/anaconda/lib/python3.7/timeit.py", line 176, in timeit
timing = self.inner(it, self.timer)
File "<timeit-src>", line 6, in inner
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 115, in benchmark_step
output = model(data)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/opt/anaconda/lib/python3.7/site-packages/torchvision/models/resnet.py", line 196, in forward
x = self.conv1(x)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 533, in __call__
result = hook(self, input)
File "/opt/anaconda/lib/python3.7/site-packages/byteps-0.1.0-py3.7-linux-x86_64.egg/byteps/bytescheduler/torch/optimizer.py", line 206, in pre_forward_hook
self._logger.debug("{} {} is ready.".format(self._desc, self._parameter_names[p]))
RuntimeError: bool value of Tensor with more than one value is ambiguous
|
RuntimeError
|
def pre_forward_hook(mod, input):
for p in mod.parameters():
if p in self._handles:
del self._handles[p]
if p not in self._locks:
continue
with self._locks[p]:
self._logger.debug(
"{} {} is ready.".format(self._desc, self._parameter_names[id(p)])
)
self._logger.debug("{} starts forward {}.".format(self._desc, mod))
|
def pre_forward_hook(mod, input):
for p in mod.parameters():
if p in self._handles:
del self._handles[p]
if p not in self._locks:
continue
with self._locks[p]:
self._logger.debug(
"{} {} is ready.".format(self._desc, self._parameter_names[p])
)
self._logger.debug("{} starts forward {}.".format(self._desc, mod))
|
https://github.com/bytedance/byteps/issues/143
|
Traceback (most recent call last):
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 134, in <module>
timeit.timeit(benchmark_step, number=args.num_warmup_batches)
File "/opt/anaconda/lib/python3.7/timeit.py", line 232, in timeit
return Timer(stmt, setup, timer, globals).timeit(number)
File "/opt/anaconda/lib/python3.7/timeit.py", line 176, in timeit
timing = self.inner(it, self.timer)
File "<timeit-src>", line 6, in inner
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 115, in benchmark_step
output = model(data)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/opt/anaconda/lib/python3.7/site-packages/torchvision/models/resnet.py", line 196, in forward
x = self.conv1(x)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 533, in __call__
result = hook(self, input)
File "/opt/anaconda/lib/python3.7/site-packages/byteps-0.1.0-py3.7-linux-x86_64.egg/byteps/bytescheduler/torch/optimizer.py", line 206, in pre_forward_hook
self._logger.debug("{} {} is ready.".format(self._desc, self._parameter_names[p]))
RuntimeError: bool value of Tensor with more than one value is ambiguous
|
RuntimeError
|
def _sgd(self, p):
"""Performs a single optimization step using SGD optimizer on a parameter.
Arguments:
p: The parameter to be updated.
"""
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for gp in group["params"]:
if (
self._parameter_names[id(p)] != self._parameter_names[id(gp)]
or gp.shape != p.shape
):
continue
self._logger.debug(
"{} is updating {}".format(self._desc, self._parameter_names[id(p)])
)
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
if momentum != 0:
param_state = self.state[p]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = torch.zeros_like(p.data)
buf.mul_(momentum).add_(d_p)
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
p.data.add_(-group["lr"], d_p)
break
|
def _sgd(self, p):
"""Performs a single optimization step using SGD optimizer on a parameter.
Arguments:
p: The parameter to be updated.
"""
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for gp in group["params"]:
if (
self._parameter_names[p] != self._parameter_names[gp]
or gp.shape != p.shape
):
continue
self._logger.debug(
"{} is updating {}".format(self._desc, self._parameter_names[p])
)
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(weight_decay, p.data)
if momentum != 0:
param_state = self.state[p]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = torch.zeros_like(p.data)
buf.mul_(momentum).add_(d_p)
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(1 - dampening, d_p)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
p.data.add_(-group["lr"], d_p)
break
|
https://github.com/bytedance/byteps/issues/143
|
Traceback (most recent call last):
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 134, in <module>
timeit.timeit(benchmark_step, number=args.num_warmup_batches)
File "/opt/anaconda/lib/python3.7/timeit.py", line 232, in timeit
return Timer(stmt, setup, timer, globals).timeit(number)
File "/opt/anaconda/lib/python3.7/timeit.py", line 176, in timeit
timing = self.inner(it, self.timer)
File "<timeit-src>", line 6, in inner
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 115, in benchmark_step
output = model(data)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/opt/anaconda/lib/python3.7/site-packages/torchvision/models/resnet.py", line 196, in forward
x = self.conv1(x)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 533, in __call__
result = hook(self, input)
File "/opt/anaconda/lib/python3.7/site-packages/byteps-0.1.0-py3.7-linux-x86_64.egg/byteps/bytescheduler/torch/optimizer.py", line 206, in pre_forward_hook
self._logger.debug("{} {} is ready.".format(self._desc, self._parameter_names[p]))
RuntimeError: bool value of Tensor with more than one value is ambiguous
|
RuntimeError
|
def _adam(self, p):
"""Performs a single optimization step using Adam optimizer on a parameter.
Arguments:
p: The parameter to be updated.
"""
for group in self.param_groups:
for gp in group["params"]:
if (
self._parameter_names[id(p)] != self._parameter_names[id(gp)]
or gp.shape != p.shape
):
continue
self._logger.debug(
"{} is updating {}".format(self._desc, self._parameter_names[id(p)])
)
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group["amsgrad"]
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
if group["weight_decay"] != 0:
grad.add_(group["weight_decay"], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
break
|
def _adam(self, p):
"""Performs a single optimization step using Adam optimizer on a parameter.
Arguments:
p: The parameter to be updated.
"""
for group in self.param_groups:
for gp in group["params"]:
if (
self._parameter_names[p] != self._parameter_names[gp]
or gp.shape != p.shape
):
continue
self._logger.debug(
"{} is updating {}".format(self._desc, self._parameter_names[p])
)
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group["amsgrad"]
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
if group["weight_decay"] != 0:
grad.add_(group["weight_decay"], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group["eps"])
else:
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
break
|
https://github.com/bytedance/byteps/issues/143
|
Traceback (most recent call last):
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 134, in <module>
timeit.timeit(benchmark_step, number=args.num_warmup_batches)
File "/opt/anaconda/lib/python3.7/timeit.py", line 232, in timeit
return Timer(stmt, setup, timer, globals).timeit(number)
File "/opt/anaconda/lib/python3.7/timeit.py", line 176, in timeit
timing = self.inner(it, self.timer)
File "<timeit-src>", line 6, in inner
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 115, in benchmark_step
output = model(data)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/opt/anaconda/lib/python3.7/site-packages/torchvision/models/resnet.py", line 196, in forward
x = self.conv1(x)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 533, in __call__
result = hook(self, input)
File "/opt/anaconda/lib/python3.7/site-packages/byteps-0.1.0-py3.7-linux-x86_64.egg/byteps/bytescheduler/torch/optimizer.py", line 206, in pre_forward_hook
self._logger.debug("{} {} is ready.".format(self._desc, self._parameter_names[p]))
RuntimeError: bool value of Tensor with more than one value is ambiguous
|
RuntimeError
|
def _rmsprop(self, p):
"""Performs a single optimization step using RMSprop optimizer on a parameter.
Arguments:
p: The parameter to be updated.
"""
for group in self.param_groups:
for gp in group["params"]:
if (
self._parameter_names[id(p)] != self._parameter_names[id(gp)]
or gp.shape != p.shape
):
continue
self._logger.debug(
"{} is updating {}".format(self._desc, self._parameter_names[id(p)])
)
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("RMSprop does not support sparse gradients")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
state["square_avg"] = torch.zeros_like(p.data)
if group["momentum"] > 0:
state["momentum_buffer"] = torch.zeros_like(p.data)
if group["centered"]:
state["grad_avg"] = torch.zeros_like(p.data)
square_avg = state["square_avg"]
alpha = group["alpha"]
state["step"] += 1
if group["weight_decay"] != 0:
grad = grad.add(group["weight_decay"], p.data)
square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
if group["centered"]:
grad_avg = state["grad_avg"]
grad_avg.mul_(alpha).add_(1 - alpha, grad)
avg = (
square_avg.addcmul(-1, grad_avg, grad_avg).sqrt().add_(group["eps"])
)
else:
avg = square_avg.sqrt().add_(group["eps"])
if group["momentum"] > 0:
buf = state["momentum_buffer"]
buf.mul_(group["momentum"]).addcdiv_(grad, avg)
p.data.add_(-group["lr"], buf)
else:
p.data.addcdiv_(-group["lr"], grad, avg)
break
|
def _rmsprop(self, p):
"""Performs a single optimization step using RMSprop optimizer on a parameter.
Arguments:
p: The parameter to be updated.
"""
for group in self.param_groups:
for gp in group["params"]:
if (
self._parameter_names[p] != self._parameter_names[gp]
or gp.shape != p.shape
):
continue
self._logger.debug(
"{} is updating {}".format(self._desc, self._parameter_names[p])
)
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("RMSprop does not support sparse gradients")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
state["square_avg"] = torch.zeros_like(p.data)
if group["momentum"] > 0:
state["momentum_buffer"] = torch.zeros_like(p.data)
if group["centered"]:
state["grad_avg"] = torch.zeros_like(p.data)
square_avg = state["square_avg"]
alpha = group["alpha"]
state["step"] += 1
if group["weight_decay"] != 0:
grad = grad.add(group["weight_decay"], p.data)
square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
if group["centered"]:
grad_avg = state["grad_avg"]
grad_avg.mul_(alpha).add_(1 - alpha, grad)
avg = (
square_avg.addcmul(-1, grad_avg, grad_avg).sqrt().add_(group["eps"])
)
else:
avg = square_avg.sqrt().add_(group["eps"])
if group["momentum"] > 0:
buf = state["momentum_buffer"]
buf.mul_(group["momentum"]).addcdiv_(grad, avg)
p.data.add_(-group["lr"], buf)
else:
p.data.addcdiv_(-group["lr"], grad, avg)
break
|
https://github.com/bytedance/byteps/issues/143
|
Traceback (most recent call last):
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 134, in <module>
timeit.timeit(benchmark_step, number=args.num_warmup_batches)
File "/opt/anaconda/lib/python3.7/timeit.py", line 232, in timeit
return Timer(stmt, setup, timer, globals).timeit(number)
File "/opt/anaconda/lib/python3.7/timeit.py", line 176, in timeit
timing = self.inner(it, self.timer)
File "<timeit-src>", line 6, in inner
File "/usr/local/byteps/example/pytorch/benchmark_bytescheduler.py", line 115, in benchmark_step
output = model(data)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/opt/anaconda/lib/python3.7/site-packages/torchvision/models/resnet.py", line 196, in forward
x = self.conv1(x)
File "/opt/anaconda/lib/python3.7/site-packages/torch/nn/modules/module.py", line 533, in __call__
result = hook(self, input)
File "/opt/anaconda/lib/python3.7/site-packages/byteps-0.1.0-py3.7-linux-x86_64.egg/byteps/bytescheduler/torch/optimizer.py", line 206, in pre_forward_hook
self._logger.debug("{} {} is ready.".format(self._desc, self._parameter_names[p]))
RuntimeError: bool value of Tensor with more than one value is ambiguous
|
RuntimeError
|
def __init__(self, model, hvd_opt, num_steps=10**6):
"""Construct a new ScheduledOptimizer, which uses horovod optimizer under the hood for averaging gradients
across all the Horovod ranks.
Args:
model: The training model. ByteScheduler uses the model object to register hooks.
hvd_opt: Optimizer to use for averaging gradients and applying updates.
num_steps: The maximum number of training steps. ByteScheduler needs to know when to stop cross-iteration
scheduling.
Usage example:
```
import bytescheduler.pytorch.horovod as bsc
bsc.init()
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters, compression)
optimizer = bsc.ScheduledOptimizer(model, optimizer, num_steps)
```
"""
self._model = model
self._opt = hvd_opt
self._logger = logging.getLogger("ByteScheduler")
self._logger.debug("hvd size {}, rank {}".format(size(), rank()))
self._desc = "rank {}".format(rank())
# Track training steps
self._step = 0
self._final_step = num_steps
# Use lock to block the forward propagation of each parameter.
self._locks = {}
for param_group in self.param_groups:
for p in param_group["params"]:
self._locks[p] = threading.Lock()
# The closer to input layer, the higher the priority is.
self._priority_indexes = {}
priority = 0
for p in model.parameters():
self._priority_indexes[p] = priority
priority += 1
assert len(self._grad_accs) == 0
if size() > 1:
self._register_forward_hooks()
self._register_hooks()
# Poll whether the tensor is ready for allreduce or whether the allreduce is finished.
self.event_queue = queue.Queue()
self._poller = threading.Thread(target=self._poll, args=())
self._poller.start()
# Let rank 0 decide the communication order.
self._immediate = False
self._rank = rank()
if self._rank != 0:
self._immediate = True
core.start(rank=self._rank, arch="allreduce")
|
def __init__(self, model, hvd_opt, num_steps=10**6):
"""Construct a new ScheduledOptimizer, which uses horovod optimizer under the hood for averaging gradients
across all the Horovod ranks.
Args:
model: The training model. ByteScheduler uses the model object to register hooks.
hvd_opt: Optimizer to use for averaging gradients and applying updates.
num_steps: The maximum number of training steps. ByteScheduler needs to know when to stop cross-iteration
scheduling.
Usage example:
```
import bytescheduler.pytorch.horovod as bsc
bsc.init()
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters, compression)
optimizer = bsc.ScheduledOptimizer(model, optimizer, num_steps)
```
"""
self._model = model
self._opt = hvd_opt
self._logger = logging.getLogger("ByteScheduler")
self._logger.debug("hvd size {}, rank {}".format(size(), rank()))
self._desc = "rank {}".format(rank())
# Track training steps
self._step = 0
self._final_step = num_steps
# Use lock to block the forward propagation of each parameter.
self._locks = {}
for param_group in self.param_groups:
for p in param_group["params"]:
self._locks[p] = threading.Lock()
# The closer to input layer, the higher the priority is.
self._priority_indexes = {}
priority = 0
for p in model.parameters():
self._priority_indexes[p] = priority
priority += 1
assert len(self._grad_accs) == 0
if size() > 1:
self._register_forward_hooks()
self._register_hooks()
# Poll whether the tensor is ready for allreduce or whether the allreduce is finished.
self.event_queue = queue.Queue()
self._poller = threading.Thread(target=self._poll, args=())
self._poller.start()
# Let rank 0 decide the communication order.
self._immediate = False
self._rank = rank()
if self._rank != 0:
self._immediate = True
core.start(rank=self._rank, arch="allreduce")
core.set_broadcaster(self._broadcast_partition, synchronize)
|
https://github.com/bytedance/byteps/issues/110
|
root@mynode:~/byteps/byteschuler/examples# mpirun --allow-run-as-root -np 1 -H my_IP:1 -mca plm_rsh_args "-p 12945" python pytorch_horovod_benchmark.py
hijack function <unbound method _DistributedOptimizer._register_hooks>
16:44:58.106 comm.py:185 INFO: Comm host: localhost, port: 58888
16:44:58.107 search.py:119 INFO: Bayesian Search is enabled, space {'credit': (4.0, 64.0)}, max_num_steps 15.
16:44:58.107 bytecore.py:124 INFO: start Core 0: credit 4000000.0, partition 1000000, credit tuning 1, partition tuning 0.
Traceback (most recent call last):
File "pytorch_horovod_benchmark.py", line 78, in <module>
optimizer = bsc.ScheduledOptimizer(model, optimizer, args.num_warmup_batches + args.num_iters * args.num_batches_per_iter)
File "/usr/local/lib/python2.7/dist-packages/bytescheduler-0.1.0-py2.7-linux-x86_64.egg/bytescheduler/pytorch/horovod.py", line 79, in __init__
core.set_broadcaster(self._broadcast_partition, synchronize)
AttributeError: 'ByteCore' object has no attribute 'set_broadcaster'
|
AttributeError
|
def _register_compressor(self, params, optimizer_params, compression_params):
"""Register compressor for BytePS
params : mx.gluon.ParameterDict
optimizer_params : dict
compression_params : dict
"""
intra_compressor = Compression.none
if not compression_params:
return intra_compressor
if "fp16" in compression_params:
intra_compressor = Compression.fp16
if "compressor" not in compression_params:
warnings.warn("Compressor is not defined")
return intra_compressor
check_list = ["compressor", "ef", "momentum"]
for _, param in params.items():
# generic
for item in check_list:
if compression_params.get(item):
if isinstance(compression_params[item], str):
setattr(param, "byteps_%s_type" % item, compression_params[item])
else:
raise TypeError("%s should be str" % item)
# need parameter
compressor = compression_params["compressor"]
if compressor == "onebit":
setattr(
param,
"byteps_compressor_onebit_scaling",
str(compression_params.get("scaling", False)),
)
elif (
compressor == "topk" or compressor == "randomk" or compressor == "multibit"
):
# raise KeyError if 'k' is not found
setattr(param, "byteps_compressor_k", compression_params["k"])
if compression_params.get("momentum"):
setattr(param, "byteps_momentum_mu", optimizer_params["momentum"])
if compression_params.get("seed"):
setattr(param, "byteps_seed", compression_params["seed"])
# change
if compression_params.get("momentum"):
# 1bit compressor use an additional momentum for weight decay
if compressor == "onebit" and "wd" in optimizer_params:
intra_compressor = Compression.wdmom(
intra_compressor, optimizer_params["momentum"], optimizer_params["wd"]
)
del optimizer_params["wd"]
del optimizer_params["momentum"]
return intra_compressor
|
def _register_compressor(self, params, optimizer_params, compression_params):
"""Register compressor for BytePS
params : mx.gluon.ParameterDict
optimizer_params : dict
compression_params : dict
"""
intra_compressor = Compression.none
if not compression_params:
return intra_compressor
if "fp16" in compression_params:
intra_compressor = Compression.fp16
if "compressor" not in compression_params:
warnings.warn("Compressor is not defined")
return intra_compressor
check_list = ["compressor", "ef", "momentum"]
for _, param in params.items():
# generic
for item in check_list:
if compression_params.get(item):
if isinstance(compression_params[item], str):
setattr(param, "byteps_%s_type" % item, compression_params[item])
else:
raise TypeError("%s should be str" % item)
# need parameter
compressor = compression_params["compressor"]
if compressor == "onebit":
setattr(
param,
"byteps_compressor_onebit_scaling",
str(compression_params.get("scaling", False)),
)
elif (
compressor == "topk" or compressor == "randomk" or compressor == "multibit"
):
# raise KeyError if 'k' is not found
setattr(param, "byteps_compressor_k", compression_params["k"])
if compression_params.get("momentum"):
setattr(param, "byteps_momentum_mu", optimizer_params["momentum"])
# change
if compression_params.get("momentum"):
# 1bit compressor use an additional momentum for weight decay
if compressor == "onebit" and "wd" in optimizer_params:
intra_compressor = Compression.wdmom(
intra_compressor, optimizer_params["momentum"], optimizer_params["wd"]
)
del optimizer_params["wd"]
del optimizer_params["momentum"]
return intra_compressor
|
https://github.com/bytedance/byteps/issues/25
|
$ DMLC_ROLE=worker DMLC_PS_ROOT_URI=12.12.10.12 DMLC_PS_ROOT_PORT=9000 DMLC_WORKER_ID=0 DMLC_NUM_WORKER=1 DMLC_NUM_SERVER=1 python launcher/launch.py python example/tensorflow/tensorflow_mnist.py
BytePS launching worker
INFO:tensorflow:Create CheckpointSaverHook.
Traceback (most recent call last):
File "example/tensorflow/tensorflow_mnist.py", line 160, in <module>
tf.app.run()
File "/home/shuai/.conda/envs/byteps/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 125, in run
_sys.exit(main(argv))
File "example/tensorflow/tensorflow_mnist.py", line 152, in main
config=config) as mon_sess:
File "/home/shuai/.conda/envs/byteps/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 421, in MonitoredTrainingSession
stop_grace_period_secs=stop_grace_period_secs)
File "/home/shuai/.conda/envs/byteps/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 832, in __init__
stop_grace_period_secs=stop_grace_period_secs)
File "/home/shuai/.conda/envs/byteps/lib/python2.7/site-packages/tensorflow/python/training/monitored_session.py", line 548, in __init__
h.begin()
File "/home/shuai/.conda/envs/byteps/lib/python2.7/site-packages/byteps-0.1.0-py2.7-linux-x86_64.egg/byteps/tensorflow/__init__.py", line 107, in begin
self.bcast_op = broadcast_global_variables(self.root_rank)
File "/home/shuai/.conda/envs/byteps/lib/python2.7/site-packages/byteps-0.1.0-py2.7-linux-x86_64.egg/byteps/tensorflow/__init__.py", line 66, in broadcast_global_variables
return broadcast_variables(tf.global_variables(), root_rank, scope)
File "/home/shuai/.conda/envs/byteps/lib/python2.7/site-packages/byteps-0.1.0-py2.7-linux-x86_64.egg/byteps/tensorflow/__init__.py", line 78, in broadcast_variables
for var in variables])
File "/home/shuai/.conda/envs/byteps/lib/python2.7/site-packages/byteps-0.1.0-py2.7-linux-x86_64.egg/byteps/tensorflow/ops.py", line 116, in broadcast
scope = tf.compat.v1.get_default_graph().get_name_scope()
AttributeError: 'module' object has no attribute 'v1'
^C^C^C^C^C^C^C^C^C^CTerminated
|
AttributeError
|
def _register_compressor(self, params, optimizer_params, compression_params):
"""Register compressor for BytePS
params : mx.gluon.ParameterDict
optimizer_params : dict
compression_params : dict
"""
intra_compressor = Compression.none
if not compression_params:
return intra_compressor
if "fp16" in compression_params:
intra_compressor = Compression.fp16
if "compressor" not in compression_params:
warnings.warn("Compressor is not defined")
return intra_compressor
check_list = ["compressor", "ef", "momentum"]
for _, param in params.items():
# generic
for item in check_list:
if item in compression_params and compression_params[item]:
if isinstance(compression_params[item], str):
setattr(param, "byteps_%s_type" % item, compression_params[item])
else:
raise TypeError("%s should be str" % item)
# need parameter
compressor = compression_params["compressor"]
if compressor == "onebit":
setattr(
param,
"byteps_compressor_onebit_scaling",
str(compression_params.get("scaling", False)),
)
elif (
compressor == "topk" or compressor == "randomk" or compressor == "multibit"
):
# raise KeyError if 'k' is not found
setattr(param, "byteps_compressor_k", compression_params["k"])
if "momentum" in compression_params:
setattr(param, "byteps_momentum_mu", optimizer_params["momentum"])
# change
if "momentum" in compression_params:
# 1bit compressor use an additional momentum for weight decay
if compressor == "onebit" and "wd" in optimizer_params:
intra_compressor = Compression.wdmom(
intra_compressor, optimizer_params["momentum"], optimizer_params["wd"]
)
del optimizer_params["wd"]
del optimizer_params["momentum"]
return intra_compressor
|
def _register_compressor(self, params, optimizer_params, compression_params):
"""Register compressor for BytePS
params : mx.gluon.ParameterDict
optimizer_params : dict
compression_params : dict
"""
intra_compressor = Compression.none
if not compression_params:
return intra_compressor
if "fp16" in compression_params:
intra_compressor = Compression.fp16
if "compressor" not in compression_params:
warnings.warn("Compressor is not defined")
return intra_compressor
check_list = ["compressor", "ef", "momentum"]
for _, param in params.items():
# generic
for item in check_list:
if item in compression_params:
if isinstance(compression_params[item], str):
setattr(param, "byteps_%s_type" % item, compression_params[item])
else:
raise TypeError("%s should be str" % item)
# need parameter
compressor = compression_params["compressor"]
if compressor == "onebit":
setattr(
param,
"byteps_compressor_onebit_scaling",
str(compression_params.get("scaling", False)),
)
elif (
compressor == "topk" or compressor == "randomk" or compressor == "multibit"
):
# raise KeyError if 'k' is not found
setattr(param, "byteps_compressor_k", compression_params["k"])
if "momentum" in compression_params:
setattr(param, "byteps_momentum_mu", optimizer_params["momentum"])
# change
if "momentum" in compression_params:
# 1bit compressor use an additional momentum for weight decay
if compressor == "onebit" and "wd" in optimizer_params:
intra_compressor = Compression.wdmom(
intra_compressor, optimizer_params["momentum"], optimizer_params["wd"]
)
del optimizer_params["wd"]
del optimizer_params["momentum"]
return intra_compressor
|
https://github.com/bytedance/byteps/issues/10
|
In [1]: import tensorflow as tf
In [2]: import byteps.tensorflow as bps
WARNING: Logging before flag parsing goes to stderr.
W0627 11:36:47.010180 139917697820480 deprecation_wrapper.py:119] From /private/home/yuxinwu/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py:79: The name tf.train.SessionRunHook is deprecated. Please use tf.estimator.SessionRunHook instead.
W0627 11:36:47.010504 139917697820480 deprecation_wrapper.py:119] From /private/home/yuxinwu/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py:111: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
In [3]: bps.push_pull(tf.constant([0.0]))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-fc0e9eb4f2bb> in <module>()
----> 1 bps.push_pull(tf.constant([0.0]))
~/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py in push_pull(tensor, scope, average, device_dense, device_sparse, compression)
50 byteps_size = tf.cast(size(), dtype=tensor.dtype)
51 tensor_compressed, ctx = compression.compress(tensor)
---> 52 summed_tensor_compressed = _push_pull(tensor_compressed, scope)
53 summed_tensor = compression.decompress(summed_tensor_compressed, ctx)
54 new_tensor = (tf.div(summed_tensor, byteps_size)
~/.local/lib/python3.6/site-packages/byteps/tensorflow/ops.py in _push_pull(tensor, scope, name)
80 if name is None and not _executing_eagerly():
81 name = 'BytePSPushPull_%s' % _normalize_name(tensor.name)
---> 82 TF_LIB_CTYPES.byteps_tensorflow_declare_tensor(ctypes.c_char_p(scope+name))
83 return C_LIB.byteps_push_pull(tensor, name=name)
84
TypeError: bytes or integer address expected instead of str instance
|
TypeError
|
def parse_args():
parser = argparse.ArgumentParser(
description="Train a model for image classification."
)
parser.add_argument(
"--data-dir",
type=str,
default="~/.mxnet/datasets/imagenet",
help="training and validation pictures to use.",
)
parser.add_argument(
"--rec-train",
type=str,
default="~/.mxnet/datasets/imagenet/rec/train.rec",
help="the training data",
)
parser.add_argument(
"--rec-train-idx",
type=str,
default="~/.mxnet/datasets/imagenet/rec/train.idx",
help="the index of training data",
)
parser.add_argument(
"--rec-val",
type=str,
default="~/.mxnet/datasets/imagenet/rec/val.rec",
help="the validation data",
)
parser.add_argument(
"--rec-val-idx",
type=str,
default="~/.mxnet/datasets/imagenet/rec/val.idx",
help="the index of validation data",
)
parser.add_argument(
"--use-rec",
action="store_true",
help="use image record iter for data input. default is false.",
)
parser.add_argument(
"--batch-size",
type=int,
default=32,
help="training batch size per device (CPU/GPU).",
)
parser.add_argument(
"--dtype",
type=str,
default="float32",
help="data type for training. default is float32",
)
parser.add_argument(
"--num-gpus", type=int, default=0, help="number of gpus to use."
)
parser.add_argument(
"-j",
"--num-data-workers",
dest="num_workers",
default=4,
type=int,
help="number of preprocessing workers",
)
parser.add_argument(
"--num-epochs", type=int, default=3, help="number of training epochs."
)
parser.add_argument(
"--lr", type=float, default=0.1, help="learning rate. default is 0.1."
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
help="momentum value for optimizer, default is 0.9.",
)
parser.add_argument(
"--wd", type=float, default=0.0001, help="weight decay rate. default is 0.0001."
)
parser.add_argument(
"--lr-mode",
type=str,
default="step",
help="learning rate scheduler mode. options are step, poly and cosine.",
)
parser.add_argument(
"--lr-decay",
type=float,
default=0.1,
help="decay rate of learning rate. default is 0.1.",
)
parser.add_argument(
"--lr-decay-period",
type=int,
default=0,
help="interval for periodic learning rate decays. default is 0 to disable.",
)
parser.add_argument(
"--lr-decay-epoch",
type=str,
default="40,60",
help="epochs at which learning rate decays. default is 40,60.",
)
parser.add_argument(
"--warmup-lr",
type=float,
default=0.0,
help="starting warmup learning rate. default is 0.0.",
)
parser.add_argument(
"--warmup-epochs", type=int, default=0, help="number of warmup epochs."
)
parser.add_argument(
"--last-gamma",
action="store_true",
help="whether to init gamma of the last BN layer in each bottleneck to 0.",
)
parser.add_argument(
"--mode",
type=str,
help="mode in which to train the model. options are symbolic, imperative, hybrid",
)
parser.add_argument(
"--model",
type=str,
required=True,
help="type of model to use. see vision_model for options.",
)
parser.add_argument(
"--input-size",
type=int,
default=224,
help="size of the input image size. default is 224",
)
parser.add_argument(
"--crop-ratio",
type=float,
default=0.875,
help="Crop ratio during validation. default is 0.875",
)
parser.add_argument(
"--use-pretrained",
action="store_true",
help="enable using pretrained model from gluon.",
)
parser.add_argument(
"--use_se",
action="store_true",
help="use SE layers or not in resnext. default is false.",
)
parser.add_argument(
"--mixup",
action="store_true",
help="whether train the model with mix-up. default is false.",
)
parser.add_argument(
"--mixup-alpha",
type=float,
default=0.2,
help="beta distribution parameter for mixup sampling, default is 0.2.",
)
parser.add_argument(
"--mixup-off-epoch",
type=int,
default=0,
help="how many last epochs to train without mixup, default is 0.",
)
parser.add_argument(
"--label-smoothing",
action="store_true",
help="use label smoothing or not in training. default is false.",
)
parser.add_argument(
"--no-wd",
action="store_true",
help="whether to remove weight decay on bias, and beta/gamma for batchnorm layers.",
)
parser.add_argument(
"--teacher",
type=str,
default=None,
help="teacher model for distillation training",
)
parser.add_argument(
"--temperature",
type=float,
default=20,
help="temperature parameter for distillation teacher model",
)
parser.add_argument(
"--hard-weight",
type=float,
default=0.5,
help="weight for the loss of one-hot label for distillation training",
)
parser.add_argument(
"--batch-norm",
action="store_true",
help="enable batch normalization or not in vgg. default is false.",
)
parser.add_argument(
"--save-frequency", type=int, default=10, help="frequency of model saving."
)
parser.add_argument(
"--save-dir", type=str, default="params", help="directory of saved models"
)
parser.add_argument(
"--resume-epoch", type=int, default=0, help="epoch to resume training from."
)
parser.add_argument(
"--resume-params", type=str, default="", help="path of parameters to load from."
)
parser.add_argument(
"--resume-states",
type=str,
default="",
help="path of trainer state to load from.",
)
parser.add_argument(
"--log-interval",
type=int,
default=50,
help="Number of batches to wait before logging.",
)
parser.add_argument(
"--logging-file",
type=str,
default="train_imagenet.log",
help="name of training log file",
)
parser.add_argument(
"--use-gn", action="store_true", help="whether to use group norm."
)
# additional arguments for gradient compression
parser.add_argument("--compressor", type=str, default="", help="which compressor")
parser.add_argument("--ef", type=str, default="", help="which error-feedback")
parser.add_argument(
"--compress-momentum", type=str, default="", help="which compress momentum"
)
parser.add_argument(
"--onebit-scaling",
action="store_true",
default=False,
help="enable scaling for onebit compressor",
)
parser.add_argument("--k", default=1, type=int, help="topk or randomk")
parser.add_argument(
"--fp16-pushpull",
action="store_true",
default=False,
help="use fp16 compression during pushpull",
)
opt = parser.parse_args()
return opt
|
def parse_args():
parser = argparse.ArgumentParser(
description="Train a model for image classification."
)
parser.add_argument(
"--data-dir",
type=str,
default="~/.mxnet/datasets/imagenet",
help="training and validation pictures to use.",
)
parser.add_argument(
"--rec-train",
type=str,
default="~/.mxnet/datasets/imagenet/rec/train.rec",
help="the training data",
)
parser.add_argument(
"--rec-train-idx",
type=str,
default="~/.mxnet/datasets/imagenet/rec/train.idx",
help="the index of training data",
)
parser.add_argument(
"--rec-val",
type=str,
default="~/.mxnet/datasets/imagenet/rec/val.rec",
help="the validation data",
)
parser.add_argument(
"--rec-val-idx",
type=str,
default="~/.mxnet/datasets/imagenet/rec/val.idx",
help="the index of validation data",
)
parser.add_argument(
"--use-rec",
action="store_true",
help="use image record iter for data input. default is false.",
)
parser.add_argument(
"--batch-size",
type=int,
default=32,
help="training batch size per device (CPU/GPU).",
)
parser.add_argument(
"--dtype",
type=str,
default="float32",
help="data type for training. default is float32",
)
parser.add_argument(
"--num-gpus", type=int, default=0, help="number of gpus to use."
)
parser.add_argument(
"-j",
"--num-data-workers",
dest="num_workers",
default=4,
type=int,
help="number of preprocessing workers",
)
parser.add_argument(
"--num-epochs", type=int, default=3, help="number of training epochs."
)
parser.add_argument(
"--lr", type=float, default=0.1, help="learning rate. default is 0.1."
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
help="momentum value for optimizer, default is 0.9.",
)
parser.add_argument(
"--wd", type=float, default=0.0001, help="weight decay rate. default is 0.0001."
)
parser.add_argument(
"--lr-mode",
type=str,
default="step",
help="learning rate scheduler mode. options are step, poly and cosine.",
)
parser.add_argument(
"--lr-decay",
type=float,
default=0.1,
help="decay rate of learning rate. default is 0.1.",
)
parser.add_argument(
"--lr-decay-period",
type=int,
default=0,
help="interval for periodic learning rate decays. default is 0 to disable.",
)
parser.add_argument(
"--lr-decay-epoch",
type=str,
default="40,60",
help="epochs at which learning rate decays. default is 40,60.",
)
parser.add_argument(
"--warmup-lr",
type=float,
default=0.0,
help="starting warmup learning rate. default is 0.0.",
)
parser.add_argument(
"--warmup-epochs", type=int, default=0, help="number of warmup epochs."
)
parser.add_argument(
"--last-gamma",
action="store_true",
help="whether to init gamma of the last BN layer in each bottleneck to 0.",
)
parser.add_argument(
"--mode",
type=str,
help="mode in which to train the model. options are symbolic, imperative, hybrid",
)
parser.add_argument(
"--model",
type=str,
required=True,
help="type of model to use. see vision_model for options.",
)
parser.add_argument(
"--input-size",
type=int,
default=224,
help="size of the input image size. default is 224",
)
parser.add_argument(
"--crop-ratio",
type=float,
default=0.875,
help="Crop ratio during validation. default is 0.875",
)
parser.add_argument(
"--use-pretrained",
action="store_true",
help="enable using pretrained model from gluon.",
)
parser.add_argument(
"--use_se",
action="store_true",
help="use SE layers or not in resnext. default is false.",
)
parser.add_argument(
"--mixup",
action="store_true",
help="whether train the model with mix-up. default is false.",
)
parser.add_argument(
"--mixup-alpha",
type=float,
default=0.2,
help="beta distribution parameter for mixup sampling, default is 0.2.",
)
parser.add_argument(
"--mixup-off-epoch",
type=int,
default=0,
help="how many last epochs to train without mixup, default is 0.",
)
parser.add_argument(
"--label-smoothing",
action="store_true",
help="use label smoothing or not in training. default is false.",
)
parser.add_argument(
"--no-wd",
action="store_true",
help="whether to remove weight decay on bias, and beta/gamma for batchnorm layers.",
)
parser.add_argument(
"--teacher",
type=str,
default=None,
help="teacher model for distillation training",
)
parser.add_argument(
"--temperature",
type=float,
default=20,
help="temperature parameter for distillation teacher model",
)
parser.add_argument(
"--hard-weight",
type=float,
default=0.5,
help="weight for the loss of one-hot label for distillation training",
)
parser.add_argument(
"--batch-norm",
action="store_true",
help="enable batch normalization or not in vgg. default is false.",
)
parser.add_argument(
"--save-frequency", type=int, default=10, help="frequency of model saving."
)
parser.add_argument(
"--save-dir", type=str, default="params", help="directory of saved models"
)
parser.add_argument(
"--resume-epoch", type=int, default=0, help="epoch to resume training from."
)
parser.add_argument(
"--resume-params", type=str, default="", help="path of parameters to load from."
)
parser.add_argument(
"--resume-states",
type=str,
default="",
help="path of trainer state to load from.",
)
parser.add_argument(
"--log-interval",
type=int,
default=50,
help="Number of batches to wait before logging.",
)
parser.add_argument(
"--logging-file",
type=str,
default="train_imagenet.log",
help="name of training log file",
)
parser.add_argument(
"--use-gn", action="store_true", help="whether to use group norm."
)
# additional arguments for gradient compression
parser.add_argument("--compressor", type=str, default="", help="which compressor")
parser.add_argument("--ef", type=str, default="", help="which error-feedback")
parser.add_argument(
"--compress-momentum", type=str, default="", help="which compress momentum"
)
parser.add_argument(
"--onebit-scaling",
action="store_true",
default=False,
help="enable scaling for onebit compressor",
)
parser.add_argument(
"--fp16-pushpull",
action="store_true",
default=False,
help="use fp16 compression during pushpull",
)
opt = parser.parse_args()
return opt
|
https://github.com/bytedance/byteps/issues/10
|
In [1]: import tensorflow as tf
In [2]: import byteps.tensorflow as bps
WARNING: Logging before flag parsing goes to stderr.
W0627 11:36:47.010180 139917697820480 deprecation_wrapper.py:119] From /private/home/yuxinwu/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py:79: The name tf.train.SessionRunHook is deprecated. Please use tf.estimator.SessionRunHook instead.
W0627 11:36:47.010504 139917697820480 deprecation_wrapper.py:119] From /private/home/yuxinwu/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py:111: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
In [3]: bps.push_pull(tf.constant([0.0]))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-fc0e9eb4f2bb> in <module>()
----> 1 bps.push_pull(tf.constant([0.0]))
~/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py in push_pull(tensor, scope, average, device_dense, device_sparse, compression)
50 byteps_size = tf.cast(size(), dtype=tensor.dtype)
51 tensor_compressed, ctx = compression.compress(tensor)
---> 52 summed_tensor_compressed = _push_pull(tensor_compressed, scope)
53 summed_tensor = compression.decompress(summed_tensor_compressed, ctx)
54 new_tensor = (tf.div(summed_tensor, byteps_size)
~/.local/lib/python3.6/site-packages/byteps/tensorflow/ops.py in _push_pull(tensor, scope, name)
80 if name is None and not _executing_eagerly():
81 name = 'BytePSPushPull_%s' % _normalize_name(tensor.name)
---> 82 TF_LIB_CTYPES.byteps_tensorflow_declare_tensor(ctypes.c_char_p(scope+name))
83 return C_LIB.byteps_push_pull(tensor, name=name)
84
TypeError: bytes or integer address expected instead of str instance
|
TypeError
|
def main():
opt = parse_args()
filehandler = logging.FileHandler(opt.logging_file)
streamhandler = logging.StreamHandler()
logger = logging.getLogger("")
logger.setLevel(logging.INFO)
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
logger.info(opt)
bps.init()
batch_size = opt.batch_size
classes = 1000
num_training_samples = 1281167
num_gpus = opt.num_gpus
# batch_size *= max(1, num_gpus)
context = mx.gpu(bps.local_rank()) if num_gpus > 0 else mx.cpu(bps.local_rank())
num_workers = opt.num_workers
nworker = bps.size()
rank = bps.rank()
lr_decay = opt.lr_decay
lr_decay_period = opt.lr_decay_period
if opt.lr_decay_period > 0:
lr_decay_epoch = list(range(lr_decay_period, opt.num_epochs, lr_decay_period))
else:
lr_decay_epoch = [int(i) for i in opt.lr_decay_epoch.split(",")]
lr_decay_epoch = [e - opt.warmup_epochs for e in lr_decay_epoch]
num_batches = num_training_samples // (batch_size * nworker)
lr_scheduler = LRSequential(
[
LRScheduler(
"linear",
base_lr=opt.warmup_lr,
target_lr=opt.lr * nworker / bps.local_size(),
nepochs=opt.warmup_epochs,
iters_per_epoch=num_batches,
),
LRScheduler(
opt.lr_mode,
base_lr=opt.lr * nworker / bps.local_size(),
target_lr=0,
nepochs=opt.num_epochs - opt.warmup_epochs,
iters_per_epoch=num_batches,
step_epoch=lr_decay_epoch,
step_factor=lr_decay,
power=2,
),
]
)
model_name = opt.model
kwargs = {"ctx": context, "pretrained": opt.use_pretrained, "classes": classes}
if opt.use_gn:
from gluoncv.nn import GroupNorm
kwargs["norm_layer"] = GroupNorm
if model_name.startswith("vgg"):
kwargs["batch_norm"] = opt.batch_norm
elif model_name.startswith("resnext"):
kwargs["use_se"] = opt.use_se
if opt.last_gamma:
kwargs["last_gamma"] = True
if opt.compressor:
optimizer = "sgd"
else:
optimizer = "nag"
optimizer_params = {
"wd": opt.wd,
"momentum": opt.momentum,
"lr_scheduler": lr_scheduler,
}
if opt.dtype != "float32":
optimizer_params["multi_precision"] = True
net = get_model(model_name, **kwargs)
net.cast(opt.dtype)
if opt.resume_params is not "":
net.load_parameters(opt.resume_params, ctx=context)
# teacher model for distillation training
if opt.teacher is not None and opt.hard_weight < 1.0:
teacher_name = opt.teacher
teacher = get_model(teacher_name, pretrained=True, classes=classes, ctx=context)
teacher.cast(opt.dtype)
distillation = True
else:
distillation = False
# Two functions for reading data from record file or raw images
def get_data_rec(
rec_train, rec_train_idx, rec_val, rec_val_idx, batch_size, num_workers
):
rec_train = os.path.expanduser(rec_train)
rec_train_idx = os.path.expanduser(rec_train_idx)
rec_val = os.path.expanduser(rec_val)
rec_val_idx = os.path.expanduser(rec_val_idx)
jitter_param = 0.4
lighting_param = 0.1
input_size = opt.input_size
crop_ratio = opt.crop_ratio if opt.crop_ratio > 0 else 0.875
resize = int(math.ceil(input_size / crop_ratio))
mean_rgb = [123.68, 116.779, 103.939]
std_rgb = [58.393, 57.12, 57.375]
def batch_fn(batch, ctx):
data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(
batch.label[0], ctx_list=ctx, batch_axis=0
)
return data, label
train_data = mx.io.ImageRecordIter(
path_imgrec=rec_train,
path_imgidx=rec_train_idx,
preprocess_threads=num_workers,
shuffle=True,
batch_size=batch_size,
data_shape=(3, input_size, input_size),
mean_r=mean_rgb[0],
mean_g=mean_rgb[1],
mean_b=mean_rgb[2],
std_r=std_rgb[0],
std_g=std_rgb[1],
std_b=std_rgb[2],
rand_mirror=True,
random_resized_crop=True,
max_aspect_ratio=4.0 / 3.0,
min_aspect_ratio=3.0 / 4.0,
max_random_area=1,
min_random_area=0.08,
brightness=jitter_param,
saturation=jitter_param,
contrast=jitter_param,
pca_noise=lighting_param,
num_parts=nworker,
part_index=rank,
)
val_data = mx.io.ImageRecordIter(
path_imgrec=rec_val,
path_imgidx=rec_val_idx,
preprocess_threads=num_workers,
shuffle=False,
batch_size=batch_size,
resize=resize,
data_shape=(3, input_size, input_size),
mean_r=mean_rgb[0],
mean_g=mean_rgb[1],
mean_b=mean_rgb[2],
std_r=std_rgb[0],
std_g=std_rgb[1],
std_b=std_rgb[2],
num_parts=nworker,
part_index=rank,
)
return train_data, val_data, batch_fn
def get_data_loader(data_dir, batch_size, num_workers):
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
jitter_param = 0.4
lighting_param = 0.1
input_size = opt.input_size
crop_ratio = opt.crop_ratio if opt.crop_ratio > 0 else 0.875
resize = int(math.ceil(input_size / crop_ratio))
def batch_fn(batch, ctx):
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
return data, label
transform_train = transforms.Compose(
[
transforms.RandomResizedCrop(input_size),
transforms.RandomFlipLeftRight(),
transforms.RandomColorJitter(
brightness=jitter_param,
contrast=jitter_param,
saturation=jitter_param,
),
transforms.RandomLighting(lighting_param),
transforms.ToTensor(),
normalize,
]
)
transform_test = transforms.Compose(
[
transforms.Resize(resize, keep_ratio=True),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
normalize,
]
)
train_data = gluon.data.DataLoader(
imagenet.classification.ImageNet(data_dir, train=True).transform_first(
transform_train
),
batch_size=batch_size,
shuffle=True,
last_batch="discard",
num_workers=num_workers,
)
val_data = gluon.data.DataLoader(
imagenet.classification.ImageNet(data_dir, train=False).transform_first(
transform_test
),
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
)
return train_data, val_data, batch_fn
if opt.use_rec:
train_data, val_data, batch_fn = get_data_rec(
opt.rec_train,
opt.rec_train_idx,
opt.rec_val,
opt.rec_val_idx,
batch_size,
num_workers,
)
else:
train_data, val_data, batch_fn = get_data_loader(
opt.data_dir, batch_size, num_workers
)
if opt.mixup:
train_metric = mx.metric.RMSE()
else:
train_metric = mx.metric.Accuracy()
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
save_frequency = opt.save_frequency
if opt.save_dir and save_frequency:
save_dir = opt.save_dir
makedirs(save_dir)
else:
save_dir = ""
save_frequency = 0
def mixup_transform(label, classes, lam=1, eta=0.0):
if isinstance(label, nd.NDArray):
label = [label]
res = []
for l in label:
y1 = l.one_hot(
classes, on_value=1 - eta + eta / classes, off_value=eta / classes
)
y2 = l[::-1].one_hot(
classes, on_value=1 - eta + eta / classes, off_value=eta / classes
)
res.append(lam * y1 + (1 - lam) * y2)
return res
def smooth(label, classes, eta=0.1):
if isinstance(label, nd.NDArray):
label = [label]
smoothed = []
for l in label:
res = l.one_hot(
classes, on_value=1 - eta + eta / classes, off_value=eta / classes
)
smoothed.append(res)
return smoothed
def test(ctx, val_data):
if opt.use_rec:
val_data.reset()
acc_top1.reset()
acc_top5.reset()
for i, batch in enumerate(val_data):
data, label = batch_fn(batch, ctx)
outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
acc_top1.update(label, outputs)
acc_top5.update(label, outputs)
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
return (1 - top1, 1 - top5)
def train(ctx):
if isinstance(ctx, mx.Context):
ctx = [ctx]
if opt.resume_params is "":
net.initialize(mx.init.MSRAPrelu(), ctx=ctx)
if opt.no_wd:
for k, v in net.collect_params(".*beta|.*gamma|.*bias").items():
v.wd_mult = 0.0
compression_params = {
"compressor": opt.compressor,
"ef": opt.ef,
"momentum": opt.compress_momentum,
"scaling": opt.onebit_scaling,
"k": opt.k,
}
trainer = bps.DistributedTrainer(
net.collect_params(),
optimizer,
optimizer_params,
compression_params=compression_params,
)
if opt.resume_states is not "":
trainer.load_states(opt.resume_states)
if opt.label_smoothing or opt.mixup:
sparse_label_loss = False
else:
sparse_label_loss = True
if distillation:
L = gcv.loss.DistillationSoftmaxCrossEntropyLoss(
temperature=opt.temperature,
hard_weight=opt.hard_weight,
sparse_label=sparse_label_loss,
)
else:
L = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=sparse_label_loss)
best_val_score = 1
for epoch in range(opt.resume_epoch, opt.num_epochs):
tic = time.time()
if opt.use_rec:
train_data.reset()
train_metric.reset()
btic = time.time()
for i, batch in enumerate(train_data):
data, label = batch_fn(batch, ctx)
if opt.mixup:
lam = np.random.beta(opt.mixup_alpha, opt.mixup_alpha)
if epoch >= opt.num_epochs - opt.mixup_off_epoch:
lam = 1
data = [lam * X + (1 - lam) * X[::-1] for X in data]
if opt.label_smoothing:
eta = 0.1
else:
eta = 0.0
label = mixup_transform(label, classes, lam, eta)
elif opt.label_smoothing:
hard_label = label
label = smooth(label, classes)
if distillation:
teacher_prob = [
nd.softmax(
teacher(X.astype(opt.dtype, copy=False)) / opt.temperature
)
for X in data
]
with ag.record():
outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
if distillation:
loss = [
L(
yhat.astype("float32", copy=False),
y.astype("float32", copy=False),
p.astype("float32", copy=False),
)
for yhat, y, p in zip(outputs, label, teacher_prob)
]
else:
loss = [
L(yhat, y.astype(opt.dtype, copy=False))
for yhat, y in zip(outputs, label)
]
for l in loss:
l.backward()
trainer.step(batch_size)
if opt.mixup:
output_softmax = [
nd.SoftmaxActivation(out.astype("float32", copy=False))
for out in outputs
]
train_metric.update(label, output_softmax)
else:
if opt.label_smoothing:
train_metric.update(hard_label, outputs)
else:
train_metric.update(label, outputs)
if opt.log_interval and not (i + 1) % opt.log_interval:
train_metric_name, train_metric_score = train_metric.get()
logger.info(
"Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\t%s=%f\tlr=%f\ttime=%f"
% (
epoch,
i,
batch_size
* nworker
* opt.log_interval
/ (time.time() - btic),
train_metric_name,
train_metric_score,
trainer.learning_rate,
time.time() - btic,
)
)
btic = time.time()
train_metric_name, train_metric_score = train_metric.get()
throughput = int(batch_size * nworker * i / (time.time() - tic))
logger.info(
"[Epoch %d] training: %s=%f"
% (epoch, train_metric_name, train_metric_score)
)
logger.info(
"[Epoch %d] speed: %d samples/sec\ttime cost: %f"
% (epoch, throughput, time.time() - tic)
)
err_top1_val, err_top5_val = test(ctx, val_data)
logger.info(
"[Epoch %d] validation: err-top1=%f err-top5=%f"
% (epoch, err_top1_val, err_top5_val)
)
if err_top1_val < best_val_score:
best_val_score = err_top1_val
net.save_parameters(
"%s/%.4f-imagenet-%s-%d-best.params"
% (save_dir, best_val_score, model_name, epoch)
)
trainer.save_states(
"%s/%.4f-imagenet-%s-%d-best.states"
% (save_dir, best_val_score, model_name, epoch)
)
if save_frequency and save_dir and (epoch + 1) % save_frequency == 0:
net.save_parameters(
"%s/imagenet-%s-%d.params" % (save_dir, model_name, epoch)
)
trainer.save_states(
"%s/imagenet-%s-%d.states" % (save_dir, model_name, epoch)
)
if save_frequency and save_dir:
net.save_parameters(
"%s/imagenet-%s-%d.params" % (save_dir, model_name, opt.num_epochs - 1)
)
trainer.save_states(
"%s/imagenet-%s-%d.states" % (save_dir, model_name, opt.num_epochs - 1)
)
if opt.mode == "hybrid":
net.hybridize(static_alloc=True, static_shape=True)
if distillation:
teacher.hybridize(static_alloc=True, static_shape=True)
train(context)
|
def main():
opt = parse_args()
filehandler = logging.FileHandler(opt.logging_file)
streamhandler = logging.StreamHandler()
logger = logging.getLogger("")
logger.setLevel(logging.INFO)
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
logger.info(opt)
bps.init()
batch_size = opt.batch_size
classes = 1000
num_training_samples = 1281167
num_gpus = opt.num_gpus
# batch_size *= max(1, num_gpus)
context = mx.gpu(bps.local_rank()) if num_gpus > 0 else mx.cpu(bps.local_rank())
num_workers = opt.num_workers
nworker = bps.size()
rank = bps.rank()
lr_decay = opt.lr_decay
lr_decay_period = opt.lr_decay_period
if opt.lr_decay_period > 0:
lr_decay_epoch = list(range(lr_decay_period, opt.num_epochs, lr_decay_period))
else:
lr_decay_epoch = [int(i) for i in opt.lr_decay_epoch.split(",")]
lr_decay_epoch = [e - opt.warmup_epochs for e in lr_decay_epoch]
num_batches = num_training_samples // (batch_size * nworker)
lr_scheduler = LRSequential(
[
LRScheduler(
"linear",
base_lr=opt.warmup_lr,
target_lr=opt.lr * nworker / bps.local_size(),
nepochs=opt.warmup_epochs,
iters_per_epoch=num_batches,
),
LRScheduler(
opt.lr_mode,
base_lr=opt.lr * nworker / bps.local_size(),
target_lr=0,
nepochs=opt.num_epochs - opt.warmup_epochs,
iters_per_epoch=num_batches,
step_epoch=lr_decay_epoch,
step_factor=lr_decay,
power=2,
),
]
)
model_name = opt.model
kwargs = {"ctx": context, "pretrained": opt.use_pretrained, "classes": classes}
if opt.use_gn:
from gluoncv.nn import GroupNorm
kwargs["norm_layer"] = GroupNorm
if model_name.startswith("vgg"):
kwargs["batch_norm"] = opt.batch_norm
elif model_name.startswith("resnext"):
kwargs["use_se"] = opt.use_se
if opt.last_gamma:
kwargs["last_gamma"] = True
if opt.compressor:
optimizer = "sgd"
else:
optimizer = "nag"
optimizer_params = {
"wd": opt.wd,
"momentum": opt.momentum,
"lr_scheduler": lr_scheduler,
}
if opt.dtype != "float32":
optimizer_params["multi_precision"] = True
net = get_model(model_name, **kwargs)
net.cast(opt.dtype)
if opt.resume_params is not "":
net.load_parameters(opt.resume_params, ctx=context)
# teacher model for distillation training
if opt.teacher is not None and opt.hard_weight < 1.0:
teacher_name = opt.teacher
teacher = get_model(teacher_name, pretrained=True, classes=classes, ctx=context)
teacher.cast(opt.dtype)
distillation = True
else:
distillation = False
# Two functions for reading data from record file or raw images
def get_data_rec(
rec_train, rec_train_idx, rec_val, rec_val_idx, batch_size, num_workers
):
rec_train = os.path.expanduser(rec_train)
rec_train_idx = os.path.expanduser(rec_train_idx)
rec_val = os.path.expanduser(rec_val)
rec_val_idx = os.path.expanduser(rec_val_idx)
jitter_param = 0.4
lighting_param = 0.1
input_size = opt.input_size
crop_ratio = opt.crop_ratio if opt.crop_ratio > 0 else 0.875
resize = int(math.ceil(input_size / crop_ratio))
mean_rgb = [123.68, 116.779, 103.939]
std_rgb = [58.393, 57.12, 57.375]
def batch_fn(batch, ctx):
data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(
batch.label[0], ctx_list=ctx, batch_axis=0
)
return data, label
train_data = mx.io.ImageRecordIter(
path_imgrec=rec_train,
path_imgidx=rec_train_idx,
preprocess_threads=num_workers,
shuffle=True,
batch_size=batch_size,
data_shape=(3, input_size, input_size),
mean_r=mean_rgb[0],
mean_g=mean_rgb[1],
mean_b=mean_rgb[2],
std_r=std_rgb[0],
std_g=std_rgb[1],
std_b=std_rgb[2],
rand_mirror=True,
random_resized_crop=True,
max_aspect_ratio=4.0 / 3.0,
min_aspect_ratio=3.0 / 4.0,
max_random_area=1,
min_random_area=0.08,
brightness=jitter_param,
saturation=jitter_param,
contrast=jitter_param,
pca_noise=lighting_param,
num_parts=nworker,
part_index=rank,
)
val_data = mx.io.ImageRecordIter(
path_imgrec=rec_val,
path_imgidx=rec_val_idx,
preprocess_threads=num_workers,
shuffle=False,
batch_size=batch_size,
resize=resize,
data_shape=(3, input_size, input_size),
mean_r=mean_rgb[0],
mean_g=mean_rgb[1],
mean_b=mean_rgb[2],
std_r=std_rgb[0],
std_g=std_rgb[1],
std_b=std_rgb[2],
num_parts=nworker,
part_index=rank,
)
return train_data, val_data, batch_fn
def get_data_loader(data_dir, batch_size, num_workers):
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
jitter_param = 0.4
lighting_param = 0.1
input_size = opt.input_size
crop_ratio = opt.crop_ratio if opt.crop_ratio > 0 else 0.875
resize = int(math.ceil(input_size / crop_ratio))
def batch_fn(batch, ctx):
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
return data, label
transform_train = transforms.Compose(
[
transforms.RandomResizedCrop(input_size),
transforms.RandomFlipLeftRight(),
transforms.RandomColorJitter(
brightness=jitter_param,
contrast=jitter_param,
saturation=jitter_param,
),
transforms.RandomLighting(lighting_param),
transforms.ToTensor(),
normalize,
]
)
transform_test = transforms.Compose(
[
transforms.Resize(resize, keep_ratio=True),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
normalize,
]
)
train_data = gluon.data.DataLoader(
imagenet.classification.ImageNet(data_dir, train=True).transform_first(
transform_train
),
batch_size=batch_size,
shuffle=True,
last_batch="discard",
num_workers=num_workers,
)
val_data = gluon.data.DataLoader(
imagenet.classification.ImageNet(data_dir, train=False).transform_first(
transform_test
),
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
)
return train_data, val_data, batch_fn
if opt.use_rec:
train_data, val_data, batch_fn = get_data_rec(
opt.rec_train,
opt.rec_train_idx,
opt.rec_val,
opt.rec_val_idx,
batch_size,
num_workers,
)
else:
train_data, val_data, batch_fn = get_data_loader(
opt.data_dir, batch_size, num_workers
)
if opt.mixup:
train_metric = mx.metric.RMSE()
else:
train_metric = mx.metric.Accuracy()
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
save_frequency = opt.save_frequency
if opt.save_dir and save_frequency:
save_dir = opt.save_dir
makedirs(save_dir)
else:
save_dir = ""
save_frequency = 0
def mixup_transform(label, classes, lam=1, eta=0.0):
if isinstance(label, nd.NDArray):
label = [label]
res = []
for l in label:
y1 = l.one_hot(
classes, on_value=1 - eta + eta / classes, off_value=eta / classes
)
y2 = l[::-1].one_hot(
classes, on_value=1 - eta + eta / classes, off_value=eta / classes
)
res.append(lam * y1 + (1 - lam) * y2)
return res
def smooth(label, classes, eta=0.1):
if isinstance(label, nd.NDArray):
label = [label]
smoothed = []
for l in label:
res = l.one_hot(
classes, on_value=1 - eta + eta / classes, off_value=eta / classes
)
smoothed.append(res)
return smoothed
def test(ctx, val_data):
if opt.use_rec:
val_data.reset()
acc_top1.reset()
acc_top5.reset()
for i, batch in enumerate(val_data):
data, label = batch_fn(batch, ctx)
outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
acc_top1.update(label, outputs)
acc_top5.update(label, outputs)
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
return (1 - top1, 1 - top5)
def train(ctx):
if isinstance(ctx, mx.Context):
ctx = [ctx]
if opt.resume_params is "":
net.initialize(mx.init.MSRAPrelu(), ctx=ctx)
if opt.no_wd:
for k, v in net.collect_params(".*beta|.*gamma|.*bias").items():
v.wd_mult = 0.0
compression_params = {
"compressor": opt.compressor,
"ef": opt.ef,
"momentum": opt.compress_momentum,
"scaling": opt.onebit_scaling,
}
trainer = bps.DistributedTrainer(
net.collect_params(),
optimizer,
optimizer_params,
compression_params=compression_params,
)
if opt.resume_states is not "":
trainer.load_states(opt.resume_states)
if opt.label_smoothing or opt.mixup:
sparse_label_loss = False
else:
sparse_label_loss = True
if distillation:
L = gcv.loss.DistillationSoftmaxCrossEntropyLoss(
temperature=opt.temperature,
hard_weight=opt.hard_weight,
sparse_label=sparse_label_loss,
)
else:
L = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=sparse_label_loss)
best_val_score = 1
for epoch in range(opt.resume_epoch, opt.num_epochs):
tic = time.time()
if opt.use_rec:
train_data.reset()
train_metric.reset()
btic = time.time()
for i, batch in enumerate(train_data):
data, label = batch_fn(batch, ctx)
if opt.mixup:
lam = np.random.beta(opt.mixup_alpha, opt.mixup_alpha)
if epoch >= opt.num_epochs - opt.mixup_off_epoch:
lam = 1
data = [lam * X + (1 - lam) * X[::-1] for X in data]
if opt.label_smoothing:
eta = 0.1
else:
eta = 0.0
label = mixup_transform(label, classes, lam, eta)
elif opt.label_smoothing:
hard_label = label
label = smooth(label, classes)
if distillation:
teacher_prob = [
nd.softmax(
teacher(X.astype(opt.dtype, copy=False)) / opt.temperature
)
for X in data
]
with ag.record():
outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
if distillation:
loss = [
L(
yhat.astype("float32", copy=False),
y.astype("float32", copy=False),
p.astype("float32", copy=False),
)
for yhat, y, p in zip(outputs, label, teacher_prob)
]
else:
loss = [
L(yhat, y.astype(opt.dtype, copy=False))
for yhat, y in zip(outputs, label)
]
for l in loss:
l.backward()
trainer.step(batch_size)
if opt.mixup:
output_softmax = [
nd.SoftmaxActivation(out.astype("float32", copy=False))
for out in outputs
]
train_metric.update(label, output_softmax)
else:
if opt.label_smoothing:
train_metric.update(hard_label, outputs)
else:
train_metric.update(label, outputs)
if opt.log_interval and not (i + 1) % opt.log_interval:
train_metric_name, train_metric_score = train_metric.get()
logger.info(
"Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\t%s=%f\tlr=%f\ttime=%f"
% (
epoch,
i,
batch_size
* nworker
* opt.log_interval
/ (time.time() - btic),
train_metric_name,
train_metric_score,
trainer.learning_rate,
time.time() - btic,
)
)
btic = time.time()
train_metric_name, train_metric_score = train_metric.get()
throughput = int(batch_size * nworker * i / (time.time() - tic))
logger.info(
"[Epoch %d] training: %s=%f"
% (epoch, train_metric_name, train_metric_score)
)
logger.info(
"[Epoch %d] speed: %d samples/sec\ttime cost: %f"
% (epoch, throughput, time.time() - tic)
)
err_top1_val, err_top5_val = test(ctx, val_data)
logger.info(
"[Epoch %d] validation: err-top1=%f err-top5=%f"
% (epoch, err_top1_val, err_top5_val)
)
if err_top1_val < best_val_score:
best_val_score = err_top1_val
net.save_parameters(
"%s/%.4f-imagenet-%s-%d-best.params"
% (save_dir, best_val_score, model_name, epoch)
)
trainer.save_states(
"%s/%.4f-imagenet-%s-%d-best.states"
% (save_dir, best_val_score, model_name, epoch)
)
if save_frequency and save_dir and (epoch + 1) % save_frequency == 0:
net.save_parameters(
"%s/imagenet-%s-%d.params" % (save_dir, model_name, epoch)
)
trainer.save_states(
"%s/imagenet-%s-%d.states" % (save_dir, model_name, epoch)
)
if save_frequency and save_dir:
net.save_parameters(
"%s/imagenet-%s-%d.params" % (save_dir, model_name, opt.num_epochs - 1)
)
trainer.save_states(
"%s/imagenet-%s-%d.states" % (save_dir, model_name, opt.num_epochs - 1)
)
if opt.mode == "hybrid":
net.hybridize(static_alloc=True, static_shape=True)
if distillation:
teacher.hybridize(static_alloc=True, static_shape=True)
train(context)
|
https://github.com/bytedance/byteps/issues/10
|
In [1]: import tensorflow as tf
In [2]: import byteps.tensorflow as bps
WARNING: Logging before flag parsing goes to stderr.
W0627 11:36:47.010180 139917697820480 deprecation_wrapper.py:119] From /private/home/yuxinwu/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py:79: The name tf.train.SessionRunHook is deprecated. Please use tf.estimator.SessionRunHook instead.
W0627 11:36:47.010504 139917697820480 deprecation_wrapper.py:119] From /private/home/yuxinwu/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py:111: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
In [3]: bps.push_pull(tf.constant([0.0]))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-fc0e9eb4f2bb> in <module>()
----> 1 bps.push_pull(tf.constant([0.0]))
~/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py in push_pull(tensor, scope, average, device_dense, device_sparse, compression)
50 byteps_size = tf.cast(size(), dtype=tensor.dtype)
51 tensor_compressed, ctx = compression.compress(tensor)
---> 52 summed_tensor_compressed = _push_pull(tensor_compressed, scope)
53 summed_tensor = compression.decompress(summed_tensor_compressed, ctx)
54 new_tensor = (tf.div(summed_tensor, byteps_size)
~/.local/lib/python3.6/site-packages/byteps/tensorflow/ops.py in _push_pull(tensor, scope, name)
80 if name is None and not _executing_eagerly():
81 name = 'BytePSPushPull_%s' % _normalize_name(tensor.name)
---> 82 TF_LIB_CTYPES.byteps_tensorflow_declare_tensor(ctypes.c_char_p(scope+name))
83 return C_LIB.byteps_push_pull(tensor, name=name)
84
TypeError: bytes or integer address expected instead of str instance
|
TypeError
|
def train(ctx):
if isinstance(ctx, mx.Context):
ctx = [ctx]
if opt.resume_params is "":
net.initialize(mx.init.MSRAPrelu(), ctx=ctx)
if opt.no_wd:
for k, v in net.collect_params(".*beta|.*gamma|.*bias").items():
v.wd_mult = 0.0
compression_params = {
"compressor": opt.compressor,
"ef": opt.ef,
"momentum": opt.compress_momentum,
"scaling": opt.onebit_scaling,
"k": opt.k,
}
trainer = bps.DistributedTrainer(
net.collect_params(),
optimizer,
optimizer_params,
compression_params=compression_params,
)
if opt.resume_states is not "":
trainer.load_states(opt.resume_states)
if opt.label_smoothing or opt.mixup:
sparse_label_loss = False
else:
sparse_label_loss = True
if distillation:
L = gcv.loss.DistillationSoftmaxCrossEntropyLoss(
temperature=opt.temperature,
hard_weight=opt.hard_weight,
sparse_label=sparse_label_loss,
)
else:
L = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=sparse_label_loss)
best_val_score = 1
for epoch in range(opt.resume_epoch, opt.num_epochs):
tic = time.time()
if opt.use_rec:
train_data.reset()
train_metric.reset()
btic = time.time()
for i, batch in enumerate(train_data):
data, label = batch_fn(batch, ctx)
if opt.mixup:
lam = np.random.beta(opt.mixup_alpha, opt.mixup_alpha)
if epoch >= opt.num_epochs - opt.mixup_off_epoch:
lam = 1
data = [lam * X + (1 - lam) * X[::-1] for X in data]
if opt.label_smoothing:
eta = 0.1
else:
eta = 0.0
label = mixup_transform(label, classes, lam, eta)
elif opt.label_smoothing:
hard_label = label
label = smooth(label, classes)
if distillation:
teacher_prob = [
nd.softmax(
teacher(X.astype(opt.dtype, copy=False)) / opt.temperature
)
for X in data
]
with ag.record():
outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
if distillation:
loss = [
L(
yhat.astype("float32", copy=False),
y.astype("float32", copy=False),
p.astype("float32", copy=False),
)
for yhat, y, p in zip(outputs, label, teacher_prob)
]
else:
loss = [
L(yhat, y.astype(opt.dtype, copy=False))
for yhat, y in zip(outputs, label)
]
for l in loss:
l.backward()
trainer.step(batch_size)
if opt.mixup:
output_softmax = [
nd.SoftmaxActivation(out.astype("float32", copy=False))
for out in outputs
]
train_metric.update(label, output_softmax)
else:
if opt.label_smoothing:
train_metric.update(hard_label, outputs)
else:
train_metric.update(label, outputs)
if opt.log_interval and not (i + 1) % opt.log_interval:
train_metric_name, train_metric_score = train_metric.get()
logger.info(
"Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\t%s=%f\tlr=%f\ttime=%f"
% (
epoch,
i,
batch_size * nworker * opt.log_interval / (time.time() - btic),
train_metric_name,
train_metric_score,
trainer.learning_rate,
time.time() - btic,
)
)
btic = time.time()
train_metric_name, train_metric_score = train_metric.get()
throughput = int(batch_size * nworker * i / (time.time() - tic))
logger.info(
"[Epoch %d] training: %s=%f"
% (epoch, train_metric_name, train_metric_score)
)
logger.info(
"[Epoch %d] speed: %d samples/sec\ttime cost: %f"
% (epoch, throughput, time.time() - tic)
)
err_top1_val, err_top5_val = test(ctx, val_data)
logger.info(
"[Epoch %d] validation: err-top1=%f err-top5=%f"
% (epoch, err_top1_val, err_top5_val)
)
if err_top1_val < best_val_score:
best_val_score = err_top1_val
net.save_parameters(
"%s/%.4f-imagenet-%s-%d-best.params"
% (save_dir, best_val_score, model_name, epoch)
)
trainer.save_states(
"%s/%.4f-imagenet-%s-%d-best.states"
% (save_dir, best_val_score, model_name, epoch)
)
if save_frequency and save_dir and (epoch + 1) % save_frequency == 0:
net.save_parameters(
"%s/imagenet-%s-%d.params" % (save_dir, model_name, epoch)
)
trainer.save_states(
"%s/imagenet-%s-%d.states" % (save_dir, model_name, epoch)
)
if save_frequency and save_dir:
net.save_parameters(
"%s/imagenet-%s-%d.params" % (save_dir, model_name, opt.num_epochs - 1)
)
trainer.save_states(
"%s/imagenet-%s-%d.states" % (save_dir, model_name, opt.num_epochs - 1)
)
|
def train(ctx):
if isinstance(ctx, mx.Context):
ctx = [ctx]
if opt.resume_params is "":
net.initialize(mx.init.MSRAPrelu(), ctx=ctx)
if opt.no_wd:
for k, v in net.collect_params(".*beta|.*gamma|.*bias").items():
v.wd_mult = 0.0
compression_params = {
"compressor": opt.compressor,
"ef": opt.ef,
"momentum": opt.compress_momentum,
"scaling": opt.onebit_scaling,
}
trainer = bps.DistributedTrainer(
net.collect_params(),
optimizer,
optimizer_params,
compression_params=compression_params,
)
if opt.resume_states is not "":
trainer.load_states(opt.resume_states)
if opt.label_smoothing or opt.mixup:
sparse_label_loss = False
else:
sparse_label_loss = True
if distillation:
L = gcv.loss.DistillationSoftmaxCrossEntropyLoss(
temperature=opt.temperature,
hard_weight=opt.hard_weight,
sparse_label=sparse_label_loss,
)
else:
L = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=sparse_label_loss)
best_val_score = 1
for epoch in range(opt.resume_epoch, opt.num_epochs):
tic = time.time()
if opt.use_rec:
train_data.reset()
train_metric.reset()
btic = time.time()
for i, batch in enumerate(train_data):
data, label = batch_fn(batch, ctx)
if opt.mixup:
lam = np.random.beta(opt.mixup_alpha, opt.mixup_alpha)
if epoch >= opt.num_epochs - opt.mixup_off_epoch:
lam = 1
data = [lam * X + (1 - lam) * X[::-1] for X in data]
if opt.label_smoothing:
eta = 0.1
else:
eta = 0.0
label = mixup_transform(label, classes, lam, eta)
elif opt.label_smoothing:
hard_label = label
label = smooth(label, classes)
if distillation:
teacher_prob = [
nd.softmax(
teacher(X.astype(opt.dtype, copy=False)) / opt.temperature
)
for X in data
]
with ag.record():
outputs = [net(X.astype(opt.dtype, copy=False)) for X in data]
if distillation:
loss = [
L(
yhat.astype("float32", copy=False),
y.astype("float32", copy=False),
p.astype("float32", copy=False),
)
for yhat, y, p in zip(outputs, label, teacher_prob)
]
else:
loss = [
L(yhat, y.astype(opt.dtype, copy=False))
for yhat, y in zip(outputs, label)
]
for l in loss:
l.backward()
trainer.step(batch_size)
if opt.mixup:
output_softmax = [
nd.SoftmaxActivation(out.astype("float32", copy=False))
for out in outputs
]
train_metric.update(label, output_softmax)
else:
if opt.label_smoothing:
train_metric.update(hard_label, outputs)
else:
train_metric.update(label, outputs)
if opt.log_interval and not (i + 1) % opt.log_interval:
train_metric_name, train_metric_score = train_metric.get()
logger.info(
"Epoch[%d] Batch [%d]\tSpeed: %f samples/sec\t%s=%f\tlr=%f\ttime=%f"
% (
epoch,
i,
batch_size * nworker * opt.log_interval / (time.time() - btic),
train_metric_name,
train_metric_score,
trainer.learning_rate,
time.time() - btic,
)
)
btic = time.time()
train_metric_name, train_metric_score = train_metric.get()
throughput = int(batch_size * nworker * i / (time.time() - tic))
logger.info(
"[Epoch %d] training: %s=%f"
% (epoch, train_metric_name, train_metric_score)
)
logger.info(
"[Epoch %d] speed: %d samples/sec\ttime cost: %f"
% (epoch, throughput, time.time() - tic)
)
err_top1_val, err_top5_val = test(ctx, val_data)
logger.info(
"[Epoch %d] validation: err-top1=%f err-top5=%f"
% (epoch, err_top1_val, err_top5_val)
)
if err_top1_val < best_val_score:
best_val_score = err_top1_val
net.save_parameters(
"%s/%.4f-imagenet-%s-%d-best.params"
% (save_dir, best_val_score, model_name, epoch)
)
trainer.save_states(
"%s/%.4f-imagenet-%s-%d-best.states"
% (save_dir, best_val_score, model_name, epoch)
)
if save_frequency and save_dir and (epoch + 1) % save_frequency == 0:
net.save_parameters(
"%s/imagenet-%s-%d.params" % (save_dir, model_name, epoch)
)
trainer.save_states(
"%s/imagenet-%s-%d.states" % (save_dir, model_name, epoch)
)
if save_frequency and save_dir:
net.save_parameters(
"%s/imagenet-%s-%d.params" % (save_dir, model_name, opt.num_epochs - 1)
)
trainer.save_states(
"%s/imagenet-%s-%d.states" % (save_dir, model_name, opt.num_epochs - 1)
)
|
https://github.com/bytedance/byteps/issues/10
|
In [1]: import tensorflow as tf
In [2]: import byteps.tensorflow as bps
WARNING: Logging before flag parsing goes to stderr.
W0627 11:36:47.010180 139917697820480 deprecation_wrapper.py:119] From /private/home/yuxinwu/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py:79: The name tf.train.SessionRunHook is deprecated. Please use tf.estimator.SessionRunHook instead.
W0627 11:36:47.010504 139917697820480 deprecation_wrapper.py:119] From /private/home/yuxinwu/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py:111: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
In [3]: bps.push_pull(tf.constant([0.0]))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-fc0e9eb4f2bb> in <module>()
----> 1 bps.push_pull(tf.constant([0.0]))
~/.local/lib/python3.6/site-packages/byteps/tensorflow/__init__.py in push_pull(tensor, scope, average, device_dense, device_sparse, compression)
50 byteps_size = tf.cast(size(), dtype=tensor.dtype)
51 tensor_compressed, ctx = compression.compress(tensor)
---> 52 summed_tensor_compressed = _push_pull(tensor_compressed, scope)
53 summed_tensor = compression.decompress(summed_tensor_compressed, ctx)
54 new_tensor = (tf.div(summed_tensor, byteps_size)
~/.local/lib/python3.6/site-packages/byteps/tensorflow/ops.py in _push_pull(tensor, scope, name)
80 if name is None and not _executing_eagerly():
81 name = 'BytePSPushPull_%s' % _normalize_name(tensor.name)
---> 82 TF_LIB_CTYPES.byteps_tensorflow_declare_tensor(ctypes.c_char_p(scope+name))
83 return C_LIB.byteps_push_pull(tensor, name=name)
84
TypeError: bytes or integer address expected instead of str instance
|
TypeError
|
def read_journal(context, journal_name="default"):
configuration = load_config(context.config_path)
with open(configuration["journals"][journal_name]) as journal_file:
journal = journal_file.read()
return journal
|
def read_journal(journal_name="default"):
config = load_config(install.CONFIG_FILE_PATH)
with open(config["journals"][journal_name]) as journal_file:
journal = journal_file.read()
return journal
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def open_journal(context, journal_name="default"):
configuration = load_config(context.config_path)
journal_conf = configuration["journals"][journal_name]
# We can override the default config on a by-journal basis
if type(journal_conf) is dict:
configuration.update(journal_conf)
# But also just give them a string to point to the journal file
else:
configuration["journal"] = journal_conf
return Journal.open_journal(journal_name, configuration)
|
def open_journal(journal_name="default"):
config = load_config(install.CONFIG_FILE_PATH)
journal_conf = config["journals"][journal_name]
# We can override the default config on a by-journal basis
if type(journal_conf) is dict:
config.update(journal_conf)
# But also just give them a string to point to the journal file
else:
config["journal"] = journal_conf
return Journal.open_journal(journal_name, config)
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def set_config(context, config_file):
full_path = os.path.join("features/configs", config_file)
context.config_path = os.path.abspath(full_path)
if config_file.endswith("yaml") and os.path.exists(full_path):
# Add jrnl version to file for 2.x journals
with open(context.config_path, "a") as cf:
cf.write("version: {}".format(__version__))
|
def set_config(context, config_file):
full_path = os.path.join("features/configs", config_file)
install.CONFIG_FILE_PATH = os.path.abspath(full_path)
if config_file.endswith("yaml") and os.path.exists(full_path):
# Add jrnl version to file for 2.x journals
with open(install.CONFIG_FILE_PATH, "a") as cf:
cf.write("version: {}".format(__version__))
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def open_editor_and_enter(context, method, text=""):
text = text or context.text or ""
if method == "enter":
file_method = "w+"
elif method == "append":
file_method = "a"
else:
file_method = "r+"
def _mock_editor(command):
context.editor_command = command
tmpfile = command[-1]
with open(tmpfile, file_method) as f:
f.write(text)
return tmpfile
if "password" in context:
password = context.password
else:
password = ""
# fmt: off
# see: https://github.com/psf/black/issues/664
with \
patch("subprocess.call", side_effect=_mock_editor) as mock_editor, \
patch("getpass.getpass", side_effect=_mock_getpass(password)) as mock_getpass, \
patch("sys.stdin.isatty", return_value=True), \
patch("jrnl.config.get_config_path", side_effect=lambda: context.config_path), \
patch("jrnl.install.get_config_path", side_effect=lambda: context.config_path) \
:
context.editor = mock_editor
context.getpass = mock_getpass
try:
cli(["--edit"])
context.exit_status = 0
except SystemExit as e:
context.exit_status = e.code
|
def open_editor_and_enter(context, method, text=""):
text = text or context.text or ""
if method == "enter":
file_method = "w+"
elif method == "append":
file_method = "a"
else:
file_method = "r+"
def _mock_editor(command):
context.editor_command = command
tmpfile = command[-1]
with open(tmpfile, file_method) as f:
f.write(text)
return tmpfile
if "password" in context:
password = context.password
else:
password = ""
# fmt: off
# see: https://github.com/psf/black/issues/664
with \
patch("subprocess.call", side_effect=_mock_editor) as mock_editor, \
patch("getpass.getpass", side_effect=_mock_getpass(password)) as mock_getpass, \
patch("sys.stdin.isatty", return_value=True) \
:
context.editor = mock_editor
context.getpass = mock_getpass
cli(["--edit"])
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def run_with_input(context, command, inputs=""):
# create an iterator through all inputs. These inputs will be fed one by one
# to the mocked calls for 'input()', 'util.getpass()' and 'sys.stdin.read()'
if context.text:
text = iter(context.text.split("\n"))
else:
text = iter([inputs])
args = ushlex(command)[1:]
def _mock_editor(command):
context.editor_command = command
tmpfile = command[-1]
with open(tmpfile, "r") as editor_file:
file_content = editor_file.read()
context.editor_file = {"name": tmpfile, "content": file_content}
Path(tmpfile).touch()
if "password" in context:
password = context.password
else:
password = text
# fmt: off
# see: https://github.com/psf/black/issues/664
with \
patch("builtins.input", side_effect=_mock_input(text)) as mock_input, \
patch("getpass.getpass", side_effect=_mock_getpass(password)) as mock_getpass, \
patch("sys.stdin.read", side_effect=text) as mock_read, \
patch("subprocess.call", side_effect=_mock_editor) as mock_editor, \
patch("jrnl.config.get_config_path", side_effect=lambda: context.config_path), \
patch("jrnl.install.get_config_path", side_effect=lambda: context.config_path) \
:
try:
cli(args or [])
context.exit_status = 0
except SystemExit as e:
context.exit_status = e.code
# put mocks into context so they can be checked later in "then" statements
context.editor = mock_editor
context.input = mock_input
context.getpass = mock_getpass
context.read = mock_read
context.iter_text = text
context.execute_steps('''
Then all input was used
And at least one input method was called
''')
|
def run_with_input(context, command, inputs=""):
# create an iterator through all inputs. These inputs will be fed one by one
# to the mocked calls for 'input()', 'util.getpass()' and 'sys.stdin.read()'
if context.text:
text = iter(context.text.split("\n"))
else:
text = iter([inputs])
args = ushlex(command)[1:]
def _mock_editor(command):
context.editor_command = command
tmpfile = command[-1]
with open(tmpfile, "r") as editor_file:
file_content = editor_file.read()
context.editor_file = {"name": tmpfile, "content": file_content}
Path(tmpfile).touch()
if "password" in context:
password = context.password
else:
password = text
# fmt: off
# see: https://github.com/psf/black/issues/664
with \
patch("builtins.input", side_effect=_mock_input(text)) as mock_input, \
patch("getpass.getpass", side_effect=_mock_getpass(password)) as mock_getpass, \
patch("sys.stdin.read", side_effect=text) as mock_read, \
patch("subprocess.call", side_effect=_mock_editor) as mock_editor \
:
try:
cli(args or [])
context.exit_status = 0
except SystemExit as e:
context.exit_status = e.code
# put mocks into context so they can be checked later in "then" statements
context.editor = mock_editor
context.input = mock_input
context.getpass = mock_getpass
context.read = mock_read
context.iter_text = text
context.execute_steps('''
Then all input was used
And at least one input method was called
''')
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def run(context, command, text=""):
text = text or context.text or ""
if "cache_dir" in context and context.cache_dir is not None:
cache_dir = os.path.join("features", "cache", context.cache_dir)
command = command.format(cache_dir=cache_dir)
args = ushlex(command)
def _mock_editor(command):
context.editor_command = command
tmpfile = command[-1]
with open(tmpfile, "r") as editor_file:
file_content = editor_file.read()
context.editor_file = {"name": tmpfile, "content": file_content}
Path(tmpfile).touch()
if "password" in context:
password = context.password
else:
password = iter(text)
try:
# fmt: off
# see: https://github.com/psf/black/issues/664
with \
patch("sys.argv", args), \
patch("getpass.getpass", side_effect=_mock_getpass(password)) as mock_getpass, \
patch("subprocess.call", side_effect=_mock_editor) as mock_editor, \
patch("sys.stdin.read", side_effect=lambda: text), \
patch("jrnl.config.get_config_path", side_effect=lambda: context.config_path), \
patch("jrnl.install.get_config_path", side_effect=lambda: context.config_path) \
:
context.editor = mock_editor
context.getpass = mock_getpass
cli(args[1:])
context.exit_status = 0
# fmt: on
except SystemExit as e:
context.exit_status = e.code
|
def run(context, command, text=""):
text = text or context.text or ""
if "cache_dir" in context and context.cache_dir is not None:
cache_dir = os.path.join("features", "cache", context.cache_dir)
command = command.format(cache_dir=cache_dir)
args = ushlex(command)
def _mock_editor(command):
context.editor_command = command
tmpfile = command[-1]
with open(tmpfile, "r") as editor_file:
file_content = editor_file.read()
context.editor_file = {"name": tmpfile, "content": file_content}
Path(tmpfile).touch()
if "password" in context:
password = context.password
else:
password = iter(text)
try:
# fmt: off
# see: https://github.com/psf/black/issues/664
with \
patch("sys.argv", args), \
patch("getpass.getpass", side_effect=_mock_getpass(password)) as mock_getpass, \
patch("subprocess.call", side_effect=_mock_editor) as mock_editor, \
patch("sys.stdin.read", side_effect=lambda: text) \
:
context.editor = mock_editor
context.getpass = mock_getpass
cli(args[1:])
context.exit_status = 0
# fmt: on
except SystemExit as e:
context.exit_status = e.code
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def check_journal_content(context, text, journal_name="default"):
journal = read_journal(context, journal_name)
assert text in journal, journal
|
def check_journal_content(context, text, journal_name="default"):
journal = read_journal(journal_name)
assert text in journal, journal
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def check_not_journal_content(context, text, journal_name="default"):
journal = read_journal(context, journal_name)
assert text not in journal, journal
|
def check_not_journal_content(context, text, journal_name="default"):
journal = read_journal(journal_name)
assert text not in journal, journal
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def journal_doesnt_exist(context, journal_name="default"):
configuration = load_config(context.config_path)
journal_path = configuration["journals"][journal_name]
assert not os.path.exists(journal_path)
|
def journal_doesnt_exist(context, journal_name="default"):
config = load_config(install.CONFIG_FILE_PATH)
journal_path = config["journals"][journal_name]
assert not os.path.exists(journal_path)
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def journal_exists(context, journal_name="default"):
configuration = load_config(context.config_path)
journal_path = configuration["journals"][journal_name]
assert os.path.exists(journal_path)
|
def journal_exists(context, journal_name="default"):
config = load_config(install.CONFIG_FILE_PATH)
journal_path = config["journals"][journal_name]
assert os.path.exists(journal_path)
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def config_var(context, key, value="", journal=None):
value = read_value_from_string(value or context.text or "")
configuration = load_config(context.config_path)
if journal:
configuration = configuration["journals"][journal]
assert key in configuration
assert configuration[key] == value
|
def config_var(context, key, value="", journal=None):
value = read_value_from_string(value or context.text or "")
config = load_config(install.CONFIG_FILE_PATH)
if journal:
config = config["journals"][journal]
assert key in config
assert config[key] == value
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def config_no_var(context, key, value="", journal=None):
configuration = load_config(context.config_path)
if journal:
configuration = configuration["journals"][journal]
assert key not in configuration
|
def config_no_var(context, key, value="", journal=None):
config = load_config(install.CONFIG_FILE_PATH)
if journal:
config = config["journals"][journal]
assert key not in config
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def check_journal_entries(context, number, journal_name="default"):
journal = open_journal(context, journal_name)
assert len(journal.entries) == number
|
def check_journal_entries(context, number, journal_name="default"):
journal = open_journal(journal_name)
assert len(journal.entries) == number
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def list_journal_directory(context, journal="default"):
with open(context.config_path) as config_file:
configuration = yaml.load(config_file, Loader=yaml.FullLoader)
journal_path = configuration["journals"][journal]
for root, dirnames, f in os.walk(journal_path):
for file in f:
print(os.path.join(root, file))
|
def list_journal_directory(context, journal="default"):
with open(install.CONFIG_FILE_PATH) as config_file:
config = yaml.load(config_file, Loader=yaml.FullLoader)
journal_path = config["journals"][journal]
for root, dirnames, f in os.walk(journal_path):
for file in f:
print(os.path.join(root, file))
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def cli(manual_args=None):
try:
if manual_args is None:
manual_args = sys.argv[1:]
args = parse_args(manual_args)
configure_logger(args.debug)
logging.debug("Parsed args: %s", args)
return run(args)
except JrnlError as e:
print(e.message, file=sys.stderr)
return 1
except KeyboardInterrupt:
return 1
|
def cli(manual_args=None):
try:
if manual_args is None:
manual_args = sys.argv[1:]
args = parse_args(manual_args)
configure_logger(args.debug)
logging.debug("Parsed args: %s", args)
return run(args)
except KeyboardInterrupt:
return 1
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def get_journal_name(args, config):
args.journal_name = DEFAULT_JOURNAL_KEY
if args.text and args.text[0] in config["journals"]:
args.journal_name = args.text[0]
args.text = args.text[1:]
elif DEFAULT_JOURNAL_KEY not in config["journals"]:
print("No default journal configured.", file=sys.stderr)
print(list_journals(config), file=sys.stderr)
sys.exit(1)
logging.debug("Using journal name: %s", args.journal_name)
return args
|
def get_journal_name(args, config):
from . import install
args.journal_name = install.DEFAULT_JOURNAL_KEY
if args.text and args.text[0] in config["journals"]:
args.journal_name = args.text[0]
args.text = args.text[1:]
elif install.DEFAULT_JOURNAL_KEY not in config["journals"]:
print("No default journal configured.", file=sys.stderr)
print(list_journals(config), file=sys.stderr)
sys.exit(1)
logging.debug("Using journal name: %s", args.journal_name)
return args
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def upgrade_config(config):
"""Checks if there are keys missing in a given config dict, and if so, updates the config file accordingly.
This essentially automatically ports jrnl installations if new config parameters are introduced in later
versions."""
default_config = get_default_config()
missing_keys = set(default_config).difference(config)
if missing_keys:
for key in missing_keys:
config[key] = default_config[key]
save_config(config)
print(
f"[Configuration updated to newest version at {get_config_path()}]",
file=sys.stderr,
)
|
def upgrade_config(config):
"""Checks if there are keys missing in a given config dict, and if so, updates the config file accordingly.
This essentially automatically ports jrnl installations if new config parameters are introduced in later
versions."""
missing_keys = set(default_config).difference(config)
if missing_keys:
for key in missing_keys:
config[key] = default_config[key]
save_config(config)
print(
f"[Configuration updated to newest version at {CONFIG_FILE_PATH}]",
file=sys.stderr,
)
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def load_or_install_jrnl():
"""
If jrnl is already installed, loads and returns a config object.
Else, perform various prompts to install jrnl.
"""
config_path = (
get_config_path()
if os.path.exists(get_config_path())
else os.path.join(os.path.expanduser("~"), ".jrnl_config")
)
if os.path.exists(config_path):
logging.debug("Reading configuration from file %s", config_path)
config = load_config(config_path)
if is_old_version(config_path):
from . import upgrade
try:
upgrade.upgrade_jrnl(config_path)
except upgrade.UpgradeValidationException:
print("Aborting upgrade.", file=sys.stderr)
print(
"Please tell us about this problem at the following URL:",
file=sys.stderr,
)
print(
"https://github.com/jrnl-org/jrnl/issues/new?title=UpgradeValidationException",
file=sys.stderr,
)
print("Exiting.", file=sys.stderr)
sys.exit(1)
upgrade_config(config)
verify_config_colors(config)
else:
logging.debug("Configuration file not found, installing jrnl...")
try:
config = install()
except KeyboardInterrupt:
raise UserAbort("Installation aborted")
logging.debug('Using configuration "%s"', config)
return config
|
def load_or_install_jrnl():
"""
If jrnl is already installed, loads and returns a config object.
Else, perform various prompts to install jrnl.
"""
config_path = (
CONFIG_FILE_PATH
if os.path.exists(CONFIG_FILE_PATH)
else CONFIG_FILE_PATH_FALLBACK
)
if os.path.exists(config_path):
logging.debug("Reading configuration from file %s", config_path)
config = load_config(config_path)
if is_old_version(config_path):
from . import upgrade
try:
upgrade.upgrade_jrnl(config_path)
except upgrade.UpgradeValidationException:
print("Aborting upgrade.", file=sys.stderr)
print(
"Please tell us about this problem at the following URL:",
file=sys.stderr,
)
print(
"https://github.com/jrnl-org/jrnl/issues/new?title=UpgradeValidationException",
file=sys.stderr,
)
print("Exiting.", file=sys.stderr)
sys.exit(1)
upgrade_config(config)
verify_config_colors(config)
else:
logging.debug("Configuration file not found, installing jrnl...")
try:
config = install()
except KeyboardInterrupt:
raise UserAbort("Installation aborted")
logging.debug('Using configuration "%s"', config)
return config
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def install():
_initialize_autocomplete()
# Where to create the journal?
default_journal_path = get_default_journal_path()
path_query = f"Path to your journal file (leave blank for {default_journal_path}): "
journal_path = os.path.abspath(input(path_query).strip() or default_journal_path)
default_config = get_default_config()
default_config["journals"][DEFAULT_JOURNAL_KEY] = os.path.expanduser(
os.path.expandvars(journal_path)
)
# If the folder doesn't exist, create it
path = os.path.split(default_config["journals"][DEFAULT_JOURNAL_KEY])[0]
try:
os.makedirs(path)
except OSError:
pass
# Encrypt it?
encrypt = yesno(
"Do you want to encrypt your journal? You can always change this later",
default=False,
)
if encrypt:
default_config["encrypt"] = True
print("Journal will be encrypted.", file=sys.stderr)
save_config(default_config)
return default_config
|
def install():
_initialize_autocomplete()
# Where to create the journal?
path_query = f"Path to your journal file (leave blank for {JOURNAL_FILE_PATH}): "
journal_path = os.path.abspath(input(path_query).strip() or JOURNAL_FILE_PATH)
default_config["journals"][DEFAULT_JOURNAL_KEY] = os.path.expanduser(
os.path.expandvars(journal_path)
)
# If the folder doesn't exist, create it
path = os.path.split(default_config["journals"][DEFAULT_JOURNAL_KEY])[0]
try:
os.makedirs(path)
except OSError:
pass
# Encrypt it?
encrypt = yesno(
"Do you want to encrypt your journal? You can always change this later",
default=False,
)
if encrypt:
default_config["encrypt"] = True
print("Journal will be encrypted.", file=sys.stderr)
save_config(default_config)
return default_config
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def _edit_search_results(config, journal, old_entries, **kwargs):
"""
1. Send the given journal entries to the user-configured editor
2. Print out stats on any modifications to journal
3. Write modifications to journal
"""
if not config["editor"]:
print(
f"""
[{ERROR_COLOR}ERROR{RESET_COLOR}: There is no editor configured.]
Please specify an editor in config file ({get_config_path()})
to use the --edit option.
""",
file=sys.stderr,
)
sys.exit(1)
# separate entries we are not editing
other_entries = [e for e in old_entries if e not in journal.entries]
# Get stats now for summary later
old_stats = _get_predit_stats(journal)
# Send user to the editor
edited = get_text_from_editor(config, journal.editable_str())
journal.parse_editable_str(edited)
# Print summary if available
_print_edited_summary(journal, old_stats)
# Put back entries we separated earlier, sort, and write the journal
journal.entries += other_entries
journal.sort()
journal.write()
|
def _edit_search_results(config, journal, old_entries, **kwargs):
"""
1. Send the given journal entries to the user-configured editor
2. Print out stats on any modifications to journal
3. Write modifications to journal
"""
if not config["editor"]:
print(
f"""
[{ERROR_COLOR}ERROR{RESET_COLOR}: There is no editor configured.]
Please specify an editor in config file ({install.CONFIG_FILE_PATH})
to use the --edit option.
""",
file=sys.stderr,
)
sys.exit(1)
# separate entries we are not editing
other_entries = [e for e in old_entries if e not in journal.entries]
# Get stats now for summary later
old_stats = _get_predit_stats(journal)
# Send user to the editor
edited = get_text_from_editor(config, journal.editable_str())
journal.parse_editable_str(edited)
# Print summary if available
_print_edited_summary(journal, old_stats)
# Put back entries we separated earlier, sort, and write the journal
journal.entries += other_entries
journal.sort()
journal.write()
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def list_journals(configuration):
from . import config
"""List the journals specified in the configuration file"""
result = f"Journals defined in {config.get_config_path()}\n"
ml = min(max(len(k) for k in configuration["journals"]), 20)
for journal, cfg in configuration["journals"].items():
result += " * {:{}} -> {}\n".format(
journal, ml, cfg["journal"] if isinstance(cfg, dict) else cfg
)
return result
|
def list_journals(config):
from . import install
"""List the journals specified in the configuration file"""
result = f"Journals defined in {install.CONFIG_FILE_PATH}\n"
ml = min(max(len(k) for k in config["journals"]), 20)
for journal, cfg in config["journals"].items():
result += " * {:{}} -> {}\n".format(
journal, ml, cfg["journal"] if isinstance(cfg, dict) else cfg
)
return result
|
https://github.com/jrnl-org/jrnl/issues/659
|
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 6, in <module>
from jrnl.cli import run
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 14, in <module>
from . import install
File "/usr/local/lib/python3.7/site-packages/jrnl/install.py", line 25, in <module>
CONFIG_PATH = xdg.BaseDirectory.save_config_path(XDG_RESOURCE) or USER_HOME
File "/usr/local/lib/python3.7/site-packages/xdg/BaseDirectory.py", line 58, in save_config_path
os.makedirs(path, 0o700)
File "/usr/local/Cellar/python/3.7.4_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
FileExistsError: [Errno 17] File exists: '/Users/jonathanwren/.config/jrnl'
|
FileExistsError
|
def set_password(self, servicename, username, password):
raise keyring.errors.KeyringError
|
def set_password(self, servicename, username, password):
self.keys[servicename][username] = password
|
https://github.com/jrnl-org/jrnl/issues/1020
|
$ jrnl -from yesterday
WARNING: Python versions below 3.7 will no longer be supported as of jrnl v2.5
(the next release). You are currently on Python 3.6.9. Please update to
Python 3.7 (or higher) soon.
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 11, in <module>
sys.exit(run())
File "/usr/local/lib/python3.6/dist-packages/jrnl/cli.py", line 387, in run
journal = open_journal(journal_name, config)
File "/usr/local/lib/python3.6/dist-packages/jrnl/Journal.py", line 405, in open_journal
return EncryptedJournal.EncryptedJournal(name, **config).open()
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 55, in open
text = self._load(filename)
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 82, in _load
return util.decrypt_content(keychain=self.name, decrypt_func=decrypt_journal)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 70, in decrypt_content
pwd_from_keychain = keychain and get_keychain(keychain)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 93, in get_keychain
return keyring.get_password("jrnl", journal_name)
File "/usr/local/lib/python3.6/dist-packages/keyring/core.py", line 56, in get_password
return _keyring_backend.get_password(service_name, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/chainer.py", line 51, in get_password
password = keyring.get_password(service, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 75, in get_password
collection = self.get_preferred_collection()
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 63, in get_preferred_collection
raise KeyringLocked("Failed to unlock the collection!")
keyring.errors.KeyringLocked: Failed to unlock the collection!
|
keyring.errors.KeyringLocked
|
def get_password(self, servicename, username):
raise keyring.errors.KeyringError
|
def get_password(self, servicename, username):
raise keyring.errors.NoKeyringError
|
https://github.com/jrnl-org/jrnl/issues/1020
|
$ jrnl -from yesterday
WARNING: Python versions below 3.7 will no longer be supported as of jrnl v2.5
(the next release). You are currently on Python 3.6.9. Please update to
Python 3.7 (or higher) soon.
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 11, in <module>
sys.exit(run())
File "/usr/local/lib/python3.6/dist-packages/jrnl/cli.py", line 387, in run
journal = open_journal(journal_name, config)
File "/usr/local/lib/python3.6/dist-packages/jrnl/Journal.py", line 405, in open_journal
return EncryptedJournal.EncryptedJournal(name, **config).open()
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 55, in open
text = self._load(filename)
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 82, in _load
return util.decrypt_content(keychain=self.name, decrypt_func=decrypt_journal)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 70, in decrypt_content
pwd_from_keychain = keychain and get_keychain(keychain)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 93, in get_keychain
return keyring.get_password("jrnl", journal_name)
File "/usr/local/lib/python3.6/dist-packages/keyring/core.py", line 56, in get_password
return _keyring_backend.get_password(service_name, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/chainer.py", line 51, in get_password
password = keyring.get_password(service, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 75, in get_password
collection = self.get_preferred_collection()
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 63, in get_preferred_collection
raise KeyringLocked("Failed to unlock the collection!")
keyring.errors.KeyringLocked: Failed to unlock the collection!
|
keyring.errors.KeyringLocked
|
def delete_password(self, servicename, username):
raise keyring.errors.KeyringError
|
def delete_password(self, servicename, username):
self.keys[servicename][username] = None
|
https://github.com/jrnl-org/jrnl/issues/1020
|
$ jrnl -from yesterday
WARNING: Python versions below 3.7 will no longer be supported as of jrnl v2.5
(the next release). You are currently on Python 3.6.9. Please update to
Python 3.7 (or higher) soon.
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 11, in <module>
sys.exit(run())
File "/usr/local/lib/python3.6/dist-packages/jrnl/cli.py", line 387, in run
journal = open_journal(journal_name, config)
File "/usr/local/lib/python3.6/dist-packages/jrnl/Journal.py", line 405, in open_journal
return EncryptedJournal.EncryptedJournal(name, **config).open()
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 55, in open
text = self._load(filename)
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 82, in _load
return util.decrypt_content(keychain=self.name, decrypt_func=decrypt_journal)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 70, in decrypt_content
pwd_from_keychain = keychain and get_keychain(keychain)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 93, in get_keychain
return keyring.get_password("jrnl", journal_name)
File "/usr/local/lib/python3.6/dist-packages/keyring/core.py", line 56, in get_password
return _keyring_backend.get_password(service_name, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/chainer.py", line 51, in get_password
password = keyring.get_password(service, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 75, in get_password
collection = self.get_preferred_collection()
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 63, in get_preferred_collection
raise KeyringLocked("Failed to unlock the collection!")
keyring.errors.KeyringLocked: Failed to unlock the collection!
|
keyring.errors.KeyringLocked
|
def set_keyring(context, type=""):
if type == "failed":
keyring.set_keyring(FailedKeyring())
else:
keyring.set_keyring(TestKeyring())
|
def set_keyring(context):
keyring.set_keyring(TestKeyring())
|
https://github.com/jrnl-org/jrnl/issues/1020
|
$ jrnl -from yesterday
WARNING: Python versions below 3.7 will no longer be supported as of jrnl v2.5
(the next release). You are currently on Python 3.6.9. Please update to
Python 3.7 (or higher) soon.
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 11, in <module>
sys.exit(run())
File "/usr/local/lib/python3.6/dist-packages/jrnl/cli.py", line 387, in run
journal = open_journal(journal_name, config)
File "/usr/local/lib/python3.6/dist-packages/jrnl/Journal.py", line 405, in open_journal
return EncryptedJournal.EncryptedJournal(name, **config).open()
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 55, in open
text = self._load(filename)
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 82, in _load
return util.decrypt_content(keychain=self.name, decrypt_func=decrypt_journal)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 70, in decrypt_content
pwd_from_keychain = keychain and get_keychain(keychain)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 93, in get_keychain
return keyring.get_password("jrnl", journal_name)
File "/usr/local/lib/python3.6/dist-packages/keyring/core.py", line 56, in get_password
return _keyring_backend.get_password(service_name, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/chainer.py", line 51, in get_password
password = keyring.get_password(service, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 75, in get_password
collection = self.get_preferred_collection()
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 63, in get_preferred_collection
raise KeyringLocked("Failed to unlock the collection!")
keyring.errors.KeyringLocked: Failed to unlock the collection!
|
keyring.errors.KeyringLocked
|
def get_keychain(journal_name):
import keyring
try:
return keyring.get_password("jrnl", journal_name)
except keyring.errors.KeyringError as e:
if not isinstance(e, keyring.errors.NoKeyringError):
print("Failed to retrieve keyring", file=sys.stderr)
return ""
|
def get_keychain(journal_name):
import keyring
try:
return keyring.get_password("jrnl", journal_name)
except RuntimeError:
return ""
|
https://github.com/jrnl-org/jrnl/issues/1020
|
$ jrnl -from yesterday
WARNING: Python versions below 3.7 will no longer be supported as of jrnl v2.5
(the next release). You are currently on Python 3.6.9. Please update to
Python 3.7 (or higher) soon.
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 11, in <module>
sys.exit(run())
File "/usr/local/lib/python3.6/dist-packages/jrnl/cli.py", line 387, in run
journal = open_journal(journal_name, config)
File "/usr/local/lib/python3.6/dist-packages/jrnl/Journal.py", line 405, in open_journal
return EncryptedJournal.EncryptedJournal(name, **config).open()
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 55, in open
text = self._load(filename)
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 82, in _load
return util.decrypt_content(keychain=self.name, decrypt_func=decrypt_journal)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 70, in decrypt_content
pwd_from_keychain = keychain and get_keychain(keychain)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 93, in get_keychain
return keyring.get_password("jrnl", journal_name)
File "/usr/local/lib/python3.6/dist-packages/keyring/core.py", line 56, in get_password
return _keyring_backend.get_password(service_name, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/chainer.py", line 51, in get_password
password = keyring.get_password(service, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 75, in get_password
collection = self.get_preferred_collection()
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 63, in get_preferred_collection
raise KeyringLocked("Failed to unlock the collection!")
keyring.errors.KeyringLocked: Failed to unlock the collection!
|
keyring.errors.KeyringLocked
|
def set_keychain(journal_name, password):
import keyring
if password is None:
try:
keyring.delete_password("jrnl", journal_name)
except keyring.errors.KeyringError:
pass
else:
try:
keyring.set_password("jrnl", journal_name, password)
except keyring.errors.KeyringError as e:
if isinstance(e, keyring.errors.NoKeyringError):
print(
"Keyring backend not found. Please install one of the supported backends by visiting: https://pypi.org/project/keyring/",
file=sys.stderr,
)
else:
print("Failed to retrieve keyring", file=sys.stderr)
|
def set_keychain(journal_name, password):
import keyring
if password is None:
try:
keyring.delete_password("jrnl", journal_name)
except keyring.errors.PasswordDeleteError:
pass
else:
try:
keyring.set_password("jrnl", journal_name, password)
except keyring.errors.NoKeyringError:
print(
"Keyring backend not found. Please install one of the supported backends by visiting: https://pypi.org/project/keyring/",
file=sys.stderr,
)
|
https://github.com/jrnl-org/jrnl/issues/1020
|
$ jrnl -from yesterday
WARNING: Python versions below 3.7 will no longer be supported as of jrnl v2.5
(the next release). You are currently on Python 3.6.9. Please update to
Python 3.7 (or higher) soon.
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 11, in <module>
sys.exit(run())
File "/usr/local/lib/python3.6/dist-packages/jrnl/cli.py", line 387, in run
journal = open_journal(journal_name, config)
File "/usr/local/lib/python3.6/dist-packages/jrnl/Journal.py", line 405, in open_journal
return EncryptedJournal.EncryptedJournal(name, **config).open()
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 55, in open
text = self._load(filename)
File "/usr/local/lib/python3.6/dist-packages/jrnl/EncryptedJournal.py", line 82, in _load
return util.decrypt_content(keychain=self.name, decrypt_func=decrypt_journal)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 70, in decrypt_content
pwd_from_keychain = keychain and get_keychain(keychain)
File "/usr/local/lib/python3.6/dist-packages/jrnl/util.py", line 93, in get_keychain
return keyring.get_password("jrnl", journal_name)
File "/usr/local/lib/python3.6/dist-packages/keyring/core.py", line 56, in get_password
return _keyring_backend.get_password(service_name, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/chainer.py", line 51, in get_password
password = keyring.get_password(service, username)
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 75, in get_password
collection = self.get_preferred_collection()
File "/usr/local/lib/python3.6/dist-packages/keyring/backends/SecretService.py", line 63, in get_preferred_collection
raise KeyringLocked("Failed to unlock the collection!")
keyring.errors.KeyringLocked: Failed to unlock the collection!
|
keyring.errors.KeyringLocked
|
def make_filename(cls, entry):
return entry.date.strftime("%Y-%m-%d") + "_{}.{}".format(
cls._slugify(str(entry.title)), cls.extension
)
|
def make_filename(cls, entry):
return entry.date.strftime(
"%Y-%m-%d_{}.{}".format(cls._slugify(str(entry.title)), cls.extension)
)
|
https://github.com/jrnl-org/jrnl/issues/1089
|
Traceback (most recent call last):
File "c:\python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\micah\.local\bin\jrnl.exe\__main__.py", line 7, in <module>
File "c:\users\micah\.local\pipx\venvs\jrnl\lib\site-packages\jrnl\cli.py", line 47, in cli
return run(args)
File "c:\users\micah\.local\pipx\venvs\jrnl\lib\site-packages\jrnl\jrnl.py", line 61, in run
search_mode(**kwargs)
File "c:\users\micah\.local\pipx\venvs\jrnl\lib\site-packages\jrnl\jrnl.py", line 157, in search_mode
_display_search_results(**kwargs)
File "c:\users\micah\.local\pipx\venvs\jrnl\lib\site-packages\jrnl\jrnl.py", line 302, in _display_search_results
print(exporter.export(journal, args.filename))
File "c:\users\micah\.local\pipx\venvs\jrnl\lib\site-packages\jrnl\plugins\text_exporter.py", line 73, in export
return cls.write_files(journal, output)
File "c:\users\micah\.local\pipx\venvs\jrnl\lib\site-packages\jrnl\plugins\text_exporter.py", line 49, in write_files
full_path = os.path.join(path, cls.make_filename(entry))
File "c:\users\micah\.local\pipx\venvs\jrnl\lib\site-packages\jrnl\plugins\text_exporter.py", line 41, in make_filename
"%Y-%m-%d_{}.{}".format(cls._slugify(str(entry.title)), cls.extension)
UnicodeEncodeError: 'locale' codec can't encode character '\u043f' in position 9: encoding error
|
UnicodeEncodeError
|
def _parse_text(self):
raw_text = self.text
lines = raw_text.splitlines()
if lines and lines[0].strip().endswith("*"):
self.starred = True
raw_text = lines[0].strip("\n *") + "\n" + "\n".join(lines[1:])
self._title, self._body = split_title(raw_text)
if self._tags is None:
self._tags = list(self._parse_tags())
|
def _parse_text(self):
raw_text = self.text
lines = raw_text.splitlines()
if lines[0].strip().endswith("*"):
self.starred = True
raw_text = lines[0].strip("\n *") + "\n" + "\n".join(lines[1:])
self._title, self._body = split_title(raw_text)
if self._tags is None:
self._tags = list(self._parse_tags())
|
https://github.com/jrnl-org/jrnl/issues/780
|
fbreunig@nighttrain:~ $ jrnl --short
Traceback (most recent call last):
File "/usr/local/bin/jrnl", line 8, in <module>
sys.exit(run())
File "/usr/local/lib/python3.7/site-packages/jrnl/cli.py", line 248, in run
print(journal.pprint(short=True))
File "/usr/local/lib/python3.7/site-packages/jrnl/Journal.py", line 148, in pprint
pp = sep.join([e.pprint(short=short) for e in self.entries])
File "/usr/local/lib/python3.7/site-packages/jrnl/Journal.py", line 148, in <listcomp>
pp = sep.join([e.pprint(short=short) for e in self.entries])
File "/usr/local/lib/python3.7/site-packages/jrnl/Entry.py", line 91, in pprint
title = date_str + " " + self.title.rstrip("\n ")
File "/usr/local/lib/python3.7/site-packages/jrnl/Entry.py", line 35, in title
self._parse_text()
File "/usr/local/lib/python3.7/site-packages/jrnl/Entry.py", line 25, in _parse_text
if lines[0].strip().endswith("*"):
IndexError: list index out of range
|
IndexError
|
def __exporter_from_file(template_file):
"""Create a template class from a file"""
name = os.path.basename(template_file).replace(".template", "")
template = Template.from_file(template_file)
return type(
str("{}Exporter".format(name.title())),
(GenericTemplateExporter,),
{"names": [name], "extension": template.extension, "template": template},
)
|
def __exporter_from_file(template_file):
"""Create a template class from a file"""
name = os.path.basename(template_file).replace(".template", "")
template = Template.from_file(template_file)
return type(
"{}Exporter".format(name.title()),
(GenericTemplateExporter,),
{"names": [name], "extension": template.extension, "template": template},
)
|
https://github.com/jrnl-org/jrnl/issues/456
|
Traceback (most recent call last):
File "C:\Util\Python27\lib\runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "C:\Util\Python27\lib\runpy.py", line 72, in _run_code
exec code in run_globals
File "d:\Projects\jrnl-nb\jrnl\__main__.py", line 4, in <module>
from . import cli
File "jrnl\cli.py", line 15, in <module>
from . import plugins
File "jrnl\plugins\__init__.py", line 13, in <module>
from .template_exporter import __all__ as template_exporters
File "jrnl\plugins\template_exporter.py", line 49, in <module>
__all__.append(__exporter_from_file(template_file))
File "jrnl\plugins\template_exporter.py", line 42, in __exporter_from_file
"template": template
TypeError: type() argument 1 must be string, not unicode
|
TypeError
|
def getpass(prompt="Password: "):
if not TEST:
return gp.getpass(bytes(prompt))
else:
return py23_input(prompt)
|
def getpass(prompt="Password: "):
if not TEST:
return gp.getpass(prompt)
else:
return py23_input(prompt)
|
https://github.com/jrnl-org/jrnl/issues/392
|
Traceback (most recent call last):
File "c:\python27\lib\runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "c:\python27\lib\runpy.py", line 72, in _run_code
exec code in run_globals
File "C:\Python27\Scripts\jrnl.exe\__main__.py", line 9, in <module>
File "c:\python27\lib\site-packages\jrnl\cli.py", line 257, in run
encrypt(journal, filename=args.encrypt)
File "c:\python27\lib\site-packages\jrnl\cli.py", line 76, in encrypt
password = util.getpass("Enter new password: ")
File "c:\python27\lib\site-packages\jrnl\util.py", line 31, in getpass
return gp.getpass(prompt)
File "c:\python27\lib\getpass.py", line 95, in win_getpass
msvcrt.putch(c)
TypeError: must be char, not unicode
|
TypeError
|
def export(journal, format, output=None):
"""Exports the journal to various formats.
format should be one of json, txt, text, md, markdown.
If output is None, returns a unicode representation of the output.
If output is a directory, exports entries into individual files.
Otherwise, exports to the given output file.
"""
maps = {
"json": to_json,
"txt": to_txt,
"text": to_txt,
"md": to_md,
"markdown": to_md,
}
if format not in maps:
return "[ERROR: can't export to {0}. Valid options are 'md', 'txt', and 'json']".format(
format
)
if output and os.path.isdir(output): # multiple files
return write_files(journal, output, format)
else:
content = maps[format](journal)
if output:
try:
with codecs.open(output, "w", "utf-8") as f:
f.write(content)
return "[Journal exported to {0}]".format(output)
except IOError as e:
return "[ERROR: {0} {1}]".format(e.filename, e.strerror)
else:
return content
|
def export(journal, format, output=None):
"""Exports the journal to various formats.
format should be one of json, txt, text, md, markdown.
If output is None, returns a unicode representation of the output.
If output is a directory, exports entries into individual files.
Otherwise, exports to the given output file.
"""
maps = {
"json": to_json,
"txt": to_txt,
"text": to_txt,
"md": to_md,
"markdown": to_md,
}
if output and os.path.isdir(output): # multiple files
return write_files(journal, output, format)
else:
content = maps[format](journal)
if output:
try:
with codecs.open(output, "w", "utf-8") as f:
f.write(content)
return "[Journal exported to {0}]".format(output)
except IOError as e:
return "[ERROR: {0} {1}]".format(e.filename, e.strerror)
else:
return content
|
https://github.com/jrnl-org/jrnl/issues/201
|
$ ls output
ls: cannot access output: No such file or directory
$ jrnl --export text -o output
[Journal exported to output]
$ rm output
$ mkdir output
$ jrnl --export text -o output
Traceback (most recent call last):
File "/home/michael/bin/jrnl", line 9, in <module>
load_entry_point('jrnl==1.8.1', 'console_scripts', 'jrnl')()
File "/home/michael/.local/lib/python2.7/site-packages/jrnl/cli.py", line 221, in run
print(util.py2encode(exporters.export(journal, args.export, args.output)))
File "/home/michael/.local/lib/python2.7/site-packages/jrnl/exporters.py", line 80, in export
return write_files(journal, output, format)
File "/home/michael/.local/lib/python2.7/site-packages/jrnl/exporters.py", line 106, in write_files
f.write(content)
UnboundLocalError: local variable 'content' referenced before assignment
|
UnboundLocalError
|
def write_files(journal, path, format):
"""Turns your journal into separate files for each entry.
Format should be either json, md or txt."""
make_filename = lambda entry: e.date.strftime(
"%C-%m-%d_{0}.{1}".format(slugify(u(e.title)), format)
)
for e in journal.entries:
full_path = os.path.join(path, make_filename(e))
if format == "json":
content = json.dumps(e.to_dict(), indent=2) + "\n"
elif format in ("md", "markdown"):
content = e.to_md()
elif format in ("txt", "text"):
content = u(e)
with codecs.open(full_path, "w", "utf-8") as f:
f.write(content)
return "[Journal exported individual files in {0}]".format(path)
|
def write_files(journal, path, format):
"""Turns your journal into separate files for each entry.
Format should be either json, md or txt."""
make_filename = lambda entry: e.date.strftime(
"%C-%m-%d_{0}.{1}".format(slugify(u(e.title)), format)
)
for e in journal.entries:
full_path = os.path.join(path, make_filename(e))
if format == "json":
content = json.dumps(e.to_dict(), indent=2) + "\n"
elif format == "md":
content = e.to_md()
elif format == "txt":
content = u(e)
with codecs.open(full_path, "w", "utf-8") as f:
f.write(content)
return "[Journal exported individual files in {0}]".format(path)
|
https://github.com/jrnl-org/jrnl/issues/201
|
$ ls output
ls: cannot access output: No such file or directory
$ jrnl --export text -o output
[Journal exported to output]
$ rm output
$ mkdir output
$ jrnl --export text -o output
Traceback (most recent call last):
File "/home/michael/bin/jrnl", line 9, in <module>
load_entry_point('jrnl==1.8.1', 'console_scripts', 'jrnl')()
File "/home/michael/.local/lib/python2.7/site-packages/jrnl/cli.py", line 221, in run
print(util.py2encode(exporters.export(journal, args.export, args.output)))
File "/home/michael/.local/lib/python2.7/site-packages/jrnl/exporters.py", line 80, in export
return write_files(journal, output, format)
File "/home/michael/.local/lib/python2.7/site-packages/jrnl/exporters.py", line 106, in write_files
f.write(content)
UnboundLocalError: local variable 'content' referenced before assignment
|
UnboundLocalError
|
def get_local_timezone():
"""Returns the Olson identifier of the local timezone.
In a happy world, tzlocal.get_localzone would do this, but there's a bug on OS X
that prevents that right now: https://github.com/regebro/tzlocal/issues/6"""
global __cached_tz
if not __cached_tz and "darwin" in sys.platform:
__cached_tz = (
os.popen("systemsetup -gettimezone")
.read()
.replace("Time Zone: ", "")
.strip()
)
if not __cached_tz or __cached_tz not in pytz.all_timezones_set:
link = os.readlink("/etc/localtime")
# This is something like /usr/share/zoneinfo/America/Los_Angeles.
# Find second / from right and take the substring
__cached_tz = link[link.rfind("/", 0, link.rfind("/")) + 1 :]
if not __cached_tz or __cached_tz not in pytz.all_timezones_set:
__cached_tz = str(get_localzone())
if not __cached_tz or __cached_tz not in pytz.all_timezones_set:
__cached_tz = "UTC"
return __cached_tz
|
def get_local_timezone():
"""Returns the Olson identifier of the local timezone.
In a happy world, tzlocal.get_localzone would do this, but there's a bug on OS X
that prevents that right now: https://github.com/regebro/tzlocal/issues/6"""
global __cached_tz
if not __cached_tz and "darwin" in sys.platform:
__cached_tz = (
os.popen("systemsetup -gettimezone")
.read()
.replace("Time Zone: ", "")
.strip()
)
elif not __cached_tz:
__cached_tz = str(get_localzone())
return __cached_tz
|
https://github.com/jrnl-org/jrnl/issues/93
|
dio:~ milo$ jrnl new
Traceback (most recent call last):
File "/usr/local/share/python/jrnl", line 9, in <module>
load_entry_point('jrnl==1.5.5', 'console_scripts', 'jrnl')()
File "/usr/local/lib/python2.7/site-packages/jrnl/jrnl.py", line 156, in cli
journal = Journal.DayOne(**config)
File "/usr/local/lib/python2.7/site-packages/jrnl/Journal.py", line 303, in __init__
super(DayOne, self).__init__(**kwargs)
File "/usr/local/lib/python2.7/site-packages/jrnl/Journal.py", line 60, in __init__
self.entries = self.parse(journal_txt)
File "/usr/local/lib/python2.7/site-packages/jrnl/Journal.py", line 319, in parse
timezone = pytz.timezone(util.get_local_timezone())
File "/usr/local/lib/python2.7/site-packages/pytz/__init__.py", line 185, in timezone
raise UnknownTimeZoneError(zone)
pytz.exceptions.UnknownTimeZoneError: 'You need administrator access to run this tool... exiting!'
dio:~ milo$ id
uid=502(milo) gid=20(staff) groups=20(staff),402(com.apple.sharepoint.group.1),12(everyone),61(localaccounts)
dio:~ milo$ ls -ld "/Users/milo/Library/Mobile Documents/5U8NS4GX82~com~dayoneapp~dayone/Documents/Journal_dayone"
drwxr-xr-x 5 milo staff 170 3 Sep 16:15 /Users/milo/Library/Mobile Documents/5U8NS4GX82~com~dayoneapp~dayone/Documents/Journal_dayone
dio:~ milo$ cat .jrnl_config
{
"default_hour": 9,
"timeformat": "%Y-%m-%d %H:%M",
"linewrap": 80,
"encrypt": false,
"editor": "",
"default_minute": 0,
"highlight": true,
"password": "",
"journals": {
"default": "/Users/milo/Library/Mobile Documents/5U8NS4GX82~com~dayoneapp~dayone/Documents/Journal_dayone"
},
"tagsymbols": "@"
}
|
pytz.exceptions.UnknownTimeZoneError
|
def _decrypt(self, cipher):
"""Decrypts a cipher string using self.key as the key and the first 16 byte of the cipher as the IV"""
if not cipher:
return ""
crypto = AES.new(self.key, AES.MODE_CBC, cipher[:16])
try:
plain = crypto.decrypt(cipher[16:])
except ValueError:
print(
"ERROR: Your journal file seems to be corrupted. You do have a backup, don't you?"
)
sys.exit(-1)
if plain[-1] != " ": # Journals are always padded
return None
else:
return plain
|
def _decrypt(self, cipher):
"""Decrypts a cipher string using self.key as the key and the first 16 byte of the cipher as the IV"""
if not cipher:
return ""
crypto = AES.new(self.key, AES.MODE_CBC, cipher[:16])
plain = crypto.decrypt(cipher[16:])
if plain[-1] != " ": # Journals are always padded
return None
else:
return plain
|
https://github.com/jrnl-org/jrnl/issues/22
|
Traceback (most recent call last):
(...)
File "/Users/maebert/code/jrnl/jrnl.py", line 118, in _decrypt
plain = crypto.decrypt(cipher[16:])
ValueError: Input strings must be a multiple of 16 in length
|
ValueError
|
def alwaysIndentBody(self, event=None):
"""
The always-indent-region command indents each line of the selected body
text. The @tabwidth directive in effect determines amount of
indentation.
"""
c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper
#
# #1801: Don't rely on bindings to ensure that we are editing the body.
event_w = event and event.w
if event_w != w:
c.insertCharFromEvent(event)
return
#
# "Before" snapshot.
bunch = u.beforeChangeBody(p)
#
# Initial data.
sel_1, sel_2 = w.getSelectionRange()
tab_width = c.getTabWidth(p)
head, lines, tail, oldSel, oldYview = self.getBodyLines()
#
# Calculate the result.
changed, result = False, []
for line in lines:
i, width = g.skip_leading_ws_with_indent(line, 0, tab_width)
s = g.computeLeadingWhitespace(width + abs(tab_width), tab_width) + line[i:]
if s != line:
changed = True
result.append(s)
if not changed:
return
#
# Set p.b and w's text first.
middle = "".join(result)
all = head + middle + tail
p.b = all # Sets dirty and changed bits.
w.setAllText(all)
#
# Calculate the proper selection range (i, j, ins).
if sel_1 == sel_2:
line = result[0]
i, width = g.skip_leading_ws_with_indent(line, 0, tab_width)
i = j = len(head) + i
else:
i = len(head)
j = len(head) + len(middle)
if middle.endswith("\n"): # #1742.
j -= 1
#
# Set the selection range and scroll position.
w.setSelectionRange(i, j, insert=j)
w.setYScrollPosition(oldYview)
#
# "after" snapshot.
u.afterChangeBody(p, "Indent Region", bunch)
|
def alwaysIndentBody(self, event=None):
"""
The always-indent-region command indents each line of the selected body
text. The @tabwidth directive in effect determines amount of
indentation.
"""
c, p, u, w = self, self.p, self.undoer, self.frame.body.wrapper
#
# "Before" snapshot.
bunch = u.beforeChangeBody(p)
#
# Initial data.
sel_1, sel_2 = w.getSelectionRange()
tab_width = c.getTabWidth(p)
head, lines, tail, oldSel, oldYview = self.getBodyLines()
#
# Calculate the result.
changed, result = False, []
for line in lines:
i, width = g.skip_leading_ws_with_indent(line, 0, tab_width)
s = g.computeLeadingWhitespace(width + abs(tab_width), tab_width) + line[i:]
if s != line:
changed = True
result.append(s)
if not changed:
return
#
# Set p.b and w's text first.
middle = "".join(result)
all = head + middle + tail
p.b = all # Sets dirty and changed bits.
w.setAllText(all)
#
# Calculate the proper selection range (i, j, ins).
if sel_1 == sel_2:
line = result[0]
i, width = g.skip_leading_ws_with_indent(line, 0, tab_width)
i = j = len(head) + i
else:
i = len(head)
j = len(head) + len(middle)
if middle.endswith("\n"): # #1742.
j -= 1
#
# Set the selection range and scroll position.
w.setSelectionRange(i, j, insert=j)
w.setYScrollPosition(oldYview)
#
# "after" snapshot.
u.afterChangeBody(p, "Indent Region", bunch)
|
https://github.com/leo-editor/leo-editor/issues/1801
|
Traceback (most recent call last):
File "...leo\core\leoCommands.py", line 2252, in doCommand
return_value = command_func(event)
File "...leo\core\leoGlobals.py", line 244, in commander_command_wrapper
method(event=event)
File "...\leo\commands\commanderEditCommands.py", line 714, in indentBody
c.editCommands.selfInsertCommand(event) # Handles undo.
File "...leo\commands\editCommands.py", line 1678, in selfInsertCommand
oldYview = w.getYScrollPosition()
AttributeError: 'QHeadlineWrapper' object has no attribute 'getYScrollPosition'
|
AttributeError
|
def indentBody(self, event=None):
"""
The indent-region command indents each line of the selected body text.
Unlike the always-indent-region command, this command inserts a tab
(soft or hard) when there is no selected text.
The @tabwidth directive in effect determines amount of indentation.
"""
c, event_w, w = self, event and event.w, self.frame.body.wrapper
# #1801: Don't rely on bindings to ensure that we are editing the body.
if event_w != w:
c.insertCharFromEvent(event)
return
# # 1739. Special case for a *plain* tab bound to indent-region.
sel_1, sel_2 = w.getSelectionRange()
if sel_1 == sel_2:
char = getattr(event, "char", None)
stroke = getattr(event, "stroke", None)
if char == "\t" and stroke and stroke.isPlainKey():
c.editCommands.selfInsertCommand(event) # Handles undo.
return
c.alwaysIndentBody(event)
|
def indentBody(self, event=None):
"""
The indent-region command indents each line of the selected body text.
Unlike the always-indent-region command, this command inserts a tab
(soft or hard) when there is no selected text.
The @tabwidth directive in effect determines amount of indentation.
"""
c, w = self, self.frame.body.wrapper
# # 1739. Special case for a *plain* tab bound to indent-region.
sel_1, sel_2 = w.getSelectionRange()
if sel_1 == sel_2:
char = getattr(event, "char", None)
stroke = getattr(event, "stroke", None)
if char == "\t" and stroke and stroke.isPlainKey():
c.editCommands.selfInsertCommand(event) # Handles undo.
return
c.alwaysIndentBody(event)
|
https://github.com/leo-editor/leo-editor/issues/1801
|
Traceback (most recent call last):
File "...leo\core\leoCommands.py", line 2252, in doCommand
return_value = command_func(event)
File "...leo\core\leoGlobals.py", line 244, in commander_command_wrapper
method(event=event)
File "...\leo\commands\commanderEditCommands.py", line 714, in indentBody
c.editCommands.selfInsertCommand(event) # Handles undo.
File "...leo\commands\editCommands.py", line 1678, in selfInsertCommand
oldYview = w.getYScrollPosition()
AttributeError: 'QHeadlineWrapper' object has no attribute 'getYScrollPosition'
|
AttributeError
|
def insertNewLineAndTab(self, event):
"""Insert a newline and tab at the cursor."""
trace = "keys" in g.app.debug
c, k = self.c, self.c.k
p = c.p
w = self.editWidget(event)
if not w:
return
if not g.isTextWrapper(w):
return
name = c.widget_name(w)
if name.startswith("head"):
return
if trace:
g.trace("(newline-and-indent)")
self.beginCommand(w, undoType="insert-newline-and-indent")
oldSel = w.getSelectionRange()
self.insertNewlineHelper(w=w, oldSel=oldSel, undoType=None)
self.updateTab(event, p, w, smartTab=False)
k.setInputState("insert")
k.showStateAndMode()
self.endCommand(changed=True, setLabel=False)
|
def insertNewLineAndTab(self, event):
"""Insert a newline and tab at the cursor."""
trace = "keys" in g.app.debug
c, k = self.c, self.c.k
p = c.p
w = self.editWidget(event)
if not w:
return
if not g.isTextWrapper(w):
return
name = c.widget_name(w)
if name.startswith("head"):
return
if trace:
g.trace("(newline-and-indent)")
self.beginCommand(w, undoType="insert-newline-and-indent")
oldSel = w.getSelectionRange()
self.insertNewlineHelper(w=w, oldSel=oldSel, undoType=None)
self.updateTab(p, w, smartTab=False)
k.setInputState("insert")
k.showStateAndMode()
self.endCommand(changed=True, setLabel=False)
|
https://github.com/leo-editor/leo-editor/issues/1801
|
Traceback (most recent call last):
File "...leo\core\leoCommands.py", line 2252, in doCommand
return_value = command_func(event)
File "...leo\core\leoGlobals.py", line 244, in commander_command_wrapper
method(event=event)
File "...\leo\commands\commanderEditCommands.py", line 714, in indentBody
c.editCommands.selfInsertCommand(event) # Handles undo.
File "...leo\commands\editCommands.py", line 1678, in selfInsertCommand
oldYview = w.getYScrollPosition()
AttributeError: 'QHeadlineWrapper' object has no attribute 'getYScrollPosition'
|
AttributeError
|
def selfInsertCommand(self, event, action="insert"):
"""
Insert a character in the body pane.
This is the default binding for all keys in the body pane.
It handles undo, bodykey events, tabs, back-spaces and bracket matching.
"""
trace = "keys" in g.app.debug
c, p, u, w = self.c, self.c.p, self.c.undoer, self.editWidget(event)
undoType = "Typing"
if not w:
return # pragma: no cover (defensive)
# @+<< set local vars >>
# @+node:ekr.20150514063305.269: *5* << set local vars >> (selfInsertCommand)
stroke = event.stroke if event else None
ch = event.char if event else ""
if ch == "Return":
ch = "\n" # This fixes the MacOS return bug.
if ch == "Tab":
ch = "\t"
name = c.widget_name(w)
oldSel = w.getSelectionRange() if name.startswith("body") else (None, None)
oldText = p.b if name.startswith("body") else ""
oldYview = w.getYScrollPosition()
brackets = self.openBracketsList + self.closeBracketsList
inBrackets = ch and g.checkUnicode(ch) in brackets
# @-<< set local vars >>
if not ch:
return
if trace:
g.trace("ch", repr(ch)) # and ch in '\n\r\t'
assert g.isStrokeOrNone(stroke)
if g.doHook("bodykey1", c=c, p=p, ch=ch, oldSel=oldSel, undoType=undoType):
return
if ch == "\t":
self.updateTab(event, p, w, smartTab=True)
elif ch == "\b":
# This is correct: we only come here if there no bindngs for this key.
self.backwardDeleteCharacter(event)
elif ch in ("\r", "\n"):
ch = "\n"
self.insertNewlineHelper(w, oldSel, undoType)
elif ch in "'\"" and c.config.getBool("smart-quotes"):
self.doSmartQuote(action, ch, oldSel, w)
elif inBrackets and self.autocompleteBrackets:
self.updateAutomatchBracket(p, w, ch, oldSel)
elif ch:
# Null chars must not delete the selection.
self.doPlainChar(action, ch, event, inBrackets, oldSel, stroke, w)
#
# Common processing.
# Set the column for up and down keys.
spot = w.getInsertPoint()
c.editCommands.setMoveCol(w, spot)
#
# Update the text and handle undo.
newText = w.getAllText()
if newText != oldText:
# Call u.doTyping to honor the user's undo granularity.
newSel = w.getSelectionRange()
newInsert = w.getInsertPoint()
newSel = w.getSelectionRange()
newText = w.getAllText() # Converts to unicode.
u.doTyping(
p,
"Typing",
oldText,
newText,
oldSel=oldSel,
oldYview=oldYview,
newInsert=newInsert,
newSel=newSel,
)
g.doHook("bodykey2", c=c, p=p, ch=ch, oldSel=oldSel, undoType=undoType)
|
def selfInsertCommand(self, event, action="insert"):
"""
Insert a character in the body pane.
This is the default binding for all keys in the body pane.
It handles undo, bodykey events, tabs, back-spaces and bracket matching.
"""
trace = "keys" in g.app.debug
c, p, u, w = self.c, self.c.p, self.c.undoer, self.editWidget(event)
undoType = "Typing"
if not w:
return # pragma: no cover (defensive)
# @+<< set local vars >>
# @+node:ekr.20150514063305.269: *5* << set local vars >> (selfInsertCommand)
stroke = event.stroke if event else None
ch = event.char if event else ""
if ch == "Return":
ch = "\n" # This fixes the MacOS return bug.
if ch == "Tab":
ch = "\t"
name = c.widget_name(w)
oldSel = w.getSelectionRange() if name.startswith("body") else (None, None)
oldText = p.b if name.startswith("body") else ""
oldYview = w.getYScrollPosition()
brackets = self.openBracketsList + self.closeBracketsList
inBrackets = ch and g.checkUnicode(ch) in brackets
# @-<< set local vars >>
if not ch:
return
if trace:
g.trace("ch", repr(ch)) # and ch in '\n\r\t'
assert g.isStrokeOrNone(stroke)
if g.doHook("bodykey1", c=c, p=p, ch=ch, oldSel=oldSel, undoType=undoType):
return
if ch == "\t":
self.updateTab(p, w, smartTab=True)
elif ch == "\b":
# This is correct: we only come here if there no bindngs for this key.
self.backwardDeleteCharacter(event)
elif ch in ("\r", "\n"):
ch = "\n"
self.insertNewlineHelper(w, oldSel, undoType)
elif ch in "'\"" and c.config.getBool("smart-quotes"):
self.doSmartQuote(action, ch, oldSel, w)
elif inBrackets and self.autocompleteBrackets:
self.updateAutomatchBracket(p, w, ch, oldSel)
elif ch:
# Null chars must not delete the selection.
self.doPlainChar(action, ch, event, inBrackets, oldSel, stroke, w)
#
# Common processing.
# Set the column for up and down keys.
spot = w.getInsertPoint()
c.editCommands.setMoveCol(w, spot)
#
# Update the text and handle undo.
newText = w.getAllText()
if newText != oldText:
# Call u.doTyping to honor the user's undo granularity.
newSel = w.getSelectionRange()
newInsert = w.getInsertPoint()
newSel = w.getSelectionRange()
newText = w.getAllText() # Converts to unicode.
u.doTyping(
p,
"Typing",
oldText,
newText,
oldSel=oldSel,
oldYview=oldYview,
newInsert=newInsert,
newSel=newSel,
)
g.doHook("bodykey2", c=c, p=p, ch=ch, oldSel=oldSel, undoType=undoType)
|
https://github.com/leo-editor/leo-editor/issues/1801
|
Traceback (most recent call last):
File "...leo\core\leoCommands.py", line 2252, in doCommand
return_value = command_func(event)
File "...leo\core\leoGlobals.py", line 244, in commander_command_wrapper
method(event=event)
File "...\leo\commands\commanderEditCommands.py", line 714, in indentBody
c.editCommands.selfInsertCommand(event) # Handles undo.
File "...leo\commands\editCommands.py", line 1678, in selfInsertCommand
oldYview = w.getYScrollPosition()
AttributeError: 'QHeadlineWrapper' object has no attribute 'getYScrollPosition'
|
AttributeError
|
def updateTab(self, event, p, w, smartTab=True):
"""
A helper for selfInsertCommand.
Add spaces equivalent to a tab.
"""
c = self.c
i, j = w.getSelectionRange()
# Returns insert point if no selection, with i <= j.
if i != j:
c.indentBody(event)
return
tab_width = c.getTabWidth(p)
# Get the preceeding characters.
s = w.getAllText()
start, end = g.getLine(s, i)
after = s[i:end]
if after.endswith("\n"):
after = after[:-1]
# Only do smart tab at the start of a blank line.
doSmartTab = smartTab and c.smart_tab and i == start
# Truly at the start of the line.
# and not after # Nothing *at all* after the cursor.
if doSmartTab:
self.updateAutoIndent(p, w)
# Add a tab if otherwise nothing would happen.
if s == w.getAllText():
self.doPlainTab(s, i, tab_width, w)
else:
self.doPlainTab(s, i, tab_width, w)
|
def updateTab(self, p, w, smartTab=True):
"""
A helper for selfInsertCommand.
Add spaces equivalent to a tab.
"""
c = self.c
i, j = w.getSelectionRange()
# Returns insert point if no selection, with i <= j.
if i != j:
c.indentBody()
return
tab_width = c.getTabWidth(p)
# Get the preceeding characters.
s = w.getAllText()
start, end = g.getLine(s, i)
after = s[i:end]
if after.endswith("\n"):
after = after[:-1]
# Only do smart tab at the start of a blank line.
doSmartTab = smartTab and c.smart_tab and i == start
# Truly at the start of the line.
# and not after # Nothing *at all* after the cursor.
if doSmartTab:
self.updateAutoIndent(p, w)
# Add a tab if otherwise nothing would happen.
if s == w.getAllText():
self.doPlainTab(s, i, tab_width, w)
else:
self.doPlainTab(s, i, tab_width, w)
|
https://github.com/leo-editor/leo-editor/issues/1801
|
Traceback (most recent call last):
File "...leo\core\leoCommands.py", line 2252, in doCommand
return_value = command_func(event)
File "...leo\core\leoGlobals.py", line 244, in commander_command_wrapper
method(event=event)
File "...\leo\commands\commanderEditCommands.py", line 714, in indentBody
c.editCommands.selfInsertCommand(event) # Handles undo.
File "...leo\commands\editCommands.py", line 1678, in selfInsertCommand
oldYview = w.getYScrollPosition()
AttributeError: 'QHeadlineWrapper' object has no attribute 'getYScrollPosition'
|
AttributeError
|
def updateAfterTyping(self, p, w):
"""
Perform all update tasks after changing body text.
This is ugly, ad-hoc code, but should be done uniformly.
"""
c = self.c
if g.isTextWrapper(w):
# An important, ever-present unit test.
all = w.getAllText()
if g.unitTesting:
assert p.b == all, (w, g.callers())
elif p.b != all:
g.trace(f"\np.b != w.getAllText() p: {p.h} \nw: {w!r} \n{g.callers()}\n")
# g.printObj(g.splitLines(p.b), tag='p.b')
# g.printObj(g.splitLines(all), tag='getAllText')
p.v.insertSpot = ins = w.getInsertPoint()
# From u.doTyping.
newSel = w.getSelectionRange()
if newSel is None:
p.v.selectionStart, p.v.selectionLength = (ins, 0)
else:
i, j = newSel
p.v.selectionStart, p.v.selectionLength = (i, j - i)
else:
if g.unitTesting:
assert False, f"Not a text wrapper: {g.callers()}"
g.trace("Not a text wrapper")
p.v.insertSpot = 0
p.v.selectionStart, p.v.selectionLength = (0, 0)
#
# #1749.
if p.isDirty():
redraw_flag = False
else:
p.setDirty() # Do not call p.v.setDirty!
redraw_flag = True
if not c.isChanged():
c.setChanged()
# Update editors.
c.frame.body.updateEditors()
# Update icons.
val = p.computeIcon()
if not hasattr(p.v, "iconVal") or val != p.v.iconVal:
p.v.iconVal = val
redraw_flag = True
#
# Recolor the body.
c.frame.scanForTabWidth(p) # Calls frame.setTabWidth()
c.recolor()
if g.app.unitTesting:
g.app.unitTestDict["colorized"] = True
if redraw_flag:
c.redraw_after_icons_changed()
w.setFocus()
|
def updateAfterTyping(self, p, w):
"""
Perform all update tasks after changing body text.
This is ugly, ad-hoc code, but should be done uniformly.
"""
c = self.c
if g.isTextWrapper(w):
# An important, ever-present unit test.
all = w.getAllText()
if g.unitTesting:
assert p.b == all, g.callers()
elif p.b != all:
g.trace(f"\nError:p.b != w.getAllText() p:{p.h} {g.callers()}\n")
# g.printObj(g.splitLines(p.b), tag='p.b')
# g.printObj(g.splitLines(all), tag='getAllText')
p.v.insertSpot = ins = w.getInsertPoint()
# From u.doTyping.
newSel = w.getSelectionRange()
if newSel is None:
p.v.selectionStart, p.v.selectionLength = (ins, 0)
else:
i, j = newSel
p.v.selectionStart, p.v.selectionLength = (i, j - i)
else:
if g.unitTesting:
assert False, f"Not a text wrapper: {g.callers()}"
g.trace("Not a text wrapper")
p.v.insertSpot = 0
p.v.selectionStart, p.v.selectionLength = (0, 0)
#
# #1749.
if p.isDirty():
redraw_flag = False
else:
p.setDirty() # Do not call p.v.setDirty!
redraw_flag = True
if not c.isChanged():
c.setChanged()
# Update editors.
c.frame.body.updateEditors()
# Update icons.
val = p.computeIcon()
if not hasattr(p.v, "iconVal") or val != p.v.iconVal:
p.v.iconVal = val
redraw_flag = True
#
# Recolor the body.
c.frame.scanForTabWidth(p) # Calls frame.setTabWidth()
c.recolor()
if g.app.unitTesting:
g.app.unitTestDict["colorized"] = True
if redraw_flag:
c.redraw_after_icons_changed()
w.setFocus()
|
https://github.com/leo-editor/leo-editor/issues/1801
|
Traceback (most recent call last):
File "...leo\core\leoCommands.py", line 2252, in doCommand
return_value = command_func(event)
File "...leo\core\leoGlobals.py", line 244, in commander_command_wrapper
method(event=event)
File "...\leo\commands\commanderEditCommands.py", line 714, in indentBody
c.editCommands.selfInsertCommand(event) # Handles undo.
File "...leo\commands\editCommands.py", line 1678, in selfInsertCommand
oldYview = w.getYScrollPosition()
AttributeError: 'QHeadlineWrapper' object has no attribute 'getYScrollPosition'
|
AttributeError
|
def show_all_tags(self):
"""Show all tags, organized by node."""
c, tc = self.c, self
d = {}
for p in c.all_unique_positions():
u = p.v.u
tags = set(u.get(tc.TAG_LIST_KEY, set([])))
for tag in tags:
aList = d.get(tag, [])
aList.append(p.h)
d[tag] = aList
# Print all tags.
if d:
for key in sorted(d):
aList = d.get(key)
for h in sorted(aList):
print(f"{key:>8} {h}")
else:
print(f"no tags in {c.shortFileName()}")
|
def show_all_tags(self):
"""Show all tags, organized by node."""
c, tc = self.c, self
d = {}
for p in c.all_unique_positions():
u = p.v.u
tags = set(u.get(tc.TAG_LIST_KEY, set([])))
for tag in tags:
aList = d.get(tag, [])
aList.append(p.h)
d[tag] = aList
# Print all tags.
for key in sorted(d):
aList = d.get(key)
for h in sorted(aList):
print(f"{key:>8} {h}")
|
https://github.com/leo-editor/leo-editor/issues/1801
|
Traceback (most recent call last):
File "...leo\core\leoCommands.py", line 2252, in doCommand
return_value = command_func(event)
File "...leo\core\leoGlobals.py", line 244, in commander_command_wrapper
method(event=event)
File "...\leo\commands\commanderEditCommands.py", line 714, in indentBody
c.editCommands.selfInsertCommand(event) # Handles undo.
File "...leo\commands\editCommands.py", line 1678, in selfInsertCommand
oldYview = w.getYScrollPosition()
AttributeError: 'QHeadlineWrapper' object has no attribute 'getYScrollPosition'
|
AttributeError
|
def doTyping(
self,
p,
undo_type,
oldText,
newText,
newInsert=None,
oldSel=None,
newSel=None,
oldYview=None,
):
"""
Save enough information to undo or redo a typing operation efficiently,
that is, with the proper granularity.
Do nothing when called from the undo/redo logic because the Undo
and Redo commands merely reset the bead pointer.
**Important**: Code should call this method *only* when the user has
actually typed something. Commands should use u.beforeChangeBody and
u.afterChangeBody.
Only qtm.onTextChanged and ec.selfInsertCommand now call this method.
"""
c, u, w = self.c, self, self.c.frame.body.wrapper
# Leo 6.4: undo_type must be 'Typing'.
undo_type = undo_type.capitalize()
assert undo_type == "Typing", (repr(undo_type), g.callers())
# @+<< return if there is nothing to do >>
# @+node:ekr.20040324061854: *5* << return if there is nothing to do >>
if u.redoing or u.undoing:
return None
if undo_type is None:
return None
if undo_type == "Can't Undo":
u.clearUndoState()
u.setUndoTypes() # Must still recalculate the menu labels.
return None
if oldText == newText:
u.setUndoTypes() # Must still recalculate the menu labels.
return None
# @-<< return if there is nothing to do >>
# @+<< init the undo params >>
# @+node:ekr.20040324061854.1: *5* << init the undo params >>
u.clearOptionalIvars()
# Set the params.
u.undoType = undo_type
u.p = p.copy()
# @-<< init the undo params >>
# @+<< compute leading, middle & trailing lines >>
# @+node:ekr.20031218072017.1491: *5* << compute leading, middle & trailing lines >>
# @+at Incremental undo typing is similar to incremental syntax coloring. We compute
# the number of leading and trailing lines that match, and save both the old and
# new middle lines. NB: the number of old and new middle lines may be different.
# @@c
old_lines = oldText.split("\n")
new_lines = newText.split("\n")
new_len = len(new_lines)
old_len = len(old_lines)
min_len = min(old_len, new_len)
i = 0
while i < min_len:
if old_lines[i] != new_lines[i]:
break
i += 1
leading = i
if leading == new_len:
# This happens when we remove lines from the end.
# The new text is simply the leading lines from the old text.
trailing = 0
else:
i = 0
while i < min_len - leading:
if old_lines[old_len - i - 1] != new_lines[new_len - i - 1]:
break
i += 1
trailing = i
# NB: the number of old and new middle lines may be different.
if trailing == 0:
old_middle_lines = old_lines[leading:]
new_middle_lines = new_lines[leading:]
else:
old_middle_lines = old_lines[leading:-trailing]
new_middle_lines = new_lines[leading:-trailing]
# Remember how many trailing newlines in the old and new text.
i = len(oldText) - 1
old_newlines = 0
while i >= 0 and oldText[i] == "\n":
old_newlines += 1
i -= 1
i = len(newText) - 1
new_newlines = 0
while i >= 0 and newText[i] == "\n":
new_newlines += 1
i -= 1
# @-<< compute leading, middle & trailing lines >>
# @+<< save undo text info >>
# @+node:ekr.20031218072017.1492: *5* << save undo text info >>
u.oldText = None
u.newText = None
u.leading = leading
u.trailing = trailing
u.oldMiddleLines = old_middle_lines
u.newMiddleLines = new_middle_lines
u.oldNewlines = old_newlines
u.newNewlines = new_newlines
# @-<< save undo text info >>
# @+<< save the selection and scrolling position >>
# @+node:ekr.20040324061854.2: *5* << save the selection and scrolling position >>
# Remember the selection.
u.oldSel = oldSel
u.newSel = newSel
# Remember the scrolling position.
if oldYview:
u.yview = oldYview
else:
u.yview = c.frame.body.wrapper.getYScrollPosition()
# @-<< save the selection and scrolling position >>
# @+<< adjust the undo stack, clearing all forward entries >>
# @+node:ekr.20040324061854.3: *5* << adjust the undo stack, clearing all forward entries >>
# @+at
# New in Leo 4.3. Instead of creating a new bead on every character, we
# may adjust the top bead:
# word granularity: adjust the top bead if the typing would continue the word.
# line granularity: adjust the top bead if the typing is on the same line.
# node granularity: adjust the top bead if the typing is anywhere on the same node.
# @@c
granularity = u.granularity
old_d = u.peekBead(u.bead)
old_p = old_d and old_d.get("p")
# @+<< set newBead if we can't share the previous bead >>
# @+node:ekr.20050125220613: *6* << set newBead if we can't share the previous bead >>
# Set newBead to True if undo_type is not 'Typing' so that commands that
# get treated like typing (by onBodyChanged) don't get lumped
# with 'real' typing.
# @@c
if (
not old_d
or not old_p
or old_p.v != p.v
or old_d.get("kind") != "typing"
or old_d.get("undoType") != "Typing"
or undo_type != "Typing"
):
newBead = True # We can't share the previous node.
elif granularity == "char":
newBead = True # This was the old way.
elif granularity == "node":
newBead = False # Always replace previous bead.
else:
assert granularity in ("line", "word")
# Replace the previous bead if only the middle lines have changed.
newBead = (
old_d.get("leading", 0) != u.leading
or old_d.get("trailing", 0) != u.trailing
)
if granularity == "word" and not newBead:
# Protect the method that may be changed by the user
try:
# @+<< set newBead if the change does not continue a word >>
# @+node:ekr.20050125203937: *7* << set newBead if the change does not continue a word >>
# Fix #653: undoer problem: be wary of the ternary operator here.
old_start = old_end = new_start = new_end = 0
if oldSel is not None:
old_start, old_end = oldSel
if newSel is not None:
new_start, new_end = newSel
if u.prevSel is None:
prev_start, prev_end = 0, 0
else:
prev_start, prev_end = u.prevSel
if old_start != old_end or new_start != new_end:
# The new and old characters are not contiguous.
newBead = True
else:
# 2011/04/01: Patch by Sam Hartsfield
old_row, old_col = g.convertPythonIndexToRowCol(oldText, old_start)
new_row, new_col = g.convertPythonIndexToRowCol(newText, new_start)
prev_row, prev_col = g.convertPythonIndexToRowCol(
oldText, prev_start
)
old_lines = g.splitLines(oldText)
new_lines = g.splitLines(newText)
# Recognize backspace, del, etc. as contiguous.
if old_row != new_row or abs(old_col - new_col) != 1:
# The new and old characters are not contiguous.
newBead = True
elif old_col == 0 or new_col == 0:
# py-lint: disable=W0511
# W0511:1362: TODO
# TODO this is not true, we might as well just have entered a
# char at the beginning of an existing line
pass # We have just inserted a line.
else:
# 2011/04/01: Patch by Sam Hartsfield
old_s = old_lines[old_row]
new_s = new_lines[new_row]
# New in 4.3b2:
# Guard against invalid oldSel or newSel params.
if old_col - 1 >= len(old_s) or new_col - 1 >= len(new_s):
newBead = True
else:
old_ch = old_s[old_col - 1]
new_ch = new_s[new_col - 1]
newBead = self.recognizeStartOfTypingWord(
old_lines,
old_row,
old_col,
old_ch,
new_lines,
new_row,
new_col,
new_ch,
prev_row,
prev_col,
)
# @-<< set newBead if the change does not continue a word >>
except Exception:
g.error("Unexpected exception...")
g.es_exception()
newBead = True
# @-<< set newBead if we can't share the previous bead >>
# Save end selection as new "previous" selection
u.prevSel = u.newSel
if newBead:
# Push params on undo stack, clearing all forward entries.
bunch = g.Bunch(
p=p.copy(),
kind="typing", # lowercase.
undoType=undo_type, # capitalized.
undoHelper=u.undoTyping,
redoHelper=u.redoTyping,
oldMarked=old_p.isMarked() if old_p else p.isMarked(), # #1694
oldText=u.oldText,
oldSel=u.oldSel,
oldNewlines=u.oldNewlines,
oldMiddleLines=u.oldMiddleLines,
)
u.pushBead(bunch)
else:
bunch = old_d
bunch.leading = u.leading
bunch.trailing = u.trailing
bunch.newMarked = p.isMarked() # #1694
bunch.newNewlines = u.newNewlines
bunch.newMiddleLines = u.newMiddleLines
bunch.newSel = u.newSel
bunch.newText = u.newText
bunch.yview = u.yview
# @-<< adjust the undo stack, clearing all forward entries >>
if "undo" in g.app.debug and "verbose" in g.app.debug:
print(f"u.doTyping: {len(oldText)} => {len(newText)}")
if u.per_node_undo:
u.putIvarsToVnode(p)
#
# Finish updating the text.
p.v.setBodyString(newText)
u.updateAfterTyping(p, w)
|
def doTyping(
self,
p,
undo_type,
oldText,
newText,
newInsert=None,
oldSel=None,
newSel=None,
oldYview=None,
):
"""
Save enough information to undo or redo a typing operation efficiently,
that is, with the proper granularity.
Do nothing when called from the undo/redo logic because the Undo
and Redo commands merely reset the bead pointer.
**Important**: Code should call this method *only* when the user has
actually typed something. Commands should use u.beforeChangeBody and
u.afterChangeBody.
Only qtm.onTextChanged and ec.selfInsertCommand now call this method.
"""
c, u, w = self.c, self, self.c.frame.body.wrapper
# Leo 6.4: undo_type must be 'Typing'.
undo_type = undo_type.capitalize()
assert undo_type == "Typing", (repr(undo_type), g.callers())
# @+<< return if there is nothing to do >>
# @+node:ekr.20040324061854: *5* << return if there is nothing to do >>
if u.redoing or u.undoing:
return None
if undo_type is None:
return None
if undo_type == "Can't Undo":
u.clearUndoState()
u.setUndoTypes() # Must still recalculate the menu labels.
return None
if oldText == newText:
u.setUndoTypes() # Must still recalculate the menu labels.
return None
# @-<< return if there is nothing to do >>
# @+<< init the undo params >>
# @+node:ekr.20040324061854.1: *5* << init the undo params >>
u.clearOptionalIvars()
# Set the params.
u.undoType = undo_type
u.p = p.copy()
# @-<< init the undo params >>
# @+<< compute leading, middle & trailing lines >>
# @+node:ekr.20031218072017.1491: *5* << compute leading, middle & trailing lines >>
# @+at Incremental undo typing is similar to incremental syntax coloring. We compute
# the number of leading and trailing lines that match, and save both the old and
# new middle lines. NB: the number of old and new middle lines may be different.
# @@c
old_lines = oldText.split("\n")
new_lines = newText.split("\n")
new_len = len(new_lines)
old_len = len(old_lines)
min_len = min(old_len, new_len)
i = 0
while i < min_len:
if old_lines[i] != new_lines[i]:
break
i += 1
leading = i
if leading == new_len:
# This happens when we remove lines from the end.
# The new text is simply the leading lines from the old text.
trailing = 0
else:
i = 0
while i < min_len - leading:
if old_lines[old_len - i - 1] != new_lines[new_len - i - 1]:
break
i += 1
trailing = i
# NB: the number of old and new middle lines may be different.
if trailing == 0:
old_middle_lines = old_lines[leading:]
new_middle_lines = new_lines[leading:]
else:
old_middle_lines = old_lines[leading:-trailing]
new_middle_lines = new_lines[leading:-trailing]
# Remember how many trailing newlines in the old and new text.
i = len(oldText) - 1
old_newlines = 0
while i >= 0 and oldText[i] == "\n":
old_newlines += 1
i -= 1
i = len(newText) - 1
new_newlines = 0
while i >= 0 and newText[i] == "\n":
new_newlines += 1
i -= 1
# @-<< compute leading, middle & trailing lines >>
# @+<< save undo text info >>
# @+node:ekr.20031218072017.1492: *5* << save undo text info >>
u.oldText = None
u.newText = None
u.leading = leading
u.trailing = trailing
u.oldMiddleLines = old_middle_lines
u.newMiddleLines = new_middle_lines
u.oldNewlines = old_newlines
u.newNewlines = new_newlines
# @-<< save undo text info >>
# @+<< save the selection and scrolling position >>
# @+node:ekr.20040324061854.2: *5* << save the selection and scrolling position >>
# Remember the selection.
u.oldSel = oldSel
u.newSel = newSel
# Remember the scrolling position.
if oldYview:
u.yview = oldYview
else:
u.yview = c.frame.body.wrapper.getYScrollPosition()
# @-<< save the selection and scrolling position >>
# @+<< adjust the undo stack, clearing all forward entries >>
# @+node:ekr.20040324061854.3: *5* << adjust the undo stack, clearing all forward entries >>
# @+at
# New in Leo 4.3. Instead of creating a new bead on every character, we
# may adjust the top bead:
# word granularity: adjust the top bead if the typing would continue the word.
# line granularity: adjust the top bead if the typing is on the same line.
# node granularity: adjust the top bead if the typing is anywhere on the same node.
# @@c
granularity = u.granularity
old_d = u.peekBead(u.bead)
old_p = old_d and old_d.get("p")
# @+<< set newBead if we can't share the previous bead >>
# @+node:ekr.20050125220613: *6* << set newBead if we can't share the previous bead >>
# Set newBead to True if undo_type is not 'Typing' so that commands that
# get treated like typing (by onBodyChanged) don't get lumped
# with 'real' typing.
# @@c
if (
not old_d
or not old_p
or old_p.v != p.v
or old_d.get("kind") != "typing"
or old_d.get("undoType") != "Typing"
or undo_type != "Typing"
):
newBead = True # We can't share the previous node.
elif granularity == "char":
newBead = True # This was the old way.
elif granularity == "node":
newBead = False # Always replace previous bead.
else:
assert granularity in ("line", "word")
# Replace the previous bead if only the middle lines have changed.
newBead = (
old_d.get("leading", 0) != u.leading
or old_d.get("trailing", 0) != u.trailing
)
if granularity == "word" and not newBead:
# Protect the method that may be changed by the user
try:
# @+<< set newBead if the change does not continue a word >>
# @+node:ekr.20050125203937: *7* << set newBead if the change does not continue a word >>
# Fix #653: undoer problem: be wary of the ternary operator here.
old_start = old_end = new_start = new_end = 0
if oldSel:
old_start, old_end = oldSel
if newSel:
new_start, new_end = newSel
prev_start, prev_end = u.prevSel
if old_start != old_end or new_start != new_end:
# The new and old characters are not contiguous.
newBead = True
else:
# 2011/04/01: Patch by Sam Hartsfield
old_row, old_col = g.convertPythonIndexToRowCol(oldText, old_start)
new_row, new_col = g.convertPythonIndexToRowCol(newText, new_start)
prev_row, prev_col = g.convertPythonIndexToRowCol(
oldText, prev_start
)
old_lines = g.splitLines(oldText)
new_lines = g.splitLines(newText)
# Recognize backspace, del, etc. as contiguous.
if old_row != new_row or abs(old_col - new_col) != 1:
# The new and old characters are not contiguous.
newBead = True
elif old_col == 0 or new_col == 0:
# py-lint: disable=W0511
# W0511:1362: TODO
# TODO this is not true, we might as well just have entered a
# char at the beginning of an existing line
pass # We have just inserted a line.
else:
# 2011/04/01: Patch by Sam Hartsfield
old_s = old_lines[old_row]
new_s = new_lines[new_row]
# New in 4.3b2:
# Guard against invalid oldSel or newSel params.
if old_col - 1 >= len(old_s) or new_col - 1 >= len(new_s):
newBead = True
else:
old_ch = old_s[old_col - 1]
new_ch = new_s[new_col - 1]
newBead = self.recognizeStartOfTypingWord(
old_lines,
old_row,
old_col,
old_ch,
new_lines,
new_row,
new_col,
new_ch,
prev_row,
prev_col,
)
# @-<< set newBead if the change does not continue a word >>
except Exception:
g.error("Unexpected exception...")
g.es_exception()
newBead = True
# @-<< set newBead if we can't share the previous bead >>
# Save end selection as new "previous" selection
u.prevSel = u.newSel
if newBead:
# Push params on undo stack, clearing all forward entries.
bunch = g.Bunch(
p=p.copy(),
kind="typing", # lowercase.
undoType=undo_type, # capitalized.
undoHelper=u.undoTyping,
redoHelper=u.redoTyping,
oldMarked=old_p.isMarked() if old_p else p.isMarked(), # #1694
oldText=u.oldText,
oldSel=u.oldSel,
oldNewlines=u.oldNewlines,
oldMiddleLines=u.oldMiddleLines,
)
u.pushBead(bunch)
else:
bunch = old_d
bunch.leading = u.leading
bunch.trailing = u.trailing
bunch.newMarked = p.isMarked() # #1694
bunch.newNewlines = u.newNewlines
bunch.newMiddleLines = u.newMiddleLines
bunch.newSel = u.newSel
bunch.newText = u.newText
bunch.yview = u.yview
# @-<< adjust the undo stack, clearing all forward entries >>
if "undo" in g.app.debug and "verbose" in g.app.debug:
print(f"u.doTyping: {len(oldText)} => {len(newText)}")
if u.per_node_undo:
u.putIvarsToVnode(p)
#
# Finish updating the text.
p.v.setBodyString(newText)
u.updateAfterTyping(p, w)
|
https://github.com/leo-editor/leo-editor/issues/1790
|
Unexpected exception...Traceback (most recent call last):
File "d:\Tom\git\leo-editor\leo\core\leoUndo.py", line 1054, in doTyping
prev_start, prev_end = u.prevSel
TypeError: cannot unpack non-iterable NoneType object
|
TypeError
|
def refreshFromDisk(self, event=None):
"""Refresh an @<file> node from disk."""
c, p, u = self, self.p, self.undoer
c.nodeConflictList = []
fn = p.anyAtFileNodeName()
shouldDelete = c.sqlite_connection is None
if not fn:
g.warning(f"not an @<file> node: {p.h!r}")
return
# #1603.
if os.path.isdir(fn):
g.warning(f"not a file: {fn!r}")
return
b = u.beforeChangeTree(p)
redraw_flag = True
at = c.atFileCommands
c.recreateGnxDict()
# Fix bug 1090950 refresh from disk: cut node ressurection.
i = g.skip_id(p.h, 0, chars="@")
word = p.h[0:i]
if word == "@auto":
# This includes @auto-*
if shouldDelete:
p.v._deleteAllChildren()
# Fix #451: refresh-from-disk selects wrong node.
p = at.readOneAtAutoNode(fn, p)
elif word in ("@thin", "@file"):
if shouldDelete:
p.v._deleteAllChildren()
at.read(p, force=True)
elif word == "@clean":
# Wishlist 148: use @auto parser if the node is empty.
if p.b.strip() or p.hasChildren():
at.readOneAtCleanNode(p)
else:
# Fix #451: refresh-from-disk selects wrong node.
p = at.readOneAtAutoNode(fn, p)
elif word == "@shadow":
if shouldDelete:
p.v._deleteAllChildren()
at.read(p, force=True, atShadow=True)
elif word == "@edit":
at.readOneAtEditNode(fn, p)
# Always deletes children.
elif word == "@asis":
# Fix #1067.
at.readOneAtAsisNode(fn, p)
# Always deletes children.
else:
g.es_print(f"can not refresh from disk\n{p.h!r}")
redraw_flag = False
if redraw_flag:
# Fix #451: refresh-from-disk selects wrong node.
c.selectPosition(p)
u.afterChangeTree(p, command="refresh-from-disk", bunch=b)
# Create the 'Recovered Nodes' tree.
c.fileCommands.handleNodeConflicts()
c.redraw()
|
def refreshFromDisk(self, event=None):
"""Refresh an @<file> node from disk."""
c, p, u = self, self.p, self.undoer
c.nodeConflictList = []
fn = p.anyAtFileNodeName()
shouldDelete = c.sqlite_connection is None
if not fn:
g.warning(f"not an @<file> node:\n{p.h!r}")
return
b = u.beforeChangeTree(p)
redraw_flag = True
at = c.atFileCommands
c.recreateGnxDict()
# Fix bug 1090950 refresh from disk: cut node ressurection.
i = g.skip_id(p.h, 0, chars="@")
word = p.h[0:i]
if word == "@auto":
# This includes @auto-*
if shouldDelete:
p.v._deleteAllChildren()
# Fix #451: refresh-from-disk selects wrong node.
p = at.readOneAtAutoNode(fn, p)
elif word in ("@thin", "@file"):
if shouldDelete:
p.v._deleteAllChildren()
at.read(p, force=True)
elif word == "@clean":
# Wishlist 148: use @auto parser if the node is empty.
if p.b.strip() or p.hasChildren():
at.readOneAtCleanNode(p)
else:
# Fix #451: refresh-from-disk selects wrong node.
p = at.readOneAtAutoNode(fn, p)
elif word == "@shadow":
if shouldDelete:
p.v._deleteAllChildren()
at.read(p, force=True, atShadow=True)
elif word == "@edit":
at.readOneAtEditNode(fn, p)
# Always deletes children.
elif word == "@asis":
# Fix #1067.
at.readOneAtAsisNode(fn, p)
# Always deletes children.
else:
g.es_print(f"can not refresh from disk\n{p.h!r}")
redraw_flag = False
if redraw_flag:
# Fix #451: refresh-from-disk selects wrong node.
c.selectPosition(p)
u.afterChangeTree(p, command="refresh-from-disk", bunch=b)
# Create the 'Recovered Nodes' tree.
c.fileCommands.handleNodeConflicts()
c.redraw()
|
https://github.com/leo-editor/leo-editor/issues/1603
|
readFileIntoString not a file: C:/Users/edreamleo/RustProjects/hello_cargo/src
Traceback (most recent call last):
File "c:\leo.repo\leo-editor\leo\plugins\contextmenu.py", line 332, in refresh_rclick_cb
c.refreshFromDisk()
File "c:\leo.repo\leo-editor\leo\commands\commanderFileCommands.py", line 305, in refreshFromDisk
p = at.readOneAtAutoNode(fn, p)
File "c:\leo.repo\leo-editor\leo\core\leoAtFile.py", line 597, in readOneAtAutoNode
c.persistenceController.update_after_read_foreign_file(p)
File "c:\leo.repo\leo-editor\leo\core\leoPersistence.py", line 126, in update_after_read_foreign_file
if not self.is_foreign_file(root):
File "c:\leo.repo\leo-editor\leo\core\leoPersistence.py", line 461, in is_foreign_file
self.is_at_auto_node(p) or
File "c:\leo.repo\leo-editor\leo\core\leoPersistence.py", line 448, in is_at_auto_node
return p.isAtAutoNode()
AttributeError: 'NoneType' object has no attribute 'isAtAutoNode'
|
AttributeError
|
def openFileHelper(self, fileName):
"""Open a file, reporting all exceptions."""
at = self
s = ""
try:
with open(fileName, "rb") as f:
s = f.read()
except IOError:
at.error(f"can not open {fileName}")
except Exception:
at.error(f"Exception reading {fileName}")
g.es_exception()
return s
|
def openFileHelper(self, fileName):
"""Open a file, reporting all exceptions."""
at = self
s = None
try:
with open(fileName, "rb") as f:
s = f.read()
except IOError:
at.error(f"can not open {fileName}")
except Exception:
at.error(f"Exception reading {fileName}")
g.es_exception()
return s
|
https://github.com/leo-editor/leo-editor/issues/1603
|
readFileIntoString not a file: C:/Users/edreamleo/RustProjects/hello_cargo/src
Traceback (most recent call last):
File "c:\leo.repo\leo-editor\leo\plugins\contextmenu.py", line 332, in refresh_rclick_cb
c.refreshFromDisk()
File "c:\leo.repo\leo-editor\leo\commands\commanderFileCommands.py", line 305, in refreshFromDisk
p = at.readOneAtAutoNode(fn, p)
File "c:\leo.repo\leo-editor\leo\core\leoAtFile.py", line 597, in readOneAtAutoNode
c.persistenceController.update_after_read_foreign_file(p)
File "c:\leo.repo\leo-editor\leo\core\leoPersistence.py", line 126, in update_after_read_foreign_file
if not self.is_foreign_file(root):
File "c:\leo.repo\leo-editor\leo\core\leoPersistence.py", line 461, in is_foreign_file
self.is_at_auto_node(p) or
File "c:\leo.repo\leo-editor\leo\core\leoPersistence.py", line 448, in is_at_auto_node
return p.isAtAutoNode()
AttributeError: 'NoneType' object has no attribute 'isAtAutoNode'
|
AttributeError
|
def update_after_read_foreign_file(self, root):
"""Restore gnx's, uAs and clone links using @gnxs nodes and @uas trees."""
self.at_persistence = self.find_at_persistence_node()
if not self.at_persistence:
return
if not root:
return
if not self.is_foreign_file(root):
return
# Create clone links from @gnxs node
at_gnxs = self.has_at_gnxs_node(root)
if at_gnxs:
self.restore_gnxs(at_gnxs, root)
# Create uas from @uas tree.
at_uas = self.has_at_uas_node(root)
if at_uas:
self.create_uas(at_uas, root)
|
def update_after_read_foreign_file(self, root):
"""Restore gnx's, uAs and clone links using @gnxs nodes and @uas trees."""
self.at_persistence = self.find_at_persistence_node()
if not self.at_persistence:
return
if not self.is_foreign_file(root):
return
# Create clone links from @gnxs node
at_gnxs = self.has_at_gnxs_node(root)
if at_gnxs:
self.restore_gnxs(at_gnxs, root)
# Create uas from @uas tree.
at_uas = self.has_at_uas_node(root)
if at_uas:
self.create_uas(at_uas, root)
|
https://github.com/leo-editor/leo-editor/issues/1603
|
readFileIntoString not a file: C:/Users/edreamleo/RustProjects/hello_cargo/src
Traceback (most recent call last):
File "c:\leo.repo\leo-editor\leo\plugins\contextmenu.py", line 332, in refresh_rclick_cb
c.refreshFromDisk()
File "c:\leo.repo\leo-editor\leo\commands\commanderFileCommands.py", line 305, in refreshFromDisk
p = at.readOneAtAutoNode(fn, p)
File "c:\leo.repo\leo-editor\leo\core\leoAtFile.py", line 597, in readOneAtAutoNode
c.persistenceController.update_after_read_foreign_file(p)
File "c:\leo.repo\leo-editor\leo\core\leoPersistence.py", line 126, in update_after_read_foreign_file
if not self.is_foreign_file(root):
File "c:\leo.repo\leo-editor\leo\core\leoPersistence.py", line 461, in is_foreign_file
self.is_at_auto_node(p) or
File "c:\leo.repo\leo-editor\leo\core\leoPersistence.py", line 448, in is_at_auto_node
return p.isAtAutoNode()
AttributeError: 'NoneType' object has no attribute 'isAtAutoNode'
|
AttributeError
|
def readFile(self, path):
"""Read the file, change splitter ratiors, and return its hidden vnode."""
with open(path, "rb") as f:
s = f.read()
v, g_element = self.readWithElementTree(path, s)
if not v: # #1510.
return None
self.scanGlobals(g_element)
# Fix #1047: only this method changes splitter sizes.
#
# Fix bug #1111: ensure that all outlines have at least one node.
if not v.children:
new_vnode = leoNodes.VNode(context=self.c)
new_vnode.h = "newHeadline"
v.children = [new_vnode]
return v
|
def readFile(self, path):
"""Read the file, change splitter ratiors, and return its hidden vnode."""
with open(path, "rb") as f:
s = f.read()
v, g_element = self.readWithElementTree(path, s)
self.scanGlobals(g_element)
# Fix #1047: only this method changes splitter sizes.
#
# Fix bug #1111: ensure that all outlines have at least one node.
if not v.children:
new_vnode = leoNodes.VNode(context=self.c)
new_vnode.h = "newHeadline"
v.children = [new_vnode]
return v
|
https://github.com/leo-editor/leo-editor/issues/1510
|
bad .leo file: WORK.leo
g.toUnicode: unexpected argument of type ParseError
toUnicode openLeoFile,getLeoFile,readFile,readWithElementTree
Traceback (most recent call last):
File "E:\git\leo\leo\core\leoCommands.py", line 2278, in executeAnyCommand
return command(event)
File "E:\git\leo\leo\core\leoGlobals.py", line 240, in commander_command_wrapper
method(event=event)
File "E:\git\leo\leo\commands\commanderFileCommands.py", line 261, in open_outline
open_completer(c, closeFlag, fileName)
File "E:\git\leo\leo\commands\commanderFileCommands.py", line 204, in open_completer
c2 = g.openWithFileName(fileName, old_c=c)
File "E:\git\leo\leo\core\leoGlobals.py", line 3920, in openWithFileName
return g.app.loadManager.loadLocalFile(fileName, gui, old_c)
File "E:\git\leo\leo\core\leoApp.py", line 3288, in loadLocalFile
previousSettings = lm.getPreviousSettings(fn)
File "E:\git\leo\leo\core\leoApp.py", line 2241, in getPreviousSettings
c = lm.openSettingsFile(fn)
File "E:\git\leo\leo\core\leoApp.py", line 2412, in openSettingsFile
ok = c.fileCommands.openLeoFile(theFile, fn,
File "E:\git\leo\leo\core\leoFileCommands.py", line 692, in openLeoFile
ok, ratio = self.getLeoFile(
File "E:\git\leo\leo\core\leoFileCommands.py", line 559, in getLeoFile
v = FastRead(c, self.gnxDict).readFile(fileName)
File "E:\git\leo\leo\core\leoFileCommands.py", line 60, in readFile
v, g_element = self.readWithElementTree(path, s)
TypeError: cannot unpack non-iterable NoneType object
Traceback (most recent call last):
File "E:\git\leo\leo\plugins\qt_frame.py", line 1638, in closeEvent
g.app.gui.close_event(event)
AttributeError: 'NullGui' object has no attribute 'close_event'
|
TypeError
|
def readFileFromClipboard(self, s):
"""
Recreate a file from a string s, and return its hidden vnode.
Unlike readFile above, this does not affect splitter sizes.
"""
v, g_element = self.readWithElementTree(path=None, s=s)
if not v: # #1510.
return None
#
# Fix bug #1111: ensure that all outlines have at least one node.
if not v.children:
new_vnode = leoNodes.VNode(context=self.c)
new_vnode.h = "newHeadline"
v.children = [new_vnode]
return v
|
def readFileFromClipboard(self, s):
"""
Recreate a file from a string s, and return its hidden vnode.
Unlike readFile above, this does not affect splitter sizes.
"""
v, g_element = self.readWithElementTree(path=None, s=s)
#
# Fix bug #1111: ensure that all outlines have at least one node.
if not v.children:
new_vnode = leoNodes.VNode(context=self.c)
new_vnode.h = "newHeadline"
v.children = [new_vnode]
return v
|
https://github.com/leo-editor/leo-editor/issues/1510
|
bad .leo file: WORK.leo
g.toUnicode: unexpected argument of type ParseError
toUnicode openLeoFile,getLeoFile,readFile,readWithElementTree
Traceback (most recent call last):
File "E:\git\leo\leo\core\leoCommands.py", line 2278, in executeAnyCommand
return command(event)
File "E:\git\leo\leo\core\leoGlobals.py", line 240, in commander_command_wrapper
method(event=event)
File "E:\git\leo\leo\commands\commanderFileCommands.py", line 261, in open_outline
open_completer(c, closeFlag, fileName)
File "E:\git\leo\leo\commands\commanderFileCommands.py", line 204, in open_completer
c2 = g.openWithFileName(fileName, old_c=c)
File "E:\git\leo\leo\core\leoGlobals.py", line 3920, in openWithFileName
return g.app.loadManager.loadLocalFile(fileName, gui, old_c)
File "E:\git\leo\leo\core\leoApp.py", line 3288, in loadLocalFile
previousSettings = lm.getPreviousSettings(fn)
File "E:\git\leo\leo\core\leoApp.py", line 2241, in getPreviousSettings
c = lm.openSettingsFile(fn)
File "E:\git\leo\leo\core\leoApp.py", line 2412, in openSettingsFile
ok = c.fileCommands.openLeoFile(theFile, fn,
File "E:\git\leo\leo\core\leoFileCommands.py", line 692, in openLeoFile
ok, ratio = self.getLeoFile(
File "E:\git\leo\leo\core\leoFileCommands.py", line 559, in getLeoFile
v = FastRead(c, self.gnxDict).readFile(fileName)
File "E:\git\leo\leo\core\leoFileCommands.py", line 60, in readFile
v, g_element = self.readWithElementTree(path, s)
TypeError: cannot unpack non-iterable NoneType object
Traceback (most recent call last):
File "E:\git\leo\leo\plugins\qt_frame.py", line 1638, in closeEvent
g.app.gui.close_event(event)
AttributeError: 'NullGui' object has no attribute 'close_event'
|
TypeError
|
def readWithElementTree(self, path, s):
contents = g.toUnicode(s)
s = contents.translate(self.translate_table)
# Fix #1036 and #1046.
try:
xroot = ElementTree.fromstring(contents)
except Exception as e:
# #970: Just report failure here.
if path:
message = f"bad .leo file: {g.shortFileName(path)}"
else:
message = "The clipboard is not a vaild .leo file"
g.es_print("\n" + message, color="red")
g.es_print(g.toUnicode(e))
print("")
# #1510: Return a tuple.
return None, None
g_element = xroot.find("globals")
v_elements = xroot.find("vnodes")
t_elements = xroot.find("tnodes")
gnx2body, gnx2ua = self.scanTnodes(t_elements)
hidden_v = self.scanVnodes(gnx2body, self.gnx2vnode, gnx2ua, v_elements)
self.handleBits()
return hidden_v, g_element
|
def readWithElementTree(self, path, s):
s = s.translate(None, self.translate_table)
# Fix #1036 and #1046.
contents = g.toUnicode(s)
try:
xroot = ElementTree.fromstring(contents)
except Exception as e:
if path:
message = f"bad .leo file: {g.shortFileName(path)}"
else:
message = "The clipboard is not a vaild .leo file"
g.es_print("\n" + message, color="red")
g.es_print(g.toUnicode(e))
print("")
# #970: Just report failure here.
return None
g_element = xroot.find("globals")
v_elements = xroot.find("vnodes")
t_elements = xroot.find("tnodes")
gnx2body, gnx2ua = self.scanTnodes(t_elements)
hidden_v = self.scanVnodes(gnx2body, self.gnx2vnode, gnx2ua, v_elements)
self.handleBits()
return hidden_v, g_element
|
https://github.com/leo-editor/leo-editor/issues/1510
|
bad .leo file: WORK.leo
g.toUnicode: unexpected argument of type ParseError
toUnicode openLeoFile,getLeoFile,readFile,readWithElementTree
Traceback (most recent call last):
File "E:\git\leo\leo\core\leoCommands.py", line 2278, in executeAnyCommand
return command(event)
File "E:\git\leo\leo\core\leoGlobals.py", line 240, in commander_command_wrapper
method(event=event)
File "E:\git\leo\leo\commands\commanderFileCommands.py", line 261, in open_outline
open_completer(c, closeFlag, fileName)
File "E:\git\leo\leo\commands\commanderFileCommands.py", line 204, in open_completer
c2 = g.openWithFileName(fileName, old_c=c)
File "E:\git\leo\leo\core\leoGlobals.py", line 3920, in openWithFileName
return g.app.loadManager.loadLocalFile(fileName, gui, old_c)
File "E:\git\leo\leo\core\leoApp.py", line 3288, in loadLocalFile
previousSettings = lm.getPreviousSettings(fn)
File "E:\git\leo\leo\core\leoApp.py", line 2241, in getPreviousSettings
c = lm.openSettingsFile(fn)
File "E:\git\leo\leo\core\leoApp.py", line 2412, in openSettingsFile
ok = c.fileCommands.openLeoFile(theFile, fn,
File "E:\git\leo\leo\core\leoFileCommands.py", line 692, in openLeoFile
ok, ratio = self.getLeoFile(
File "E:\git\leo\leo\core\leoFileCommands.py", line 559, in getLeoFile
v = FastRead(c, self.gnxDict).readFile(fileName)
File "E:\git\leo\leo\core\leoFileCommands.py", line 60, in readFile
v, g_element = self.readWithElementTree(path, s)
TypeError: cannot unpack non-iterable NoneType object
Traceback (most recent call last):
File "E:\git\leo\leo\plugins\qt_frame.py", line 1638, in closeEvent
g.app.gui.close_event(event)
AttributeError: 'NullGui' object has no attribute 'close_event'
|
TypeError
|
def readWithElementTree(self, path, s):
contents = g.toUnicode(s)
contents = contents.translate(self.translate_table)
# Fix #1036 and #1046.
try:
xroot = ElementTree.fromstring(contents)
except Exception as e:
# #970: Just report failure here.
if path:
message = f"bad .leo file: {g.shortFileName(path)}"
else:
message = "The clipboard is not a vaild .leo file"
g.es_print("\n" + message, color="red")
g.es_print(g.toUnicode(e))
print("")
# #1510: Return a tuple.
return None, None
g_element = xroot.find("globals")
v_elements = xroot.find("vnodes")
t_elements = xroot.find("tnodes")
gnx2body, gnx2ua = self.scanTnodes(t_elements)
hidden_v = self.scanVnodes(gnx2body, self.gnx2vnode, gnx2ua, v_elements)
self.handleBits()
return hidden_v, g_element
|
def readWithElementTree(self, path, s):
contents = g.toUnicode(s)
s = contents.translate(self.translate_table)
# Fix #1036 and #1046.
try:
xroot = ElementTree.fromstring(contents)
except Exception as e:
# #970: Just report failure here.
if path:
message = f"bad .leo file: {g.shortFileName(path)}"
else:
message = "The clipboard is not a vaild .leo file"
g.es_print("\n" + message, color="red")
g.es_print(g.toUnicode(e))
print("")
# #1510: Return a tuple.
return None, None
g_element = xroot.find("globals")
v_elements = xroot.find("vnodes")
t_elements = xroot.find("tnodes")
gnx2body, gnx2ua = self.scanTnodes(t_elements)
hidden_v = self.scanVnodes(gnx2body, self.gnx2vnode, gnx2ua, v_elements)
self.handleBits()
return hidden_v, g_element
|
https://github.com/leo-editor/leo-editor/issues/1510
|
bad .leo file: WORK.leo
g.toUnicode: unexpected argument of type ParseError
toUnicode openLeoFile,getLeoFile,readFile,readWithElementTree
Traceback (most recent call last):
File "E:\git\leo\leo\core\leoCommands.py", line 2278, in executeAnyCommand
return command(event)
File "E:\git\leo\leo\core\leoGlobals.py", line 240, in commander_command_wrapper
method(event=event)
File "E:\git\leo\leo\commands\commanderFileCommands.py", line 261, in open_outline
open_completer(c, closeFlag, fileName)
File "E:\git\leo\leo\commands\commanderFileCommands.py", line 204, in open_completer
c2 = g.openWithFileName(fileName, old_c=c)
File "E:\git\leo\leo\core\leoGlobals.py", line 3920, in openWithFileName
return g.app.loadManager.loadLocalFile(fileName, gui, old_c)
File "E:\git\leo\leo\core\leoApp.py", line 3288, in loadLocalFile
previousSettings = lm.getPreviousSettings(fn)
File "E:\git\leo\leo\core\leoApp.py", line 2241, in getPreviousSettings
c = lm.openSettingsFile(fn)
File "E:\git\leo\leo\core\leoApp.py", line 2412, in openSettingsFile
ok = c.fileCommands.openLeoFile(theFile, fn,
File "E:\git\leo\leo\core\leoFileCommands.py", line 692, in openLeoFile
ok, ratio = self.getLeoFile(
File "E:\git\leo\leo\core\leoFileCommands.py", line 559, in getLeoFile
v = FastRead(c, self.gnxDict).readFile(fileName)
File "E:\git\leo\leo\core\leoFileCommands.py", line 60, in readFile
v, g_element = self.readWithElementTree(path, s)
TypeError: cannot unpack non-iterable NoneType object
Traceback (most recent call last):
File "E:\git\leo\leo\plugins\qt_frame.py", line 1638, in closeEvent
g.app.gui.close_event(event)
AttributeError: 'NullGui' object has no attribute 'close_event'
|
TypeError
|
def checkForOpenFile(self, c, fn):
"""Warn if fn is already open and add fn to already_open_files list."""
d, tag = g.app.db, "open-leo-files"
if g.app.reverting:
# #302: revert to saved doesn't reset external file change monitoring
g.app.already_open_files = []
if (
d is None
or g.app.unitTesting
or g.app.batchMode
or g.app.reverting
or g.app.inBridge
):
return
# #1519: check os.path.exists.
aList = g.app.db.get(tag) or []
if [x for x in aList if os.path.exists(x) and os.path.samefile(x, fn)]:
# The file may be open in another copy of Leo, or not:
# another Leo may have been killed prematurely.
# Put the file on the global list.
# A dialog will warn the user such files later.
if fn not in g.app.already_open_files:
g.es("may be open in another Leo:", color="red")
g.es(fn)
g.app.already_open_files.append(fn)
else:
g.app.rememberOpenFile(fn)
|
def checkForOpenFile(self, c, fn):
"""Warn if fn is already open and add fn to already_open_files list."""
d, tag = g.app.db, "open-leo-files"
if g.app.reverting:
# Fix #302: revert to saved doesn't reset external file change monitoring
g.app.already_open_files = []
if (
d is None
or g.app.unitTesting
or g.app.batchMode
or g.app.reverting
or g.app.inBridge
):
return
aList = g.app.db.get(tag) or []
if [x for x in aList if os.path.samefile(x, fn)]:
# The file may be open in another copy of Leo, or not:
# another Leo may have been killed prematurely.
# Put the file on the global list.
# A dialog will warn the user such files later.
if fn not in g.app.already_open_files:
g.es("may be open in another Leo:", color="red")
g.es(fn)
g.app.already_open_files.append(fn)
else:
g.app.rememberOpenFile(fn)
|
https://github.com/leo-editor/leo-editor/issues/1519
|
(base) c:\leo.repo\leo-editor>python c:\leo.repo\leo-editor\launchLeo.py --gui=qttabs --gui=qttabs leo\core\leoPy.leo leo\plugins\leoPlugins.leo
Leo 6.2-b1-devel, devel branch, build 5da736ea4d
2020-02-29 06:19:36 -0600
Unexpected exception reading 'c:/leo.repo/leo-editor/leo/core/leoPy.leo'
Traceback (most recent call last):
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 2575, in doPostPluginsInit
c = lm.loadLocalFile(fn, gui=g.app.gui, old_c=None)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3297, in loadLocalFile
c = lm.openFileByName(fn, gui, old_c, previousSettings)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3335, in openFileByName
ok = lm.readOpenedLeoFile(c, fn, readAtFileNodesFlag, theFile)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3505, in readOpenedLeoFile
readAtFileNodesFlag=readAtFileNodesFlag)
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 695, in openLeoFile
silent=silent,
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 553, in getLeoFile
g.app.checkForOpenFile(c, fileName)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in checkForOpenFile
if [x for x in aList if os.path.samefile(x, fn)]:
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in <listcomp>
if [x for x in aList if os.path.samefile(x, fn)]:
File "C:\Users\edreamleo\Anaconda3\lib\genericpath.py", line 96, in samefile
s1 = os.stat(f1)
FileNotFoundError: [WinError 2] The system cannot find the file specified: 'C:/Users/edreamleo/Desktop/leo_unittest_position.leo'
Can not create empty workbook
Traceback (most recent call last):
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 2602, in doPostPluginsInit
c1 = lm.openEmptyWorkBook()
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 2645, in openEmptyWorkBook
c = lm.loadLocalFile(fn, gui=g.app.gui, old_c=None)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3297, in loadLocalFile
c = lm.openFileByName(fn, gui, old_c, previousSettings)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3335, in openFileByName
ok = lm.readOpenedLeoFile(c, fn, readAtFileNodesFlag, theFile)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3505, in readOpenedLeoFile
readAtFileNodesFlag=readAtFileNodesFlag)
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 695, in openLeoFile
silent=silent,
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 553, in getLeoFile
g.app.checkForOpenFile(c, fileName)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in checkForOpenFile
if [x for x in aList if os.path.samefile(x, fn)]:
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in <listcomp>
if [x for x in aList if os.path.samefile(x, fn)]:
File "C:\Users\edreamleo\Anaconda3\lib\genericpath.py", line 96, in samefile
s1 = os.stat(f1)
FileNotFoundError: [WinError 2] The system cannot find the file specified: 'C:/Users/edreamleo/Desktop/leo_unittest_position.leo'
|
FileNotFoundError
|
def writePathChanged(self, p):
"""
raise IOError if p's path has changed *and* user forbids the write.
"""
at, c = self, self.c
#
# Suppress this message during save-as and save-to commands.
if c.ignoreChangedPaths:
return
oldPath = g.os_path_normcase(at.getPathUa(p))
newPath = g.os_path_normcase(g.fullPath(c, p))
try: # #1367: samefile can throw an exception.
changed = oldPath and not os.path.samefile(oldPath, newPath)
except Exception:
changed = True
if not changed:
return
ok = at.promptForDangerousWrite(
fileName=None,
message=(
f"{g.tr('path changed for %s' % (p.h))}\n{g.tr('write this file anyway?')}"
),
)
if not ok:
raise IOError
at.setPathUa(p, newPath) # Remember that we have changed paths.
|
def writePathChanged(self, p):
"""
raise IOError if p's path has changed *and* user forbids the write.
"""
at, c = self, self.c
#
# Suppress this message during save-as and save-to commands.
if c.ignoreChangedPaths:
return
oldPath = g.os_path_normcase(at.getPathUa(p))
newPath = g.os_path_normcase(g.fullPath(c, p))
try: # #1367: samefile can throw IOError!
changed = oldPath and not os.path.samefile(oldPath, newPath)
except IOError:
changed = True
if not changed:
return
ok = at.promptForDangerousWrite(
fileName=None,
message=(
f"{g.tr('path changed for %s' % (p.h))}\n{g.tr('write this file anyway?')}"
),
)
if not ok:
raise IOError
at.setPathUa(p, newPath) # Remember that we have changed paths.
|
https://github.com/leo-editor/leo-editor/issues/1519
|
(base) c:\leo.repo\leo-editor>python c:\leo.repo\leo-editor\launchLeo.py --gui=qttabs --gui=qttabs leo\core\leoPy.leo leo\plugins\leoPlugins.leo
Leo 6.2-b1-devel, devel branch, build 5da736ea4d
2020-02-29 06:19:36 -0600
Unexpected exception reading 'c:/leo.repo/leo-editor/leo/core/leoPy.leo'
Traceback (most recent call last):
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 2575, in doPostPluginsInit
c = lm.loadLocalFile(fn, gui=g.app.gui, old_c=None)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3297, in loadLocalFile
c = lm.openFileByName(fn, gui, old_c, previousSettings)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3335, in openFileByName
ok = lm.readOpenedLeoFile(c, fn, readAtFileNodesFlag, theFile)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3505, in readOpenedLeoFile
readAtFileNodesFlag=readAtFileNodesFlag)
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 695, in openLeoFile
silent=silent,
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 553, in getLeoFile
g.app.checkForOpenFile(c, fileName)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in checkForOpenFile
if [x for x in aList if os.path.samefile(x, fn)]:
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in <listcomp>
if [x for x in aList if os.path.samefile(x, fn)]:
File "C:\Users\edreamleo\Anaconda3\lib\genericpath.py", line 96, in samefile
s1 = os.stat(f1)
FileNotFoundError: [WinError 2] The system cannot find the file specified: 'C:/Users/edreamleo/Desktop/leo_unittest_position.leo'
Can not create empty workbook
Traceback (most recent call last):
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 2602, in doPostPluginsInit
c1 = lm.openEmptyWorkBook()
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 2645, in openEmptyWorkBook
c = lm.loadLocalFile(fn, gui=g.app.gui, old_c=None)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3297, in loadLocalFile
c = lm.openFileByName(fn, gui, old_c, previousSettings)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3335, in openFileByName
ok = lm.readOpenedLeoFile(c, fn, readAtFileNodesFlag, theFile)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3505, in readOpenedLeoFile
readAtFileNodesFlag=readAtFileNodesFlag)
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 695, in openLeoFile
silent=silent,
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 553, in getLeoFile
g.app.checkForOpenFile(c, fileName)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in checkForOpenFile
if [x for x in aList if os.path.samefile(x, fn)]:
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in <listcomp>
if [x for x in aList if os.path.samefile(x, fn)]:
File "C:\Users\edreamleo\Anaconda3\lib\genericpath.py", line 96, in samefile
s1 = os.stat(f1)
FileNotFoundError: [WinError 2] The system cannot find the file specified: 'C:/Users/edreamleo/Desktop/leo_unittest_position.leo'
|
FileNotFoundError
|
def shouldPromptForDangerousWrite(self, fn, p):
"""
Return True if Leo should warn the user that p is an @<file> node that
was not read during startup. Writing that file might cause data loss.
See #50: https://github.com/leo-editor/leo-editor/issues/50
"""
trace = "save" in g.app.debug
sfn = g.shortFileName(fn)
c = self.c
efc = g.app.externalFilesController
if p.isAtNoSentFileNode():
# #1450.
# No danger of overwriting a file.
# It was never read.
return False
if not g.os_path_exists(fn):
# No danger of overwriting fn.
if trace:
g.trace("Return False: does not exist:", sfn)
return False
# #1347: Prompt if the external file is newer.
if efc:
# Like c.checkFileTimeStamp.
if c.sqlite_connection and c.mFileName == fn:
# sqlite database file is never actually overwriten by Leo,
# so do *not* check its timestamp.
pass
elif efc.has_changed(c, fn):
if trace:
g.trace("Return True: changed:", sfn)
return True
if hasattr(p.v, "at_read"):
# Fix bug #50: body text lost switching @file to @auto-rst
d = p.v.at_read
for k in d:
# Fix bug # #1469: make sure k still exists.
if os.path.exists(k) and os.path.samefile(k, fn) and p.h in d.get(k, set()):
d[fn] = d[k]
if trace:
g.trace("Return False: in p.v.at_read:", sfn)
return False
aSet = d.get(fn, set())
if trace:
g.trace(f"Return {p.h not in aSet()}: p.h not in aSet(): {sfn}")
return p.h not in aSet
if trace:
g.trace("Return True: never read:", sfn)
return True # The file was never read.
|
def shouldPromptForDangerousWrite(self, fn, p):
"""
Return True if Leo should warn the user that p is an @<file> node that
was not read during startup. Writing that file might cause data loss.
See #50: https://github.com/leo-editor/leo-editor/issues/50
"""
trace = "save" in g.app.debug
sfn = g.shortFileName(fn)
c = self.c
efc = g.app.externalFilesController
if p.isAtNoSentFileNode():
# #1450.
# No danger of overwriting a file.
# It was never read.
return False
if not g.os_path_exists(fn):
# No danger of overwriting fn.
if trace:
g.trace("Return False: does not exist:", sfn)
return False
# #1347: Prompt if the external file is newer.
if efc:
# Like c.checkFileTimeStamp.
if c.sqlite_connection and c.mFileName == fn:
# sqlite database file is never actually overwriten by Leo,
# so do *not* check its timestamp.
pass
elif efc.has_changed(c, fn):
if trace:
g.trace("Return True: changed:", sfn)
return True
if hasattr(p.v, "at_read"):
# Fix bug #50: body text lost switching @file to @auto-rst
d = p.v.at_read
for k in d:
# Fix bug # #1469: make sure k still exists.
if os.path.exists(k) and os.path.samefile(k, fn) and p.h in d.get(k, set()):
d[fn] = d[k]
if trace:
g.trace("Return False: in p.v.at_read:", sfn)
return False
aSet = d.get(fn, set())
if trace:
g.trace(f"Return {p.h not in aSet()}: p.h not in aSet(): {sfn}")
return p.h not in aSet
if trace:
g.trace("Return True: never read:", sfn)
return True # The file was never read.
|
https://github.com/leo-editor/leo-editor/issues/1519
|
(base) c:\leo.repo\leo-editor>python c:\leo.repo\leo-editor\launchLeo.py --gui=qttabs --gui=qttabs leo\core\leoPy.leo leo\plugins\leoPlugins.leo
Leo 6.2-b1-devel, devel branch, build 5da736ea4d
2020-02-29 06:19:36 -0600
Unexpected exception reading 'c:/leo.repo/leo-editor/leo/core/leoPy.leo'
Traceback (most recent call last):
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 2575, in doPostPluginsInit
c = lm.loadLocalFile(fn, gui=g.app.gui, old_c=None)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3297, in loadLocalFile
c = lm.openFileByName(fn, gui, old_c, previousSettings)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3335, in openFileByName
ok = lm.readOpenedLeoFile(c, fn, readAtFileNodesFlag, theFile)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3505, in readOpenedLeoFile
readAtFileNodesFlag=readAtFileNodesFlag)
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 695, in openLeoFile
silent=silent,
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 553, in getLeoFile
g.app.checkForOpenFile(c, fileName)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in checkForOpenFile
if [x for x in aList if os.path.samefile(x, fn)]:
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in <listcomp>
if [x for x in aList if os.path.samefile(x, fn)]:
File "C:\Users\edreamleo\Anaconda3\lib\genericpath.py", line 96, in samefile
s1 = os.stat(f1)
FileNotFoundError: [WinError 2] The system cannot find the file specified: 'C:/Users/edreamleo/Desktop/leo_unittest_position.leo'
Can not create empty workbook
Traceback (most recent call last):
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 2602, in doPostPluginsInit
c1 = lm.openEmptyWorkBook()
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 2645, in openEmptyWorkBook
c = lm.loadLocalFile(fn, gui=g.app.gui, old_c=None)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3297, in loadLocalFile
c = lm.openFileByName(fn, gui, old_c, previousSettings)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3335, in openFileByName
ok = lm.readOpenedLeoFile(c, fn, readAtFileNodesFlag, theFile)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 3505, in readOpenedLeoFile
readAtFileNodesFlag=readAtFileNodesFlag)
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 695, in openLeoFile
silent=silent,
File "c:\leo.repo\leo-editor\leo\core\leoFileCommands.py", line 553, in getLeoFile
g.app.checkForOpenFile(c, fileName)
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in checkForOpenFile
if [x for x in aList if os.path.samefile(x, fn)]:
File "c:\leo.repo\leo-editor\leo\core\leoApp.py", line 1476, in <listcomp>
if [x for x in aList if os.path.samefile(x, fn)]:
File "C:\Users\edreamleo\Anaconda3\lib\genericpath.py", line 96, in samefile
s1 = os.stat(f1)
FileNotFoundError: [WinError 2] The system cannot find the file specified: 'C:/Users/edreamleo/Desktop/leo_unittest_position.leo'
|
FileNotFoundError
|
def writeAsisNode(self, p):
"""Write the p's node to an @asis file."""
at = self
def put(s):
"""Append s to self.output_list."""
# #1480: Avoid calling at.os().
s = g.toUnicode(s, at.encoding, reportErrors=True)
at.outputList.append(s)
# Write the headline only if it starts with '@@'.
s = p.h
if g.match(s, 0, "@@"):
s = s[2:]
if s:
put("\n") # Experimental.
put(s)
put("\n")
# Write the body.
s = p.b
if s:
put(s)
|
def writeAsisNode(self, p):
"""Write the p's node to an @asis file."""
at = self
# Write the headline only if it starts with '@@'.
s = p.h
if g.match(s, 0, "@@"):
s = s[2:]
if s:
at.outputFile.write(s)
# Write the body.
s = p.b
if s:
s = g.toEncodedString(s, at.encoding, reportErrors=True)
at.outputStringWithLineEndings(s)
|
https://github.com/leo-editor/leo-editor/issues/1480
|
exception writing: C:/Users/jrhut/Documents/Active/My Maps/_foo.txt
Traceback (most recent call last):
File "C:\cygwin64\home\jrhut\src\leo-editor\leo-editor\leo\core\leoAtFile.py", line 1291, in asisWrite
at.writeAsisNode(p)
File "C:\cygwin64\home\jrhut\src\leo-editor\leo-editor\leo\core\leoAtFile.py", line 1307, in writeAsisNode
at.outputFile.write(s)
AttributeError: 'AtFile' object has no attribute 'outputFile'
|
AttributeError
|
def os(self, s):
"""
Append a string to at.outputList.
All output produced by leoAtFile module goes here.
"""
at = self
if s.startswith(self.underindentEscapeString):
try:
junk, s = at.parseUnderindentTag(s)
except Exception:
at.exception("exception writing:" + s)
return
s = g.toUnicode(s, at.encoding)
at.outputList.append(s)
|
def os(self, s):
"""
Write a string to the output file or stream.
All output produced by leoAtFile module goes here.
"""
at = self
if s.startswith(self.underindentEscapeString):
try:
junk, s = at.parseUnderindentTag(s)
except Exception:
at.exception("exception writing:" + s)
return
s = g.toUnicode(s, at.encoding)
at.outputList.append(s)
|
https://github.com/leo-editor/leo-editor/issues/1480
|
exception writing: C:/Users/jrhut/Documents/Active/My Maps/_foo.txt
Traceback (most recent call last):
File "C:\cygwin64\home\jrhut\src\leo-editor\leo-editor\leo\core\leoAtFile.py", line 1291, in asisWrite
at.writeAsisNode(p)
File "C:\cygwin64\home\jrhut\src\leo-editor\leo-editor\leo\core\leoAtFile.py", line 1307, in writeAsisNode
at.outputFile.write(s)
AttributeError: 'AtFile' object has no attribute 'outputFile'
|
AttributeError
|
def shouldPromptForDangerousWrite(self, fn, p):
"""
Return True if Leo should warn the user that p is an @<file> node that
was not read during startup. Writing that file might cause data loss.
See #50: https://github.com/leo-editor/leo-editor/issues/50
"""
trace = "save" in g.app.debug
sfn = g.shortFileName(fn)
c = self.c
efc = g.app.externalFilesController
if p.isAtNoSentFileNode():
# #1450.
# No danger of overwriting a file.
# It was never read.
return False
if not g.os_path_exists(fn):
# No danger of overwriting fn.
if trace:
g.trace("Return False: does not exist:", sfn)
return False
# #1347: Prompt if the external file is newer.
if efc:
# Like c.checkFileTimeStamp.
if c.sqlite_connection and c.mFileName == fn:
# sqlite database file is never actually overwriten by Leo,
# so do *not* check its timestamp.
pass
elif efc.has_changed(c, fn):
if trace:
g.trace("Return True: changed:", sfn)
return True
if hasattr(p.v, "at_read"):
# Fix bug #50: body text lost switching @file to @auto-rst
d = p.v.at_read
for k in d:
# Fix bug # #1469: make sure k still exists.
if os.path.exists(k) and os.path.samefile(k, fn) and p.h in d.get(k, set()):
d[fn] = d[k]
if trace:
g.trace("Return False: in p.v.at_read:", sfn)
return False
aSet = d.get(fn, set())
if trace:
g.trace(f"Return {p.h not in aSet()}: p.h not in aSet(): {sfn}")
return p.h not in aSet
if trace:
g.trace("Return True: never read:", sfn)
return True
|
def shouldPromptForDangerousWrite(self, fn, p):
"""
Return True if Leo should warn the user that p is an @<file> node that
was not read during startup. Writing that file might cause data loss.
See #50: https://github.com/leo-editor/leo-editor/issues/50
"""
trace = "save" in g.app.debug
sfn = g.shortFileName(fn)
c = self.c
efc = g.app.externalFilesController
if p.isAtNoSentFileNode():
# #1450.
# No danger of overwriting a file.
# It was never read.
return False
if not g.os_path_exists(fn):
# No danger of overwriting fn.
if trace:
g.trace("Return False: does not exist:", sfn)
return False
# #1347: Prompt if the external file is newer.
if efc:
# Like c.checkFileTimeStamp.
if c.sqlite_connection and c.mFileName == fn:
# sqlite database file is never actually overwriten by Leo,
# so do *not* check its timestamp.
pass
elif efc.has_changed(c, fn):
if trace:
g.trace("Return True: changed:", sfn)
return True
if hasattr(p.v, "at_read"):
# Fix bug #50: body text lost switching @file to @auto-rst
d = p.v.at_read
for k in d:
if os.path.samefile(k, fn) and p.h in d.get(k, set()):
d[fn] = d[k]
if trace:
g.trace("Return False: in p.v.at_read:", sfn)
return False
aSet = d.get(fn, set())
if trace:
g.trace(f"Return {p.h not in aSet()}: p.h not in aSet(): {sfn}")
return p.h not in aSet
if trace:
g.trace("Return True: never read:", sfn)
return True
|
https://github.com/leo-editor/leo-editor/issues/1469
|
exception writing: /tmp/pytest-of-btheado/pytest-85/test_save_after_external_file_0/1_renamed
Traceback (most recent call last):
File "/home/btheado/src/leo-editor/leo/core/leoAtFile.py", line 1323, in write
if not fileName or not at.precheck(fileName, root):
File "/home/btheado/src/leo-editor/leo/core/leoAtFile.py", line 2524, in precheck
if not at.shouldPromptForDangerousWrite(fileName, root):
File "/home/btheado/src/leo-editor/leo/core/leoAtFile.py", line 3085, in shouldPromptForDangerousWrite
if os.path.samefile(k, fn) and p.h in d.get(k, set()):
File "/usr/lib/python3.7/genericpath.py", line 100, in samefile
s1 = os.stat(f1)
FileNotFoundError: [Errno 2] No such file or directory: '/tmp/pytest-of-btheado/pytest-85/test_save_after_external_file_0/1'
|
FileNotFoundError
|
def bridge(self):
"""Return an instance of Leo's bridge."""
import leo.core.leoBridge as leoBridge
return leoBridge.controller(
gui="nullGui",
loadPlugins=False,
readSettings=False,
silent=True,
verbose=False,
)
|
def bridge(self):
import leo.core.leoBridge as leoBridge
return leoBridge.controller(
gui="nullGui",
loadPlugins=False,
readSettings=False,
silent=False,
verbose=False,
)
|
https://github.com/leo-editor/leo-editor/issues/1469
|
exception writing: /tmp/pytest-of-btheado/pytest-85/test_save_after_external_file_0/1_renamed
Traceback (most recent call last):
File "/home/btheado/src/leo-editor/leo/core/leoAtFile.py", line 1323, in write
if not fileName or not at.precheck(fileName, root):
File "/home/btheado/src/leo-editor/leo/core/leoAtFile.py", line 2524, in precheck
if not at.shouldPromptForDangerousWrite(fileName, root):
File "/home/btheado/src/leo-editor/leo/core/leoAtFile.py", line 3085, in shouldPromptForDangerousWrite
if os.path.samefile(k, fn) and p.h in d.get(k, set()):
File "/usr/lib/python3.7/genericpath.py", line 100, in samefile
s1 = os.stat(f1)
FileNotFoundError: [Errno 2] No such file or directory: '/tmp/pytest-of-btheado/pytest-85/test_save_after_external_file_0/1'
|
FileNotFoundError
|
def create(self, fn):
"""Create the given file with empty contents."""
theDir = g.os_path_dirname(fn)
# Make the directories as needed.
ok = g.makeAllNonExistentDirectories(theDir, c=self.c, force=True, verbose=True)
# #1453: Don't assume the directory exists.
if not ok:
g.error(f"did not create: {theDir}")
return
try:
f = open(fn, mode="wb")
f.close()
g.note(f"created: {fn}")
except IOError:
g.error(f"can not create: {fn}")
except Exception:
g.error(f"unexpected error creating: {fn}")
g.es_exception()
|
def create(self, fn):
"""Create the given file with empty contents."""
theDir = g.os_path_dirname(fn)
g.makeAllNonExistentDirectories(theDir, c=self.c, force=True, verbose=True)
# Make the directories as needed.
try:
f = open(fn, mode="wb")
f.close()
g.note(f"created: {fn}")
except IOError:
g.error(f"can not create: {fn}")
except Exception:
g.error(f"unexpected error creating: {fn}")
g.es_exception()
|
https://github.com/leo-editor/leo-editor/issues/1453
|
Leo Log Window
Leo 6.1-final, master branch, build b80e074204
2019-11-08 12:36:03 -0600
Python 3.8.0, PyQt version 5.13.0
Windows 10 AMD64 (build 10.0.17763) SP0
created directory: C:/Users/C050536/.leo
leoID='metaperl'
.leoID.txt created in C:/Users/C050536/.leo
current dir: C:/bin/leo-editor
load dir: C:/bin/leo-editor/leo/core
global config dir: C:/bin/leo-editor/leo/config
home dir: C:/Users/C050536
reading settings in C:/bin/leo-editor/leo/config/leoSettings.leo
Do spellpyx.txt file found
unexpected error creating: None
Traceback (most recent call last):
File "C:\bin\leo-editor\leo\commands\spellCommands.py", line 56, in create
f = open(fn, mode='wb')
TypeError: expected str, bytes or os.PathLike object, not NoneType
reading settings in C:/bin/leo-editor/leo/doc/CheatSheet.leo
|
TypeError
|
def computeHomeLeoDir(self):
# lm = self
homeLeoDir = g.os_path_finalize_join(g.app.homeDir, ".leo")
if g.os_path_exists(homeLeoDir):
return homeLeoDir
ok = g.makeAllNonExistentDirectories(homeLeoDir, force=True)
return homeLeoDir if ok else "" # #1450
|
def computeHomeLeoDir(self):
# lm = self
homeLeoDir = g.os_path_finalize_join(g.app.homeDir, ".leo")
if not g.os_path_exists(homeLeoDir):
g.makeAllNonExistentDirectories(homeLeoDir, force=True)
return homeLeoDir
|
https://github.com/leo-editor/leo-editor/issues/1453
|
Leo Log Window
Leo 6.1-final, master branch, build b80e074204
2019-11-08 12:36:03 -0600
Python 3.8.0, PyQt version 5.13.0
Windows 10 AMD64 (build 10.0.17763) SP0
created directory: C:/Users/C050536/.leo
leoID='metaperl'
.leoID.txt created in C:/Users/C050536/.leo
current dir: C:/bin/leo-editor
load dir: C:/bin/leo-editor/leo/core
global config dir: C:/bin/leo-editor/leo/config
home dir: C:/Users/C050536
reading settings in C:/bin/leo-editor/leo/config/leoSettings.leo
Do spellpyx.txt file found
unexpected error creating: None
Traceback (most recent call last):
File "C:\bin\leo-editor\leo\commands\spellCommands.py", line 56, in create
f = open(fn, mode='wb')
TypeError: expected str, bytes or os.PathLike object, not NoneType
reading settings in C:/bin/leo-editor/leo/doc/CheatSheet.leo
|
TypeError
|
def makeAllNonExistentDirectories(theDir, c=None, force=False, verbose=True):
"""
Attempt to make all non-existent directories.
A wrapper from os.makedirs (new in Python 3.2).
"""
if force:
create = True # Bug fix: g.app.config will not exist during startup.
elif c:
create = c.config and c.config.create_nonexistent_directories
else:
create = g.app and g.app.config and g.app.config.create_nonexistent_directories
if c:
theDir = c.expand_path_expression(theDir)
#
# Return True if the directory already exists.
dir1 = theDir = g.os_path_normpath(theDir)
exists = g.os_path_isdir(dir1) and g.os_path_exists(dir1)
if exists:
return True
#
# Return False if we aren't forcing the create.
if not force and not create:
return False
#
# Just use os.makedirs.
try:
os.makedirs(theDir, mode=0o777, exist_ok=False)
return True
except Exception:
return False
|
def makeAllNonExistentDirectories(theDir, c=None, force=False, verbose=True):
"""Attempt to make all non-existent directories"""
testing = False # True: don't actually make the directories.
if force:
create = True # Bug fix: g.app.config will not exist during startup.
elif c:
create = c.config and c.config.create_nonexistent_directories
else:
create = g.app and g.app.config and g.app.config.create_nonexistent_directories
if c:
theDir = c.expand_path_expression(theDir)
dir1 = theDir = g.os_path_normpath(theDir)
ok = g.os_path_isdir(dir1) and g.os_path_exists(dir1)
if ok:
return ok
if not force and not create:
return False
# Split theDir into all its component parts.
paths = []
while theDir:
head, tail = g.os_path_split(theDir)
if tail:
paths.append(tail)
theDir = head
else:
paths.append(head)
break
path = ""
paths.reverse()
for s in paths:
path = g.os_path_finalize_join(path, s)
if not g.os_path_exists(path):
try:
if testing:
g.trace("***making", path)
else:
os.mkdir(path)
if verbose and not testing and not g.app.unitTesting:
g.red("created directory:", path)
except Exception:
if verbose:
g.error("exception creating directory:", path)
g.es_exception()
return None
return dir1 # All have been created.
|
https://github.com/leo-editor/leo-editor/issues/1453
|
Leo Log Window
Leo 6.1-final, master branch, build b80e074204
2019-11-08 12:36:03 -0600
Python 3.8.0, PyQt version 5.13.0
Windows 10 AMD64 (build 10.0.17763) SP0
created directory: C:/Users/C050536/.leo
leoID='metaperl'
.leoID.txt created in C:/Users/C050536/.leo
current dir: C:/bin/leo-editor
load dir: C:/bin/leo-editor/leo/core
global config dir: C:/bin/leo-editor/leo/config
home dir: C:/Users/C050536
reading settings in C:/bin/leo-editor/leo/config/leoSettings.leo
Do spellpyx.txt file found
unexpected error creating: None
Traceback (most recent call last):
File "C:\bin\leo-editor\leo\commands\spellCommands.py", line 56, in create
f = open(fn, mode='wb')
TypeError: expected str, bytes or os.PathLike object, not NoneType
reading settings in C:/bin/leo-editor/leo/doc/CheatSheet.leo
|
TypeError
|
def update_file_if_changed(c, file_name, temp_name):
"""Compares two files.
If they are different, we replace file_name with temp_name.
Otherwise, we just delete temp_name. Both files should be closed."""
if g.os_path_exists(file_name):
if filecmp.cmp(temp_name, file_name):
kind = "unchanged"
ok = g.utils_remove(temp_name)
else:
kind = "***updating"
mode = g.utils_stat(file_name)
ok = g.utils_rename(c, temp_name, file_name, mode)
else:
kind = "creating"
head, tail = g.os_path_split(file_name)
ok = True
if head:
ok = g.makeAllNonExistentDirectories(head, c=c)
if ok:
ok = g.utils_rename(c, temp_name, file_name)
if ok:
g.es("", f"{kind:12}: {file_name}")
else:
g.error("rename failed: no file created!")
g.es("", file_name, " may be read-only or in use")
|
def update_file_if_changed(c, file_name, temp_name):
"""Compares two files.
If they are different, we replace file_name with temp_name.
Otherwise, we just delete temp_name. Both files should be closed."""
if g.os_path_exists(file_name):
if filecmp.cmp(temp_name, file_name):
kind = "unchanged"
ok = g.utils_remove(temp_name)
else:
kind = "***updating"
mode = g.utils_stat(file_name)
ok = g.utils_rename(c, temp_name, file_name, mode)
else:
kind = "creating"
# 2010/02/04: g.utils_rename no longer calls
# makeAllNonExistentDirectories
head, tail = g.os_path_split(file_name)
ok = True
if head:
ok = g.makeAllNonExistentDirectories(head, c=c)
if ok:
ok = g.utils_rename(c, temp_name, file_name)
if ok:
g.es("", f"{kind:12}: {file_name}")
else:
g.error("rename failed: no file created!")
g.es("", file_name, " may be read-only or in use")
|
https://github.com/leo-editor/leo-editor/issues/1453
|
Leo Log Window
Leo 6.1-final, master branch, build b80e074204
2019-11-08 12:36:03 -0600
Python 3.8.0, PyQt version 5.13.0
Windows 10 AMD64 (build 10.0.17763) SP0
created directory: C:/Users/C050536/.leo
leoID='metaperl'
.leoID.txt created in C:/Users/C050536/.leo
current dir: C:/bin/leo-editor
load dir: C:/bin/leo-editor/leo/core
global config dir: C:/bin/leo-editor/leo/config
home dir: C:/Users/C050536
reading settings in C:/bin/leo-editor/leo/config/leoSettings.leo
Do spellpyx.txt file found
unexpected error creating: None
Traceback (most recent call last):
File "C:\bin\leo-editor\leo\commands\spellCommands.py", line 56, in create
f = open(fn, mode='wb')
TypeError: expected str, bytes or os.PathLike object, not NoneType
reading settings in C:/bin/leo-editor/leo/doc/CheatSheet.leo
|
TypeError
|
def utils_rename(c, src, dst, verbose=True):
"""Platform independent rename."""
# Don't call g.makeAllNonExistentDirectories here!
try:
shutil.move(src, dst)
return True
except Exception:
if verbose:
g.error("exception renaming", src, "to", dst)
g.es_exception(full=False)
return False
|
def utils_rename(c, src, dst, verbose=True):
"""Platform independent rename."""
# Don't call g.makeAllNonExistentDirectories.
# It's not right to do this here!!
# head, tail = g.os_path_split(dst)
# if head: g.makeAllNonExistentDirectories(head,c=c)
try:
shutil.move(src, dst)
return True
except Exception:
if verbose:
g.error("exception renaming", src, "to", dst)
g.es_exception(full=False)
return False
|
https://github.com/leo-editor/leo-editor/issues/1453
|
Leo Log Window
Leo 6.1-final, master branch, build b80e074204
2019-11-08 12:36:03 -0600
Python 3.8.0, PyQt version 5.13.0
Windows 10 AMD64 (build 10.0.17763) SP0
created directory: C:/Users/C050536/.leo
leoID='metaperl'
.leoID.txt created in C:/Users/C050536/.leo
current dir: C:/bin/leo-editor
load dir: C:/bin/leo-editor/leo/core
global config dir: C:/bin/leo-editor/leo/config
home dir: C:/Users/C050536
reading settings in C:/bin/leo-editor/leo/config/leoSettings.leo
Do spellpyx.txt file found
unexpected error creating: None
Traceback (most recent call last):
File "C:\bin\leo-editor\leo\commands\spellCommands.py", line 56, in create
f = open(fn, mode='wb')
TypeError: expected str, bytes or os.PathLike object, not NoneType
reading settings in C:/bin/leo-editor/leo/doc/CheatSheet.leo
|
TypeError
|
def create(self, fn):
"""Create the given file with empty contents."""
# Make the directories as needed.
theDir = g.os_path_dirname(fn)
if theDir:
ok = g.makeAllNonExistentDirectories(theDir, c=self.c, force=True, verbose=True)
# #1453: Don't assume the directory exists.
if not ok:
g.error(f"did not create directory: {theDir}")
return
# Create the file.
try:
f = open(fn, mode="wb")
f.close()
g.note(f"created: {fn}")
except IOError:
g.error(f"can not create: {fn}")
except Exception:
g.error(f"unexpected error creating: {fn}")
g.es_exception()
|
def create(self, fn):
"""Create the given file with empty contents."""
theDir = g.os_path_dirname(fn)
# Make the directories as needed.
ok = g.makeAllNonExistentDirectories(theDir, c=self.c, force=True, verbose=True)
# #1453: Don't assume the directory exists.
if not ok:
g.error(f"did not create: {theDir}")
return
try:
f = open(fn, mode="wb")
f.close()
g.note(f"created: {fn}")
except IOError:
g.error(f"can not create: {fn}")
except Exception:
g.error(f"unexpected error creating: {fn}")
g.es_exception()
|
https://github.com/leo-editor/leo-editor/issues/1453
|
Leo Log Window
Leo 6.1-final, master branch, build b80e074204
2019-11-08 12:36:03 -0600
Python 3.8.0, PyQt version 5.13.0
Windows 10 AMD64 (build 10.0.17763) SP0
created directory: C:/Users/C050536/.leo
leoID='metaperl'
.leoID.txt created in C:/Users/C050536/.leo
current dir: C:/bin/leo-editor
load dir: C:/bin/leo-editor/leo/core
global config dir: C:/bin/leo-editor/leo/config
home dir: C:/Users/C050536
reading settings in C:/bin/leo-editor/leo/config/leoSettings.leo
Do spellpyx.txt file found
unexpected error creating: None
Traceback (most recent call last):
File "C:\bin\leo-editor\leo\commands\spellCommands.py", line 56, in create
f = open(fn, mode='wb')
TypeError: expected str, bytes or os.PathLike object, not NoneType
reading settings in C:/bin/leo-editor/leo/doc/CheatSheet.leo
|
TypeError
|
def makeAllNonExistentDirectories(theDir, c=None, force=False, verbose=True):
"""
Attempt to make all non-existent directories.
Return the created directory, or None.
If c is given, support {{expressions}}.
A wrapper from os.makedirs (new in Python 3.2).
"""
if force:
# Bug fix: g.app.config will not exist during startup.
create = True
elif c:
create = c and c.config and c.config.create_nonexistent_directories
else:
create = g.app and g.app.config and g.app.config.create_nonexistent_directories
if c:
theDir = c.expand_path_expression(theDir)
#
# Return True if the directory already exists.
theDir = g.os_path_normpath(theDir)
exists = g.os_path_isdir(theDir) and g.os_path_exists(theDir)
if exists:
return theDir
#
# Return False if we aren't forcing the create.
if not force and not create:
return None
#
# #1450: Just use os.makedirs.
try:
os.makedirs(theDir, mode=0o777, exist_ok=False)
return theDir
except Exception:
return None
|
def makeAllNonExistentDirectories(theDir, c=None, force=False, verbose=True):
"""
Attempt to make all non-existent directories.
A wrapper from os.makedirs (new in Python 3.2).
"""
if force:
create = True # Bug fix: g.app.config will not exist during startup.
elif c:
create = c.config and c.config.create_nonexistent_directories
else:
create = g.app and g.app.config and g.app.config.create_nonexistent_directories
if c:
theDir = c.expand_path_expression(theDir)
#
# Return True if the directory already exists.
dir1 = theDir = g.os_path_normpath(theDir)
exists = g.os_path_isdir(dir1) and g.os_path_exists(dir1)
if exists:
return True
#
# Return False if we aren't forcing the create.
if not force and not create:
return False
#
# Just use os.makedirs.
try:
os.makedirs(theDir, mode=0o777, exist_ok=False)
return True
except Exception:
return False
|
https://github.com/leo-editor/leo-editor/issues/1453
|
Leo Log Window
Leo 6.1-final, master branch, build b80e074204
2019-11-08 12:36:03 -0600
Python 3.8.0, PyQt version 5.13.0
Windows 10 AMD64 (build 10.0.17763) SP0
created directory: C:/Users/C050536/.leo
leoID='metaperl'
.leoID.txt created in C:/Users/C050536/.leo
current dir: C:/bin/leo-editor
load dir: C:/bin/leo-editor/leo/core
global config dir: C:/bin/leo-editor/leo/config
home dir: C:/Users/C050536
reading settings in C:/bin/leo-editor/leo/config/leoSettings.leo
Do spellpyx.txt file found
unexpected error creating: None
Traceback (most recent call last):
File "C:\bin\leo-editor\leo\commands\spellCommands.py", line 56, in create
f = open(fn, mode='wb')
TypeError: expected str, bytes or os.PathLike object, not NoneType
reading settings in C:/bin/leo-editor/leo/doc/CheatSheet.leo
|
TypeError
|
def find_user_dict(self):
"""Return the full path to the local dictionary."""
c = self.c
join = g.os_path_finalize_join
table = (
c.config.getString("enchant-local-dictionary"),
# Settings first.
join(g.app.homeDir, ".leo", "spellpyx.txt"),
# #108: then the .leo directory.
join(g.app.loadDir, "..", "plugins", "spellpyx.txt"),
# The plugins directory as a last resort.
)
for path in table:
if g.os_path_exists(path):
return path
g.es_print("Creating ~/.leo/spellpyx.txt")
# #1453: Return the default path.
return join(g.app.homeDir, ".leo", "spellpyx.txt")
|
def find_user_dict(self):
"""Return the full path to the local dictionary."""
c = self.c
table = (
c.config.getString("enchant-local-dictionary"),
# Settings first.
g.os_path_finalize_join(g.app.homeDir, ".leo", "spellpyx.txt"),
# #108: then the .leo directory.
g.os_path_finalize_join(g.app.loadDir, "..", "plugins", "spellpyx.txt"),
# The plugins directory as a last resort.
)
for path in table:
if g.os_path_exists(path):
return path
#
g.es_print("Do spellpyx.txt file found")
return None
|
https://github.com/leo-editor/leo-editor/issues/1453
|
Leo Log Window
Leo 6.1-final, master branch, build b80e074204
2019-11-08 12:36:03 -0600
Python 3.8.0, PyQt version 5.13.0
Windows 10 AMD64 (build 10.0.17763) SP0
created directory: C:/Users/C050536/.leo
leoID='metaperl'
.leoID.txt created in C:/Users/C050536/.leo
current dir: C:/bin/leo-editor
load dir: C:/bin/leo-editor/leo/core
global config dir: C:/bin/leo-editor/leo/config
home dir: C:/Users/C050536
reading settings in C:/bin/leo-editor/leo/config/leoSettings.leo
Do spellpyx.txt file found
unexpected error creating: None
Traceback (most recent call last):
File "C:\bin\leo-editor\leo\commands\spellCommands.py", line 56, in create
f = open(fn, mode='wb')
TypeError: expected str, bytes or os.PathLike object, not NoneType
reading settings in C:/bin/leo-editor/leo/doc/CheatSheet.leo
|
TypeError
|
def write(self, root, sentinels=True):
"""Write a 4.x derived file.
root is the position of an @<file> node.
sentinels will be False for @clean and @nosent nodes.
"""
at, c = self, self.c
try:
c.endEditing()
fileName = at.initWriteIvars(
root, root.anyAtFileNodeName(), sentinels=sentinels
)
if not fileName or not at.precheck(fileName, root):
if sentinels:
# Raise dialog warning of data loss.
at.addToOrphanList(root)
else:
# #1450: No danger of data loss.
pass
return
at.openOutputStream()
at.putFile(root, sentinels=sentinels)
at.warnAboutOrphandAndIgnoredNodes()
contents = at.closeOutputStream()
if at.errors:
g.es("not written:", g.shortFileName(fileName))
at.addToOrphanList(root)
else:
at.replaceFile(contents, at.encoding, fileName, root)
except Exception:
if hasattr(self.root.v, "tnodeList"):
delattr(self.root.v, "tnodeList")
at.writeException(fileName, root)
|
def write(self, root, sentinels=True):
"""Write a 4.x derived file.
root is the position of an @<file> node.
sentinels will be False for @clean and @nosent nodes.
"""
at, c = self, self.c
try:
c.endEditing()
fileName = at.initWriteIvars(
root, root.anyAtFileNodeName(), sentinels=sentinels
)
# #1450.
if not fileName or not at.precheck(fileName, root):
at.addToOrphanList(root)
return
at.openOutputStream()
at.putFile(root, sentinels=sentinels)
at.warnAboutOrphandAndIgnoredNodes()
contents = at.closeOutputStream()
if at.errors:
g.es("not written:", g.shortFileName(fileName))
at.addToOrphanList(root)
else:
at.replaceFile(contents, at.encoding, fileName, root)
except Exception:
if hasattr(self.root.v, "tnodeList"):
delattr(self.root.v, "tnodeList")
at.writeException(fileName, root)
|
https://github.com/leo-editor/leo-editor/issues/1450
|
Traceback (most recent call last):
File ".../leo/core/leoGlobals.py", line 293, in new_cmd_wrapper
func(self, event=event)
File ".../leo/core/leoFileCommands.py", line 1827, in writeMissingAtFileNodes
c.atFileCommands.writeMissing(c.p)
File ".../leo/core/leoAtFile.py", line 1327, in writeMissing
at.default_directory = c.expand_path_expression(at.default_directory) # #1341.
AttributeError: 'AtFile' object has no attribute 'default_directory'
|
AttributeError
|
def shouldPromptForDangerousWrite(self, fn, p):
"""
Return True if Leo should warn the user that p is an @<file> node that
was not read during startup. Writing that file might cause data loss.
See #50: https://github.com/leo-editor/leo-editor/issues/50
"""
trace = "save" in g.app.debug
sfn = g.shortFileName(fn)
c = self.c
efc = g.app.externalFilesController
if p.isAtNoSentFileNode():
# #1450.
# No danger of overwriting a file.
# It was never read.
return False
if not g.os_path_exists(fn):
# No danger of overwriting fn.
if trace:
g.trace("Return False: does not exist:", sfn)
return False
# #1347: Prompt if the external file is newer.
if efc:
# Like c.checkFileTimeStamp.
if c.sqlite_connection and c.mFileName == fn:
# sqlite database file is never actually overwriten by Leo,
# so do *not* check its timestamp.
pass
elif efc.has_changed(c, fn):
if trace:
g.trace("Return True: changed:", sfn)
return True
if hasattr(p.v, "at_read"):
# Fix bug #50: body text lost switching @file to @auto-rst
d = p.v.at_read
for k in d:
if os.path.samefile(k, fn) and p.h in d.get(k, set()):
d[fn] = d[k]
if trace:
g.trace("Return False: in p.v.at_read:", sfn)
return False
aSet = d.get(fn, set())
if trace:
g.trace(f"Return {p.h not in aSet()}: p.h not in aSet(): {sfn}")
return p.h not in aSet
if trace:
g.trace("Return True: never read:", sfn)
return True
|
def shouldPromptForDangerousWrite(self, fn, p):
"""
Return True if Leo should warn the user that p is an @<file> node that
was not read during startup. Writing that file might cause data loss.
See #50: https://github.com/leo-editor/leo-editor/issues/50
"""
trace = "save" in g.app.debug
sfn = g.shortFileName(fn)
c = self.c
efc = g.app.externalFilesController
if not g.os_path_exists(fn):
# No danger of overwriting fn.
if trace:
g.trace("Return False: does not exist:", sfn)
return False
# #1347: Prompt if the external file is newer.
if efc:
# Like c.checkFileTimeStamp.
if c.sqlite_connection and c.mFileName == fn:
# sqlite database file is never actually overwriten by Leo,
# so do *not* check its timestamp.
pass
elif efc.has_changed(c, fn):
if trace:
g.trace("Return True: changed:", sfn)
return True
if hasattr(p.v, "at_read"):
# Fix bug #50: body text lost switching @file to @auto-rst
d = p.v.at_read
for k in d:
if os.path.samefile(k, fn) and p.h in d.get(k, set()):
d[fn] = d[k]
if trace:
g.trace("Return False: in p.v.at_read:", sfn)
return False
aSet = d.get(fn, set())
if trace:
g.trace(f"Return {p.h not in aSet()}: p.h not in aSet(): {sfn}")
return p.h not in aSet
if trace:
g.trace("Return True: never read:", sfn)
return True
|
https://github.com/leo-editor/leo-editor/issues/1450
|
Traceback (most recent call last):
File ".../leo/core/leoGlobals.py", line 293, in new_cmd_wrapper
func(self, event=event)
File ".../leo/core/leoFileCommands.py", line 1827, in writeMissingAtFileNodes
c.atFileCommands.writeMissing(c.p)
File ".../leo/core/leoAtFile.py", line 1327, in writeMissing
at.default_directory = c.expand_path_expression(at.default_directory) # #1341.
AttributeError: 'AtFile' object has no attribute 'default_directory'
|
AttributeError
|
def get(self, key, default=None):
if not self.has_key(key):
return default
try:
val = self[key]
return val
except Exception: # #1444: Was KeyError.
return default
|
def get(self, key, default=None):
if not self.has_key(key):
return default
try:
val = self[key]
return val
except KeyError:
return default
|
https://github.com/leo-editor/leo-editor/issues/1444
|
hook failed: after-create-leo-frame2, <bound method FreeLayoutController.loadLayouts of <leo.plugins.free_layout.FreeLayoutController object at 0x00000248F6C65548>>, <no module>
Traceback (most recent call last):
File "C:\Documents and Settings\tom\leo-editor\leo\core\leoPlugins.py", line 324, in callTagHandler
result = handler(tag, keywords)
File "C:\Documents and Settings\tom\leo-editor\leo\plugins\free_layout.py", line 213, in loadLayouts
d = g.app.db.get('ns_layouts') or {}
File "C:\Documents and Settings\tom\leo-editor\leo\core\leoCache.py", line 607, in get
val = self[key]
File "C:\Documents and Settings\tom\leo-editor\leo\core\leoCache.py", line 522, in __getitem__
obj = self.loader(row[0])
File "C:\Documents and Settings\tom\leo-editor\leo\core\leoCache.py", line 483, in loadz
val = pickle.loads(zlib.decompress(data))
ModuleNotFoundError: No module named 'PyQt4'
|
ModuleNotFoundError
|
def computeWorkbookFileName(self):
"""
Return full path to the workbook.
Return None if testing, or in batch mode, or if the containing
directory does not exist.
"""
# lm = self
# Never create a workbook during unit tests or in batch mode.
if g.unitTesting or g.app.batchMode:
return None
fn = g.app.config.getString(setting="default_leo_file") or "~/.leo/workbook.leo"
fn = g.os_path_finalize(fn)
directory = g.os_path_finalize(os.path.dirname(fn))
# #1415.
return fn if os.path.exists(directory) else None
|
def computeWorkbookFileName(self):
"""
Return the name of the workbook.
Return None *only* if:
1. The workbook does not exist.
2. We are unit testing or in batch mode.
"""
# lm = self
fn = g.app.config.getString(setting="default_leo_file")
# The default is ~/.leo/workbook.leo
if not fn:
fn = g.os_path_finalize("~/.leo/workbook.leo")
fn = g.os_path_finalize(fn)
if not fn:
return None
if g.os_path_exists(fn):
return fn
if g.unitTesting or g.app.batchMode:
# 2017/02/18: unit tests must not create a workbook.
# Neither should batch mode operation.
return None
if g.os_path_isabs(fn):
# Create the file.
g.error("Using default leo file name:\n%s" % (fn))
return fn
# It's too risky to open a default file if it is relative.
return None
|
https://github.com/leo-editor/leo-editor/issues/1415
|
unexpected exception loading session
Traceback (most recent call last):
File "c:\users\viktor\pyve\github\leo-dev-opt-b\devel\leo-editor-devel\leo\core\leoApp.py", line 2601, in doPostPluginsInit
g.app.sessionManager.load_session(c1, aList)
[...]
File "c:\users\viktor\pyve\github\leo-dev-opt-b\devel\leo-editor-devel\leo\core\leoApp.py", line 1478, in <listcomp>
if [x for x in aList if os.path.samefile(x, fn)]:
File "C:\EE\Python\lib\genericpath.py", line 96, in samefile
s1 = os.stat(f1)
FileNotFoundError: [WinError 3] Das System kann den angegebenen Pfad nicht finden: 'C:/users/viktor/pyve/ve4ileo/lib/site-packages/leo/doc/quickstart.leo'
|
FileNotFoundError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.