after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __init__(self, *args, **kwargs):
"""
Keywords:
filename (str) : a path to a Jupyter notebook (".ipynb") file
"""
nbformat = import_required(
"nbformat",
"The Bokeh notebook application handler requires Jupyter Notebook to be installed.",
)
nbconvert = import_required(
"nbconvert",
"The Bokeh notebook application handler requires Jupyter Notebook to be installed.",
)
if "filename" not in kwargs:
raise ValueError("Must pass a filename to NotebookHandler")
filename = kwargs["filename"]
with open(filename) as f:
nb = nbformat.read(f, nbformat.NO_CONVERT)
exporter = nbconvert.PythonExporter()
source, _ = exporter.from_notebook_node(nb)
kwargs["source"] = source
super(NotebookHandler, self).__init__(*args, **kwargs)
|
def __init__(self, *args, **kwargs):
"""
Keywords:
filename (str) : a path to a Jupyter notebook (".ipynb") file
"""
nbformat = import_required(
"nbformat",
"The Bokeh notebook application handler requires Jupyter Notebook to be installed.",
)
nbconvert = import_required(
"nbconvert",
"The Bokeh notebook application handler requires Jupyter Notebook to be installed.",
)
if "filename" not in kwargs:
raise ValueError("Must pass a filename to NotebookHandler")
filename = kwargs["filename"]
with open(filename) as f:
nb = nbformat.read(f, nbformat.NO_CONVERT)
exporter = nbconvert.PythonExporter()
source, meta = exporter.from_notebook_node(nb)
kwargs["source"] = source
super(NotebookHandler, self).__init__(*args, **kwargs)
|
https://github.com/bokeh/bokeh/issues/8034
|
(venv) $ bokeh serve myapp.py
2018-06-27 12:59:45,343 Starting Bokeh server version 0.13.0 (running on Tornado 5.0.1)
2018-06-27 12:59:45,345 Bokeh app running at: http://localhost:5006/myapp
2018-06-27 12:59:45,345 Starting Bokeh server with process id: 13979
0.5604607908584933
2018-06-27 13:00:06,274 200 GET /myapp (::1) 96.25ms
2018-06-27 13:00:06,437 101 GET /myapp/ws?bokeh-protocol-version=1.0&bokeh-session-id=OxhSy81ZBM8URWfk6wp7OTH7bTictF12u6pI8o7fkIUs (::1) 0.64ms
2018-06-27 13:00:06,437 WebSocket connection opened
2018-06-27 13:00:06,438 ServerConnection created
0.6452850662545722
0.2762922805351794
2018-06-27 13:00:11,120 Error running application handler <bokeh.application.handlers.script.ScriptHandler object at 0x7feae4861a58>: boom!
File "myapp.py", line 46, in <module>:
raise Exception('boom!') Traceback (most recent call last):
File "/home/matt/Clients/QEye/venv/lib/python3.5/site-packages/bokeh/application/handlers/code_runner.py", line 163, in run
exec(self._code, module.__dict__)
File "/home/matt/Clients/QEye/isolate/myapp.py", line 46, in <module>
raise Exception('boom!')
Exception: boom!
2018-06-27 13:00:11,125 200 GET /myapp (::1) 25.45ms
2018-06-27 13:00:11,132 WebSocket connection closed: code=1001, reason=None
2018-06-27 13:00:11,207 101 GET /myapp/ws?bokeh-protocol-version=1.0&bokeh-session-id=Y5IlTpoIxVs42drth0tKZonyuwGnUNM3zKK6OAG5eg6n (::1) 0.60ms
2018-06-27 13:00:11,208 WebSocket connection opened
2018-06-27 13:00:11,208 ServerConnection created
|
Exception
|
def bk_worker():
asyncio.set_event_loop(asyncio.new_event_loop())
bokeh_tornado = BokehTornado(
{"/bkapp": bkapp}, extra_websocket_origins=["localhost:8000"]
)
bokeh_http = HTTPServer(bokeh_tornado)
bokeh_http.add_sockets(sockets)
server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)
server.start()
server.io_loop.start()
|
def bk_worker():
io_loop = IOLoop.current()
server = BaseServer(io_loop, bokeh_tornado, bokeh_http)
server.start()
server.io_loop.start()
|
https://github.com/bokeh/bokeh/issues/7904
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/Users/bryanv/work/bokeh/examples/howto/server_embed/flask_gunicorn_embed.py", line 72, in bk_worker
server.start()
File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/bokeh/server/server.py", line 149, in start
self._tornado.start()
File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/bokeh/server/tornado.py", line 372, in start
self._stats_job.start()
File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/tornado/ioloop.py", line 1185, in start
self.io_loop = IOLoop.current()
File "/Use
rs/bryanv/anaconda/envs/01216/lib/python3.6/site-packages/tornado/ioloop.py", line 282, in current
loop = asyncio.get_event_loop()
File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/asyncio/events.py", line 694, in get_event_loop
return get_event_loop_policy().get_event_loop()
File "/Users/bryanv/anaconda/envs/01216/lib/python3.6/asyncio/events.py", line 602, in get_event_loop
% threading.current_thread().name)
RuntimeError: There is no current event loop in thread 'Thread-1'.
|
RuntimeError
|
def check_origin(self, origin):
"""Implement a check_origin policy for Tornado to call.
The suplied origin will be compared to the Bokeh server whitelist. If the
origin is not allow, an error will be logged and ``False`` will be returned.
Args:
origin (str) :
The URL of the connection origin
Returns:
bool, True if the connection is allowed, False otherwise
"""
from ..util import check_whitelist
parsed_origin = urlparse(origin)
origin_host = parsed_origin.netloc.lower()
allowed_hosts = self.application.websocket_origins
allowed = check_whitelist(origin_host, allowed_hosts)
if allowed:
return True
else:
log.error(
"Refusing websocket connection from Origin '%s'; \
use --allow-websocket-origin=%s to permit this; currently we allow origins %r",
origin,
origin_host,
allowed_hosts,
)
return False
|
def check_origin(self, origin):
from ..util import check_whitelist
parsed_origin = urlparse(origin)
origin_host = parsed_origin.netloc.lower()
allowed_hosts = self.application.websocket_origins
allowed = check_whitelist(origin_host, allowed_hosts)
if allowed:
return True
else:
log.error(
"Refusing websocket connection from Origin '%s'; \
use --allow-websocket-origin=%s to permit this; currently we allow origins %r",
origin,
origin_host,
allowed_hosts,
)
return False
|
https://github.com/bokeh/bokeh/issues/7619
|
2018-03-12 09:05:56,504 200 GET /gapminder (::1) 134.13ms
2018-03-12 09:05:56,506 Sending pull-doc-reply from session 'c13X3Xet3szgUeNWFMzrL86pmFq77b8q04pGAUD8WUFJ'
2018-03-12 09:05:56,513 Failed sending message as connection was closed
2018-03-12 09:05:56,514 WebSocket connection closed: code=None, reason=None
2018-03-12 09:05:56,604 Future exception was never retrieved
future: <Future finished exception=WebSocketClosedError()>
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 808, in wrapper
yield fut
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1099, in run
value = future.result()
tornado.iostream.StreamClosedError: Stream is closed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1107, in run
yielded = self.gen.throw(*exc_info)
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 810, in wrapper
raise WebSocketClosedError()
tornado.websocket.WebSocketClosedError
|
tornado.iostream.StreamClosedError
|
def open(self):
"""Initialize a connection to a client.
Returns:
None
"""
log.info("WebSocket connection opened")
proto_version = self.get_argument("bokeh-protocol-version", default=None)
if proto_version is None:
self.close()
raise ProtocolError("No bokeh-protocol-version specified")
session_id = self.get_argument("bokeh-session-id", default=None)
if session_id is None:
self.close()
raise ProtocolError("No bokeh-session-id specified")
if not check_session_id_signature(
session_id,
signed=self.application.sign_sessions,
secret_key=self.application.secret_key,
):
log.error("Session id had invalid signature: %r", session_id)
raise ProtocolError("Invalid session ID")
def on_fully_opened(future):
e = future.exception()
if e is not None:
# this isn't really an error (unless we have a
# bug), it just means a client disconnected
# immediately, most likely.
log.debug("Failed to fully open connection %r", e)
future = self._async_open(session_id, proto_version)
self.application.io_loop.add_future(future, on_fully_opened)
|
def open(self):
"""Initialize a connection to a client."""
log.info("WebSocket connection opened")
proto_version = self.get_argument("bokeh-protocol-version", default=None)
if proto_version is None:
self.close()
raise ProtocolError("No bokeh-protocol-version specified")
session_id = self.get_argument("bokeh-session-id", default=None)
if session_id is None:
self.close()
raise ProtocolError("No bokeh-session-id specified")
if not check_session_id_signature(
session_id,
signed=self.application.sign_sessions,
secret_key=self.application.secret_key,
):
log.error("Session id had invalid signature: %r", session_id)
raise ProtocolError("Invalid session ID")
def on_fully_opened(future):
e = future.exception()
if e is not None:
# this isn't really an error (unless we have a
# bug), it just means a client disconnected
# immediately, most likely.
log.debug("Failed to fully open connection %r", e)
future = self._async_open(session_id, proto_version)
self.application.io_loop.add_future(future, on_fully_opened)
|
https://github.com/bokeh/bokeh/issues/7619
|
2018-03-12 09:05:56,504 200 GET /gapminder (::1) 134.13ms
2018-03-12 09:05:56,506 Sending pull-doc-reply from session 'c13X3Xet3szgUeNWFMzrL86pmFq77b8q04pGAUD8WUFJ'
2018-03-12 09:05:56,513 Failed sending message as connection was closed
2018-03-12 09:05:56,514 WebSocket connection closed: code=None, reason=None
2018-03-12 09:05:56,604 Future exception was never retrieved
future: <Future finished exception=WebSocketClosedError()>
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 808, in wrapper
yield fut
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1099, in run
value = future.result()
tornado.iostream.StreamClosedError: Stream is closed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1107, in run
yielded = self.gen.throw(*exc_info)
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 810, in wrapper
raise WebSocketClosedError()
tornado.websocket.WebSocketClosedError
|
tornado.iostream.StreamClosedError
|
def _async_open(self, session_id, proto_version):
"""Perform the specific steps needed to open a connection to a Bokeh session
Sepcifically, this method coordinates:
* Getting a session for a session ID (creating a new one if needed)
* Creating a protocol receiver and hander
* Opening a new ServerConnection and sending it an ACK
Args:
session_id (str) :
A session ID to for a session to connect to
If no session exists with the given ID, a new session is made
proto_version (str):
The protocol version requested by the connecting client.
Returns:
None
"""
try:
yield self.application_context.create_session_if_needed(
session_id, self.request
)
session = self.application_context.get_session(session_id)
protocol = Protocol(proto_version)
self.receiver = Receiver(protocol)
log.debug("Receiver created for %r", protocol)
self.handler = ProtocolHandler()
log.debug("ProtocolHandler created for %r", protocol)
self.connection = self.application.new_connection(
protocol, self, self.application_context, session
)
log.info("ServerConnection created")
except ProtocolError as e:
log.error("Could not create new server session, reason: %s", e)
self.close()
raise e
msg = self.connection.protocol.create("ACK")
yield self.send_message(msg)
raise gen.Return(None)
|
def _async_open(self, session_id, proto_version):
try:
yield self.application_context.create_session_if_needed(
session_id, self.request
)
session = self.application_context.get_session(session_id)
protocol = Protocol(proto_version)
self.receiver = Receiver(protocol)
log.debug("Receiver created for %r", protocol)
self.handler = ProtocolHandler()
log.debug("ProtocolHandler created for %r", protocol)
self.connection = self.application.new_connection(
protocol, self, self.application_context, session
)
log.info("ServerConnection created")
except ProtocolError as e:
log.error("Could not create new server session, reason: %s", e)
self.close()
raise e
msg = self.connection.protocol.create("ACK")
yield self.send_message(msg)
raise gen.Return(None)
|
https://github.com/bokeh/bokeh/issues/7619
|
2018-03-12 09:05:56,504 200 GET /gapminder (::1) 134.13ms
2018-03-12 09:05:56,506 Sending pull-doc-reply from session 'c13X3Xet3szgUeNWFMzrL86pmFq77b8q04pGAUD8WUFJ'
2018-03-12 09:05:56,513 Failed sending message as connection was closed
2018-03-12 09:05:56,514 WebSocket connection closed: code=None, reason=None
2018-03-12 09:05:56,604 Future exception was never retrieved
future: <Future finished exception=WebSocketClosedError()>
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 808, in wrapper
yield fut
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1099, in run
value = future.result()
tornado.iostream.StreamClosedError: Stream is closed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1107, in run
yielded = self.gen.throw(*exc_info)
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 810, in wrapper
raise WebSocketClosedError()
tornado.websocket.WebSocketClosedError
|
tornado.iostream.StreamClosedError
|
def on_message(self, fragment):
"""Process an individual wire protocol fragment.
The websocket RFC specifies opcodes for distinguishing text frames
from binary frames. Tornado passes us either a text or binary string
depending on that opcode, we have to look at the type of the fragment
to see what we got.
Args:
fragment (unicode or bytes) : wire fragment to process
"""
# We shouldn't throw exceptions from on_message because the caller is
# just Tornado and it doesn't know what to do with them other than
# report them as an unhandled Future
try:
message = yield self._receive(fragment)
except Exception as e:
# If you go look at self._receive, it's catching the
# expected error types... here we have something weird.
log.error(
"Unhandled exception receiving a message: %r: %r",
e,
fragment,
exc_info=True,
)
self._internal_error("server failed to parse a message")
try:
if message:
work = yield self._handle(message)
if work:
yield self._schedule(work)
except Exception as e:
log.error(
"Handler or its work threw an exception: %r: %r", e, message, exc_info=True
)
self._internal_error("server failed to handle a message")
raise gen.Return(None)
|
def on_message(self, fragment):
"""Process an individual wire protocol fragment.
The websocket RFC specifies opcodes for distinguishing
text frames from binary frames. Tornado passes us either
a text or binary string depending on that opcode, we have
to look at the type of the fragment to see what we got.
Args:
fragment (unicode or bytes) : wire fragment to process
"""
# We shouldn't throw exceptions from on_message because
# the caller is just Tornado and it doesn't know what to
# do with them other than report them as an unhandled
# Future
try:
message = yield self._receive(fragment)
except Exception as e:
# If you go look at self._receive, it's catching the
# expected error types... here we have something weird.
log.error(
"Unhandled exception receiving a message: %r: %r",
e,
fragment,
exc_info=True,
)
self._internal_error("server failed to parse a message")
try:
if message:
work = yield self._handle(message)
if work:
yield self._schedule(work)
except Exception as e:
log.error(
"Handler or its work threw an exception: %r: %r", e, message, exc_info=True
)
self._internal_error("server failed to handle a message")
raise gen.Return(None)
|
https://github.com/bokeh/bokeh/issues/7619
|
2018-03-12 09:05:56,504 200 GET /gapminder (::1) 134.13ms
2018-03-12 09:05:56,506 Sending pull-doc-reply from session 'c13X3Xet3szgUeNWFMzrL86pmFq77b8q04pGAUD8WUFJ'
2018-03-12 09:05:56,513 Failed sending message as connection was closed
2018-03-12 09:05:56,514 WebSocket connection closed: code=None, reason=None
2018-03-12 09:05:56,604 Future exception was never retrieved
future: <Future finished exception=WebSocketClosedError()>
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 808, in wrapper
yield fut
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1099, in run
value = future.result()
tornado.iostream.StreamClosedError: Stream is closed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1107, in run
yielded = self.gen.throw(*exc_info)
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 810, in wrapper
raise WebSocketClosedError()
tornado.websocket.WebSocketClosedError
|
tornado.iostream.StreamClosedError
|
def send_message(self, message):
"""Send a Bokeh Server protocol message to the connected client.
Args:
message (Message) : a message to send
"""
try:
yield message.send(self)
except (
WebSocketClosedError,
StreamClosedError,
): # Tornado 4.x may raise StreamClosedError
# on_close() is / will be called anyway
log.warn("Failed sending message as connection was closed")
raise gen.Return(None)
|
def send_message(self, message):
"""Send a Bokeh Server protocol message to the connected client.
Args:
message (Message) : a message to send
"""
try:
yield message.send(self)
except WebSocketClosedError:
# on_close() is / will be called anyway
log.warn("Failed sending message as connection was closed")
raise gen.Return(None)
|
https://github.com/bokeh/bokeh/issues/7619
|
2018-03-12 09:05:56,504 200 GET /gapminder (::1) 134.13ms
2018-03-12 09:05:56,506 Sending pull-doc-reply from session 'c13X3Xet3szgUeNWFMzrL86pmFq77b8q04pGAUD8WUFJ'
2018-03-12 09:05:56,513 Failed sending message as connection was closed
2018-03-12 09:05:56,514 WebSocket connection closed: code=None, reason=None
2018-03-12 09:05:56,604 Future exception was never retrieved
future: <Future finished exception=WebSocketClosedError()>
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 808, in wrapper
yield fut
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1099, in run
value = future.result()
tornado.iostream.StreamClosedError: Stream is closed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1107, in run
yielded = self.gen.throw(*exc_info)
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 810, in wrapper
raise WebSocketClosedError()
tornado.websocket.WebSocketClosedError
|
tornado.iostream.StreamClosedError
|
def write_message(self, message, binary=False, locked=True):
"""Override parent write_message with a version that acquires a
write lock before writing.
"""
if locked:
with (yield self.write_lock.acquire()):
yield super(WSHandler, self).write_message(message, binary)
else:
yield super(WSHandler, self).write_message(message, binary)
|
def write_message(self, message, binary=False, locked=True):
"""Override parent write_message with a version that acquires a
write lock before writing.
"""
def write_message_unlocked():
future = super(WSHandler, self).write_message(message, binary)
# don't yield this future or we're blocking on ourselves!
raise gen.Return(future)
if locked:
with (yield self.write_lock.acquire()):
write_message_unlocked()
else:
write_message_unlocked()
|
https://github.com/bokeh/bokeh/issues/7619
|
2018-03-12 09:05:56,504 200 GET /gapminder (::1) 134.13ms
2018-03-12 09:05:56,506 Sending pull-doc-reply from session 'c13X3Xet3szgUeNWFMzrL86pmFq77b8q04pGAUD8WUFJ'
2018-03-12 09:05:56,513 Failed sending message as connection was closed
2018-03-12 09:05:56,514 WebSocket connection closed: code=None, reason=None
2018-03-12 09:05:56,604 Future exception was never retrieved
future: <Future finished exception=WebSocketClosedError()>
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 808, in wrapper
yield fut
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1099, in run
value = future.result()
tornado.iostream.StreamClosedError: Stream is closed
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/gen.py", line 1107, in run
yielded = self.gen.throw(*exc_info)
File "/Users/bryanv/anaconda/envs/15dev1/lib/python3.6/site-packages/tornado/websocket.py", line 810, in wrapper
raise WebSocketClosedError()
tornado.websocket.WebSocketClosedError
|
tornado.iostream.StreamClosedError
|
def env_before_read_docs(app, env, docnames):
docnames.sort(key=lambda x: 0 if "examples" in x else 1)
for name in [x for x in docnames if env.doc2path(x).endswith(".py")]:
if not name.startswith(tuple(env.app.config.bokeh_plot_pyfile_include_dirs)):
env.found_docs.remove(name)
docnames.remove(name)
|
def env_before_read_docs(app, env, docnames):
docnames.sort(key=lambda x: 2 if "extension" in x else 0 if "examples" in x else 1)
for name in [x for x in docnames if env.doc2path(x).endswith(".py")]:
if not name.startswith(tuple(env.app.config.bokeh_plot_pyfile_include_dirs)):
env.found_docs.remove(name)
docnames.remove(name)
|
https://github.com/bokeh/bokeh/issues/4965
|
2016-08-16 15:16:51,365 error handling message Message 'PATCH-DOC' (revision 1): ValueError('Unknown string format',)
2016-08-16 15:16:51,365 message header {'msgid': '8B5BE6EBF7044F8793268769745D6C7A', 'msgtype': 'PATCH-DOC'} content {'events': [{'new': 'Mon Aug 01 2016 00:00:00 GMT-0400 (Eastern Daylight Time)', 'model': {'type': 'DatePicker', 'id': '5f8ecfc4-a06e-428f-b21d-6ed1a34cefff'}, 'attr': 'value', 'kind': 'ModelChanged'}], 'references': []}
Traceback (most recent call last):
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/server/protocol/server_handler.py", line 38, in handle
work = yield handler(message, connection)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/tornado/gen.py", line 1008, in run
value = future.result()
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/tornado/concurrent.py", line 232, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/tornado/gen.py", line 1017, in run
yielded = self.gen.send(value)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/server/session.py", line 45, in _needs_document_lock_wrapper
result = yield yield_for_all_futures(func(self, *args, **kwargs))
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/server/session.py", line 217, in _handle_patch
message.apply_to_document(self.document)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/server/protocol/messages/patch_doc.py", line 92, in apply_to_document
doc.apply_json_patch(self.content)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/document.py", line 955, in apply_json_patch
patched_obj.set_from_json(attr, value, models=references)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 735, in set_from_json
prop.set_from_json(self, json, models)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 371, in set_from_json
models)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 323, in set_from_json
return self.__set__(obj, json)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 455, in __set__
value = self.descriptor.prepare_value(obj.__class__, self.name, value)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 274, in prepare_value
value = self.transform(value)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 1529, in transform
value = dateutil.parser.parse(value).date()
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/dateutil/parser.py", line 1164, in parse
return DEFAULTPARSER.parse(timestr, **kwargs)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/dateutil/parser.py", line 555, in parse
raise ValueError("Unknown string format")
|
ValueError
|
def bundle_all_models():
key = calc_cache_key()
bundle = _bundle_cache.get(key, None)
if bundle is None:
_bundle_cache[key] = bundle = (
bundle_models(Model.model_class_reverse_map.values()) or ""
)
return bundle
|
def bundle_all_models():
return bundle_models(Model.model_class_reverse_map.values()) or ""
|
https://github.com/bokeh/bokeh/issues/4965
|
2016-08-16 15:16:51,365 error handling message Message 'PATCH-DOC' (revision 1): ValueError('Unknown string format',)
2016-08-16 15:16:51,365 message header {'msgid': '8B5BE6EBF7044F8793268769745D6C7A', 'msgtype': 'PATCH-DOC'} content {'events': [{'new': 'Mon Aug 01 2016 00:00:00 GMT-0400 (Eastern Daylight Time)', 'model': {'type': 'DatePicker', 'id': '5f8ecfc4-a06e-428f-b21d-6ed1a34cefff'}, 'attr': 'value', 'kind': 'ModelChanged'}], 'references': []}
Traceback (most recent call last):
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/server/protocol/server_handler.py", line 38, in handle
work = yield handler(message, connection)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/tornado/gen.py", line 1008, in run
value = future.result()
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/tornado/concurrent.py", line 232, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/tornado/gen.py", line 1017, in run
yielded = self.gen.send(value)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/server/session.py", line 45, in _needs_document_lock_wrapper
result = yield yield_for_all_futures(func(self, *args, **kwargs))
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/server/session.py", line 217, in _handle_patch
message.apply_to_document(self.document)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/server/protocol/messages/patch_doc.py", line 92, in apply_to_document
doc.apply_json_patch(self.content)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/document.py", line 955, in apply_json_patch
patched_obj.set_from_json(attr, value, models=references)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 735, in set_from_json
prop.set_from_json(self, json, models)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 371, in set_from_json
models)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 323, in set_from_json
return self.__set__(obj, json)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 455, in __set__
value = self.descriptor.prepare_value(obj.__class__, self.name, value)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 274, in prepare_value
value = self.transform(value)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/bokeh/core/properties.py", line 1529, in transform
value = dateutil.parser.parse(value).date()
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/dateutil/parser.py", line 1164, in parse
return DEFAULTPARSER.parse(timestr, **kwargs)
File "/Users/rheineke/anaconda/lib/python3.5/site-packages/dateutil/parser.py", line 555, in parse
raise ValueError("Unknown string format")
|
ValueError
|
def check_session_id_signature(
session_id, secret_key=settings.secret_key_bytes(), signed=settings.sign_sessions()
):
"""Check the signature of a session ID, returning True if it's valid.
The server uses this function to check whether a session ID
was generated with the correct secret key. If signed sessions are disabled,
this function always returns True.
Args:
session_id (str) : The session ID to check
secret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)
signed (bool, optional) : Whether to check anything (default: value of
'BOKEH_SIGN_SESSIONS' env var)
"""
secret_key = _ensure_bytes(secret_key)
if signed:
pieces = session_id.split("-", 1)
if len(pieces) != 2:
return False
base_id = pieces[0]
provided_signature = pieces[1]
expected_signature = _signature(base_id, secret_key)
# hmac.compare_digest() uses a string compare algorithm that doesn't
# short-circuit so we don't allow timing analysis
# encode_utf8 is used to ensure that strings have same encoding
return hmac.compare_digest(
encode_utf8(expected_signature), encode_utf8(provided_signature)
)
else:
return True
|
def check_session_id_signature(
session_id, secret_key=settings.secret_key_bytes(), signed=settings.sign_sessions()
):
"""Check the signature of a session ID, returning True if it's valid.
The server uses this function to check whether a session ID
was generated with the correct secret key. If signed sessions are disabled,
this function always returns True.
Args:
session_id (str) : The session ID to check
secret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)
signed (bool, optional) : Whether to check anything (default: value of
'BOKEH_SIGN_SESSIONS' env var)
"""
secret_key = _ensure_bytes(secret_key)
if signed:
pieces = session_id.split("-", 1)
if len(pieces) != 2:
return False
base_id = pieces[0]
provided_signature = pieces[1]
expected_signature = _signature(base_id, secret_key)
# hmac.compare_digest() uses a string compare algorithm that doesn't
# short-circuit so we don't allow timing analysis
return hmac.compare_digest(expected_signature, provided_signature)
else:
return True
|
https://github.com/bokeh/bokeh/issues/6653
|
2017-07-21 00:23:58,161 Uncaught exception GET /sliders?bokeh-session-id=4RJKVrnFVe60gB5urh9sE3jUnSGDkJAfCwvoaDsoMB8f-W6QAfyDoxORtN7mb6DHAzftAhpfnxVdzC-6gIT13uV0 (::1)
HTTPServerRequest(protocol='http', host='localhost:5006', method='GET', uri='/sliders?bokeh-session-id=4RJKVrnFVe60gB5urh9sE3jUnSGDkJAfCwvoaDsoMB8f-W6QAfyDoxORtN7mb6DHAzftAhpfnxVdzC-6gIT13uV0', version='HTTP/1.1', remote_ip='::1', headers={'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6,de;q=0.4', 'Accept-Encoding': 'gzip, deflate, br', 'Host': 'localhost:5006', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/59.0.3071.109 Chrome/59.0.3071.109 Safari/537.36', 'Connection': 'keep-alive', 'Cookie': '_xsrf=2|4ab25a11|f7d3fbdf1fab4d2b01135d63518a4f9a|1498856594; username-localhost-8888="2|1:0|10:1499014969|23:username-localhost-8888|44:ODVmNmU2NjIwYjUwNDlhYzk2MzY4OWQ5NDU2ZTExYjU=|3a908d5ba83bca558deae2665732f340eeef5ce69a2763c6cef367fd892e22b7"', 'Upgrade-Insecure-Requests': '1'})
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/tornado/web.py", line 1469, in _execute
result = yield result
File "/usr/local/lib/python2.7/dist-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "/usr/local/lib/python2.7/dist-packages/tornado/gen.py", line 1021, in run
yielded = self.gen.throw(*exc_info)
File "/usr/local/lib/python2.7/dist-packages/bokeh/server/views/doc_handler.py", line 27, in get
session = yield self.get_session()
File "/usr/local/lib/python2.7/dist-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "/usr/local/lib/python2.7/dist-packages/tornado/gen.py", line 285, in wrapper
yielded = next(result)
File "/usr/local/lib/python2.7/dist-packages/bokeh/server/views/session_handler.py", line 36, in get_session
signed=self.application.sign_sessions):
File "/usr/local/lib/python2.7/dist-packages/bokeh/util/session_id.py", line 156, in check_session_id_signature
return hmac.compare_digest(expected_signature, provided_signature)
TypeError: 'unicode' does not have the buffer interface
|
TypeError
|
def _build_docstring():
global __doc__
from . import subcommands
for cls in subcommands.all:
# running python with -OO will discard docstrings -> __doc__ is None
if __doc__ is None:
__doc__ = ""
__doc__ += "%8s : %s\n" % (cls.name, cls.help)
|
def _build_docstring():
global __doc__
from . import subcommands
for cls in subcommands.all:
__doc__ += "%8s : %s\n" % (cls.name, cls.help)
|
https://github.com/bokeh/bokeh/issues/6156
|
2017-04-18 16:51:23 ERROR Error on request:
Traceback (most recent call last):
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 193, in run_wsgi
execute(self.server.app)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 181, in execute
application_iter = app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/wsgi.py", line 599, in __call__
return self.app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/contrib/lint.py", line 337, in __call__
app_iter = self.app(environ, checking_start_response)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 40, in __call__
response, endpoint = self.dispatch_request(request)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 31, in dispatch_request
func = getattr(importlib.import_module(module[0]), module[1])
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Delivery.py", line 20, in <module>
from serverMonitoring.Handler.Dashboard.Widgets import ChartParameters, Chart
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Dashboard/Widgets.py", line 20, in <module>
from bokeh.embed import file_html
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/embed.py", line 28, in <module>
from .core.json_encoder import serialize_json
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/core/json_encoder.py", line 47, in <module>
from ..util.serialization import transform_series, transform_array
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/util/serialization.py", line 39, in <module>
__doc__ = __doc__ % ("\n".join("* ``np." + str(x) + "``" for x in BINARY_ARRAY_TYPES))
TypeError: unsupported operand type(s) for %: 'NoneType' and 'str'
|
TypeError
|
def abstract(cls):
"""A decorator to mark abstract base classes derived from |HasProps|."""
if not issubclass(cls, HasProps):
raise TypeError("%s is not a subclass of HasProps" % cls.__name__)
# running python with -OO will discard docstrings -> __doc__ is None
if cls.__doc__ is not None:
cls.__doc__ += _ABSTRACT_ADMONITION
return cls
|
def abstract(cls):
"""A decorator to mark abstract base classes derived from |HasProps|."""
if not issubclass(cls, HasProps):
raise TypeError("%s is not a subclass of HasProps" % cls.__name__)
cls.__doc__ += _ABSTRACT_ADMONITION
return cls
|
https://github.com/bokeh/bokeh/issues/6156
|
2017-04-18 16:51:23 ERROR Error on request:
Traceback (most recent call last):
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 193, in run_wsgi
execute(self.server.app)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 181, in execute
application_iter = app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/wsgi.py", line 599, in __call__
return self.app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/contrib/lint.py", line 337, in __call__
app_iter = self.app(environ, checking_start_response)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 40, in __call__
response, endpoint = self.dispatch_request(request)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 31, in dispatch_request
func = getattr(importlib.import_module(module[0]), module[1])
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Delivery.py", line 20, in <module>
from serverMonitoring.Handler.Dashboard.Widgets import ChartParameters, Chart
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Dashboard/Widgets.py", line 20, in <module>
from bokeh.embed import file_html
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/embed.py", line 28, in <module>
from .core.json_encoder import serialize_json
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/core/json_encoder.py", line 47, in <module>
from ..util.serialization import transform_series, transform_array
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/util/serialization.py", line 39, in <module>
__doc__ = __doc__ % ("\n".join("* ``np." + str(x) + "``" for x in BINARY_ARRAY_TYPES))
TypeError: unsupported operand type(s) for %: 'NoneType' and 'str'
|
TypeError
|
def __new__(meta_cls, class_name, bases, class_dict):
""" """
names_with_refs = set()
container_names = set()
# Now handle all the Override
overridden_defaults = {}
for name, prop in class_dict.items():
if not isinstance(prop, Override):
continue
if prop.default_overridden:
overridden_defaults[name] = prop.default
for name, default in overridden_defaults.items():
del class_dict[name]
generators = dict()
for name, generator in class_dict.items():
if isinstance(generator, PropertyDescriptorFactory):
generators[name] = generator
elif isinstance(generator, type) and issubclass(
generator, PropertyDescriptorFactory
):
# Support the user adding a property without using parens,
# i.e. using just the Property subclass instead of an
# instance of the subclass
generators[name] = generator.autocreate()
dataspecs = {}
new_class_attrs = {}
for name, generator in generators.items():
prop_descriptors = generator.make_descriptors(name)
replaced_self = False
for prop_descriptor in prop_descriptors:
if prop_descriptor.name in generators:
if generators[prop_descriptor.name] is generator:
# a generator can replace itself, this is the
# standard case like `foo = Int()`
replaced_self = True
prop_descriptor.add_prop_descriptor_to_class(
class_name,
new_class_attrs,
names_with_refs,
container_names,
dataspecs,
)
else:
# if a generator tries to overwrite another
# generator that's been explicitly provided,
# use the prop that was manually provided
# and ignore this one.
pass
else:
prop_descriptor.add_prop_descriptor_to_class(
class_name,
new_class_attrs,
names_with_refs,
container_names,
dataspecs,
)
# if we won't overwrite ourselves anyway, delete the generator
if not replaced_self:
del class_dict[name]
class_dict.update(new_class_attrs)
class_dict["__properties__"] = set(new_class_attrs)
class_dict["__properties_with_refs__"] = names_with_refs
class_dict["__container_props__"] = container_names
if len(overridden_defaults) > 0:
class_dict["__overridden_defaults__"] = overridden_defaults
if dataspecs:
class_dict["__dataspecs__"] = dataspecs
if "__example__" in class_dict:
path = class_dict["__example__"]
# running python with -OO will discard docstrings -> __doc__ is None
if "__doc__" in class_dict and class_dict["__doc__"] is not None:
class_dict["__doc__"] += _EXAMPLE_TEMPLATE % dict(path=path)
return super(MetaHasProps, meta_cls).__new__(
meta_cls, class_name, bases, class_dict
)
|
def __new__(meta_cls, class_name, bases, class_dict):
""" """
names_with_refs = set()
container_names = set()
# Now handle all the Override
overridden_defaults = {}
for name, prop in class_dict.items():
if not isinstance(prop, Override):
continue
if prop.default_overridden:
overridden_defaults[name] = prop.default
for name, default in overridden_defaults.items():
del class_dict[name]
generators = dict()
for name, generator in class_dict.items():
if isinstance(generator, PropertyDescriptorFactory):
generators[name] = generator
elif isinstance(generator, type) and issubclass(
generator, PropertyDescriptorFactory
):
# Support the user adding a property without using parens,
# i.e. using just the Property subclass instead of an
# instance of the subclass
generators[name] = generator.autocreate()
dataspecs = {}
new_class_attrs = {}
for name, generator in generators.items():
prop_descriptors = generator.make_descriptors(name)
replaced_self = False
for prop_descriptor in prop_descriptors:
if prop_descriptor.name in generators:
if generators[prop_descriptor.name] is generator:
# a generator can replace itself, this is the
# standard case like `foo = Int()`
replaced_self = True
prop_descriptor.add_prop_descriptor_to_class(
class_name,
new_class_attrs,
names_with_refs,
container_names,
dataspecs,
)
else:
# if a generator tries to overwrite another
# generator that's been explicitly provided,
# use the prop that was manually provided
# and ignore this one.
pass
else:
prop_descriptor.add_prop_descriptor_to_class(
class_name,
new_class_attrs,
names_with_refs,
container_names,
dataspecs,
)
# if we won't overwrite ourselves anyway, delete the generator
if not replaced_self:
del class_dict[name]
class_dict.update(new_class_attrs)
class_dict["__properties__"] = set(new_class_attrs)
class_dict["__properties_with_refs__"] = names_with_refs
class_dict["__container_props__"] = container_names
if len(overridden_defaults) > 0:
class_dict["__overridden_defaults__"] = overridden_defaults
if dataspecs:
class_dict["__dataspecs__"] = dataspecs
if "__example__" in class_dict:
path = class_dict["__example__"]
class_dict["__doc__"] += _EXAMPLE_TEMPLATE % dict(path=path)
return super(MetaHasProps, meta_cls).__new__(
meta_cls, class_name, bases, class_dict
)
|
https://github.com/bokeh/bokeh/issues/6156
|
2017-04-18 16:51:23 ERROR Error on request:
Traceback (most recent call last):
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 193, in run_wsgi
execute(self.server.app)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 181, in execute
application_iter = app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/wsgi.py", line 599, in __call__
return self.app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/contrib/lint.py", line 337, in __call__
app_iter = self.app(environ, checking_start_response)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 40, in __call__
response, endpoint = self.dispatch_request(request)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 31, in dispatch_request
func = getattr(importlib.import_module(module[0]), module[1])
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Delivery.py", line 20, in <module>
from serverMonitoring.Handler.Dashboard.Widgets import ChartParameters, Chart
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Dashboard/Widgets.py", line 20, in <module>
from bokeh.embed import file_html
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/embed.py", line 28, in <module>
from .core.json_encoder import serialize_json
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/core/json_encoder.py", line 47, in <module>
from ..util.serialization import transform_series, transform_array
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/util/serialization.py", line 39, in <module>
__doc__ = __doc__ % ("\n".join("* ``np." + str(x) + "``" for x in BINARY_ARRAY_TYPES))
TypeError: unsupported operand type(s) for %: 'NoneType' and 'str'
|
TypeError
|
def autoload_server(
model=None, app_path=None, session_id=None, url="default", relative_urls=False
):
"""Return a script tag that embeds content from a Bokeh server session.
Bokeh apps embedded using ``autoload_server`` will NOT set the browser
window title.
.. note::
Typically you will not want to save or re-use the output of this
function for different or multiple page loads.
Args:
model (Model, optional) : The object to render from the session
If ``None`` an entire document is rendered. (default: ``None``)
If you supply a specific model to render, you must also supply the
session ID containing that model.
Supplying a model is usually only useful when embedding
a specific session that was previously created using the
``bokeh.client`` API.
session_id (str, optional) : A server session ID (default: None)
If ``None``, let the server auto-generate a random session ID.
Supplying a session id is usually only useful when embedding
a specific session that was previously created using the
``bokeh.client`` API.
url (str, optional) : A URL to a Bokeh application on a Bokeh server
If ``None`` the default URL ``{DEFAULT_SERVER_HTTP_URL}`` will be used.
relative_urls (bool, optional) :
Whether to use relative URLs for resources.
If ``True`` the links generated for resources such a BokehJS
JavaScript and CSS will be relative links.
This should normally be set to ``False``, but must be set to
``True`` in situations where only relative URLs will work. E.g.
when running the Bokeh behind reverse-proxies under certain
configurations
Returns:
A ``<script>`` tag that will execute an autoload script loaded
from the Bokeh Server.
Examples:
In the simplest and most common case, we wish to embed Bokeh server
application by providing the URL to where it is located.
Suppose the app is running (perhaps behind Nginx or some other proxy)
at ``http://app.server.org/foo/myapp``. We wish to embed this app in
a page at ``mysite.com``. The following will provide an HTML script
tag to do that, that can be included in ``mysite.com``:
.. code-block:: python
script = autoload_server(url="http://app.server.org/foo/myapp")
Note that in order for this embedding to work, the Bokeh server needs
to have been configured to allow connections from the public URL where
the embedding happens. In this case, if the autoload script is run from
a page located at ``http://mysite.com/report`` then the Bokeh server
must have been started with an ``--allow-websocket-origin`` option
specifically allowing websocket connections from pages that originate
from ``mysite.com``:
.. code-block:: sh
bokeh serve mayapp.py --allow-websocket-origin=mysite.com
If an autoload script runs from an origin that has not been allowed,
the Bokeh server will return a 403 error.
It's also possible to initiate sessions on a Bokeh server from
Python, using the functions :func:`~bokeh.client.push_session` and
:func:`~bokeh.client.push_session`. This can be useful in advanced
situations where you may want to "set up" the session before you
embed it. For example, you might to load up a session and modify
``session.document`` in some way (perhaps adding per-user data).
In such cases you will pass the session id as an argument as well:
.. code-block:: python
script = autoload_server(session_id="some_session_id",
url="http://app.server.org/foo/myapp")
.. warning::
It is typically a bad idea to re-use the same ``session_id`` for
every page load. This is likely to create scalability and security
problems, and will cause "shared Google doc" behaviour, which is
typically not desired.
"""
if app_path is not None:
deprecated(
(0, 12, 5),
"app_path",
"url",
"Now pass entire app URLS in the url arguments, e.g. 'url=http://foo.com:5010/bar/myapp'",
)
if not app_path.startswith("/"):
app_path = "/" + app_path
url = url + app_path
coords = _SessionCoordinates(url=url, session_id=session_id)
elementid = make_id()
# empty model_id means render the entire doc from session_id
model_id = ""
if model is not None:
model_id = model._id
if model_id and session_id is None:
raise ValueError(
"A specific model was passed to autoload_server() but no session_id; "
"this doesn't work because the server will generate a fresh session "
"which won't have the model in it."
)
src_path = coords.url + "/autoload.js?bokeh-autoload-element=" + elementid
if url != "default":
app_path = urlparse(url).path.rstrip("/")
if not app_path.startswith("/"):
app_path = "/" + app_path
src_path += "&bokeh-app-path=" + app_path
if not relative_urls:
src_path += "&bokeh-absolute-url=" + coords.url
# we want the server to generate the ID, so the autoload script
# can be embedded in a static page while every user still gets
# their own session. So we omit bokeh-session-id rather than
# using a generated ID.
if coords.session_id_allowing_none is not None:
src_path = src_path + "&bokeh-session-id=" + session_id
tag = AUTOLOAD_TAG.render(
src_path=src_path,
app_path=app_path,
elementid=elementid,
modelid=model_id,
)
return encode_utf8(tag)
|
def autoload_server(
model=None, app_path=None, session_id=None, url="default", relative_urls=False
):
"""Return a script tag that embeds content from a Bokeh server session.
Bokeh apps embedded using ``autoload_server`` will NOT set the browser
window title.
.. note::
Typically you will not want to save or re-use the output of this
function for different or multiple page loads.
Args:
model (Model, optional) : The object to render from the session
If ``None`` an entire document is rendered. (default: ``None``)
If you supply a specific model to render, you must also supply the
session ID containing that model.
Supplying a model is usually only useful when embedding
a specific session that was previously created using the
``bokeh.client`` API.
session_id (str, optional) : A server session ID (default: None)
If ``None``, let the server auto-generate a random session ID.
Supplying a session id is usually only useful when embedding
a specific session that was previously created using the
``bokeh.client`` API.
url (str, optional) : A URL to a Bokeh application on a Bokeh server
If ``None`` the default URL ``%s`` will be used.
relative_urls (bool, optional) :
Whether to use relative URLs for resources.
If ``True`` the links generated for resources such a BokehJS
JavaScript and CSS will be relative links.
This should normally be set to ``False``, but must be set to
``True`` in situations where only relative URLs will work. E.g.
when running the Bokeh behind reverse-proxies under certain
configurations
Returns:
A ``<script>`` tag that will execute an autoload script loaded
from the Bokeh Server.
Examples:
In the simplest and most common case, we wish to embed Bokeh server
application by providing the URL to where it is located.
Suppose the app is running (perhaps behind Nginx or some other proxy)
at ``http://app.server.org/foo/myapp``. We wish to embed this app in
a page at ``mysite.com``. The following will provide an HTML script
tag to do that, that can be included in ``mysite.com``:
.. code-block:: python
script = autoload_server(url="http://app.server.org/foo/myapp")
Note that in order for this embedding to work, the Bokeh server needs
to have been configured to allow connections from the public URL where
the embedding happens. In this case, if the autoload script is run from
a page located at ``http://mysite.com/report`` then the Bokeh server
must have been started with an ``--allow-websocket-origin`` option
specifically allowing websocket connections from pages that originate
from ``mysite.com``:
.. code-block:: sh
bokeh serve mayapp.py --allow-websocket-origin=mysite.com
If an autoload script runs from an origin that has not been allowed,
the Bokeh server will return a 403 error.
It's also possible to initiate sessions on a Bokeh server from
Python, using the functions :func:`~bokeh.client.push_session` and
:func:`~bokeh.client.push_session`. This can be useful in advanced
situations where you may want to "set up" the session before you
embed it. For example, you might to load up a session and modify
``session.document`` in some way (perhaps adding per-user data).
In such cases you will pass the session id as an argument as well:
.. code-block:: python
script = autoload_server(session_id="some_session_id",
url="http://app.server.org/foo/myapp")
.. warning::
It is typically a bad idea to re-use the same ``session_id`` for
every page load. This is likely to create scalability and security
problems, and will cause "shared Google doc" behaviour, which is
typically not desired.
"""
if app_path is not None:
deprecated(
(0, 12, 5),
"app_path",
"url",
"Now pass entire app URLS in the url arguments, e.g. 'url=http://foo.com:5010/bar/myapp'",
)
if not app_path.startswith("/"):
app_path = "/" + app_path
url = url + app_path
coords = _SessionCoordinates(url=url, session_id=session_id)
elementid = make_id()
# empty model_id means render the entire doc from session_id
model_id = ""
if model is not None:
model_id = model._id
if model_id and session_id is None:
raise ValueError(
"A specific model was passed to autoload_server() but no session_id; "
"this doesn't work because the server will generate a fresh session "
"which won't have the model in it."
)
src_path = coords.url + "/autoload.js?bokeh-autoload-element=" + elementid
if url != "default":
app_path = urlparse(url).path.rstrip("/")
if not app_path.startswith("/"):
app_path = "/" + app_path
src_path += "&bokeh-app-path=" + app_path
if not relative_urls:
src_path += "&bokeh-absolute-url=" + coords.url
# we want the server to generate the ID, so the autoload script
# can be embedded in a static page while every user still gets
# their own session. So we omit bokeh-session-id rather than
# using a generated ID.
if coords.session_id_allowing_none is not None:
src_path = src_path + "&bokeh-session-id=" + session_id
tag = AUTOLOAD_TAG.render(
src_path=src_path,
app_path=app_path,
elementid=elementid,
modelid=model_id,
)
return encode_utf8(tag)
|
https://github.com/bokeh/bokeh/issues/6156
|
2017-04-18 16:51:23 ERROR Error on request:
Traceback (most recent call last):
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 193, in run_wsgi
execute(self.server.app)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 181, in execute
application_iter = app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/wsgi.py", line 599, in __call__
return self.app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/contrib/lint.py", line 337, in __call__
app_iter = self.app(environ, checking_start_response)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 40, in __call__
response, endpoint = self.dispatch_request(request)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 31, in dispatch_request
func = getattr(importlib.import_module(module[0]), module[1])
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Delivery.py", line 20, in <module>
from serverMonitoring.Handler.Dashboard.Widgets import ChartParameters, Chart
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Dashboard/Widgets.py", line 20, in <module>
from bokeh.embed import file_html
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/embed.py", line 28, in <module>
from .core.json_encoder import serialize_json
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/core/json_encoder.py", line 47, in <module>
from ..util.serialization import transform_series, transform_array
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/util/serialization.py", line 39, in <module>
__doc__ = __doc__ % ("\n".join("* ``np." + str(x) + "``" for x in BINARY_ARRAY_TYPES))
TypeError: unsupported operand type(s) for %: 'NoneType' and 'str'
|
TypeError
|
def _get_argspecs(glyphclass):
argspecs = OrderedDict()
for arg in glyphclass._args:
spec = {}
prop = getattr(glyphclass, arg)
# running python with -OO will discard docstrings -> __doc__ is None
if prop.__doc__:
spec["desc"] = " ".join(
x.strip() for x in prop.__doc__.strip().split("\n\n")[0].split("\n")
)
else:
spec["desc"] = ""
spec["default"] = prop.class_default(glyphclass)
spec["type"] = prop.__class__.__name__
argspecs[arg] = spec
return argspecs
|
def _get_argspecs(glyphclass):
argspecs = OrderedDict()
for arg in glyphclass._args:
spec = {}
prop = getattr(glyphclass, arg)
spec["desc"] = " ".join(
x.strip() for x in prop.__doc__.strip().split("\n\n")[0].split("\n")
)
spec["default"] = prop.class_default(glyphclass)
spec["type"] = prop.__class__.__name__
argspecs[arg] = spec
return argspecs
|
https://github.com/bokeh/bokeh/issues/6156
|
2017-04-18 16:51:23 ERROR Error on request:
Traceback (most recent call last):
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 193, in run_wsgi
execute(self.server.app)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 181, in execute
application_iter = app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/wsgi.py", line 599, in __call__
return self.app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/contrib/lint.py", line 337, in __call__
app_iter = self.app(environ, checking_start_response)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 40, in __call__
response, endpoint = self.dispatch_request(request)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 31, in dispatch_request
func = getattr(importlib.import_module(module[0]), module[1])
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Delivery.py", line 20, in <module>
from serverMonitoring.Handler.Dashboard.Widgets import ChartParameters, Chart
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Dashboard/Widgets.py", line 20, in <module>
from bokeh.embed import file_html
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/embed.py", line 28, in <module>
from .core.json_encoder import serialize_json
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/core/json_encoder.py", line 47, in <module>
from ..util.serialization import transform_series, transform_array
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/util/serialization.py", line 39, in <module>
__doc__ = __doc__ % ("\n".join("* ``np." + str(x) + "``" for x in BINARY_ARRAY_TYPES))
TypeError: unsupported operand type(s) for %: 'NoneType' and 'str'
|
TypeError
|
def array_encoding_disabled(array):
"""Determine whether an array may be binary encoded.
The NumPy array dtypes that can be encoded are:
{binary_array_types}
Args:
array (np.ndarray) : the array to check
Returns:
bool
"""
# disable binary encoding for non-supported dtypes
return array.dtype not in BINARY_ARRAY_TYPES
|
def array_encoding_disabled(array):
"""Determine whether an array may be binary encoded.
The NumPy array dtypes that can be encoded are:
%s
Args:
array (np.ndarray) : the array to check
Returns:
bool
"""
# disable binary encoding for non-supported dtypes
return array.dtype not in BINARY_ARRAY_TYPES
|
https://github.com/bokeh/bokeh/issues/6156
|
2017-04-18 16:51:23 ERROR Error on request:
Traceback (most recent call last):
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 193, in run_wsgi
execute(self.server.app)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/serving.py", line 181, in execute
application_iter = app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/wsgi.py", line 599, in __call__
return self.app(environ, start_response)
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/werkzeug/contrib/lint.py", line 337, in __call__
app_iter = self.app(environ, checking_start_response)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 40, in __call__
response, endpoint = self.dispatch_request(request)
File "/data/projects/python/server-monitoring/serverMonitoring/Main.py", line 31, in dispatch_request
func = getattr(importlib.import_module(module[0]), module[1])
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Delivery.py", line 20, in <module>
from serverMonitoring.Handler.Dashboard.Widgets import ChartParameters, Chart
File "/data/projects/python/server-monitoring/serverMonitoring/Handler/Dashboard/Widgets.py", line 20, in <module>
from bokeh.embed import file_html
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/embed.py", line 28, in <module>
from .core.json_encoder import serialize_json
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/core/json_encoder.py", line 47, in <module>
from ..util.serialization import transform_series, transform_array
File "/data/projects/python/server-monitoring/env/lib/python2.7/site-packages/bokeh/util/serialization.py", line 39, in <module>
__doc__ = __doc__ % ("\n".join("* ``np." + str(x) + "``" for x in BINARY_ARRAY_TYPES))
TypeError: unsupported operand type(s) for %: 'NoneType' and 'str'
|
TypeError
|
def modify_doc(doc):
x = np.linspace(0, 10, 1000)
y = np.log(x) * np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(title="Simple plot with slider")
plot.line("x", "y", source=source)
slider = Slider(start=1, end=10, value=1, step=0.1)
def callback(attr, old, new):
y = np.log(x) * np.sin(x * new)
source.data = dict(x=x, y=y)
slider.on_change("value", callback)
doc.add_root(row(widgetbox(slider), plot))
|
def modify_doc(doc):
x = np.linspace(0, 10, 1000)
y = np.log(x) * np.sin(x)
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure()
plot.line("x", "y", source=source)
slider = Slider(start=1, end=10, value=1, step=0.1)
def callback(attr, old, new):
y = np.log(x) * np.sin(x * new)
source.data = dict(x=x, y=y)
slider.on_change("value", callback)
doc.add_root(column(slider, plot))
|
https://github.com/bokeh/bokeh/issues/5742
|
$ python flask_embed.py
Opening Flask app with embedded Bokeh application on http://localhost:8080/
[2017-01-16 21:29:37,150] ERROR in app: Exception on / [GET]
Traceback (most recent call last):
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "flask_embed.py", line 43, in bkapp_page
return render_template("embed.html", script=script)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/templating.py", line 133, in render_template
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/jinja2/environment.py", line 851, in get_or_select_template
return self.get_template(template_name_or_list, parent, globals)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/jinja2/environment.py", line 812, in get_template
return self._load_template(name, self.make_globals(globals))
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/jinja2/environment.py", line 774, in _load_template
cache_key = self.loader.get_source(self, name)[1]
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/templating.py", line 57, in get_source
return self._get_source_fast(environment, template)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/templating.py", line 85, in _get_source_fast
raise TemplateNotFound(template)
jinja2.exceptions.TemplateNotFound: embed.html
|
jinja2.exceptions.TemplateNotFound
|
def bkapp_page():
script = autoload_server(model=None, url="http://localhost:5006/bkapp")
return render_template("embed.html", script=script, template="Flask")
|
def bkapp_page():
script = autoload_server(model=None, url="http://localhost:5006/bkapp")
return render_template("embed.html", script=script)
|
https://github.com/bokeh/bokeh/issues/5742
|
$ python flask_embed.py
Opening Flask app with embedded Bokeh application on http://localhost:8080/
[2017-01-16 21:29:37,150] ERROR in app: Exception on / [GET]
Traceback (most recent call last):
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "flask_embed.py", line 43, in bkapp_page
return render_template("embed.html", script=script)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/templating.py", line 133, in render_template
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/jinja2/environment.py", line 851, in get_or_select_template
return self.get_template(template_name_or_list, parent, globals)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/jinja2/environment.py", line 812, in get_template
return self._load_template(name, self.make_globals(globals))
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/jinja2/environment.py", line 774, in _load_template
cache_key = self.loader.get_source(self, name)[1]
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/templating.py", line 57, in get_source
return self._get_source_fast(environment, template)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/templating.py", line 85, in _get_source_fast
raise TemplateNotFound(template)
jinja2.exceptions.TemplateNotFound: embed.html
|
jinja2.exceptions.TemplateNotFound
|
def get(self):
template = env.get_template("embed.html")
script = autoload_server(model=None, url="http://localhost:5006/bkapp")
self.write(template.render(script=script, template="Tornado"))
|
def get(self):
template = env.get_template("embed.html")
script = autoload_server(model=None, url="http://localhost:5006/bkapp")
self.write(template.render(script=script))
|
https://github.com/bokeh/bokeh/issues/5742
|
$ python flask_embed.py
Opening Flask app with embedded Bokeh application on http://localhost:8080/
[2017-01-16 21:29:37,150] ERROR in app: Exception on / [GET]
Traceback (most recent call last):
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "flask_embed.py", line 43, in bkapp_page
return render_template("embed.html", script=script)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/templating.py", line 133, in render_template
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/jinja2/environment.py", line 851, in get_or_select_template
return self.get_template(template_name_or_list, parent, globals)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/jinja2/environment.py", line 812, in get_template
return self._load_template(name, self.make_globals(globals))
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/jinja2/environment.py", line 774, in _load_template
cache_key = self.loader.get_source(self, name)[1]
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/templating.py", line 57, in get_source
return self._get_source_fast(environment, template)
File "/home/juanlu/.miniconda3/envs/py35/lib/python3.5/site-packages/flask/templating.py", line 85, in _get_source_fast
raise TemplateNotFound(template)
jinja2.exceptions.TemplateNotFound: embed.html
|
jinja2.exceptions.TemplateNotFound
|
def _run_nodejs(script, input):
if _nodejs is None:
raise RuntimeError(
"node.js is needed to allow compilation of custom models "
+ '("conda install -c bokeh nodejs" or follow https://nodejs.org/en/download/)'
)
proc = Popen([_nodejs, script], stdout=PIPE, stderr=PIPE, stdin=PIPE)
(stdout, errout) = proc.communicate(input=json.dumps(input).encode())
if proc.returncode != 0:
raise RuntimeError(errout)
else:
return AttrDict(json.loads(stdout.decode()))
|
def _run_nodejs(script, input):
if _nodejs is None:
raise RuntimeError(
"node.js is needed to allow compilation of custom models "
+ '("conda install -c bokeh nodejs" or follow https://nodejs.org/en/download/)'
)
proc = Popen([_nodejs, script], stdout=PIPE, stderr=PIPE, stdin=PIPE)
(stdout, errout) = proc.communicate(input=json.dumps(input).encode())
if len(errout) > 0:
raise RuntimeError(errout)
else:
return AttrDict(json.loads(stdout.decode()))
|
https://github.com/bokeh/bokeh/issues/5645
|
[bryan:...xamples/custom/font-awesome]$ BOKEH_RESOURCES=inline python font-awesome.py (bryanv/5626_properties_py)
Traceback (most recent call last):
File "font-awesome.py", line 10, in <module>
show(column(btn))
File "/Users/bryan/work/bokeh/bokeh/io.py", line 300, in show
return _show_with_state(obj, _state, browser, new, notebook_handle=notebook_handle)
File "/Users/bryan/work/bokeh/bokeh/io.py", line 318, in _show_with_state
_show_file_with_state(obj, state, new, controller)
File "/Users/bryan/work/bokeh/bokeh/io.py", line 323, in _show_file_with_state
filename = save(obj, state=state)
File "/Users/bryan/work/bokeh/bokeh/io.py", line 382, in save
_save_helper(obj, filename, resources, title, validate)
File "/Users/bryan/work/bokeh/bokeh/io.py", line 447, in _save_helper
html = standalone_html_page_for_models(obj, resources, title)
File "/Users/bryan/work/bokeh/bokeh/embed.py", line 600, in standalone_html_page_for_models
return file_html(models, resources, title)
File "/Users/bryan/work/bokeh/bokeh/embed.py", line 309, in file_html
bundle = _bundle_for_objs_and_resources(models, resources)
File "/Users/bryan/work/bokeh/bokeh/embed.py", line 206, in _bundle_for_objs_and_resources
bokeh_js = js_resources.render_js()
File "/Users/bryan/work/bokeh/bokeh/resources.py", line 395, in render_js
return JS_RESOURCES.render(js_raw=self.js_raw, js_files=self.js_files)
File "/Users/bryan/work/bokeh/bokeh/resources.py", line 388, in js_raw
custom_models = gen_custom_models_static()
File "/Users/bryan/work/bokeh/bokeh/util/compiler.py", line 298, in gen_custom_models_static
_run_npm(["install"] + [ name + "@" + version for (name, version) in dependencies ])
File "/Users/bryan/work/bokeh/bokeh/util/compiler.py", line 149, in _run_npm
raise RuntimeError(errout)
RuntimeError: b"npm WARN enoent ENOENT: no such file or directory, open '/Users/bryan/work/bokeh/examples/custom/font-awesome/package.json'\nnpm WARN font-awesome No description\nnpm WARN font-awesome No repository field.\nnpm WARN font-awesome No README data\nnpm WARN font-awesome No license field.\n"
|
RuntimeError
|
def _run_npm(argv):
if _nodejs is None:
raise RuntimeError(
"node.js is needed to allow compilation of custom models "
+ '("conda install -c bokeh nodejs" or follow https://nodejs.org/en/download/)'
)
_npm = join(dirname(_nodejs), "npm")
proc = Popen([_npm] + argv, stdout=PIPE, stderr=PIPE, stdin=PIPE)
(_stdout, errout) = proc.communicate()
if proc.returncode != 0:
raise RuntimeError(errout)
else:
return None
|
def _run_npm(argv):
if _nodejs is None:
raise RuntimeError(
"node.js is needed to allow compilation of custom models "
+ '("conda install -c bokeh nodejs" or follow https://nodejs.org/en/download/)'
)
_npm = join(dirname(_nodejs), "npm")
proc = Popen([_npm] + argv, stdout=PIPE, stderr=PIPE, stdin=PIPE)
(_stdout, errout) = proc.communicate()
if len(errout) > 0:
raise RuntimeError(errout)
else:
return None
|
https://github.com/bokeh/bokeh/issues/5645
|
[bryan:...xamples/custom/font-awesome]$ BOKEH_RESOURCES=inline python font-awesome.py (bryanv/5626_properties_py)
Traceback (most recent call last):
File "font-awesome.py", line 10, in <module>
show(column(btn))
File "/Users/bryan/work/bokeh/bokeh/io.py", line 300, in show
return _show_with_state(obj, _state, browser, new, notebook_handle=notebook_handle)
File "/Users/bryan/work/bokeh/bokeh/io.py", line 318, in _show_with_state
_show_file_with_state(obj, state, new, controller)
File "/Users/bryan/work/bokeh/bokeh/io.py", line 323, in _show_file_with_state
filename = save(obj, state=state)
File "/Users/bryan/work/bokeh/bokeh/io.py", line 382, in save
_save_helper(obj, filename, resources, title, validate)
File "/Users/bryan/work/bokeh/bokeh/io.py", line 447, in _save_helper
html = standalone_html_page_for_models(obj, resources, title)
File "/Users/bryan/work/bokeh/bokeh/embed.py", line 600, in standalone_html_page_for_models
return file_html(models, resources, title)
File "/Users/bryan/work/bokeh/bokeh/embed.py", line 309, in file_html
bundle = _bundle_for_objs_and_resources(models, resources)
File "/Users/bryan/work/bokeh/bokeh/embed.py", line 206, in _bundle_for_objs_and_resources
bokeh_js = js_resources.render_js()
File "/Users/bryan/work/bokeh/bokeh/resources.py", line 395, in render_js
return JS_RESOURCES.render(js_raw=self.js_raw, js_files=self.js_files)
File "/Users/bryan/work/bokeh/bokeh/resources.py", line 388, in js_raw
custom_models = gen_custom_models_static()
File "/Users/bryan/work/bokeh/bokeh/util/compiler.py", line 298, in gen_custom_models_static
_run_npm(["install"] + [ name + "@" + version for (name, version) in dependencies ])
File "/Users/bryan/work/bokeh/bokeh/util/compiler.py", line 149, in _run_npm
raise RuntimeError(errout)
RuntimeError: b"npm WARN enoent ENOENT: no such file or directory, open '/Users/bryan/work/bokeh/examples/custom/font-awesome/package.json'\nnpm WARN font-awesome No description\nnpm WARN font-awesome No repository field.\nnpm WARN font-awesome No README data\nnpm WARN font-awesome No license field.\n"
|
RuntimeError
|
def __init__(self, source, path, argv):
self._failed = False
self._error = None
self._error_detail = None
import ast
self._code = None
try:
nodes = ast.parse(source, path)
self._code = compile(nodes, filename=path, mode="exec", dont_inherit=True)
except SyntaxError as e:
self._failed = True
self._error = 'Invalid syntax in "%s" on line %d:\n%s' % (
os.path.basename(e.filename),
e.lineno,
e.text,
)
import traceback
self._error_detail = traceback.format_exc()
self._path = path
self._source = source
self._argv = argv
self.ran = False
|
def __init__(self, source, path, argv):
self._failed = False
self._error = None
self._error_detail = None
import ast
self._code = None
try:
nodes = ast.parse(source, path)
self._code = compile(nodes, filename=path, mode="exec")
except SyntaxError as e:
self._failed = True
self._error = 'Invalid syntax in "%s" on line %d:\n%s' % (
os.path.basename(e.filename),
e.lineno,
e.text,
)
import traceback
self._error_detail = traceback.format_exc()
self._path = path
self._source = source
self._argv = argv
self.ran = False
|
https://github.com/bokeh/bokeh/issues/5590
|
2016-12-20 07:52:02,473 Error running application handler <bokeh.application.handlers.script.ScriptHandler object at 0x10bb20f50>: invalid syntax (step3.py, line 41)
File "step4.py", line 3, in <module>:
execfile("step3.py") Traceback (most recent call last):
File "/Users/pwang/anaconda/envs/bokehdemo/lib/python2.7/site-packages/bokeh/application/handlers/code_runner.py", line 83, in run
exec(self._code, module.__dict__)
File "/Users/pwang/src/bokeh.demo/webinar/demo1/step4.py", line 3, in <module>
execfile("step3.py")
File "step3.py", line 41
print attribute, "changed from '%s' to '%s'" % (old, new)
^
SyntaxError: invalid syntax
|
SyntaxError
|
def _load_stock(filename):
data = {
"date": [],
"open": [],
"high": [],
"low": [],
"close": [],
"volume": [],
"adj_close": [],
}
with _open_csv_file(filename) as f:
next(f)
reader = csv.reader(f, delimiter=",")
for row in reader:
date, open_price, high, low, close, volume, adj_close = row
data["date"].append(date)
data["open"].append(float(open_price))
data["high"].append(float(high))
data["low"].append(float(low))
data["close"].append(float(close))
data["volume"].append(int(volume))
data["adj_close"].append(float(adj_close))
return data
|
def _load_stock(filename):
data = {
"date": [],
"open": [],
"high": [],
"low": [],
"close": [],
"volume": [],
"adj_close": [],
}
with open(filename) as f:
next(f)
reader = csv.reader(f, delimiter=",")
for row in reader:
date, open_price, high, low, close, volume, adj_close = row
data["date"].append(date)
data["open"].append(float(open_price))
data["high"].append(float(high))
data["low"].append(float(low))
data["close"].append(float(close))
data["volume"].append(int(volume))
data["adj_close"].append(float(adj_close))
return data
|
https://github.com/bokeh/bokeh/issues/5029
|
---------------------------------------------------------------------------
UnicodeDecodeError Traceback (most recent call last)
<ipython-input-6-a97873fe504b> in <module>()
2 from bokeh.palettes import Viridis6
3 from bokeh.plotting import figure, show, output_file, ColumnDataSource
----> 4 from bokeh.sampledata.us_counties import data as counties
5 from bokeh.sampledata.unemployment import data as unemployment
/home/nbuser/anaconda3_410/lib/python3.5/site-packages/bokeh/sampledata/us_counties.py in <module>()
24 next(f)
25 reader = csv.reader(f, delimiter=',', quotechar='"')
---> 26 for row in reader:
27 name, dummy, state, dummy, geometry, dummy, dummy, dummy, dummy, state_id, county_id, dummy, dummy = row
28 xml = et.fromstring(geometry)
/home/nbuser/anaconda3_410/lib/python3.5/encodings/ascii.py in decode(self, input, final)
24 class IncrementalDecoder(codecs.IncrementalDecoder):
25 def decode(self, input, final=False):
---> 26 return codecs.ascii_decode(input, self.errors)[0]
27
28 class StreamWriter(Codec,codecs.StreamWriter):
UnicodeDecodeError: 'ascii' codec can't decode byte 0xef in position 2514: ordinal not in range(128)
|
UnicodeDecodeError
|
def __init__(
self, session_id=None, websocket_url=DEFAULT_SERVER_WEBSOCKET_URL, io_loop=None
):
"""
A connection which attaches to a particular named session on the server.
Always call either pull() or push() immediately after creating the session
(until these are called session.document will be None).
The bokeh.client.push_session() and bokeh.client.pull_session() functions
will construct a ClientSession and push or pull in one step, so they are
a good way to obtain a ClientSession.
Args:
session_id (str) :
The name of the session or None to generate one
websocket_url (str) :
Websocket URL to connect to
io_loop (``tornado.ioloop.IOLoop``, optional) :
The IOLoop to use for the websocket
"""
self._document = None
self._id = self._ensure_session_id(session_id)
from ._connection import ClientConnection
self._connection = ClientConnection(
session=self, io_loop=io_loop, websocket_url=websocket_url
)
self._current_patch = None
from bokeh.util.tornado import _DocumentCallbackGroup
self._callbacks = _DocumentCallbackGroup(self._connection.io_loop)
|
def __init__(
self, session_id=None, websocket_url=DEFAULT_SERVER_WEBSOCKET_URL, io_loop=None
):
"""
A connection which attaches to a particular named session on the server.
Always call either pull() or push() immediately after creating the session
(until these are called session.document will be None).
The bokeh.client.push_session() and bokeh.client.pull_session() functions
will construct a ClientSession and push or pull in one step, so they are
a good way to obtain a ClientSession.
Args:
session_id (str) :
The name of the session or None to generate one
websocket_url (str) :
Websocket URL to connect to
io_loop (``tornado.ioloop.IOLoop``, optional) :
The IOLoop to use for the websocket
"""
self._document = None
self._id = self._ensure_session_id(session_id)
self._connection = ClientConnection(
session=self, io_loop=io_loop, websocket_url=websocket_url
)
self._current_patch = None
self._callbacks = _DocumentCallbackGroup(self._connection.io_loop)
|
https://github.com/bokeh/bokeh/issues/5119
|
$ python -c 'import bokeh.io'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "bokeh/io.py", line 29, in <module>
from .core.state import State
File "bokeh/core/state.py", line 46, in <module>
from ..client import DEFAULT_SESSION_ID
File "bokeh/client/__init__.py", line 6, in <module>
from .session import ClientSession, pull_session, push_session, show_session, DEFAULT_SESSION_ID
File "bokeh/client/session.py", line 10, in <module>
from ._connection import ClientConnection
File "bokeh/client/_connection.py", line 11, in <module>
from tornado import gen, locks
ImportError: No module named tornado
|
ImportError
|
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if not self.bin:
super(ColorAttr, self)._generate_items(df, columns)
else:
if len(columns) == 1 and ChartDataSource.is_number(df[columns[0]]):
self.bins = Bins(
source=ColumnDataSource(df),
column=columns[0],
bin_count=len(self.iterable),
aggregate=False,
)
if self.sort:
self.bins.sort(ascending=self.ascending)
self.items = [bin.label[0] for bin in self.bins]
else:
raise ValueError(
"Binned colors can only be created for one column of \
numerical data."
)
|
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if not self.bin:
super(ColorAttr, self)._generate_items(df, columns)
else:
if len(columns) == 1 and ChartDataSource.is_number(df[columns[0]]):
self.bins = Bins(
source=ColumnDataSource(df),
column=columns[0],
bin_count=len(self.iterable),
aggregate=False,
)
items = [bin.label[0] for bin in self.bins]
if self.sort:
items = sorted(items)
if not self.ascending:
items = reversed(items)
self.items = list(items)
else:
raise ValueError(
"Binned colors can only be created for one column of \
numerical data."
)
|
https://github.com/bokeh/bokeh/issues/3124
|
Traceback (most recent call last):
File "test.py", line 22, in <module>
hist = Histogram(df, values='values', legend=True,
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/histogram_builder.py", line 53, in Histogram
return create_and_build(HistogramBuilder, data, **kw)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 64, in create_and_build
chart.add_builder(builder)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 134, in add_builder
builder.create(self)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 303, in create
chart.add_renderers(self, renderers)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 129, in add_renderers
self.renderers += renderers
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/bar_builder.py", line 228, in _yield_renderers
**group_kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 469, in __init__
super(HistogramGlyph, self).__init__(**kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 61, in __init__
self.setup()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 64, in setup
self.renderers = [renderer for renderer in self.build_renderers()]
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 478, in build_renderers
self.bins = Bins(values=self.values, bin_count=self.bin_count)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 164, in __init__
super(Bins, self).__init__(**properties)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 23, in __init__
self._refresh()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 29, in _refresh
self.calculate()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 174, in calculate
binned, bin_edges = pd.cut(self.get_data(), self.bin_count, retbins=True, precision=0)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tools/tile.py", line 100, in cut
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/function_base.py", line 84, in linspace
num = int(num)
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def HeatMap(
data,
x=None,
y=None,
values=None,
stat="count",
xscale="categorical",
yscale="categorical",
xgrid=False,
ygrid=False,
**kw,
):
"""Create a HeatMap chart using :class:`HeatMapBuilder <bokeh.charts.builder.heatmap_builder.HeatMapBuilder>`
to render the geometry from values.
A HeatMap is a 3 Dimensional chart that crosses two dimensions, then aggregates
values if there are multiple that correspond to the intersection of the horizontal
and vertical dimensions. The value that falls at the intersection is then mapped to a
color in a palette. All values that map to the positions on the chart are binned into
the same amount of bins as there are colors in the pallete.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
In addition the the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import HeatMap, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = OrderedDict()
xyvalues['apples'] = [4,5,8]
xyvalues['bananas'] = [1,2,4]
xyvalues['pears'] = [6,5,4]
hm = HeatMap(xyvalues, title='Fruits')
output_file('heatmap.html')
show(hm)
"""
kw["x"] = x
kw["y"] = y
kw["values"] = values
chart = create_and_build(
HeatMapBuilder,
data,
xscale=xscale,
yscale=yscale,
xgrid=xgrid,
ygrid=ygrid,
**kw,
)
chart.add_tools(HoverTool(tooltips=[(stat, "@values")]))
return chart
|
def HeatMap(
data,
x=None,
y=None,
values=None,
stat="count",
xscale="categorical",
yscale="categorical",
xgrid=False,
ygrid=False,
**kw,
):
"""Create a HeatMap chart using :class:`HeatMapBuilder <bokeh.charts.builder.heatmap_builder.HeatMapBuilder>`
to render the geometry from values.
A HeatMap is a 3 Dimensional chart that crosses two dimensions, then aggregates
values if there are multiple that correspond to the intersection of the horizontal
and vertical dimensions. The value that falls at the intersection is then mapped to a
color in a palette. All values that map to the positions on the chart are binned into
the same amount of bins as there are colors in the pallete.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
In addition the the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import HeatMap, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = OrderedDict()
xyvalues['apples'] = [4,5,8]
xyvalues['bananas'] = [1,2,4]
xyvalues['pears'] = [6,5,4]
hm = HeatMap(xyvalues, title='Fruits')
output_file('heatmap.html')
show(hm)
"""
kw["x"] = x
kw["y"] = y
kw["color"] = values
chart = create_and_build(
HeatMapBuilder,
data,
xscale=xscale,
yscale=yscale,
xgrid=xgrid,
ygrid=ygrid,
**kw,
)
chart.add_tools(HoverTool(tooltips=[("value", "@rate")]))
return chart
|
https://github.com/bokeh/bokeh/issues/3124
|
Traceback (most recent call last):
File "test.py", line 22, in <module>
hist = Histogram(df, values='values', legend=True,
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/histogram_builder.py", line 53, in Histogram
return create_and_build(HistogramBuilder, data, **kw)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 64, in create_and_build
chart.add_builder(builder)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 134, in add_builder
builder.create(self)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 303, in create
chart.add_renderers(self, renderers)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 129, in add_renderers
self.renderers += renderers
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/bar_builder.py", line 228, in _yield_renderers
**group_kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 469, in __init__
super(HistogramGlyph, self).__init__(**kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 61, in __init__
self.setup()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 64, in setup
self.renderers = [renderer for renderer in self.build_renderers()]
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 478, in build_renderers
self.bins = Bins(values=self.values, bin_count=self.bin_count)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 164, in __init__
super(Bins, self).__init__(**properties)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 23, in __init__
self._refresh()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 29, in _refresh
self.calculate()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 174, in calculate
binned, bin_edges = pd.cut(self.get_data(), self.bin_count, retbins=True, precision=0)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tools/tile.py", line 100, in cut
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/function_base.py", line 84, in linspace
num = int(num)
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def process_data(self):
"""Take the CategoricalHeatMap data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the rect glyph inside the ``_yield_renderers`` method.
"""
for op in self._data.operations:
if isinstance(op, Bins):
self.bin_width = op.get_dim_width("x")
self.bin_height = op.get_dim_width("y")
self._bins = op
# if we have values specified but color attribute not setup, do so
if self.values.selection is None:
self.values.selection = "values"
if self.attributes["color"].columns is None:
self.attributes["color"].setup(
data=self._data.source, columns=self.values.selection
)
self.attributes["color"].add_bin_labels(self._data)
|
def process_data(self):
"""Take the CategoricalHeatMap data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the rect glyph inside the ``_yield_renderers`` method.
"""
for op in self._data.operations:
if isinstance(op, Bins):
self.bin_width = op.get_dim_width("x")
self.bin_height = op.get_dim_width("y")
self._bins = op
# if we have values specified but color attribute not setup, do so
if self.attributes["color"].columns is None:
self.attributes["color"].setup(
data=self._data.source, columns=self.values.selection or "values"
)
self.attributes["color"].add_bin_labels(self._data)
|
https://github.com/bokeh/bokeh/issues/3124
|
Traceback (most recent call last):
File "test.py", line 22, in <module>
hist = Histogram(df, values='values', legend=True,
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/histogram_builder.py", line 53, in Histogram
return create_and_build(HistogramBuilder, data, **kw)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 64, in create_and_build
chart.add_builder(builder)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 134, in add_builder
builder.create(self)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 303, in create
chart.add_renderers(self, renderers)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 129, in add_renderers
self.renderers += renderers
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/bar_builder.py", line 228, in _yield_renderers
**group_kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 469, in __init__
super(HistogramGlyph, self).__init__(**kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 61, in __init__
self.setup()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 64, in setup
self.renderers = [renderer for renderer in self.build_renderers()]
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 478, in build_renderers
self.bins = Bins(values=self.values, bin_count=self.bin_count)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 164, in __init__
super(Bins, self).__init__(**properties)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 23, in __init__
self._refresh()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 29, in _refresh
self.calculate()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 174, in calculate
binned, bin_edges = pd.cut(self.get_data(), self.bin_count, retbins=True, precision=0)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tools/tile.py", line 100, in cut
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/function_base.py", line 84, in linspace
num = int(num)
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def yield_renderers(self):
"""Use the rect glyphs to display the categorical heatmap.
Takes reference points from data loaded at the ColumnDataSurce.
"""
for group in self._data.groupby(**self.attributes):
glyph = HeatmapGlyph(
x=group.get_values(self.x.selection),
y=group.get_values(self.y.selection),
values=group.get_values(self.values.selection + "_values"),
width=self.bin_width * self.spacing_ratio,
height=self.bin_height * self.spacing_ratio,
line_color=group["color"],
fill_color=group["color"],
label=group.label,
)
self.add_glyph(group, glyph)
for renderer in glyph.renderers:
yield renderer
|
def yield_renderers(self):
"""Use the rect glyphs to display the categorical heatmap.
Takes reference points from data loaded at the ColumnDataSurce.
"""
for group in self._data.groupby(**self.attributes):
glyph = HeatmapGlyph(
x=group.get_values(self.x.selection),
y=group.get_values(self.y.selection),
width=self.bin_width * self.spacing_ratio,
height=self.bin_height * self.spacing_ratio,
line_color=group["color"],
fill_color=group["color"],
)
self.add_glyph(group, glyph)
for renderer in glyph.renderers:
yield renderer
|
https://github.com/bokeh/bokeh/issues/3124
|
Traceback (most recent call last):
File "test.py", line 22, in <module>
hist = Histogram(df, values='values', legend=True,
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/histogram_builder.py", line 53, in Histogram
return create_and_build(HistogramBuilder, data, **kw)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 64, in create_and_build
chart.add_builder(builder)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 134, in add_builder
builder.create(self)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 303, in create
chart.add_renderers(self, renderers)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 129, in add_renderers
self.renderers += renderers
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/bar_builder.py", line 228, in _yield_renderers
**group_kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 469, in __init__
super(HistogramGlyph, self).__init__(**kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 61, in __init__
self.setup()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 64, in setup
self.renderers = [renderer for renderer in self.build_renderers()]
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 478, in build_renderers
self.bins = Bins(values=self.values, bin_count=self.bin_count)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 164, in __init__
super(Bins, self).__init__(**properties)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 23, in __init__
self._refresh()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 29, in _refresh
self.calculate()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 174, in calculate
binned, bin_edges = pd.cut(self.get_data(), self.bin_count, retbins=True, precision=0)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tools/tile.py", line 100, in cut
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/function_base.py", line 84, in linspace
num = int(num)
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def __init__(
self,
x,
y,
values,
column=None,
stat="count",
glyph="rect",
width=1,
height=1,
**kwargs,
):
kwargs["x"] = x
kwargs["y"] = y
kwargs["values"] = values
kwargs["column"] = column
kwargs["stat"] = stat
kwargs["glyph_name"] = glyph
kwargs["height"] = height
kwargs["width"] = width
super(XyGlyph, self).__init__(**kwargs)
self.setup()
|
def __init__(
self, x, y, column=None, stat="count", glyph="rect", width=1, height=1, **kwargs
):
kwargs["x"] = x
kwargs["y"] = y
kwargs["column"] = column
kwargs["stat"] = stat
kwargs["glyph_name"] = glyph
kwargs["height"] = height
kwargs["width"] = width
super(XyGlyph, self).__init__(**kwargs)
self.setup()
|
https://github.com/bokeh/bokeh/issues/3124
|
Traceback (most recent call last):
File "test.py", line 22, in <module>
hist = Histogram(df, values='values', legend=True,
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/histogram_builder.py", line 53, in Histogram
return create_and_build(HistogramBuilder, data, **kw)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 64, in create_and_build
chart.add_builder(builder)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 134, in add_builder
builder.create(self)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 303, in create
chart.add_renderers(self, renderers)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 129, in add_renderers
self.renderers += renderers
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/bar_builder.py", line 228, in _yield_renderers
**group_kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 469, in __init__
super(HistogramGlyph, self).__init__(**kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 61, in __init__
self.setup()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 64, in setup
self.renderers = [renderer for renderer in self.build_renderers()]
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 478, in build_renderers
self.bins = Bins(values=self.values, bin_count=self.bin_count)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 164, in __init__
super(Bins, self).__init__(**properties)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 23, in __init__
self._refresh()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 29, in _refresh
self.calculate()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 174, in calculate
binned, bin_edges = pd.cut(self.get_data(), self.bin_count, retbins=True, precision=0)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tools/tile.py", line 100, in cut
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/function_base.py", line 84, in linspace
num = int(num)
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def build_source(self):
return {"x": self.x, "y": self.y, "values": self.values}
|
def build_source(self):
return {"x": self.x, "y": self.y}
|
https://github.com/bokeh/bokeh/issues/3124
|
Traceback (most recent call last):
File "test.py", line 22, in <module>
hist = Histogram(df, values='values', legend=True,
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/histogram_builder.py", line 53, in Histogram
return create_and_build(HistogramBuilder, data, **kw)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 64, in create_and_build
chart.add_builder(builder)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 134, in add_builder
builder.create(self)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 303, in create
chart.add_renderers(self, renderers)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 129, in add_renderers
self.renderers += renderers
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/bar_builder.py", line 228, in _yield_renderers
**group_kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 469, in __init__
super(HistogramGlyph, self).__init__(**kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 61, in __init__
self.setup()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 64, in setup
self.renderers = [renderer for renderer in self.build_renderers()]
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 478, in build_renderers
self.bins = Bins(values=self.values, bin_count=self.bin_count)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 164, in __init__
super(Bins, self).__init__(**properties)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 23, in __init__
self._refresh()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 29, in _refresh
self.calculate()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 174, in calculate
binned, bin_edges = pd.cut(self.get_data(), self.bin_count, retbins=True, precision=0)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tools/tile.py", line 100, in cut
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/function_base.py", line 84, in linspace
num = int(num)
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def calc_num_bins(self, values):
"""Calculate optimal number of bins using IQR.
From: http://stats.stackexchange.com/questions/114490/optimal-bin-width-for-two-dimensional-histogram
"""
iqr = self.q3.value - self.q1.value
if iqr == 0:
self.bin_width = np.sqrt(values.size)
else:
self.bin_width = 2 * iqr * (len(values) ** -(1.0 / 3.0))
self.bin_count = int(np.ceil((values.max() - values.min()) / self.bin_width))
if self.bin_count == 1:
self.bin_count = 3
|
def calc_num_bins(self, values):
iqr = self.q3.value - self.q1.value
self.bin_width = 2 * iqr * (len(values) ** -(1.0 / 3.0))
self.bin_count = int(np.ceil((values.max() - values.min()) / self.bin_width))
|
https://github.com/bokeh/bokeh/issues/3124
|
Traceback (most recent call last):
File "test.py", line 22, in <module>
hist = Histogram(df, values='values', legend=True,
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/histogram_builder.py", line 53, in Histogram
return create_and_build(HistogramBuilder, data, **kw)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 64, in create_and_build
chart.add_builder(builder)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 134, in add_builder
builder.create(self)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 303, in create
chart.add_renderers(self, renderers)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 129, in add_renderers
self.renderers += renderers
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/bar_builder.py", line 228, in _yield_renderers
**group_kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 469, in __init__
super(HistogramGlyph, self).__init__(**kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 61, in __init__
self.setup()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 64, in setup
self.renderers = [renderer for renderer in self.build_renderers()]
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 478, in build_renderers
self.bins = Bins(values=self.values, bin_count=self.bin_count)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 164, in __init__
super(Bins, self).__init__(**properties)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 23, in __init__
self._refresh()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 29, in _refresh
self.calculate()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 174, in calculate
binned, bin_edges = pd.cut(self.get_data(), self.bin_count, retbins=True, precision=0)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tools/tile.py", line 100, in cut
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/function_base.py", line 84, in linspace
num = int(num)
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def __init__(
self,
values=None,
column=None,
dimensions=None,
bins=None,
stat="count",
source=None,
**properties,
):
if isinstance(stat, str):
stat = stats[stat]()
bin_count = properties.get("bin_count")
if bin_count is not None and not isinstance(bin_count, list):
properties["bin_count"] = [bin_count]
else:
properties["bin_count"] = []
properties["dimensions"] = dimensions
properties["column"] = column
properties["bins"] = bins
properties["stat"] = stat
properties["values"] = values
properties["source"] = source
super(Bins, self).__init__(**properties)
|
def __init__(
self,
values=None,
column=None,
dimensions=None,
bins=None,
stat="count",
source=None,
**properties,
):
if isinstance(stat, str):
stat = stats[stat]()
# explicit dimensions are handled as extra kwargs
if dimensions is None:
dimensions = properties.copy()
for dim, col in iteritems(properties):
if not isinstance(col, str):
dimensions.pop(dim)
for dim in list(dimensions.keys()):
properties.pop(dim)
bin_count = properties.get("bin_count")
if bin_count is not None and not isinstance(bin_count, list):
properties["bin_count"] = [bin_count]
else:
properties["bin_count"] = []
properties["dimensions"] = dimensions
properties["column"] = column
properties["bins"] = bins
properties["stat"] = stat
properties["values"] = values
properties["source"] = source
super(Bins, self).__init__(**properties)
|
https://github.com/bokeh/bokeh/issues/3124
|
Traceback (most recent call last):
File "test.py", line 22, in <module>
hist = Histogram(df, values='values', legend=True,
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/histogram_builder.py", line 53, in Histogram
return create_and_build(HistogramBuilder, data, **kw)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 64, in create_and_build
chart.add_builder(builder)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 134, in add_builder
builder.create(self)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 303, in create
chart.add_renderers(self, renderers)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 129, in add_renderers
self.renderers += renderers
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/bar_builder.py", line 228, in _yield_renderers
**group_kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 469, in __init__
super(HistogramGlyph, self).__init__(**kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 61, in __init__
self.setup()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 64, in setup
self.renderers = [renderer for renderer in self.build_renderers()]
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 478, in build_renderers
self.bins = Bins(values=self.values, bin_count=self.bin_count)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 164, in __init__
super(Bins, self).__init__(**properties)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 23, in __init__
self._refresh()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 29, in _refresh
self.calculate()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 174, in calculate
binned, bin_edges = pd.cut(self.get_data(), self.bin_count, retbins=True, precision=0)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tools/tile.py", line 100, in cut
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/function_base.py", line 84, in linspace
num = int(num)
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def _get_stat(self, dim):
stat_kwargs = {}
if self.source is not None:
stat_kwargs["source"] = self.source
if len(self.dimensions.keys()) > 0:
stat_kwargs["column"] = self.dimensions[dim]
elif self.column is not None:
stat_kwargs["column"] = self.column
else:
stat_kwargs["column"] = "values"
elif self.values is not None:
stat_kwargs["values"] = self.values
else:
raise ValueError("Could not identify bin stat for %s" % dim)
# ToDo: handle multiple bin count inputs for each dimension
if len(self.bin_count) == 1:
stat_kwargs["bin_count"] = self.bin_count[0]
return BinStats(**stat_kwargs)
|
def _get_stat(self, dim):
stat_kwargs = {}
if self.source is not None:
stat_kwargs["source"] = self.source
if len(self.dimensions.keys()) > 0:
stat_kwargs["column"] = self.dimensions[dim]
elif self.column is not None:
stat_kwargs["column"] = self.column
else:
stat_kwargs["column"] = "values"
elif self.values is not None:
stat_kwargs["values"] = self.values
else:
raise ValueError("Could not identify bin stat for %s" % dim)
# if list(self.dimensions.keys()):
# stat_kwargs['bin_count'] = self.bin_count[idx]
return BinStats(**stat_kwargs)
|
https://github.com/bokeh/bokeh/issues/3124
|
Traceback (most recent call last):
File "test.py", line 22, in <module>
hist = Histogram(df, values='values', legend=True,
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/histogram_builder.py", line 53, in Histogram
return create_and_build(HistogramBuilder, data, **kw)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 64, in create_and_build
chart.add_builder(builder)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 134, in add_builder
builder.create(self)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_builder.py", line 303, in create
chart.add_renderers(self, renderers)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_chart.py", line 129, in add_renderers
self.renderers += renderers
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/builder/bar_builder.py", line 228, in _yield_renderers
**group_kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 469, in __init__
super(HistogramGlyph, self).__init__(**kwargs)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 61, in __init__
self.setup()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/_models.py", line 64, in setup
self.renderers = [renderer for renderer in self.build_renderers()]
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/glyphs.py", line 478, in build_renderers
self.bins = Bins(values=self.values, bin_count=self.bin_count)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 164, in __init__
super(Bins, self).__init__(**properties)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 23, in __init__
self._refresh()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 29, in _refresh
self.calculate()
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/bokeh/charts/stats.py", line 174, in calculate
binned, bin_edges = pd.cut(self.get_data(), self.bin_count, retbins=True, precision=0)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/pandas/tools/tile.py", line 100, in cut
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
File "/Users/<username>/Library/Enthought/Canopy_64bit/User/lib/python2.7/site-packages/numpy/core/function_base.py", line 84, in linspace
num = int(num)
OverflowError: cannot convert float infinity to integer
|
OverflowError
|
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if len(path_transforms) == 0:
path_transforms = [np.eye(3)]
edgecolor = styles["edgecolor"]
if np.size(edgecolor) == 0:
edgecolor = ["none"]
facecolor = styles["facecolor"]
if np.size(facecolor) == 0:
facecolor = ["none"]
elements = [
paths,
path_transforms,
offsets,
edgecolor,
styles["linewidth"],
facecolor,
]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
|
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles["edgecolor"]
if np.size(edgecolor) == 0:
edgecolor = ["none"]
facecolor = styles["facecolor"]
if np.size(facecolor) == 0:
facecolor = ["none"]
elements = [
paths,
path_transforms,
offsets,
edgecolor,
styles["linewidth"],
facecolor,
]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
|
https://github.com/bokeh/bokeh/issues/2643
|
Traceback (most recent call last):
File "violin.py", line 29, in <module>
show(mpl.to_bokeh())
File "/Users/mmari/Desktop/continuum/bokeh/bokeh/compat/mpl.py", line 77, in to_bokeh
exporter.run(fig)
File "/Users/mmari/Desktop/continuum/bokeh/bokeh/compat/mplexporter/exporter.py", line 51, in run
self.crawl_fig(fig)
File "/Users/mmari/Desktop/continuum/bokeh/bokeh/compat/mplexporter/exporter.py", line 118, in crawl_fig
self.crawl_ax(ax)
File "/Users/mmari/Desktop/continuum/bokeh/bokeh/compat/mplexporter/exporter.py", line 140, in crawl_ax
self.draw_collection(ax, collection)
File "/Users/mmari/Desktop/continuum/bokeh/bokeh/compat/bokeh_exporter.py", line 34, in draw_collection
super(BokehExporter, self).draw_collection(ax, collection, force_pathtrans, force_offsettrans)
File "/Users/mmari/Desktop/continuum/bokeh/bokeh/compat/mplexporter/exporter.py", line 272, in draw_collection
mplobj=collection)
File "/Users/mmari/Desktop/continuum/bokeh/bokeh/compat/mplexporter/renderers/base.py", line 255, in draw_path_collection
offsets, styles):
File "/Users/mmari/Desktop/continuum/bokeh/bokeh/compat/mplexporter/renderers/base.py", line 192, in _iter_path_collection
if not path_transforms:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def setup_events(self):
## hack - what we want to do is execute the update callback once
## and only if some properties in the graph have changed
## so we set should_update to be True in setup_events, and
## set it to be false as soon as the callback is done
if not self.name:
return
to_delete = []
for k in self.__dict__.keys():
if k.startswith("_func"):
to_delete.append(k)
for k in to_delete:
self.__dict__.pop(k)
counter = 0
if not self.update_registry.get(self.name):
name = "_func%d" % counter
func = self.create_registry[self.name]
setattr(self, name, self.callback(func))
for widget_name in self.widget_list:
obj = self.objects.get(widget_name)
if obj:
for attr in obj.class_properties():
obj.on_change(attr, self, name)
return
for selectors, func in self.update_registry[self.name]:
# hack because we lookup callbacks by func name
name = "_func%d" % counter
counter += 1
setattr(self, name, self.callback(func))
for selector in selectors:
if isinstance(selector, string_types):
self.widget_dict[selector].on_change("value", self, name)
continue
elif isinstance(selector, tuple):
selector, attrs = selector
else:
attrs = None
for obj in self.select(selector):
if obj == self:
continue
if attrs:
toiter = attrs
else:
toiter = obj.class_properties()
for attr in toiter:
obj.on_change(attr, self, name)
self.set_debounce()
|
def setup_events(self):
## hack - what we want to do is execute the update callback once
## and only if some properties in the graph have changed
## so we set should_update to be True in setup_events, and
## set it to be false as soon as the callback is done
if not self.name:
return
for k in self.__dict__.keys():
if k.startswith("_func"):
self.__dict__.pop(k)
counter = 0
if not self.update_registry.get(self.name):
name = "_func%d" % counter
func = self.create_registry[self.name]
setattr(self, name, self.callback(func))
for widget_name in self.widget_list:
obj = self.objects.get(widget_name)
if obj:
for attr in obj.class_properties():
obj.on_change(attr, self, name)
return
for selectors, func in self.update_registry[self.name]:
# hack because we lookup callbacks by func name
name = "_func%d" % counter
counter += 1
setattr(self, name, self.callback(func))
for selector in selectors:
if isinstance(selector, string_types):
self.widget_dict[selector].on_change("value", self, name)
continue
elif isinstance(selector, tuple):
selector, attrs = selector
else:
attrs = None
for obj in self.select(selector):
if obj == self:
continue
if attrs:
toiter = attrs
else:
toiter = obj.class_properties()
for attr in toiter:
obj.on_change(attr, self, name)
self.set_debounce()
|
https://github.com/bokeh/bokeh/issues/2124
|
Traceback (most recent call last):
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/bokeh/server/views/backbone.py", line 208, in _handle_specific_model_put
return _handle_specific_model(docid, typename, id, request.method)
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/bokeh/server/crossdomain.py", line 34, in wrapped_function
resp = make_response(f(*args, **kwargs))
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/bokeh/server/views/backbone.py", line 169, in _handle_specific_model
return update(docid, typename, id)
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/bokeh/server/views/bbauth.py", line 21, in wrapper
return func(*args, **kwargs)
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/bokeh/server/views/backbone.py", line 269, in update
t.load()
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/bokeh/server/serverbb.py", line 376, in load
clientdoc.load(*temporary_json, events='none', dirty=False)
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/bokeh/document.py", line 197, in load
m.setup_events()
File "/home/till/programming/miniconda3/lib/python3.4/site-packages/bokeh/models/widgets/layouts.py", line 143, in setup_events
for k in self.__dict__.keys():
RuntimeError: dictionary changed size during iteration
|
RuntimeError
|
def transform(self, value):
value = super(DashPattern, self).transform(value)
if isinstance(value, string_types):
try:
return self._dash_patterns[value]
except KeyError:
return [int(x) for x in value.split()]
else:
return value
|
def transform(self, value):
value = super(DashPattern, self).transform(value)
if isinstance(value, string_types):
try:
return self._dash_patterns[value]
except KeyError:
return map(int, value.split())
else:
return value
|
https://github.com/bokeh/bokeh/issues/527
|
======================================================================
FAIL: test_string (bokeh.tests.test_properties.TestDashPattern)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/Users/bryan/work/bokeh/bokeh/tests/test_properties.py", line 305, in test_string
self.assertEqual(f.pat, [])
AssertionError: <map object at 0x105c012e8> != []
|
AssertionError
|
def __query_and_simplify(searchTerm: str, apiKey: str = ytmApiKey) -> List[dict]:
"""
`str` `searchTerm` : the search term you would type into YTM's search bar
RETURNS `list<dict>`
For structure of dict, see comment at function declaration
"""
#! For dict structure, see end of this function (~ln 268, ln 283) and chill, this
#! function ain't soo big, there are plenty of comments and blank lines
# build and POST a query to YTM
url = "https://music.youtube.com/youtubei/v1/search?alt=json&key=" + apiKey
headers = {"Referer": "https://music.youtube.com/search"}
payload = {
"context": {"client": {"clientName": "WEB_REMIX", "clientVersion": "0.1"}},
"query": searchTerm,
}
request = post(url=url, headers=headers, json=payload)
response = convert_json_to_dict(request.text)
#! We will hereon call generic levels of nesting as 'Blocks'. What follows is an
#! overview of the basic nesting (you'll need it),
#!
#! Content blocks
#! Group results into 'Top result', 'Songs', 'Videos',
#! 'playlists', 'Albums', 'User notices', etc...
#!
#! Result blocks (under each 'Content block')
#! Represents an individual song, album, video, ..., playlist
#! result
#!
#! Detail blocks (under each 'Result block')
#! Represents a detail of the result, these might be one of
#! name, duration, channel, album, etc...
#!
#! Link block (under each 'Result block')
#! Contains the video/Album/playlist/Song/Artist link of the
#! result as found on YouTube
# Simplify and extract necessary details from senselessly nested YTM response
#! This process is broken into numbered steps below
#! nested-dict keys are used to (a) indicate nesting visually and (b) make the code
#! more readable
# 01. Break response into contentBLocks
contentBlocks = response["contents"]["sectionListRenderer"]["contents"]
# 02. Gather all result block in the same place
resultBlocks = []
for cBlock in contentBlocks:
# Ignore user-suggestion
#! The 'itemSectionRenderer' field is for user notices (stuff like - 'showing
#! results for xyz, search for abc instead') we have no use for them, the for
#! loop below if throw a keyError if we don't ignore them
if "itemSectionRenderer" in cBlock.keys():
continue
for contents in cBlock["musicShelfRenderer"]["contents"]:
#! apparently content Blocks without an 'overlay' field don't have linkBlocks
#! I have no clue what they are and why there even exist
if "overlay" not in contents["musicResponsiveListItemRenderer"]:
continue
result = contents["musicResponsiveListItemRenderer"]["flexColumns"]
# Add the linkBlock
linkBlock = contents["musicResponsiveListItemRenderer"]["overlay"][
"musicItemThumbnailOverlayRenderer"
]["content"]["musicPlayButtonRenderer"].get("playNavigationEndpoint")
if linkBlock is None:
continue
#! detailsBlock is always a list, so we just append the linkBlock to it
#! insted of carrying along all the other junk from 'musicResponsiveListItemRenderer'
result.append(linkBlock)
#! gather resultBlock
resultBlocks.append(result)
# 03. Gather available details in the same place
#! We only need results that are Songs or Videos, so we filter out the rest, since
#! Songs and Videos are supplied with different details, extracting all details from
#! both is just carrying on redundant data, so we also have to selectively extract
#! relevant details. What you need to know to understand how we do that here:
#!
#! Songs details are ALWAYS in the following order:
#! 0 - Name
#! 1 - Type (Song)
#! 2 - Artist
#! 3 - Album
#! 4 - Duration (mm:ss)
#!
#! Video details are ALWAYS in the following order:
#! 0 - Name
#! 1 - Type (Video)
#! 2 - Channel
#! 3 - Viewers
#! 4 - Duration (hh:mm:ss)
#!
#! We blindly gather all the details we get our hands on, then
#! cherrypick the details we need based on their index numbers,
#! we do so only if their Type is 'Song' or 'Video
simplifiedResults = []
for result in resultBlocks:
# Blindly gather available details
availableDetails = []
# Filterout dummies here itself
#! 'musicResponsiveListItmeFlexColumnRenderer' should have more that one
#! sub-block, if not its a dummy, why does the YTM response contain dummies?
#! I have no clue. We skip these.
#! Remember that we appended the linkBlock to result, treating that like the
#! other constituents of a result block will lead to errors, hence the 'in
#! result[:-1]'
for detail in result[:-1]:
if len(detail["musicResponsiveListItemFlexColumnRenderer"]) < 2:
continue
#! if not a dummy, collect all available details
availableDetails.append(
detail["musicResponsiveListItemFlexColumnRenderer"]["text"]["runs"][0][
"text"
]
)
# Filterout non-Song/Video results and incomplete results here itself
#! From what we know about detail order, note that [1] - indicate result type
if availableDetails[1] in ["Song", "Video"] and len(availableDetails) == 5:
#! skip if result is in hours instead of minuts (no song is that long)
if len(availableDetails[4].split(":")) != 2:
continue
# grab position of result
#! This helps for those oddball cases where 2+ results are rated equally,
#! lower position --> better match
resultPosition = resultBlocks.index(result)
# grab result link
#! this is nested as [playlistEndpoint/watchEndpoint][videoId/playlistId/...]
#! so hardcoding the dict keys for data look up is an ardours process, since
#! the sub-block pattern is fixed even though the key isn't, we just
#! reference the dict keys by index
endpointKey = list(result[-1].keys())[1]
resultIdKey = list(result[-1][endpointKey].keys())[0]
linkId = result[-1][endpointKey][resultIdKey]
resultLink = "https://www.youtube.com/watch?v=" + linkId
# convert length into seconds
minStr, secStr = availableDetails[4].split(":")
#! handle leading zeroes (eg. 01, 09, etc...), they cause eval errors, there
#! are a few oddball tracks that are only a few seconds long
if minStr[0] == "0" and len(minStr) == 2:
minStr = minStr[1]
if secStr[0] == "0":
secStr = secStr[1]
time = eval(minStr) * 60 + eval(secStr)
# format relevant details
if availableDetails[1] == "Song":
formattedDetails = {
"name": availableDetails[0],
"type": "song",
"artist": availableDetails[2],
"album": availableDetails[3],
"length": time,
"link": resultLink,
"position": resultPosition,
}
if formattedDetails not in simplifiedResults:
simplifiedResults.append(formattedDetails)
elif availableDetails[1] == "Video":
formattedDetails = {
"name": availableDetails[0],
"type": "video",
"length": time,
"link": resultLink,
"position": resultPosition,
}
if formattedDetails not in simplifiedResults:
simplifiedResults.append(formattedDetails)
#! Things like playlists, albums, etc... just get ignored
# return the results
return simplifiedResults
|
def __query_and_simplify(searchTerm: str, apiKey: str = ytmApiKey) -> List[dict]:
"""
`str` `searchTerm` : the search term you would type into YTM's search bar
RETURNS `list<dict>`
For structure of dict, see comment at function declaration
"""
#! For dict structure, see end of this function (~ln 268, ln 283) and chill, this
#! function ain't soo big, there are plenty of comments and blank lines
# build and POST a query to YTM
url = "https://music.youtube.com/youtubei/v1/search?alt=json&key=" + apiKey
headers = {"Referer": "https://music.youtube.com/search"}
payload = {
"context": {"client": {"clientName": "WEB_REMIX", "clientVersion": "0.1"}},
"query": searchTerm,
}
request = post(url=url, headers=headers, json=payload)
response = convert_json_to_dict(request.text)
#! We will hereon call generic levels of nesting as 'Blocks'. What follows is an
#! overview of the basic nesting (you'll need it),
#!
#! Content blocks
#! Group results into 'Top result', 'Songs', 'Videos',
#! 'playlists', 'Albums', 'User notices', etc...
#!
#! Result blocks (under each 'Content block')
#! Represents an individual song, album, video, ..., playlist
#! result
#!
#! Detail blocks (under each 'Result block')
#! Represents a detail of the result, these might be one of
#! name, duration, channel, album, etc...
#!
#! Link block (under each 'Result block')
#! Contains the video/Album/playlist/Song/Artist link of the
#! result as found on YouTube
# Simplify and extract necessary details from senselessly nested YTM response
#! This process is broken into numbered steps below
#! nested-dict keys are used to (a) indicate nesting visually and (b) make the code
#! more readable
# 01. Break response into contentBLocks
contentBlocks = response["contents"]["sectionListRenderer"]["contents"]
# 02. Gather all result block in the same place
resultBlocks = []
for cBlock in contentBlocks:
# Ignore user-suggestion
#! The 'itemSectionRenderer' field is for user notices (stuff like - 'showing
#! results for xyz, search for abc instead') we have no use for them, the for
#! loop below if throw a keyError if we don't ignore them
if "itemSectionRenderer" in cBlock.keys():
continue
for contents in cBlock["musicShelfRenderer"]["contents"]:
#! apparently content Blocks without an 'overlay' field don't have linkBlocks
#! I have no clue what they are and why there even exist
if "overlay" not in contents["musicResponsiveListItemRenderer"]:
continue
result = contents["musicResponsiveListItemRenderer"]["flexColumns"]
# Add the linkBlock
linkBlock = contents["musicResponsiveListItemRenderer"]["overlay"][
"musicItemThumbnailOverlayRenderer"
]["content"]["musicPlayButtonRenderer"]["playNavigationEndpoint"]
#! detailsBlock is always a list, so we just append the linkBlock to it
#! insted of carrying along all the other junk from 'musicResponsiveListItemRenderer'
result.append(linkBlock)
#! gather resultBlock
resultBlocks.append(result)
# 03. Gather available details in the same place
#! We only need results that are Songs or Videos, so we filter out the rest, since
#! Songs and Videos are supplied with different details, extracting all details from
#! both is just carrying on redundant data, so we also have to selectively extract
#! relevant details. What you need to know to understand how we do that here:
#!
#! Songs details are ALWAYS in the following order:
#! 0 - Name
#! 1 - Type (Song)
#! 2 - Artist
#! 3 - Album
#! 4 - Duration (mm:ss)
#!
#! Video details are ALWAYS in the following order:
#! 0 - Name
#! 1 - Type (Video)
#! 2 - Channel
#! 3 - Viewers
#! 4 - Duration (hh:mm:ss)
#!
#! We blindly gather all the details we get our hands on, then
#! cherrypick the details we need based on their index numbers,
#! we do so only if their Type is 'Song' or 'Video
simplifiedResults = []
for result in resultBlocks:
# Blindly gather available details
availableDetails = []
# Filterout dummies here itself
#! 'musicResponsiveListItmeFlexColumnRenderer' should have more that one
#! sub-block, if not its a dummy, why does the YTM response contain dummies?
#! I have no clue. We skip these.
#! Remember that we appended the linkBlock to result, treating that like the
#! other constituents of a result block will lead to errors, hence the 'in
#! result[:-1]'
for detail in result[:-1]:
if len(detail["musicResponsiveListItemFlexColumnRenderer"]) < 2:
continue
#! if not a dummy, collect all available details
availableDetails.append(
detail["musicResponsiveListItemFlexColumnRenderer"]["text"]["runs"][0][
"text"
]
)
# Filterout non-Song/Video results and incomplete results here itself
#! From what we know about detail order, note that [1] - indicate result type
if availableDetails[1] in ["Song", "Video"] and len(availableDetails) == 5:
#! skip if result is in hours instead of minuts (no song is that long)
if len(availableDetails[4].split(":")) != 2:
continue
# grab position of result
#! This helps for those oddball cases where 2+ results are rated equally,
#! lower position --> better match
resultPosition = resultBlocks.index(result)
# grab result link
#! this is nested as [playlistEndpoint/watchEndpoint][videoId/playlistId/...]
#! so hardcoding the dict keys for data look up is an ardours process, since
#! the sub-block pattern is fixed even though the key isn't, we just
#! reference the dict keys by index
endpointKey = list(result[-1].keys())[1]
resultIdKey = list(result[-1][endpointKey].keys())[0]
linkId = result[-1][endpointKey][resultIdKey]
resultLink = "https://www.youtube.com/watch?v=" + linkId
# convert length into seconds
minStr, secStr = availableDetails[4].split(":")
#! handle leading zeroes (eg. 01, 09, etc...), they cause eval errors, there
#! are a few oddball tracks that are only a few seconds long
if minStr[0] == "0" and len(minStr) == 2:
minStr = minStr[1]
if secStr[0] == "0":
secStr = secStr[1]
time = eval(minStr) * 60 + eval(secStr)
# format relevant details
if availableDetails[1] == "Song":
formattedDetails = {
"name": availableDetails[0],
"type": "song",
"artist": availableDetails[2],
"album": availableDetails[3],
"length": time,
"link": resultLink,
"position": resultPosition,
}
if formattedDetails not in simplifiedResults:
simplifiedResults.append(formattedDetails)
elif availableDetails[1] == "Video":
formattedDetails = {
"name": availableDetails[0],
"type": "video",
"length": time,
"link": resultLink,
"position": resultPosition,
}
if formattedDetails not in simplifiedResults:
simplifiedResults.append(formattedDetails)
#! Things like playlists, albums, etc... just get ignored
# return the results
return simplifiedResults
|
https://github.com/spotDL/spotify-downloader/issues/1038
|
Fetching Playlist...
Traceback (most recent call last):
File "/root/envspotdl/bin/spotdl", line 8, in <module>
sys.exit(console_entry_point())
File "/root/envspotdl/lib/python3.8/site-packages/spotdl/__main__.py", line 114, in console_entry_point
songObjList = get_playlist_tracks(request)
File "/root/envspotdl/lib/python3.8/site-packages/spotdl/search/utils.py", line 82, in get_playlist_tracks
song = SongObj.from_url(
File "/root/envspotdl/lib/python3.8/site-packages/spotdl/search/songObj.py", line 66, in from_url
youtubeLink = SongObj.searchProvider(
File "/root/envspotdl/lib/python3.8/site-packages/spotdl/search/provider.py", line 478, in search_and_get_best_match
results = search_and_order_ytm_results(
File "/root/envspotdl/lib/python3.8/site-packages/spotdl/search/provider.py", line 366, in search_and_order_ytm_results
results = __query_and_simplify(songSearchStr)
File "/root/envspotdl/lib/python3.8/site-packages/spotdl/search/provider.py", line 176, in __query_and_simplify
linkBlock = contents['musicResponsiveListItemRenderer'] \
KeyError: 'playNavigationEndpoint'
|
KeyError
|
def _is_video(self, result):
# ensure result is not a channel
not_video = (
result.find("channel") is not None
or "yt-lockup-channel" in result.parent.attrs["class"]
or "yt-lockup-channel" in result.attrs["class"]
)
# ensure result is not a mix/playlist
not_video = not_video or "yt-lockup-playlist" in result.parent.attrs["class"]
# ensure video result is not an advertisement
not_video = not_video or result.find("googleads") is not None
# ensure video result is not a live stream
not_video = not_video or result.find("span", class_="video-time") is None
video = not not_video
return video
|
def _is_video(self, result):
# ensure result is not a channel
not_video = (
result.find("channel") is not None
or "yt-lockup-channel" in result.parent.attrs["class"]
or "yt-lockup-channel" in result.attrs["class"]
)
# ensure result is not a mix/playlist
not_video = not_video or "yt-lockup-playlist" in result.parent.attrs["class"]
# ensure video result is not an advertisement
not_video = not_video or result.find("googleads") is not None
video = not not_video
return video
|
https://github.com/spotDL/spotify-downloader/issues/733
|
INFO: 2. Downloading "https://open.spotify.com/track/5dPwLIsgL4gklXWAAM1VZY"
Traceback (most recent call last):
File "/usr/local/bin/spotdl", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/spotdl/command_line/__main__.py", line 48, in main
spotdl.match_arguments()
File "/usr/local/lib/python3.7/site-packages/spotdl/command_line/core.py", line 79, in match_arguments
self.arguments["list"],
File "/usr/local/lib/python3.7/site-packages/spotdl/command_line/core.py", line 346, in download_tracks_from_file
metadata = search_metadata.on_youtube_and_spotify()
File "/usr/local/lib/python3.7/site-packages/spotdl/metadata_search.py", line 89, in on_youtube_and_spotify
metadata = caller()
File "/usr/local/lib/python3.7/site-packages/spotdl/metadata_search.py", line 185, in _on_youtube_and_spotify_for_type_spotify
youtube_video = self._best_on_youtube_search_for_type_spotify(search_query)
File "/usr/local/lib/python3.7/site-packages/spotdl/metadata_search.py", line 178, in _best_on_youtube_search_for_type_spotify
video = self._best_on_youtube_search_for_type_query(search_query)
File "/usr/local/lib/python3.7/site-packages/spotdl/metadata_search.py", line 153, in _best_on_youtube_search_for_type_query
videos = self.providers["youtube"].search(query)
File "/usr/local/lib/python3.7/site-packages/spotdl/metadata/providers/youtube.py", line 247, in search
return YouTubeSearch().search(query)
File "/usr/local/lib/python3.7/site-packages/spotdl/metadata/providers/youtube.py", line 119, in search
videos = self._fetch_search_results(html, limit=limit)
File "/usr/local/lib/python3.7/site-packages/spotdl/metadata/providers/youtube.py", line 83, in _fetch_search_results
video = self._extract_video_details_from_result(result)
File "/usr/local/lib/python3.7/site-packages/spotdl/metadata/providers/youtube.py", line 62, in _extract_video_details_from_result
video_time = html.find("span", class_="video-time").get_text()
AttributeError: 'NoneType' object has no attribute 'get_text'
|
AttributeError
|
def fetch_albums_from_artist(artist_url, album_type=None):
"""
This funcction returns all the albums from a give artist_url using the US
market
:param artist_url - spotify artist url
:param album_type - the type of album to fetch (ex: single) the default is
all albums
:param return - the album from the artist
"""
# fetching artist's albums limitting the results to the US to avoid duplicate
# albums from multiple markets
artist_id = internals.extract_spotify_id(artist_url)
results = spotify.artist_albums(artist_id, album_type=album_type, country="US")
albums = results["items"]
# indexing all pages of results
while results["next"]:
results = spotify.next(results)
albums.extend(results["items"])
return albums
|
def fetch_albums_from_artist(artist_url, album_type="album"):
"""
This funcction returns all the albums from a give artist_url using the US
market
:param artist_url - spotify artist url
:param album_type - the type of album to fetch (ex: single) the default is
a standard album
:param return - the album from the artist
"""
# fetching artist's albums limitting the results to the US to avoid duplicate
# albums from multiple markets
artist_id = internals.extract_spotify_id(artist_url)
results = spotify.artist_albums(artist_id, album_type=album_type, country="US")
albums = results["items"]
# indexing all pages of results
while results["next"]:
results = spotify.next(results)
albums.extend(results["items"])
return albums
|
https://github.com/spotDL/spotify-downloader/issues/473
|
Traceback (most recent call last):
File "/usr/local/bin/spotdl", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/spotdl/spotdl.py", line 62, in main
match_args()
File "/usr/local/lib/python3.7/site-packages/spotdl/spotdl.py", line 44, in match_args
spotify_tools.write_all_albums_from_artist(artist_url=const.args.all_albums)
File "/usr/local/lib/python3.7/site-packages/spotdl/spotify_tools.py", line 200, in write_all_albums_from_artist
text_file = albums[0]["artists"][0]["name"] + ".txt"
IndexError: list index out of range
|
IndexError
|
def write_all_albums_from_artist(artist_url, text_file=None):
"""
This function gets all albums from an artist and writes it to a file in the
current working directory called [ARTIST].txt, where [ARTIST] is the artist
of the album
:param artist_url - spotify artist url
:param text_file - file to write albums to
"""
album_base_url = "https://open.spotify.com/album/"
# fetching all default albums
albums = fetch_albums_from_artist(artist_url, album_type=None)
# if no file if given, the default save file is in the current working
# directory with the name of the artist
if text_file is None:
text_file = albums[0]["artists"][0]["name"] + ".txt"
for album in albums:
# logging album name
log.info("Fetching album: " + album["name"])
write_album(album_base_url + album["id"], text_file=text_file)
|
def write_all_albums_from_artist(artist_url, text_file=None):
"""
This function gets all albums from an artist and writes it to a file in the
current working directory called [ARTIST].txt, where [ARTIST] is the artist
of the album
:param artist_url - spotify artist url
:param text_file - file to write albums to
"""
album_base_url = "https://open.spotify.com/album/"
# fetching all default albums
albums = fetch_albums_from_artist(artist_url)
# if no file if given, the default save file is in the current working
# directory with the name of the artist
if text_file is None:
text_file = albums[0]["artists"][0]["name"] + ".txt"
for album in albums:
# logging album name
log.info("Fetching album: " + album["name"])
write_album(album_base_url + album["id"], text_file=text_file)
# fetching all single albums
singles = fetch_albums_from_artist(artist_url, album_type="single")
for single in singles:
log.info("Fetching single: " + single["name"])
write_album(album_base_url + single["id"], text_file=text_file)
|
https://github.com/spotDL/spotify-downloader/issues/473
|
Traceback (most recent call last):
File "/usr/local/bin/spotdl", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/spotdl/spotdl.py", line 62, in main
match_args()
File "/usr/local/lib/python3.7/site-packages/spotdl/spotdl.py", line 44, in match_args
spotify_tools.write_all_albums_from_artist(artist_url=const.args.all_albums)
File "/usr/local/lib/python3.7/site-packages/spotdl/spotify_tools.py", line 200, in write_all_albums_from_artist
text_file = albums[0]["artists"][0]["name"] + ".txt"
IndexError: list index out of range
|
IndexError
|
def scrape(self, bestmatch=True, tries_remaining=5):
"""Search and scrape YouTube to return a list of matching videos."""
# prevents an infinite loop but allows for a few retries
if tries_remaining == 0:
log.debug("No tries left. I quit.")
return
search_url = generate_search_url(self.search_query)
log.debug("Opening URL: {0}".format(search_url))
item = urllib.request.urlopen(search_url).read()
items_parse = BeautifulSoup(item, "html.parser")
videos = []
for x in items_parse.find_all(
"div", {"class": "yt-lockup-dismissable yt-uix-tile"}
):
if not is_video(x):
continue
y = x.find("div", class_="yt-lockup-content")
link = y.find("a")["href"][-11:]
title = y.find("a")["title"]
try:
videotime = x.find("span", class_="video-time").get_text()
except AttributeError:
log.debug("Could not find video duration on YouTube, retrying..")
return self.scrape(bestmatch=bestmatch, tries_remaining=tries_remaining - 1)
youtubedetails = {
"link": link,
"title": title,
"videotime": videotime,
"seconds": internals.get_sec(videotime),
}
videos.append(youtubedetails)
if bestmatch:
return self._best_match(videos)
return videos
|
def scrape(self, bestmatch=True, tries_remaining=5):
"""Search and scrape YouTube to return a list of matching videos."""
# prevents an infinite loop but allows for a few retries
if tries_remaining == 0:
log.debug("No tries left. I quit.")
return
search_url = generate_search_url(self.search_query)
log.debug("Opening URL: {0}".format(search_url))
item = urllib.request.urlopen(search_url).read()
items_parse = BeautifulSoup(item, "html.parser")
videos = []
for x in items_parse.find_all(
"div", {"class": "yt-lockup-dismissable yt-uix-tile"}
):
if not is_video(x):
continue
y = x.find("div", class_="yt-lockup-content")
link = y.find("a")["href"][-11:]
title = y.find("a")["title"]
try:
videotime = x.find("span", class_="video-time").get_text()
except AttributeError:
log.debug("Could not find video duration on YouTube, retrying..")
return self.scrape(
self.raw_song, self.meta_tags, tries_remaining=tries_remaining - 1
)
youtubedetails = {
"link": link,
"title": title,
"videotime": videotime,
"seconds": internals.get_sec(videotime),
}
videos.append(youtubedetails)
if bestmatch:
return self._best_match(videos)
return videos
|
https://github.com/spotDL/spotify-downloader/issues/276
|
Traceback (most recent call last):
File "C:\Users\User\spotify-downloader-master\core\youtube_tools.py", line 187, in scrape
videotime = x.find('span', class_="video-time").get_text()
AttributeError: 'NoneType' object has no attribute 'get_text'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\User\spotify-downloader-master\core\youtube_tools.py", line 212, in <module>
main()
File "C:\Users\User\spotify-downloader-master\core\youtube_tools.py", line 192, in main
download_single(raw_song=const.args.song)
File "C:\Users\User\spotify-downloader-master\core\youtube_tools.py", line 118, in download_single
content = youtube_tools.go_pafy(raw_song, meta_tags)
File "C:\Users\User\spotify-downloader-master\core\youtube_tools.py", line 32, in go_pafy
track_url = generate_youtube_url(raw_song, meta_tags)
File "C:\Users\User\spotify-downloader-master\core\youtube_tools.py", line 103, in generate_youtube_url
url = url_fetch.scrape()
File "C:\Users\User\spotify-downloader-master\core\youtube_tools.py", line 192, in scrape
tries_remaining=tries_remaining-1)
TypeError: scrape() got multiple values for argument 'tries_remaining'
|
AttributeError
|
def override_config(config_file, parser, raw_args=None):
"""Override default dict with config dict passed as comamnd line argument."""
config_file = os.path.realpath(config_file)
config = merge(default_conf["spotify-downloader"], get_config(config_file))
parser.set_defaults(manual=config["manual"])
parser.set_defaults(no_metadata=config["no-metadata"])
parser.set_defaults(avconv=config["avconv"])
parser.set_defaults(folder=os.path.relpath(config["folder"], os.getcwd()))
parser.set_defaults(overwrite=config["overwrite"])
parser.set_defaults(input_ext=config["input-ext"])
parser.set_defaults(output_ext=config["output-ext"])
parser.set_defaults(download_only_metadata=config["download-only-metadata"])
parser.set_defaults(dry_run=config["dry-run"])
parser.set_defaults(music_videos_only=config["music-videos-only"])
parser.set_defaults(no_spaces=config["no-spaces"])
parser.set_defaults(file_format=config["file-format"])
parser.set_defaults(no_spaces=config["youtube-api-key"])
parser.set_defaults(log_level=config["log-level"])
return parser.parse_args(raw_args)
|
def override_config(config_file, parser, raw_args=None):
"""Override default dict with config dict passed as comamnd line argument."""
config_file = os.path.realpath(config_file)
config = merge(default_conf["spotify-downloader"], get_config(config_file))
parser.set_defaults(avconv=config["avconv"])
parser.set_defaults(download_only_metadata=config["download-only-metadata"])
parser.set_defaults(dry_run=config["dry-run"])
parser.set_defaults(file_format=config["file-format"])
parser.set_defaults(folder=os.path.relpath(config["folder"], os.getcwd()))
parser.set_defaults(input_ext=config["input-ext"])
parser.set_defaults(log_level=config["log-level"])
parser.set_defaults(manual=config["manual"])
parser.set_defaults(music_videos_only=config["music-videos-only"])
parser.set_defaults(no_metadata=config["no-metadata"])
parser.set_defaults(no_spaces=config["no-spaces"])
parser.set_defaults(output_ext=config["output-ext"])
parser.set_defaults(overwrite=config["overwrite"])
return parser.parse_args(raw_args)
|
https://github.com/spotDL/spotify-downloader/issues/246
|
$ python ./spotdl.py -l ./playlist.txt -f /d/Music\ 02/ --log-level DEBUG
DEBUG: Python version: 3.6.1 |Anaconda 4.4.0 (64-bit)| (default, May 11 2017, 13:25:24) [MSC v.1900 64 bit (AMD64)]
DEBUG: Platform: Windows-10-10.0.14393-SP0
DEBUG: {'album': None,
'avconv': False,
'config': None,
'download_only_metadata': False,
'dry_run': False,
'file_format': '{artist} - {track_name}',
'folder': 'D:/Music 02/',
'input_ext': '.m4a',
'list': './playlist.txt',
'log_level': 10,
'manual': False,
'music_videos_only': False,
'no_metadata': False,
'no_spaces': False,
'output_ext': '.mp3',
'overwrite': 'prompt',
'playlist': None,
'song': None,
'username': None}
INFO: Preparing to download 3 songs
DEBUG: Fetching metadata for given track URL
DEBUG: Fetching lyrics
<<(Truncated Meta Output)>>
DEBUG: query: {'part': 'snippet', 'maxResults': 50, 'type': 'video', 'q': 'Arctic Monkeys - R U Mine?'}
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\site-packages\pafy\util.py", line 34, in call_gdata
data = g.opener.open(url).read().decode('utf-8')
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 532, in open
response = meth(req, response)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 570, in error
return self._call_chain(*args)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 504, in _call_chain
result = func(*args)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 403: Forbidden
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./spotdl.py", line 185, in <module>
download_list(text_file=const.args.list)
File "./spotdl.py", line 79, in download_list
download_single(raw_song, number=number)
File "./spotdl.py", line 116, in download_single
content = youtube_tools.go_pafy(raw_song, meta_tags)
File "C:\Users\Jason Cemra\Downloads\spotify-downloader-master\core\youtube_tools.py", line 22, in go_pafy
track_url = generate_youtube_url(raw_song, meta_tags)
File "C:\Users\Jason Cemra\Downloads\spotify-downloader-master\core\youtube_tools.py", line 84, in generate_youtube_url
data = pafy.call_gdata('search', query)
File "C:\ProgramData\Anaconda3\lib\site-packages\pafy\util.py", line 42, in call_gdata
raise GdataError(errmsg)
pafy.util.GdataError: Youtube Error 403: The request cannot be completed because you have exceeded your <a href="/youtube/v3/getting-started#quota">quota</a>.
|
urllib.error.HTTPError
|
def get_arguments(raw_args=None, to_group=True, to_merge=True):
parser = argparse.ArgumentParser(
description="Download and convert songs from Spotify, Youtube etc.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
if to_merge:
config_file = os.path.join(sys.path[0], "config.yml")
config = merge(default_conf["spotify-downloader"], get_config(config_file))
else:
config = default_conf["spotify-downloader"]
if to_group:
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-s", "--song", help="download song by spotify link or name")
group.add_argument("-l", "--list", help="download songs from a file")
group.add_argument(
"-p",
"--playlist",
help="load songs from playlist URL into <playlist_name>.txt",
)
group.add_argument(
"-b", "--album", help="load songs from album URL into <album_name>.txt"
)
group.add_argument(
"-u",
"--username",
help="load songs from user's playlist into <playlist_name>.txt",
)
parser.add_argument(
"-m",
"--manual",
default=config["manual"],
help="choose the song to download manually",
action="store_true",
)
parser.add_argument(
"-nm",
"--no-metadata",
default=config["no-metadata"],
help="do not embed metadata in songs",
action="store_true",
)
parser.add_argument(
"-a",
"--avconv",
default=config["avconv"],
help="Use avconv for conversion otherwise set defaults to ffmpeg",
action="store_true",
)
parser.add_argument(
"-f",
"--folder",
default=os.path.relpath(config["folder"], os.getcwd()),
help="path to folder where files will be stored in",
)
parser.add_argument(
"--overwrite",
default=config["overwrite"],
help="change the overwrite policy",
choices={"prompt", "force", "skip"},
)
parser.add_argument(
"-i",
"--input-ext",
default=config["input-ext"],
help="prefered input format .m4a or .webm (Opus)",
)
parser.add_argument(
"-o",
"--output-ext",
default=config["output-ext"],
help="prefered output extension .mp3 or .m4a (AAC)",
)
parser.add_argument(
"-ff",
"--file-format",
default=config["file-format"],
help="File format to save the downloaded song with, each tag "
"is surrounded by curly braces. Possible formats: "
"{}".format([internals.formats[x] for x in internals.formats]),
action="store_true",
)
parser.add_argument(
"-dm",
"--download-only-metadata",
default=config["download-only-metadata"],
help="download songs for which metadata is found",
action="store_true",
)
parser.add_argument(
"-d",
"--dry-run",
default=config["dry-run"],
help="Show only track title and YouTube URL",
action="store_true",
)
parser.add_argument(
"-mo",
"--music-videos-only",
default=config["music-videos-only"],
help="Search only for music on Youtube",
action="store_true",
)
parser.add_argument(
"-ns",
"--no-spaces",
default=config["no-spaces"],
help="Replace spaces with underscores in file names",
action="store_true",
)
parser.add_argument(
"-ll",
"--log-level",
default=config["log-level"],
choices=_LOG_LEVELS_STR,
type=str.upper,
help="set log verbosity",
)
parser.add_argument(
"-yk",
"--youtube-api-key",
default=config["youtube-api-key"],
help=argparse.SUPPRESS,
)
parser.add_argument(
"-c", "--config", default=None, help="Replace with custom config.yml file"
)
parsed = parser.parse_args(raw_args)
if parsed.config is not None and to_merge:
parsed = override_config(parsed.config, parser)
parsed.log_level = log_leveller(parsed.log_level)
return parsed
|
def get_arguments(raw_args=None, to_group=True, to_merge=True):
parser = argparse.ArgumentParser(
description="Download and convert songs from Spotify, Youtube etc.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
if to_merge:
config_file = os.path.join(sys.path[0], "config.yml")
config = merge(default_conf["spotify-downloader"], get_config(config_file))
else:
config = default_conf["spotify-downloader"]
if to_group:
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-s", "--song", help="download song by spotify link or name")
group.add_argument("-l", "--list", help="download songs from a file")
group.add_argument(
"-p",
"--playlist",
help="load songs from playlist URL into <playlist_name>.txt",
)
group.add_argument(
"-b", "--album", help="load songs from album URL into <album_name>.txt"
)
group.add_argument(
"-u",
"--username",
help="load songs from user's playlist into <playlist_name>.txt",
)
parser.add_argument(
"-m",
"--manual",
default=config["manual"],
help="choose the song to download manually",
action="store_true",
)
parser.add_argument(
"-nm",
"--no-metadata",
default=config["no-metadata"],
help="do not embed metadata in songs",
action="store_true",
)
parser.add_argument(
"-a",
"--avconv",
default=config["avconv"],
help="Use avconv for conversion otherwise set defaults to ffmpeg",
action="store_true",
)
parser.add_argument(
"-f",
"--folder",
default=os.path.relpath(config["folder"], os.getcwd()),
help="path to folder where files will be stored in",
)
parser.add_argument(
"--overwrite",
default=config["overwrite"],
help="change the overwrite policy",
choices={"prompt", "force", "skip"},
)
parser.add_argument(
"-i",
"--input-ext",
default=config["input-ext"],
help="prefered input format .m4a or .webm (Opus)",
)
parser.add_argument(
"-o",
"--output-ext",
default=config["output-ext"],
help="prefered output extension .mp3 or .m4a (AAC)",
)
parser.add_argument(
"-ff",
"--file-format",
default=config["file-format"],
help="File format to save the downloaded song with, each tag "
"is surrounded by curly braces. Possible formats: "
"{}".format([internals.formats[x] for x in internals.formats]),
action="store_true",
)
parser.add_argument(
"-dm",
"--download-only-metadata",
default=config["download-only-metadata"],
help="download songs for which metadata is found",
action="store_true",
)
parser.add_argument(
"-d",
"--dry-run",
default=config["dry-run"],
help="Show only track title and YouTube URL",
action="store_true",
)
parser.add_argument(
"-mo",
"--music-videos-only",
default=config["music-videos-only"],
help="Search only for music on Youtube",
action="store_true",
)
parser.add_argument(
"-ns",
"--no-spaces",
default=config["no-spaces"],
help="Replace spaces with underscores in file names",
action="store_true",
)
parser.add_argument(
"-ll",
"--log-level",
default=config["log-level"],
choices=_LOG_LEVELS_STR,
type=str.upper,
help="set log verbosity",
)
parser.add_argument(
"-c", "--config", default=None, help="Replace with custom config.yml file"
)
parsed = parser.parse_args(raw_args)
if parsed.config is not None and to_merge:
parsed = override_config(parsed.config, parser)
parsed.log_level = log_leveller(parsed.log_level)
return parsed
|
https://github.com/spotDL/spotify-downloader/issues/246
|
$ python ./spotdl.py -l ./playlist.txt -f /d/Music\ 02/ --log-level DEBUG
DEBUG: Python version: 3.6.1 |Anaconda 4.4.0 (64-bit)| (default, May 11 2017, 13:25:24) [MSC v.1900 64 bit (AMD64)]
DEBUG: Platform: Windows-10-10.0.14393-SP0
DEBUG: {'album': None,
'avconv': False,
'config': None,
'download_only_metadata': False,
'dry_run': False,
'file_format': '{artist} - {track_name}',
'folder': 'D:/Music 02/',
'input_ext': '.m4a',
'list': './playlist.txt',
'log_level': 10,
'manual': False,
'music_videos_only': False,
'no_metadata': False,
'no_spaces': False,
'output_ext': '.mp3',
'overwrite': 'prompt',
'playlist': None,
'song': None,
'username': None}
INFO: Preparing to download 3 songs
DEBUG: Fetching metadata for given track URL
DEBUG: Fetching lyrics
<<(Truncated Meta Output)>>
DEBUG: query: {'part': 'snippet', 'maxResults': 50, 'type': 'video', 'q': 'Arctic Monkeys - R U Mine?'}
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\site-packages\pafy\util.py", line 34, in call_gdata
data = g.opener.open(url).read().decode('utf-8')
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 532, in open
response = meth(req, response)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 570, in error
return self._call_chain(*args)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 504, in _call_chain
result = func(*args)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 403: Forbidden
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./spotdl.py", line 185, in <module>
download_list(text_file=const.args.list)
File "./spotdl.py", line 79, in download_list
download_single(raw_song, number=number)
File "./spotdl.py", line 116, in download_single
content = youtube_tools.go_pafy(raw_song, meta_tags)
File "C:\Users\Jason Cemra\Downloads\spotify-downloader-master\core\youtube_tools.py", line 22, in go_pafy
track_url = generate_youtube_url(raw_song, meta_tags)
File "C:\Users\Jason Cemra\Downloads\spotify-downloader-master\core\youtube_tools.py", line 84, in generate_youtube_url
data = pafy.call_gdata('search', query)
File "C:\ProgramData\Anaconda3\lib\site-packages\pafy\util.py", line 42, in call_gdata
raise GdataError(errmsg)
pafy.util.GdataError: Youtube Error 403: The request cannot be completed because you have exceeded your <a href="/youtube/v3/getting-started#quota">quota</a>.
|
urllib.error.HTTPError
|
def generate_youtube_url(raw_song, meta_tags):
url_fetch = GenerateYouTubeURL(raw_song, meta_tags)
if const.args.youtube_api_key:
url = url_fetch.api()
else:
url = url_fetch.scrape()
return url
|
def generate_youtube_url(raw_song, meta_tags, tries_remaining=5):
"""Search for the song on YouTube and generate a URL to its video."""
# prevents an infinite loop but allows for a few retries
if tries_remaining == 0:
log.debug("No tries left. I quit.")
return
query = {"part": "snippet", "maxResults": 50, "type": "video"}
if const.args.music_videos_only:
query["videoCategoryId"] = "10"
if not meta_tags:
song = raw_song
query["q"] = song
else:
song = "{0} - {1}".format(meta_tags["artists"][0]["name"], meta_tags["name"])
query["q"] = song
log.debug("query: {0}".format(query))
data = pafy.call_gdata("search", query)
data["items"] = list(
filter(lambda x: x["id"].get("videoId") is not None, data["items"])
)
query_results = {
"part": "contentDetails,snippet,statistics",
"maxResults": 50,
"id": ",".join(i["id"]["videoId"] for i in data["items"]),
}
log.debug("query_results: {0}".format(query_results))
vdata = pafy.call_gdata("videos", query_results)
videos = []
for x in vdata["items"]:
duration_s = pafy.playlist.parseISO8591(x["contentDetails"]["duration"])
youtubedetails = {
"link": x["id"],
"title": x["snippet"]["title"],
"videotime": internals.videotime_from_seconds(duration_s),
"seconds": duration_s,
}
videos.append(youtubedetails)
if not meta_tags:
break
if not videos:
return None
if const.args.manual:
log.info(song)
log.info("0. Skip downloading this song.\n")
# fetch all video links on first page on YouTube
for i, v in enumerate(videos):
log.info(
"{0}. {1} {2} {3}".format(
i + 1,
v["title"],
v["videotime"],
"http://youtube.com/watch?v=" + v["link"],
)
)
# let user select the song to download
result = internals.input_link(videos)
if not result:
return None
else:
if not meta_tags:
# if the metadata could not be acquired, take the first result
# from Youtube because the proper song length is unknown
result = videos[0]
log.debug("Since no metadata found on Spotify, going with the first result")
else:
# filter out videos that do not have a similar length to the Spotify song
duration_tolerance = 10
max_duration_tolerance = 20
possible_videos_by_duration = list()
"""
start with a reasonable duration_tolerance, and increment duration_tolerance
until one of the Youtube results falls within the correct duration or
the duration_tolerance has reached the max_duration_tolerance
"""
while len(possible_videos_by_duration) == 0:
possible_videos_by_duration = list(
filter(
lambda x: abs(x["seconds"] - meta_tags["duration"])
<= duration_tolerance,
videos,
)
)
duration_tolerance += 1
if duration_tolerance > max_duration_tolerance:
log.error(
"{0} by {1} was not found.\n".format(
meta_tags["name"], meta_tags["artists"][0]["name"]
)
)
return None
result = possible_videos_by_duration[0]
if result:
url = "http://youtube.com/watch?v=" + result["link"]
else:
url = None
return url
|
https://github.com/spotDL/spotify-downloader/issues/246
|
$ python ./spotdl.py -l ./playlist.txt -f /d/Music\ 02/ --log-level DEBUG
DEBUG: Python version: 3.6.1 |Anaconda 4.4.0 (64-bit)| (default, May 11 2017, 13:25:24) [MSC v.1900 64 bit (AMD64)]
DEBUG: Platform: Windows-10-10.0.14393-SP0
DEBUG: {'album': None,
'avconv': False,
'config': None,
'download_only_metadata': False,
'dry_run': False,
'file_format': '{artist} - {track_name}',
'folder': 'D:/Music 02/',
'input_ext': '.m4a',
'list': './playlist.txt',
'log_level': 10,
'manual': False,
'music_videos_only': False,
'no_metadata': False,
'no_spaces': False,
'output_ext': '.mp3',
'overwrite': 'prompt',
'playlist': None,
'song': None,
'username': None}
INFO: Preparing to download 3 songs
DEBUG: Fetching metadata for given track URL
DEBUG: Fetching lyrics
<<(Truncated Meta Output)>>
DEBUG: query: {'part': 'snippet', 'maxResults': 50, 'type': 'video', 'q': 'Arctic Monkeys - R U Mine?'}
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\site-packages\pafy\util.py", line 34, in call_gdata
data = g.opener.open(url).read().decode('utf-8')
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 532, in open
response = meth(req, response)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 642, in http_response
'http', request, response, code, msg, hdrs)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 570, in error
return self._call_chain(*args)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 504, in _call_chain
result = func(*args)
File "C:\ProgramData\Anaconda3\lib\urllib\request.py", line 650, in http_error_default
raise HTTPError(req.full_url, code, msg, hdrs, fp)
urllib.error.HTTPError: HTTP Error 403: Forbidden
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./spotdl.py", line 185, in <module>
download_list(text_file=const.args.list)
File "./spotdl.py", line 79, in download_list
download_single(raw_song, number=number)
File "./spotdl.py", line 116, in download_single
content = youtube_tools.go_pafy(raw_song, meta_tags)
File "C:\Users\Jason Cemra\Downloads\spotify-downloader-master\core\youtube_tools.py", line 22, in go_pafy
track_url = generate_youtube_url(raw_song, meta_tags)
File "C:\Users\Jason Cemra\Downloads\spotify-downloader-master\core\youtube_tools.py", line 84, in generate_youtube_url
data = pafy.call_gdata('search', query)
File "C:\ProgramData\Anaconda3\lib\site-packages\pafy\util.py", line 42, in call_gdata
raise GdataError(errmsg)
pafy.util.GdataError: Youtube Error 403: The request cannot be completed because you have exceeded your <a href="/youtube/v3/getting-started#quota">quota</a>.
|
urllib.error.HTTPError
|
def feed_playlist(username):
# fetch all user playlists
playlists = spotify.user_playlists(username)
links = []
check = 1
# iterate over user playlists
while True:
for playlist in playlists["items"]:
# In rare cases, playlists may not be found, so playlists['next'] is
# None. Skip these. Also see Issue #91.
if playlist["name"] is not None:
print(
str(check)
+ ". "
+ misc.fix_encoding(playlist["name"])
+ " ("
+ str(playlist["tracks"]["total"])
+ " tracks)"
)
links.append(playlist)
check += 1
if playlists["next"]:
playlists = spotify.next(playlists)
else:
break
print("")
# let user select playlist
playlist = misc.input_link(links)
# fetch detailed information for playlist
results = spotify.user_playlist(
playlist["owner"]["id"], playlist["id"], fields="tracks,next"
)
print("")
# slugify removes any special characters
file = slugify(playlist["name"], ok="-_()[]{}") + ".txt"
print("Feeding " + str(playlist["tracks"]["total"]) + " tracks to " + file)
tracks = results["tracks"]
with open(file, "a") as fout:
while True:
for item in tracks["items"]:
track = item["track"]
try:
fout.write(track["external_urls"]["spotify"] + "\n")
except KeyError:
title = track["name"] + " by " + track["artists"][0]["name"]
print("Skipping track " + title + " (local only?)")
# 1 page = 50 results
# check if there are more pages
if tracks["next"]:
tracks = spotify.next(tracks)
else:
break
|
def feed_playlist(username):
# fetch all user playlists
playlists = spotify.user_playlists(username)
links = []
check = 1
# iterate over user playlists
while True:
for playlist in playlists["items"]:
print(
str(check)
+ ". "
+ misc.fix_encoding(playlist["name"])
+ " ("
+ str(playlist["tracks"]["total"])
+ " tracks)"
)
links.append(playlist)
check += 1
if playlists["next"]:
playlists = spotify.next(playlists)
else:
break
print("")
# let user select playlist
playlist = misc.input_link(links)
# fetch detailed information for playlist
results = spotify.user_playlist(
playlist["owner"]["id"], playlist["id"], fields="tracks,next"
)
print("")
# slugify removes any special characters
file = slugify(playlist["name"], ok="-_()[]{}") + ".txt"
print("Feeding " + str(playlist["tracks"]["total"]) + " tracks to " + file)
tracks = results["tracks"]
with open(file, "a") as fout:
while True:
for item in tracks["items"]:
track = item["track"]
try:
fout.write(track["external_urls"]["spotify"] + "\n")
except KeyError:
title = track["name"] + " by " + track["artists"][0]["name"]
print("Skipping track " + title + " (local only?)")
# 1 page = 50 results
# check if there are more pages
if tracks["next"]:
tracks = spotify.next(tracks)
else:
break
|
https://github.com/spotDL/spotify-downloader/issues/91
|
linus@sony-vaio ~/spotify-downloader $ python3 spotdl.py -u spotify
1. Today's Top Hits (50 tracks)
2. RapCaviar (63 tracks)
...
276. Disco Fever (50 tracks)
277. Tegan and Sara: Pride (27 tracks)
Traceback (most recent call last):
File "spotdl.py", line 303, in <module>
feed_playlist(username=args.username)
File "spotdl.py", line 124, in feed_playlist
print(str(check) + '. ' + misc.fix_encoding(playlist['name']) + ' (' + str(playlist['tracks']['total']) + ' tracks)')
TypeError: Can't convert 'NoneType' object to str implicitly
|
TypeError
|
def escape_xpath_value(value: str):
value = str(value)
if '"' in value and "'" in value:
parts_wo_apos = value.split("'")
escaped = "', \"'\", '".join(parts_wo_apos)
return f"concat('{escaped}')"
if "'" in value:
return f'"{value}"'
return f"'{value}'"
|
def escape_xpath_value(value):
if '"' in value and "'" in value:
parts_wo_apos = value.split("'")
escaped = "', \"'\", '".join(parts_wo_apos)
return f"concat('{escaped}')"
if "'" in value:
return f'"{value}"'
return f"'{value}'"
|
https://github.com/robotframework/SeleniumLibrary/issues/1021
|
18:35:00.405 FAIL TypeError: argument of type 'int' is not iterable
18:35:00.405 DEBUG Traceback (most recent call last):
File "c:\python27\lib\site-packages\SeleniumLibrary\__init__.py", line 360, in run_keyword
return DynamicCore.run_keyword(self, name, args, kwargs)
File "c:\python27\lib\site-packages\SeleniumLibrary\base\robotlibcore.py", line 102, in run_keyword
return self.keywords[name](*args, **kwargs)
File "c:\python27\lib\site-packages\SeleniumLibrary\keywords\waiting.py", line 66, in wait_until_page_contains
timeout, error)
File "c:\python27\lib\site-packages\SeleniumLibrary\keywords\waiting.py", line 220, in _wait_until
self._wait_until_worker(condition, timeout, error)
File "c:\python27\lib\site-packages\SeleniumLibrary\keywords\waiting.py", line 227, in _wait_until_worker
if condition():
File "c:\python27\lib\site-packages\SeleniumLibrary\keywords\waiting.py", line 64, in <lambda>
self._wait_until(lambda: self.is_text_present(text),
File "c:\python27\lib\site-packages\SeleniumLibrary\base\context.py", line 81, in is_text_present
locator = "xpath://*[contains(., %s)]" % escape_xpath_value(text)
File "c:\python27\lib\site-packages\SeleniumLibrary\utils\__init__.py", line 27, i
|
TypeError
|
def get_cookie(self, name):
"""Returns information of cookie with ``name`` as an object.
If no cookie is found with ``name``, keyword fails. The cookie object
contains details about the cookie. Attributes available in the object
are documented in the table below.
| = Attribute = | = Explanation = |
| name | The name of a cookie. |
| value | Value of the cookie. |
| path | Indicates a URL path, for example ``/``. |
| domain | The domain the cookie is visible to. |
| secure | When true, cookie is only used with HTTPS connections. |
| httpOnly | When true, cookie is not accessible via JavaScript. |
| expiry | Python datetime object indicating when the cookie expires. |
| extra | Possible attributes outside of the WebDriver specification |
See the
[https://w3c.github.io/webdriver/#cookies|WebDriver specification]
for details about the cookie information.
Notice that ``expiry`` is specified as a
[https://docs.python.org/3/library/datetime.html#datetime.datetime|datetime object],
not as seconds since Unix Epoch like WebDriver natively does.
In some cases, example when running browser in the cloud, it is possible that
cookie contains other attributes than is defined in the
[https://w3c.github.io/webdriver/#cookies|WebDriver specification].
These other attributes are available in a ``extra`` attribute in the cookie
object and it contains a dictionary of the other attributes. The ``extra``
attribute is new in SeleniumLibrary 4.0.
Example:
| `Add Cookie` | foo | bar |
| ${cookie} = | `Get Cookie` | foo |
| `Should Be Equal` | ${cookie.name} | bar |
| `Should Be Equal` | ${cookie.value} | foo |
| `Should Be True` | ${cookie.expiry.year} > 2017 |
New in SeleniumLibrary 3.0.
"""
cookie = self.driver.get_cookie(name)
if not cookie:
raise CookieNotFound("Cookie with name '%s' not found." % name)
return CookieInformation(**cookie)
|
def get_cookie(self, name):
"""Returns information of cookie with ``name`` as an object.
If no cookie is found with ``name``, keyword fails. The cookie object
contains details about the cookie. Attributes available in the object
are documented in the table below.
| = Attribute = | = Explanation = |
| name | The name of a cookie. |
| value | Value of the cookie. |
| path | Indicates a URL path, for example ``/``. |
| domain | The domain the cookie is visible to. |
| secure | When true, cookie is only used with HTTPS connections. |
| httpOnly | When true, cookie is not accessible via JavaScript. |
| expiry | Python datetime object indicating when the cookie expires. |
See the
[https://w3c.github.io/webdriver/webdriver-spec.html#cookies|WebDriver specification]
for details about the cookie information.
Notice that ``expiry`` is specified as a
[https://docs.python.org/3/library/datetime.html#datetime.datetime|datetime object],
not as seconds since Unix Epoch like WebDriver natively does.
Example:
| `Add Cookie` | foo | bar |
| ${cookie} = | `Get Cookie` | foo |
| `Should Be Equal` | ${cookie.name} | bar |
| `Should Be Equal` | ${cookie.value} | foo |
| `Should Be True` | ${cookie.expiry.year} > 2017 |
New in SeleniumLibrary 3.0.
"""
cookie = self.driver.get_cookie(name)
if not cookie:
raise CookieNotFound("Cookie with name '%s' not found." % name)
return CookieInformation(**cookie)
|
https://github.com/robotframework/SeleniumLibrary/issues/1307
|
20190212 16:54:43.113 : FAIL : TypeError: __init__() got an unexpected keyword argument 'hCode'
20190212 16:54:43.113 : DEBUG :
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\SeleniumLibrary\__init__.py", line 369, in run_keyword
return DynamicCore.run_keyword(self, name, args, kwargs)
File "C:\Python27\lib\site-packages\SeleniumLibrary\base\robotlibcore.py", line 102, in run_keyword
return self.keywords[name](*args, **kwargs)
File "C:\Python27\lib\site-packages\SeleniumLibrary\keywords\cookie.py", line 99, in get_cookie
return CookieInformation(**cookie)
|
TypeError
|
def __init__(
self,
name,
value,
path=None,
domain=None,
secure=False,
httpOnly=False,
expiry=None,
**extra,
):
self.name = name
self.value = value
self.path = path
self.domain = domain
self.secure = secure
self.httpOnly = httpOnly
self.expiry = datetime.fromtimestamp(expiry) if expiry else None
self.extra = extra
|
def __init__(
self, name, value, path=None, domain=None, secure=False, httpOnly=False, expiry=None
):
self.name = name
self.value = value
self.path = path
self.domain = domain
self.secure = secure
self.httpOnly = httpOnly
self.expiry = datetime.fromtimestamp(expiry) if expiry else None
|
https://github.com/robotframework/SeleniumLibrary/issues/1307
|
20190212 16:54:43.113 : FAIL : TypeError: __init__() got an unexpected keyword argument 'hCode'
20190212 16:54:43.113 : DEBUG :
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\SeleniumLibrary\__init__.py", line 369, in run_keyword
return DynamicCore.run_keyword(self, name, args, kwargs)
File "C:\Python27\lib\site-packages\SeleniumLibrary\base\robotlibcore.py", line 102, in run_keyword
return self.keywords[name](*args, **kwargs)
File "C:\Python27\lib\site-packages\SeleniumLibrary\keywords\cookie.py", line 99, in get_cookie
return CookieInformation(**cookie)
|
TypeError
|
def __str__(self):
items = "name value path domain secure httpOnly expiry".split()
string = "\n".join("%s=%s" % (item, getattr(self, item)) for item in items)
if self.extra:
string = "%s%s=%s\n" % (string, "extra", self.extra)
return string
|
def __str__(self):
items = "name value path domain secure httpOnly expiry".split()
return "\n".join("%s=%s" % (item, getattr(self, item)) for item in items)
|
https://github.com/robotframework/SeleniumLibrary/issues/1307
|
20190212 16:54:43.113 : FAIL : TypeError: __init__() got an unexpected keyword argument 'hCode'
20190212 16:54:43.113 : DEBUG :
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\SeleniumLibrary\__init__.py", line 369, in run_keyword
return DynamicCore.run_keyword(self, name, args, kwargs)
File "C:\Python27\lib\site-packages\SeleniumLibrary\base\robotlibcore.py", line 102, in run_keyword
return self.keywords[name](*args, **kwargs)
File "C:\Python27\lib\site-packages\SeleniumLibrary\keywords\cookie.py", line 99, in get_cookie
return CookieInformation(**cookie)
|
TypeError
|
def __str__(self):
items = "name value path domain secure httpOnly expiry".split()
string = "\n".join("%s=%s" % (item, getattr(self, item)) for item in items)
if self.extra:
string = "%s\n%s=%s\n" % (string, "extra", self.extra)
return string
|
def __str__(self):
items = "name value path domain secure httpOnly expiry".split()
string = "\n".join("%s=%s" % (item, getattr(self, item)) for item in items)
if self.extra:
string = "%s%s=%s\n" % (string, "extra", self.extra)
return string
|
https://github.com/robotframework/SeleniumLibrary/issues/1307
|
20190212 16:54:43.113 : FAIL : TypeError: __init__() got an unexpected keyword argument 'hCode'
20190212 16:54:43.113 : DEBUG :
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\SeleniumLibrary\__init__.py", line 369, in run_keyword
return DynamicCore.run_keyword(self, name, args, kwargs)
File "C:\Python27\lib\site-packages\SeleniumLibrary\base\robotlibcore.py", line 102, in run_keyword
return self.keywords[name](*args, **kwargs)
File "C:\Python27\lib\site-packages\SeleniumLibrary\keywords\cookie.py", line 99, in get_cookie
return CookieInformation(**cookie)
|
TypeError
|
def _internal_call(self, method, url, payload, params):
args = dict(params=params)
if not url.startswith("http"):
url = self.prefix + url
headers = self._auth_headers()
if "content_type" in args["params"]:
headers["Content-Type"] = args["params"]["content_type"]
del args["params"]["content_type"]
if payload:
args["data"] = payload
else:
headers["Content-Type"] = "application/json"
if payload:
args["data"] = json.dumps(payload)
if self.language is not None:
headers["Accept-Language"] = self.language
logger.debug(
"Sending %s to %s with Headers: %s and Body: %r ",
method,
url,
headers,
args.get("data"),
)
try:
response = self._session.request(
method,
url,
headers=headers,
proxies=self.proxies,
timeout=self.requests_timeout,
**args,
)
response.raise_for_status()
results = response.json()
except requests.exceptions.HTTPError as http_error:
response = http_error.response
try:
msg = response.json()["error"]["message"]
except (ValueError, KeyError):
msg = "error"
try:
reason = response.json()["error"]["reason"]
except (ValueError, KeyError):
reason = None
logger.error(
"HTTP Error for %s to %s returned %s due to %s",
method,
url,
response.status_code,
msg,
)
raise SpotifyException(
response.status_code,
-1,
"%s:\n %s" % (response.url, msg),
reason=reason,
headers=response.headers,
)
except requests.exceptions.RetryError as retry_error:
request = retry_error.request
logger.error("Max Retries reached")
try:
reason = retry_error.args[0].reason
except (IndexError, AttributeError):
reason = None
raise SpotifyException(
599, -1, "%s:\n %s" % (request.path_url, "Max Retries"), reason=reason
)
except ValueError:
results = None
logger.debug("RESULTS: %s", results)
return results
|
def _internal_call(self, method, url, payload, params):
args = dict(params=params)
if not url.startswith("http"):
url = self.prefix + url
headers = self._auth_headers()
if "content_type" in args["params"]:
headers["Content-Type"] = args["params"]["content_type"]
del args["params"]["content_type"]
if payload:
args["data"] = payload
else:
headers["Content-Type"] = "application/json"
if payload:
args["data"] = json.dumps(payload)
if self.language is not None:
headers["Accept-Language"] = self.language
logger.debug(
"Sending %s to %s with Headers: %s and Body: %r ",
method,
url,
headers,
args.get("data"),
)
try:
response = self._session.request(
method,
url,
headers=headers,
proxies=self.proxies,
timeout=self.requests_timeout,
**args,
)
response.raise_for_status()
results = response.json()
except requests.exceptions.HTTPError:
try:
msg = response.json()["error"]["message"]
except (ValueError, KeyError):
msg = "error"
try:
reason = response.json()["error"]["reason"]
except (ValueError, KeyError):
reason = None
logger.error(
"HTTP Error for %s to %s returned %s due to %s",
method,
url,
response.status_code,
msg,
)
raise SpotifyException(
response.status_code,
-1,
"%s:\n %s" % (response.url, msg),
reason=reason,
headers=response.headers,
)
except requests.exceptions.RetryError:
logger.error("Max Retries reached")
raise SpotifyException(
599,
-1,
"%s:\n %s" % (response.url, "Max Retries"),
headers=response.headers,
)
except ValueError:
results = None
logger.debug("RESULTS: %s", results)
return results
|
https://github.com/plamere/spotipy/issues/581
|
Max Retries reached
Traceback (most recent call last):
File "../project/venv/lib/python3.8/site-packages/requests/adapters.py", line 439, in send
resp = conn.urlopen(
File "../project/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 817, in urlopen
return self.urlopen(
File "../project/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 817, in urlopen
return self.urlopen(
File "../project/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 817, in urlopen
return self.urlopen(
File "../project/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 807, in urlopen
retries = retries.increment(method, url, response=response, _pool=self)
File "../project/venv/lib/python3.8/site-packages/urllib3/util/retry.py", line 439, in increment
raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='api.spotify.com', port=443): Max retries exceeded with url: /v1/playlists/37i9dQZF1DWXLeA8Omikj7/followers/contains?ids=spotify:user:bad_fakename (Caused by ResponseError('too many 500 error responses'))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "../project/venv/lib/python3.8/site-packages/spotipy/client.py", line 239, in _internal_call
response = self._session.request(
File "../project/venv/lib/python3.8/site-packages/requests/sessions.py", line 530, in request
resp = self.send(prep, **send_kwargs)
File "../project/venv/lib/python3.8/site-packages/requests/sessions.py", line 643, in send
r = adapter.send(request, **kwargs)
File "../project/venv/lib/python3.8/site-packages/requests/adapters.py", line 507, in send
raise RetryError(e, request=request)
requests.exceptions.RetryError: HTTPSConnectionPool(host='api.spotify.com', port=443): Max retries exceeded with url: /v1/playlists/37i9dQZF1DWXLeA8Omikj7/followers/contains?ids=spotify:user:bad_fakename (Caused by ResponseError('too many 500 error responses'))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "../project/src/issue_boilerplate.py", line 10, in <module>
print(spotify.playlist_is_following(playlist_id=playlist['id'], user_ids=['spotify:user:bad_fakename']))
File "../project/venv/lib/python3.8/site-packages/spotipy/client.py", line 1145, in playlist_is_following
return self._get(
File "../project/venv/lib/python3.8/site-packages/spotipy/client.py", line 286, in _get
return self._internal_call("GET", url, payload, kwargs)
File "../project/venv/lib/python3.8/site-packages/spotipy/client.py", line 273, in _internal_call
"%s:\n %s" % (response.url, "Max Retries"),
AttributeError: 'NoneType' object has no attribute 'url'
|
urllib3.exceptions.MaxRetryError
|
def _internal_call(self, method, url, payload, params):
args = dict(params=params)
if not url.startswith("http"):
url = self.prefix + url
headers = self._auth_headers()
if "content_type" in args["params"]:
headers["Content-Type"] = args["params"]["content_type"]
del args["params"]["content_type"]
if payload:
args["data"] = payload
else:
headers["Content-Type"] = "application/json"
if payload:
args["data"] = json.dumps(payload)
if self.language is not None:
headers["Accept-Language"] = self.language
logger.debug(
"Sending %s to %s with Headers: %s and Body: %r ",
method,
url,
headers,
args.get("data"),
)
try:
response = self._session.request(
method,
url,
headers=headers,
proxies=self.proxies,
timeout=self.requests_timeout,
**args,
)
response.raise_for_status()
results = response.json()
except requests.exceptions.HTTPError:
try:
msg = response.json()["error"]["message"]
except (ValueError, KeyError):
msg = "error"
try:
reason = response.json()["error"]["reason"]
except (ValueError, KeyError):
reason = None
logger.error(
"HTTP Error for %s to %s returned %s due to %s",
method,
url,
response.status_code,
msg,
)
raise SpotifyException(
response.status_code,
-1,
"%s:\n %s" % (response.url, msg),
reason=reason,
headers=response.headers,
)
except requests.exceptions.RetryError:
logger.error("Max Retries reached")
raise SpotifyException(
599,
-1,
"%s:\n %s" % (response.url, "Max Retries"),
headers=response.headers,
)
except ValueError:
results = None
logger.debug("RESULTS: %s", results)
return results
|
def _internal_call(self, method, url, payload, params):
args = dict(params=params)
if not url.startswith("http"):
url = self.prefix + url
headers = self._auth_headers()
if "content_type" in args["params"]:
headers["Content-Type"] = args["params"]["content_type"]
del args["params"]["content_type"]
if payload:
args["data"] = payload
else:
headers["Content-Type"] = "application/json"
if payload:
args["data"] = json.dumps(payload)
if self.language is not None:
headers["Accept-Language"] = self.language
logger.debug(
"Sending %s to %s with Headers: %s and Body: %r ",
method,
url,
headers,
args.get("data"),
)
try:
response = self._session.request(
method,
url,
headers=headers,
proxies=self.proxies,
timeout=self.requests_timeout,
**args,
)
response.raise_for_status()
results = response.json()
except requests.exceptions.HTTPError as http_error:
response = http_error.response
try:
msg = response.json()["error"]["message"]
except (ValueError, KeyError):
msg = "error"
try:
reason = response.json()["error"]["reason"]
except (ValueError, KeyError):
reason = None
logger.error(
"HTTP Error for %s to %s returned %s due to %s",
method,
url,
response.status_code,
msg,
)
raise SpotifyException(
response.status_code,
-1,
"%s:\n %s" % (response.url, msg),
reason=reason,
headers=response.headers,
)
except requests.exceptions.RetryError as retry_error:
response = retry_error.response
logger.error("Max Retries reached")
raise SpotifyException(
599,
-1,
"%s:\n %s" % (response.url, "Max Retries"),
headers=response.headers,
)
except ValueError:
results = None
logger.debug("RESULTS: %s", results)
return results
|
https://github.com/plamere/spotipy/issues/581
|
Max Retries reached
Traceback (most recent call last):
File "../project/venv/lib/python3.8/site-packages/requests/adapters.py", line 439, in send
resp = conn.urlopen(
File "../project/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 817, in urlopen
return self.urlopen(
File "../project/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 817, in urlopen
return self.urlopen(
File "../project/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 817, in urlopen
return self.urlopen(
File "../project/venv/lib/python3.8/site-packages/urllib3/connectionpool.py", line 807, in urlopen
retries = retries.increment(method, url, response=response, _pool=self)
File "../project/venv/lib/python3.8/site-packages/urllib3/util/retry.py", line 439, in increment
raise MaxRetryError(_pool, url, error or ResponseError(cause))
urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='api.spotify.com', port=443): Max retries exceeded with url: /v1/playlists/37i9dQZF1DWXLeA8Omikj7/followers/contains?ids=spotify:user:bad_fakename (Caused by ResponseError('too many 500 error responses'))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "../project/venv/lib/python3.8/site-packages/spotipy/client.py", line 239, in _internal_call
response = self._session.request(
File "../project/venv/lib/python3.8/site-packages/requests/sessions.py", line 530, in request
resp = self.send(prep, **send_kwargs)
File "../project/venv/lib/python3.8/site-packages/requests/sessions.py", line 643, in send
r = adapter.send(request, **kwargs)
File "../project/venv/lib/python3.8/site-packages/requests/adapters.py", line 507, in send
raise RetryError(e, request=request)
requests.exceptions.RetryError: HTTPSConnectionPool(host='api.spotify.com', port=443): Max retries exceeded with url: /v1/playlists/37i9dQZF1DWXLeA8Omikj7/followers/contains?ids=spotify:user:bad_fakename (Caused by ResponseError('too many 500 error responses'))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "../project/src/issue_boilerplate.py", line 10, in <module>
print(spotify.playlist_is_following(playlist_id=playlist['id'], user_ids=['spotify:user:bad_fakename']))
File "../project/venv/lib/python3.8/site-packages/spotipy/client.py", line 1145, in playlist_is_following
return self._get(
File "../project/venv/lib/python3.8/site-packages/spotipy/client.py", line 286, in _get
return self._internal_call("GET", url, payload, kwargs)
File "../project/venv/lib/python3.8/site-packages/spotipy/client.py", line 273, in _internal_call
"%s:\n %s" % (response.url, "Max Retries"),
AttributeError: 'NoneType' object has no attribute 'url'
|
urllib3.exceptions.MaxRetryError
|
def get_rates(self):
rates = requests.get(self.api_url).json()["rates"]
rates[self.no_currency] = 1.0
return rates
|
def get_rates(self):
rates = requests.get(self.api_url).json()["rates"]
rates[self.default] = 1.0
return rates
|
https://github.com/spiral-project/ihatemoney/issues/601
|
INFO [alembic.runtime.migration] Running upgrade cb038f79982e -> 927ed575acbd, Add currencies
Traceback (most recent call last):
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1247, in _execute_context
self.dialect.do_execute(
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(3)
The above exception was the direct cause of the following exception:
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(3)
[SQL: ALTER TABLE bill ADD COLUMN original_currency VARCHAR(3) DEFAULT 'No Currency']
|
sqlalchemy.exc.DataError
|
def get_currencies(self, with_no_currency=True):
rates = [
rate
for rate in self.get_rates()
if with_no_currency or rate != self.no_currency
]
rates.sort(key=lambda rate: "" if rate == self.no_currency else rate)
return rates
|
def get_currencies(self):
rates = [rate for rate in self.get_rates()]
rates.sort(key=lambda rate: "" if rate == self.default else rate)
return rates
|
https://github.com/spiral-project/ihatemoney/issues/601
|
INFO [alembic.runtime.migration] Running upgrade cb038f79982e -> 927ed575acbd, Add currencies
Traceback (most recent call last):
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1247, in _execute_context
self.dialect.do_execute(
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(3)
The above exception was the direct cause of the following exception:
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(3)
[SQL: ALTER TABLE bill ADD COLUMN original_currency VARCHAR(3) DEFAULT 'No Currency']
|
sqlalchemy.exc.DataError
|
def exchange_currency(self, amount, source_currency, dest_currency):
if (
source_currency == dest_currency
or source_currency == self.no_currency
or dest_currency == self.no_currency
):
return amount
rates = self.get_rates()
source_rate = rates[source_currency]
dest_rate = rates[dest_currency]
new_amount = (float(amount) / source_rate) * dest_rate
# round to two digits because we are dealing with money
return round(new_amount, 2)
|
def exchange_currency(self, amount, source_currency, dest_currency):
if (
source_currency == dest_currency
or source_currency == self.default
or dest_currency == self.default
):
return amount
rates = self.get_rates()
source_rate = rates[source_currency]
dest_rate = rates[dest_currency]
new_amount = (float(amount) / source_rate) * dest_rate
# round to two digits because we are dealing with money
return round(new_amount, 2)
|
https://github.com/spiral-project/ihatemoney/issues/601
|
INFO [alembic.runtime.migration] Running upgrade cb038f79982e -> 927ed575acbd, Add currencies
Traceback (most recent call last):
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1247, in _execute_context
self.dialect.do_execute(
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(3)
The above exception was the direct cause of the following exception:
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(3)
[SQL: ALTER TABLE bill ADD COLUMN original_currency VARCHAR(3) DEFAULT 'No Currency']
|
sqlalchemy.exc.DataError
|
def get_billform_for(project, set_default=True, **kwargs):
"""Return an instance of BillForm configured for a particular project.
:set_default: if set to True, on GET methods (usually when we want to
display the default form, it will call set_default on it.
"""
form = BillForm(**kwargs)
if form.original_currency.data == "None":
form.original_currency.data = project.default_currency
show_no_currency = form.original_currency.data == CurrencyConverter.no_currency
form.original_currency.choices = [
(currency_name, render_localized_currency(currency_name, detailed=False))
for currency_name in form.currency_helper.get_currencies(
with_no_currency=show_no_currency
)
]
active_members = [(m.id, m.name) for m in project.active_members]
form.payed_for.choices = form.payer.choices = active_members
form.payed_for.default = [m.id for m in project.active_members]
if set_default and request.method == "GET":
form.set_default()
return form
|
def get_billform_for(project, set_default=True, **kwargs):
"""Return an instance of BillForm configured for a particular project.
:set_default: if set to True, on GET methods (usually when we want to
display the default form, it will call set_default on it.
"""
form = BillForm(**kwargs)
if form.original_currency.data == "None":
form.original_currency.data = project.default_currency
if form.original_currency.data != CurrencyConverter.default:
choices = copy.copy(form.original_currency.choices)
choices.remove((CurrencyConverter.default, CurrencyConverter.default))
choices.sort(
key=lambda rates: "" if rates[0] == project.default_currency else rates[0]
)
form.original_currency.choices = choices
else:
form.original_currency.render_kw = {"default": True}
form.original_currency.data = CurrencyConverter.default
form.original_currency.label = Label(
"original_currency", "Currency (Default: %s)" % (project.default_currency)
)
active_members = [(m.id, m.name) for m in project.active_members]
form.payed_for.choices = form.payer.choices = active_members
form.payed_for.default = [m.id for m in project.active_members]
if set_default and request.method == "GET":
form.set_default()
return form
|
https://github.com/spiral-project/ihatemoney/issues/601
|
INFO [alembic.runtime.migration] Running upgrade cb038f79982e -> 927ed575acbd, Add currencies
Traceback (most recent call last):
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1247, in _execute_context
self.dialect.do_execute(
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(3)
The above exception was the direct cause of the following exception:
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(3)
[SQL: ALTER TABLE bill ADD COLUMN original_currency VARCHAR(3) DEFAULT 'No Currency']
|
sqlalchemy.exc.DataError
|
def fake_form(self, bill, project):
bill.payer_id = self.payer
bill.amount = self.amount
bill.what = self.what
bill.external_link = ""
bill.date = self.date
bill.owers = [Person.query.get(ower, project) for ower in self.payed_for]
bill.original_currency = CurrencyConverter.no_currency
bill.converted_amount = self.currency_helper.exchange_currency(
bill.amount, bill.original_currency, project.default_currency
)
return bill
|
def fake_form(self, bill, project):
bill.payer_id = self.payer
bill.amount = self.amount
bill.what = self.what
bill.external_link = ""
bill.date = self.date
bill.owers = [Person.query.get(ower, project) for ower in self.payed_for]
bill.original_currency = CurrencyConverter.default
bill.converted_amount = self.currency_helper.exchange_currency(
bill.amount, bill.original_currency, project.default_currency
)
return bill
|
https://github.com/spiral-project/ihatemoney/issues/601
|
INFO [alembic.runtime.migration] Running upgrade cb038f79982e -> 927ed575acbd, Add currencies
Traceback (most recent call last):
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1247, in _execute_context
self.dialect.do_execute(
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(3)
The above exception was the direct cause of the following exception:
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(3)
[SQL: ALTER TABLE bill ADD COLUMN original_currency VARCHAR(3) DEFAULT 'No Currency']
|
sqlalchemy.exc.DataError
|
def fill(self, bill, project):
self.payer.data = bill.payer_id
self.amount.data = bill.amount
self.what.data = bill.what
self.external_link.data = bill.external_link
self.original_currency.data = bill.original_currency
self.date.data = bill.date
self.payed_for.data = [int(ower.id) for ower in bill.owers]
self.original_currency.label = Label("original_currency", _("Currency"))
self.original_currency.description = _(
"Project default: %(currency)s",
currency=render_localized_currency(project.default_currency, detailed=False),
)
|
def fill(self, bill):
self.payer.data = bill.payer_id
self.amount.data = bill.amount
self.what.data = bill.what
self.external_link.data = bill.external_link
self.original_currency.data = bill.original_currency
self.date.data = bill.date
self.payed_for.data = [int(ower.id) for ower in bill.owers]
|
https://github.com/spiral-project/ihatemoney/issues/601
|
INFO [alembic.runtime.migration] Running upgrade cb038f79982e -> 927ed575acbd, Add currencies
Traceback (most recent call last):
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1247, in _execute_context
self.dialect.do_execute(
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(3)
The above exception was the direct cause of the following exception:
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(3)
[SQL: ALTER TABLE bill ADD COLUMN original_currency VARCHAR(3) DEFAULT 'No Currency']
|
sqlalchemy.exc.DataError
|
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("bill", sa.Column("converted_amount", sa.Float(), nullable=True))
op.add_column(
"bill",
sa.Column(
"original_currency",
sa.String(length=3),
server_default=CurrencyConverter.no_currency,
nullable=True,
),
)
op.add_column(
"bill_version",
sa.Column("converted_amount", sa.Float(), autoincrement=False, nullable=True),
)
op.add_column(
"bill_version",
sa.Column(
"original_currency", sa.String(length=3), autoincrement=False, nullable=True
),
)
op.add_column(
"project",
sa.Column(
"default_currency",
sa.String(length=3),
server_default=CurrencyConverter.no_currency,
nullable=True,
),
)
op.add_column(
"project_version",
sa.Column(
"default_currency", sa.String(length=3), autoincrement=False, nullable=True
),
)
# ### end Alembic commands ###
op.execute(
"""
UPDATE bill
SET converted_amount = amount
WHERE converted_amount IS NULL
"""
)
|
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("bill", sa.Column("converted_amount", sa.Float(), nullable=True))
op.add_column(
"bill",
sa.Column(
"original_currency",
sa.String(length=3),
server_default=CurrencyConverter.default,
nullable=True,
),
)
op.add_column(
"bill_version",
sa.Column("converted_amount", sa.Float(), autoincrement=False, nullable=True),
)
op.add_column(
"bill_version",
sa.Column(
"original_currency", sa.String(length=3), autoincrement=False, nullable=True
),
)
op.add_column(
"project",
sa.Column(
"default_currency",
sa.String(length=3),
server_default=CurrencyConverter.default,
nullable=True,
),
)
op.add_column(
"project_version",
sa.Column(
"default_currency", sa.String(length=3), autoincrement=False, nullable=True
),
)
# ### end Alembic commands ###
op.execute(
"""
UPDATE bill
SET converted_amount = amount
WHERE converted_amount IS NULL
"""
)
|
https://github.com/spiral-project/ihatemoney/issues/601
|
INFO [alembic.runtime.migration] Running upgrade cb038f79982e -> 927ed575acbd, Add currencies
Traceback (most recent call last):
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1247, in _execute_context
self.dialect.do_execute(
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(3)
The above exception was the direct cause of the following exception:
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(3)
[SQL: ALTER TABLE bill ADD COLUMN original_currency VARCHAR(3) DEFAULT 'No Currency']
|
sqlalchemy.exc.DataError
|
def create_app(
configuration=None, instance_path="/etc/ihatemoney", instance_relative_config=True
):
app = Flask(
__name__,
instance_path=instance_path,
instance_relative_config=instance_relative_config,
)
# If a configuration object is passed, use it. Otherwise try to find one.
load_configuration(app, configuration)
app.wsgi_app = PrefixedWSGI(app)
# Get client's real IP
# Note(0livd): When running in a non-proxy setup, is vulnerable to requests
# with a forged X-FORWARDED-FOR header
app.wsgi_app = ProxyFix(app.wsgi_app)
validate_configuration(app)
app.register_blueprint(web_interface)
app.register_blueprint(apiv1)
app.register_error_handler(404, page_not_found)
# Configure the a, root="main"pplication
setup_database(app)
# Setup Currency Cache
CurrencyConverter()
mail = Mail()
mail.init_app(app)
app.mail = mail
# Jinja filters
app.jinja_env.globals["static_include"] = static_include
app.jinja_env.globals["locale_from_iso"] = locale_from_iso
app.jinja_env.filters["minimal_round"] = minimal_round
# Translations
babel = Babel(app)
# Undocumented currencyformat filter from flask_babel is forwarding to Babel format_currency
# We overwrite it to remove the currency sign ¤ when there is no currency
def currencyformat_nc(number, currency, *args, **kwargs):
"""
Same as flask_babel.Babel.currencyformat, but without the "no currency ¤" sign
when there is no currency.
"""
return format_currency(
number,
currency if currency != CurrencyConverter.no_currency else "",
*args,
**kwargs,
).strip()
app.jinja_env.filters["currencyformat_nc"] = currencyformat_nc
@babel.localeselector
def get_locale():
# get the lang from the session if defined, fallback on the browser "accept
# languages" header.
lang = session.get(
"lang",
request.accept_languages.best_match(app.config["SUPPORTED_LANGUAGES"]),
)
setattr(g, "lang", lang)
return lang
return app
|
def create_app(
configuration=None, instance_path="/etc/ihatemoney", instance_relative_config=True
):
app = Flask(
__name__,
instance_path=instance_path,
instance_relative_config=instance_relative_config,
)
# If a configuration object is passed, use it. Otherwise try to find one.
load_configuration(app, configuration)
app.wsgi_app = PrefixedWSGI(app)
# Get client's real IP
# Note(0livd): When running in a non-proxy setup, is vulnerable to requests
# with a forged X-FORWARDED-FOR header
app.wsgi_app = ProxyFix(app.wsgi_app)
validate_configuration(app)
app.register_blueprint(web_interface)
app.register_blueprint(apiv1)
app.register_error_handler(404, page_not_found)
# Configure the a, root="main"pplication
setup_database(app)
# Setup Currency Cache
CurrencyConverter()
mail = Mail()
mail.init_app(app)
app.mail = mail
# Jinja filters
app.jinja_env.globals["static_include"] = static_include
app.jinja_env.globals["locale_from_iso"] = locale_from_iso
app.jinja_env.filters["minimal_round"] = minimal_round
# Translations
babel = Babel(app)
@babel.localeselector
def get_locale():
# get the lang from the session if defined, fallback on the browser "accept
# languages" header.
lang = session.get(
"lang",
request.accept_languages.best_match(app.config["SUPPORTED_LANGUAGES"]),
)
setattr(g, "lang", lang)
return lang
return app
|
https://github.com/spiral-project/ihatemoney/issues/601
|
INFO [alembic.runtime.migration] Running upgrade cb038f79982e -> 927ed575acbd, Add currencies
Traceback (most recent call last):
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1247, in _execute_context
self.dialect.do_execute(
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(3)
The above exception was the direct cause of the following exception:
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(3)
[SQL: ALTER TABLE bill ADD COLUMN original_currency VARCHAR(3) DEFAULT 'No Currency']
|
sqlalchemy.exc.DataError
|
def edit_project():
edit_form = EditProjectForm()
import_form = UploadForm()
# Import form
if import_form.validate_on_submit():
try:
import_project(import_form.file.data.stream, g.project)
flash(_("Project successfully uploaded"))
return redirect(url_for("main.list_bills"))
except ValueError:
flash(_("Invalid JSON"), category="danger")
# Edit form
if edit_form.validate_on_submit():
project = edit_form.update(g.project)
# Update converted currency
if project.default_currency != CurrencyConverter.no_currency:
for bill in project.get_bills():
if bill.original_currency == CurrencyConverter.no_currency:
bill.original_currency = project.default_currency
bill.converted_amount = CurrencyConverter().exchange_currency(
bill.amount, bill.original_currency, project.default_currency
)
db.session.add(bill)
db.session.add(project)
db.session.commit()
return redirect(url_for("main.list_bills"))
else:
edit_form.name.data = g.project.name
if g.project.logging_preference != LoggingMode.DISABLED:
edit_form.project_history.data = True
if g.project.logging_preference == LoggingMode.RECORD_IP:
edit_form.ip_recording.data = True
edit_form.contact_email.data = g.project.contact_email
edit_form.default_currency.data = g.project.default_currency
return render_template(
"edit_project.html",
edit_form=edit_form,
import_form=import_form,
current_view="edit_project",
)
|
def edit_project():
edit_form = get_editprojectform_for(g.project)
import_form = UploadForm()
# Import form
if import_form.validate_on_submit():
try:
import_project(import_form.file.data.stream, g.project)
flash(_("Project successfully uploaded"))
return redirect(url_for("main.list_bills"))
except ValueError:
flash(_("Invalid JSON"), category="danger")
# Edit form
if edit_form.validate_on_submit():
project = edit_form.update(g.project)
# Update converted currency
if project.default_currency != CurrencyConverter.default:
for bill in project.get_bills():
if bill.original_currency == CurrencyConverter.default:
bill.original_currency = project.default_currency
bill.converted_amount = CurrencyConverter().exchange_currency(
bill.amount, bill.original_currency, project.default_currency
)
db.session.add(bill)
db.session.add(project)
db.session.commit()
return redirect(url_for("main.list_bills"))
else:
edit_form.name.data = g.project.name
if g.project.logging_preference != LoggingMode.DISABLED:
edit_form.project_history.data = True
if g.project.logging_preference == LoggingMode.RECORD_IP:
edit_form.ip_recording.data = True
edit_form.contact_email.data = g.project.contact_email
return render_template(
"edit_project.html",
edit_form=edit_form,
import_form=import_form,
current_view="edit_project",
)
|
https://github.com/spiral-project/ihatemoney/issues/601
|
INFO [alembic.runtime.migration] Running upgrade cb038f79982e -> 927ed575acbd, Add currencies
Traceback (most recent call last):
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1247, in _execute_context
self.dialect.do_execute(
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(3)
The above exception was the direct cause of the following exception:
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(3)
[SQL: ALTER TABLE bill ADD COLUMN original_currency VARCHAR(3) DEFAULT 'No Currency']
|
sqlalchemy.exc.DataError
|
def edit_bill(bill_id):
# FIXME: Test this bill belongs to this project !
bill = Bill.query.get(g.project, bill_id)
if not bill:
raise NotFound()
form = get_billform_for(g.project, set_default=False)
if request.method == "POST" and form.validate():
form.save(bill, g.project)
db.session.commit()
flash(_("The bill has been modified"))
return redirect(url_for(".list_bills"))
if not form.errors:
form.fill(bill, g.project)
return render_template("add_bill.html", form=form, edit=True)
|
def edit_bill(bill_id):
# FIXME: Test this bill belongs to this project !
bill = Bill.query.get(g.project, bill_id)
if not bill:
raise NotFound()
form = get_billform_for(g.project, set_default=False)
if request.method == "POST" and form.validate():
form.save(bill, g.project)
db.session.commit()
flash(_("The bill has been modified"))
return redirect(url_for(".list_bills"))
if not form.errors:
form.fill(bill)
return render_template("add_bill.html", form=form, edit=True)
|
https://github.com/spiral-project/ihatemoney/issues/601
|
INFO [alembic.runtime.migration] Running upgrade cb038f79982e -> 927ed575acbd, Add currencies
Traceback (most recent call last):
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1247, in _execute_context
self.dialect.do_execute(
File "/home/zorun/tmp/venv3-ihatemoney/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 590, in do_execute
cursor.execute(statement, parameters)
psycopg2.errors.StringDataRightTruncation: value too long for type character varying(3)
The above exception was the direct cause of the following exception:
sqlalchemy.exc.DataError: (psycopg2.errors.StringDataRightTruncation) value too long for type character varying(3)
[SQL: ALTER TABLE bill ADD COLUMN original_currency VARCHAR(3) DEFAULT 'No Currency']
|
sqlalchemy.exc.DataError
|
def clear_screen() -> None: # pragma: no cover
command = "clear"
if platform.system() == "Windows":
command = "cls"
os.system(command)
|
def clear_screen() -> None: # pragma: no cover
subprocess.call(["clear"])
|
https://github.com/Yelp/detect-secrets/issues/333
|
C:\Files\Source\dataops-infra>detect-secrets audit .secrets.baseline
Traceback (most recent call last):
File "c:\python38\lib\runpy.py", line 193, in _run_module_as_main
return _run_code(code, main_globals, None,
File "c:\python38\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "C:\Python38\Scripts\detect-secrets.exe\__main__.py", line 7, in <module>
File "c:\python38\lib\site-packages\detect_secrets\main.py", line 73, in main
audit.audit_baseline(args.filename[0])
File "c:\python38\lib\site-packages\detect_secrets\core\audit.py", line 84, in audit_baseline
_clear_screen()
File "c:\python38\lib\site-packages\detect_secrets\core\audit.py", line 480, in _clear_screen
subprocess.call(['clear'])
File "c:\python38\lib\subprocess.py", line 340, in call
with Popen(*popenargs, **kwargs) as p:
File "c:\python38\lib\subprocess.py", line 854, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "c:\python38\lib\subprocess.py", line 1307, in _execute_child
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
FileNotFoundError: [WinError 2] The system cannot find the file specified
|
FileNotFoundError
|
def get_parser_to_add_opt_out_options_to(parser):
"""
The pre-commit hook gets e.g. `--no-jwt-scan` type options
as well as the subparser for `detect-secrets scan`.
:rtype: argparse.ArgumentParser
:returns: argparse.ArgumentParser to pass into PluginOptions
"""
for action in parser._actions: # pragma: no cover (Always returns)
if isinstance(action, argparse._SubParsersAction):
for subparser in action.choices.values():
if subparser.prog.endswith("scan"):
return subparser
# Assume it is the 'detect-secrets-hook' console script
# Relying on parser.prog is too brittle
return parser
|
def get_parser_to_add_opt_out_options_to(parser):
"""
The pre-commit hook gets e.g. `--no-jwt-scan` type options
as well as the subparser for `detect-secrets scan`.
:rtype: argparse.ArgumentParser
:returns: argparse.ArgumentParser to pass into PluginOptions
"""
if parser.prog == "detect-secrets-hook":
return parser
for action in parser._actions: # pragma: no cover (Always returns)
if isinstance(action, argparse._SubParsersAction):
for subparser in action.choices.values():
if subparser.prog.endswith("scan"):
return subparser
|
https://github.com/Yelp/detect-secrets/issues/320
|
Detect secrets...........................................................Failed
- hook id: detect-secrets
- exit code: 1
Traceback (most recent call last):
File "c:\users\john.neville\appdata\local\programs\python\python38-32\lib\runpy.py", line 193, in _run_module_as_main
return _run_code(code, main_globals, None,
File "c:\users\john.neville\appdata\local\programs\python\python38-32\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "C:\Users\john.neville\.cache\pre-commit\repog21r_7q6\py_env-python3.8\Scripts\detect-secrets-hook.EXE\__main__.py", line 7, in <module>
File "c:\users\john.neville\.cache\pre-commit\repog21r_7q6\py_env-python3.8\lib\site-packages\detect_secrets\pre_commit_hook.py", line 27, in main
args = parse_args(argv)
File "c:\users\john.neville\.cache\pre-commit\repog21r_7q6\py_env-python3.8\lib\site-packages\detect_secrets\pre_commit_hook.py", line 21, in parse_args
return ParserBuilder()\
File "c:\users\john.neville\.cache\pre-commit\repog21r_7q6\py_env-python3.8\lib\site-packages\detect_secrets\core\usage.py", line 170, in parse_args
PluginOptions(
File "c:\users\john.neville\.cache\pre-commit\repog21r_7q6\py_env-python3.8\lib\site-packages\detect_secrets\core\usage.py", line 409, in __init__
self.parser = parser.add_argument_group(
AttributeError: 'NoneType' object has no attribute 'add_argument_group'
|
AttributeError
|
def _set_debug_level(self, debug_level):
"""
:type debug_level: int, between 0-2
:param debug_level: configure verbosity of log
"""
mapping = {
0: logging.ERROR,
1: logging.INFO,
2: logging.DEBUG,
}
self.setLevel(
mapping[min(debug_level, 2)],
)
|
def _set_debug_level(self, debug_level):
"""
:type debug_level: int, between 0-2
:param debug_level: configure verbosity of log
"""
mapping = {
0: logging.ERROR,
1: logging.INFO,
2: logging.DEBUG,
}
self.setLevel(mapping[debug_level])
|
https://github.com/Yelp/detect-secrets/issues/61
|
Traceback (most recent call last):
File "/hey/three_six/bin/detect-secrets", line 11, in <module>
sys.exit(main())
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/main.py", line 27, in main
log.set_debug_level(args.verbose)
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/core/log.py", line 46, in _set_debug_level
self.setLevel(mapping[debug_level])
KeyError: 3
|
KeyError
|
def __init__(
self,
typ,
filename,
lineno,
secret,
is_secret=None,
):
"""
:type typ: str
:param typ: human-readable secret type, defined by the plugin
that generated this PotentialSecret.
e.g. "High Entropy String"
:type filename: str
:param filename: name of file that this secret was found
:type lineno: int
:param lineno: location of secret, within filename.
Merely used as a reference for easy triage.
:type secret: str
:param secret: the actual secret identified
:type is_secret: bool|None
:param is_secret: whether or not the secret is a true- or false- positive
"""
self.type = typ
self.filename = filename
self.lineno = lineno
self.secret_hash = self.hash_secret(secret)
self.is_secret = is_secret
# If two PotentialSecrets have the same values for these fields,
# they are considered equal. Note that line numbers aren't included
# in this, because line numbers are subject to change.
self.fields_to_compare = ["filename", "secret_hash", "type"]
|
def __init__(self, typ, filename, lineno, secret):
"""
:type typ: str
:param typ: human-readable secret type, defined by the plugin
that generated this PotentialSecret.
Eg. "High Entropy String"
:type filename: str
:param filename: name of file that this secret was found
:type lineno: int
:param lineno: location of secret, within filename.
Merely used as a reference for easy triage.
:type secret: str
:param secret: the actual secret identified
"""
self.type = typ
self.filename = filename
self.lineno = lineno
self.secret_hash = self.hash_secret(secret)
# If two PotentialSecrets have the same values for these fields,
# they are considered equal. Note that line numbers aren't included
# in this, because line numbers are subject to change.
self.fields_to_compare = ["filename", "secret_hash", "type"]
|
https://github.com/Yelp/detect-secrets/issues/61
|
Traceback (most recent call last):
File "/hey/three_six/bin/detect-secrets", line 11, in <module>
sys.exit(main())
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/main.py", line 27, in main
log.set_debug_level(args.verbose)
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/core/log.py", line 46, in _set_debug_level
self.setLevel(mapping[debug_level])
KeyError: 3
|
KeyError
|
def json(self):
"""Custom JSON encoder"""
attributes = {
"type": self.type,
"filename": self.filename,
"line_number": self.lineno,
"hashed_secret": self.secret_hash,
}
if self.is_secret is not None:
attributes["is_secret"] = self.is_secret
return attributes
|
def json(self):
"""Custom JSON encoder"""
attributes = {
"type": self.type,
"filename": self.filename,
"line_number": self.lineno,
"hashed_secret": self.secret_hash,
}
return attributes
|
https://github.com/Yelp/detect-secrets/issues/61
|
Traceback (most recent call last):
File "/hey/three_six/bin/detect-secrets", line 11, in <module>
sys.exit(main())
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/main.py", line 27, in main
log.set_debug_level(args.verbose)
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/core/log.py", line 46, in _set_debug_level
self.setLevel(mapping[debug_level])
KeyError: 3
|
KeyError
|
def __str__(self): # pragma: no cover
return ("Secret Type: %s\nLocation: %s:%d\n") % (
self.type,
self.filename,
self.lineno,
)
|
def __str__(self): # pragma: no cover
return (
"Secret Type: %s\nLocation: %s:%d\n"
# "Hash: %s\n"
) % (
self.type,
self.filename,
self.lineno,
# self.secret_hash
)
|
https://github.com/Yelp/detect-secrets/issues/61
|
Traceback (most recent call last):
File "/hey/three_six/bin/detect-secrets", line 11, in <module>
sys.exit(main())
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/main.py", line 27, in main
log.set_debug_level(args.verbose)
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/core/log.py", line 46, in _set_debug_level
self.setLevel(mapping[debug_level])
KeyError: 3
|
KeyError
|
def _load_baseline_from_dict(cls, data):
"""Initializes a SecretsCollection object from dictionary.
:type data: dict
:param data: properly formatted dictionary to load SecretsCollection from.
:rtype: SecretsCollection
:raises: IOError
"""
result = SecretsCollection()
if not all(
key in data
for key in (
"exclude_regex",
"plugins_used",
"results",
"version",
)
):
raise IOError
result.exclude_regex = data["exclude_regex"]
plugins = []
for plugin in data["plugins_used"]:
plugin_classname = plugin.pop("name")
plugins.append(initialize.from_plugin_classname(plugin_classname, **plugin))
result.plugins = tuple(plugins)
for filename in data["results"]:
result.data[filename] = {}
for item in data["results"][filename]:
secret = PotentialSecret(
item["type"],
filename,
item["line_number"],
secret="will be replaced",
is_secret=item.get("is_secret"),
)
secret.secret_hash = item["hashed_secret"]
result.data[filename][secret] = secret
result.version = data["version"]
return result
|
def _load_baseline_from_dict(cls, data):
"""Initializes a SecretsCollection object from dictionary.
:type data: dict
:param data: properly formatted dictionary to load SecretsCollection from.
:rtype: SecretsCollection
:raises: IOError
"""
result = SecretsCollection()
if not all(
key in data
for key in (
"exclude_regex",
"plugins_used",
"results",
"version",
)
):
raise IOError
result.exclude_regex = data["exclude_regex"]
plugins = []
for plugin in data["plugins_used"]:
plugin_classname = plugin.pop("name")
plugins.append(initialize.from_plugin_classname(plugin_classname, **plugin))
result.plugins = tuple(plugins)
for filename in data["results"]:
result.data[filename] = {}
for item in data["results"][filename]:
secret = PotentialSecret(
item["type"],
filename,
item["line_number"],
"will be replaced",
)
secret.secret_hash = item["hashed_secret"]
result.data[filename][secret] = secret
result.version = data["version"]
return result
|
https://github.com/Yelp/detect-secrets/issues/61
|
Traceback (most recent call last):
File "/hey/three_six/bin/detect-secrets", line 11, in <module>
sys.exit(main())
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/main.py", line 27, in main
log.set_debug_level(args.verbose)
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/core/log.py", line 46, in _set_debug_level
self.setLevel(mapping[debug_level])
KeyError: 3
|
KeyError
|
def scan_diff(
self,
diff,
baseline_filename="",
last_commit_hash="",
repo_name="",
):
"""For optimization purposes, our scanning strategy focuses on looking
at incremental differences, rather than re-scanning the codebase every time.
This function supports this, and adds information to self.data.
:type diff: str
:param diff: diff string.
e.g. The output of `git diff <fileA> <fileB>`
:type baseline_filename: str
:param baseline_filename: if there are any baseline secrets, then the baseline
file will have hashes in them. By specifying it, we
can skip this clear exception.
:type last_commit_hash: str
:param last_commit_hash: used for logging only -- the last commit hash we saved
:type repo_name: str
:param repo_name: used for logging only -- the name of the repo
"""
try:
patch_set = PatchSet.from_string(diff)
except UnidiffParseError: # pragma: no cover
alert = {
"alert": "UnidiffParseError",
"hash": last_commit_hash,
"repo_name": repo_name,
}
log.error(alert)
raise
if self.exclude_regex:
regex = re.compile(self.exclude_regex, re.IGNORECASE)
for patch_file in patch_set:
filename = patch_file.path
# If the file matches the exclude_regex, we skip it
if self.exclude_regex and regex.search(filename):
continue
if filename == baseline_filename:
continue
for results, plugin in self._results_accumulator(filename):
results.update(
self._extract_secrets_from_patch(
patch_file,
plugin,
filename,
),
)
|
def scan_diff(
self,
diff,
baseline_filename="",
last_commit_hash="",
repo_name="",
):
"""For optimization purposes, our scanning strategy focuses on looking
at incremental differences, rather than re-scanning the codebase every time.
This function supports this, and adds information to self.data.
:type diff: str
:param diff: diff string.
Eg. The output of `git diff <fileA> <fileB>`
:type baseline_filename: str
:param baseline_filename: if there are any baseline secrets, then the baseline
file will have hashes in them. By specifying it, we
can skip this clear exception.
:type last_commit_hash: str
:param last_commit_hash: used for logging only -- the last commit hash we saved
:type repo_name: str
:param repo_name: used for logging only -- the name of the repo
"""
try:
patch_set = PatchSet.from_string(diff)
except UnidiffParseError: # pragma: no cover
alert = {
"alert": "UnidiffParseError",
"hash": last_commit_hash,
"repo_name": repo_name,
}
log.error(alert)
raise
if self.exclude_regex:
regex = re.compile(self.exclude_regex, re.IGNORECASE)
for patch_file in patch_set:
filename = patch_file.path
# If the file matches the exclude_regex, we skip it
if self.exclude_regex and regex.search(filename):
continue
if filename == baseline_filename:
continue
for results, plugin in self._results_accumulator(filename):
results.update(
self._extract_secrets_from_patch(
patch_file,
plugin,
filename,
),
)
|
https://github.com/Yelp/detect-secrets/issues/61
|
Traceback (most recent call last):
File "/hey/three_six/bin/detect-secrets", line 11, in <module>
sys.exit(main())
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/main.py", line 27, in main
log.set_debug_level(args.verbose)
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/core/log.py", line 46, in _set_debug_level
self.setLevel(mapping[debug_level])
KeyError: 3
|
KeyError
|
def _get_value_and_line_offset(self, key, values):
"""Returns the index of the location of key, value pair in lines.
:type key: str
:param key: key, in config file.
:type values: str
:param values: values for key, in config file. This is plural,
because you can have multiple values per key. e.g.
>>> key =
... value1
... value2
:type lines: list
:param lines: a collection of lines-so-far in file
:rtype: list(tuple)
"""
values_list = self._construct_values_list(values)
if not values_list:
return []
current_value_list_index = 0
output = []
lines_modified = False
first_line_regex = re.compile(
r"^\s*{}[ :=]+{}".format(
re.escape(key),
re.escape(values_list[current_value_list_index]),
)
)
comment_regex = re.compile(r"\s*[;#]")
for index, line in enumerate(self.lines):
if current_value_list_index == 0:
if first_line_regex.match(line):
output.append(
(
values_list[current_value_list_index],
self.line_offset + index + 1,
)
)
current_value_list_index += 1
continue
# Check ignored lines before checking values, because
# you can write comments *after* the value.
# Ignore blank lines
if not line.strip():
continue
# Ignore comments
if comment_regex.match(line):
continue
if current_value_list_index == len(values_list):
if index == 0:
index = 1 # don't want to count the same line again
self.line_offset += index
self.lines = self.lines[index:]
lines_modified = True
break
else:
output.append(
(
values_list[current_value_list_index],
self.line_offset + index + 1,
)
)
current_value_list_index += 1
if not lines_modified:
# No more lines left, if loop was not explicitly left.
self.lines = []
return output
|
def _get_value_and_line_offset(self, key, values):
"""Returns the index of the location of key, value pair in lines.
:type key: str
:param key: key, in config file.
:type values: str
:param values: values for key, in config file. This is plural,
because you can have multiple values per key. Eg.
>>> key =
... value1
... value2
:type lines: list
:param lines: a collection of lines-so-far in file
:rtype: list(tuple)
"""
values_list = self._construct_values_list(values)
if not values_list:
return []
current_value_list_index = 0
output = []
lines_modified = False
first_line_regex = re.compile(
r"^\s*{}[ :=]+{}".format(
re.escape(key),
re.escape(values_list[current_value_list_index]),
)
)
comment_regex = re.compile(r"\s*[;#]")
for index, line in enumerate(self.lines):
if current_value_list_index == 0:
if first_line_regex.match(line):
output.append(
(
values_list[current_value_list_index],
self.line_offset + index + 1,
)
)
current_value_list_index += 1
continue
# Check ignored lines before checking values, because
# you can write comments *after* the value.
# Ignore blank lines
if not line.strip():
continue
# Ignore comments
if comment_regex.match(line):
continue
if current_value_list_index == len(values_list):
if index == 0:
index = 1 # don't want to count the same line again
self.line_offset += index
self.lines = self.lines[index:]
lines_modified = True
break
else:
output.append(
(
values_list[current_value_list_index],
self.line_offset + index + 1,
)
)
current_value_list_index += 1
if not lines_modified:
# No more lines left, if loop was not explicitly left.
self.lines = []
return output
|
https://github.com/Yelp/detect-secrets/issues/61
|
Traceback (most recent call last):
File "/hey/three_six/bin/detect-secrets", line 11, in <module>
sys.exit(main())
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/main.py", line 27, in main
log.set_debug_level(args.verbose)
File "/hey/three_six/lib/python3.6/site-packages/detect_secrets/core/log.py", line 46, in _set_debug_level
self.setLevel(mapping[debug_level])
KeyError: 3
|
KeyError
|
def __gt__(self, other):
return -self <= -(other + 1)
|
def __gt__(self, other):
return (other + 1) <= self
|
https://github.com/OpenMined/PySyft/issues/4705
|
AttributeError Traceback (most recent call last)
<ipython-input-10-c55d3fcd7179> in <module>
2 t2 = torch.tensor([1.2, 1]).fix_precision()#.share(*workers, crypto_provider=crypto_provider, protocol="fss", requires_grad=True)
3
----> 4 t1 > t2
~/code/PySyft/syft/generic/frameworks/hook/hook.py in overloaded_native_method(self, *args, **kwargs)
218 # Send the new command to the appropriate class and get the response
219 method = getattr(new_self, method_name)
--> 220 response = method(*new_args, **new_kwargs)
221
222 # For inplace methods, just directly return self
~/code/PySyft/syft/generic/frameworks/overload.py in _hook_method_args(self, *args, **kwargs)
25
26 # Send it to the appropriate class and get the response
---> 27 response = attr(self, new_self, *new_args, **new_kwargs)
28
29 # Put back SyftTensor on the tensors found in the response
~/code/PySyft/syft/frameworks/torch/tensors/interpreters/precision.py in __gt__(self, _self, other)
821 def __gt__(self, _self, other):
822 print("FPT gt", _self, other)
--> 823 result = _self.__gt__(other)
824 return result.type(self.torch_dtype) * self.base ** self.precision_fractional
825
~/code/PySyft/syft/frameworks/torch/mpc/__init__.py in method(self, *args, **kwargs)
33 def method(self, *args, **kwargs):
34 f = protocol_store[(name, self.protocol)]
---> 35 return f(self, *args, **kwargs)
36
37 return method
~/code/PySyft/syft/frameworks/torch/tensors/interpreters/additive_shared.py in __gt__(self, other)
938 @crypto_protocol("fss")
939 def __gt__(self, other):
--> 940 return (other + 1) <= self
941
942 def ge(self, other):
~/code/PySyft/syft/generic/frameworks/hook/hook.py in overloaded_native_method(self, *args, **kwargs)
156 # arguments
157 if not isinstance(args[0].child, PointerTensor):
--> 158 self = type(args[0].child)().on(self, wrap=True)
159 args = [args[0]]
160 return overloaded_native_method(self, *args, **kwargs)
AttributeError: 'dict' object has no attribute 'on'
|
AttributeError
|
def __str__(self):
"""Return a human readable version of this message"""
return f"({type(self).__name__} {self.object_id, self.user, self.reason})"
|
def __str__(self):
"""Return a human readable version of this message"""
return f"({type(self).__name__} {(self.obj_id, self.user, self.reason)})"
|
https://github.com/OpenMined/PySyft/issues/3496
|
Traceback (most recent call last):
File "/home/juanmixp/Documentos/PySyft/fii.py", line 12, in <module>
y = (x @ x).get().float_prec()
File "/home/juanmixp/Documentos/PySyft/syft/generic/frameworks/hook/hook.py", line 466, in overloaded_native_method
response = method(*new_args, **new_kwargs)
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/precision.py", line 432, in matmul
response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/additive_shared.py", line 646, in matmul
return self._private_mul(other, "matmul")
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/additive_shared.py", line 536, in _private_mul
shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field, self.dtype)
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/mpc/spdz.py", line 40, in spdz_mul
delta = delta.reconstruct()
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/additive_shared.py", line 321, in reconstruct
pointer = ptr_to_sh.remote_get()
File "/home/juanmixp/Documentos/PySyft/syft/generic/pointers/pointer_tensor.py", line 304, in remote_get
self.owner.send_command(message=("mid_get", self, (), {}), recipient=self.location)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 637, in send_command
ret_val = self.send_msg(message, location=recipient)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 290, in send_msg
bin_response = self._send_msg(bin_message, location)
File "/home/juanmixp/Documentos/PySyft/syft/workers/virtual.py", line 15, in _send_msg
return location._recv_msg(message)
File "/home/juanmixp/Documentos/PySyft/syft/workers/virtual.py", line 19, in _recv_msg
return self.recv_msg(message)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 326, in recv_msg
response = self._message_router[type(msg)](msg)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 468, in execute_tensor_command
return self.execute_computation_action(cmd.action)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 510, in execute_computation_action
response = getattr(_self, op_name)(*args_, **kwargs_)
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/native.py", line 604, in mid_get
self.child.mid_get()
File "/home/juanmixp/Documentos/PySyft/syft/generic/object.py", line 112, in mid_get
tensor = self.get()
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/additive_shared.py", line 240, in get
shares.append(share.get())
File "/home/juanmixp/Documentos/PySyft/syft/generic/pointers/pointer_tensor.py", line 334, in get
tensor = ObjectPointer.get(self, user=user, reason=reason, deregister_ptr=deregister_ptr)
File "/home/juanmixp/Documentos/PySyft/syft/generic/pointers/object_pointer.py", line 269, in get
obj = self.owner.request_obj(self.id_at_location, self.location, user, reason)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 750, in request_obj
obj = self.send_msg(ObjectRequestMessage(obj_id, user, reason), location)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 284, in send_msg
print(f"worker {self} sending {message} to {location}")
File "/home/juanmixp/Documentos/PySyft/syft/messaging/message.py", line 303, in __str__
return f"({type(self).__name__} {(self.obj_id, self.user, self.reason)})"
AttributeError: 'ObjectRequestMessage' object has no attribute 'obj_id'
|
AttributeError
|
def __init__(
self,
hook: "FrameworkHook",
id: Union[int, str] = 0,
data: Union[List, tuple] = None,
is_client_worker: bool = False,
log_msgs: bool = False,
verbose: bool = None,
auto_add: bool = True,
message_pending_time: Union[int, float] = 0,
):
"""Initializes a BaseWorker."""
super().__init__()
self.hook = hook
self.object_store = ObjectStore(owner=self)
self.id = id
self.is_client_worker = is_client_worker
self.log_msgs = log_msgs
if verbose is None:
self.verbose = hook.verbose if hasattr(hook, "verbose") else False
else:
self.verbose = verbose
if isinstance(hook, sy.TorchHook) and hasattr(hook, "_syft_workers"):
hook._syft_workers.add(self)
self.auto_add = auto_add
self._message_pending_time = message_pending_time
self.msg_history = list()
# For performance, we cache all possible message types
self._message_router = {
TensorCommandMessage: self.execute_tensor_command,
PlanCommandMessage: self.execute_plan_command,
WorkerCommandMessage: self.execute_worker_command,
ObjectMessage: self.handle_object_msg,
ObjectRequestMessage: self.respond_to_obj_req,
ForceObjectDeleteMessage: self.handle_delete_object_msg, # FIXME: there is no ObjectDeleteMessage
ForceObjectDeleteMessage: self.handle_force_delete_object_msg,
IsNoneMessage: self.is_object_none,
GetShapeMessage: self.handle_get_shape_message,
SearchMessage: self.respond_to_search,
}
self._plan_command_router = {
codes.PLAN_CMDS.FETCH_PLAN: self._fetch_plan_remote,
codes.PLAN_CMDS.FETCH_PROTOCOL: self._fetch_protocol_remote,
}
self.load_data(data)
# Declare workers as appropriate
self._known_workers = {}
if auto_add:
if hook is not None and hook.local_worker is not None:
known_workers = self.hook.local_worker._known_workers
if self.id in known_workers:
if isinstance(known_workers[self.id], type(self)):
# If a worker with this id already exists and it has the
# same type as the one being created, we copy all the attributes
# of the existing worker to this one.
self.__dict__.update(known_workers[self.id].__dict__)
else:
raise RuntimeError(
"Worker initialized with the same id and different types."
)
else:
hook.local_worker.add_worker(self)
for worker_id, worker in hook.local_worker._known_workers.items():
if worker_id not in self._known_workers:
self.add_worker(worker)
if self.id not in worker._known_workers:
worker.add_worker(self)
else:
# Make the local worker aware of itself
# self is the to-be-created local worker
self.add_worker(self)
if hook is None:
self.framework = None
else:
# TODO[jvmancuso]: avoid branching here if possible, maybe by changing code in
# execute_tensor_command or command_guard to not expect an attribute named "torch"
# (#2530)
self.framework = hook.framework
if hasattr(hook, "torch"):
self.torch = self.framework
self.remote = Remote(self, "torch")
elif hasattr(hook, "tensorflow"):
self.tensorflow = self.framework
self.remote = Remote(self, "tensorflow")
# storage object for crypto primitives
self.crypto_store = PrimitiveStorage(owner=self)
|
def __init__(
self,
hook: "FrameworkHook",
id: Union[int, str] = 0,
data: Union[List, tuple] = None,
is_client_worker: bool = False,
log_msgs: bool = False,
verbose: bool = False,
auto_add: bool = True,
message_pending_time: Union[int, float] = 0,
):
"""Initializes a BaseWorker."""
super().__init__()
self.hook = hook
self.object_store = ObjectStore(owner=self)
self.id = id
self.is_client_worker = is_client_worker
self.log_msgs = log_msgs
self.verbose = verbose
self.auto_add = auto_add
self._message_pending_time = message_pending_time
self.msg_history = list()
# For performance, we cache all possible message types
self._message_router = {
TensorCommandMessage: self.execute_tensor_command,
PlanCommandMessage: self.execute_plan_command,
WorkerCommandMessage: self.execute_worker_command,
ObjectMessage: self.handle_object_msg,
ObjectRequestMessage: self.respond_to_obj_req,
ForceObjectDeleteMessage: self.handle_delete_object_msg, # FIXME: there is no ObjectDeleteMessage
ForceObjectDeleteMessage: self.handle_force_delete_object_msg,
IsNoneMessage: self.is_object_none,
GetShapeMessage: self.handle_get_shape_message,
SearchMessage: self.respond_to_search,
}
self._plan_command_router = {
codes.PLAN_CMDS.FETCH_PLAN: self._fetch_plan_remote,
codes.PLAN_CMDS.FETCH_PROTOCOL: self._fetch_protocol_remote,
}
self.load_data(data)
# Declare workers as appropriate
self._known_workers = {}
if auto_add:
if hook is not None and hook.local_worker is not None:
known_workers = self.hook.local_worker._known_workers
if self.id in known_workers:
if isinstance(known_workers[self.id], type(self)):
# If a worker with this id already exists and it has the
# same type as the one being created, we copy all the attributes
# of the existing worker to this one.
self.__dict__.update(known_workers[self.id].__dict__)
else:
raise RuntimeError(
"Worker initialized with the same id and different types."
)
else:
hook.local_worker.add_worker(self)
for worker_id, worker in hook.local_worker._known_workers.items():
if worker_id not in self._known_workers:
self.add_worker(worker)
if self.id not in worker._known_workers:
worker.add_worker(self)
else:
# Make the local worker aware of itself
# self is the to-be-created local worker
self.add_worker(self)
if hook is None:
self.framework = None
else:
# TODO[jvmancuso]: avoid branching here if possible, maybe by changing code in
# execute_tensor_command or command_guard to not expect an attribute named "torch"
# (#2530)
self.framework = hook.framework
if hasattr(hook, "torch"):
self.torch = self.framework
self.remote = Remote(self, "torch")
elif hasattr(hook, "tensorflow"):
self.tensorflow = self.framework
self.remote = Remote(self, "tensorflow")
# storage object for crypto primitives
self.crypto_store = PrimitiveStorage(owner=self)
|
https://github.com/OpenMined/PySyft/issues/3496
|
Traceback (most recent call last):
File "/home/juanmixp/Documentos/PySyft/fii.py", line 12, in <module>
y = (x @ x).get().float_prec()
File "/home/juanmixp/Documentos/PySyft/syft/generic/frameworks/hook/hook.py", line 466, in overloaded_native_method
response = method(*new_args, **new_kwargs)
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/precision.py", line 432, in matmul
response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/additive_shared.py", line 646, in matmul
return self._private_mul(other, "matmul")
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/additive_shared.py", line 536, in _private_mul
shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field, self.dtype)
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/mpc/spdz.py", line 40, in spdz_mul
delta = delta.reconstruct()
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/additive_shared.py", line 321, in reconstruct
pointer = ptr_to_sh.remote_get()
File "/home/juanmixp/Documentos/PySyft/syft/generic/pointers/pointer_tensor.py", line 304, in remote_get
self.owner.send_command(message=("mid_get", self, (), {}), recipient=self.location)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 637, in send_command
ret_val = self.send_msg(message, location=recipient)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 290, in send_msg
bin_response = self._send_msg(bin_message, location)
File "/home/juanmixp/Documentos/PySyft/syft/workers/virtual.py", line 15, in _send_msg
return location._recv_msg(message)
File "/home/juanmixp/Documentos/PySyft/syft/workers/virtual.py", line 19, in _recv_msg
return self.recv_msg(message)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 326, in recv_msg
response = self._message_router[type(msg)](msg)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 468, in execute_tensor_command
return self.execute_computation_action(cmd.action)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 510, in execute_computation_action
response = getattr(_self, op_name)(*args_, **kwargs_)
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/native.py", line 604, in mid_get
self.child.mid_get()
File "/home/juanmixp/Documentos/PySyft/syft/generic/object.py", line 112, in mid_get
tensor = self.get()
File "/home/juanmixp/Documentos/PySyft/syft/frameworks/torch/tensors/interpreters/additive_shared.py", line 240, in get
shares.append(share.get())
File "/home/juanmixp/Documentos/PySyft/syft/generic/pointers/pointer_tensor.py", line 334, in get
tensor = ObjectPointer.get(self, user=user, reason=reason, deregister_ptr=deregister_ptr)
File "/home/juanmixp/Documentos/PySyft/syft/generic/pointers/object_pointer.py", line 269, in get
obj = self.owner.request_obj(self.id_at_location, self.location, user, reason)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 750, in request_obj
obj = self.send_msg(ObjectRequestMessage(obj_id, user, reason), location)
File "/home/juanmixp/Documentos/PySyft/syft/workers/base.py", line 284, in send_msg
print(f"worker {self} sending {message} to {location}")
File "/home/juanmixp/Documentos/PySyft/syft/messaging/message.py", line 303, in __str__
return f"({type(self).__name__} {(self.obj_id, self.user, self.reason)})"
AttributeError: 'ObjectRequestMessage' object has no attribute 'obj_id'
|
AttributeError
|
def select_share(alpha_sh, x_sh, y_sh):
"""Performs select share protocol
If the bit alpha_sh is 0, x_sh is returned
If the bit alpha_sh is 1, y_sh is returned
Args:
x_sh (AdditiveSharingTensor): the first share to select
y_sh (AdditiveSharingTensor): the second share to select
alpha_sh (AdditiveSharingTensor): the bit to choose between x_sh and y_sh
Return:
z_sh = (1 - alpha_sh) * x_sh + alpha_sh * y_sh
"""
assert alpha_sh.dtype == x_sh.dtype == y_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
workers = alpha_sh.locations
crypto_provider = alpha_sh.crypto_provider
L = alpha_sh.field
dtype = get_dtype(L)
u_sh = _shares_of_zero(1, L, dtype, crypto_provider, *workers)
# 1)
w_sh = y_sh - x_sh
# 2)
c_sh = alpha_sh * w_sh
# 3)
z_sh = x_sh + c_sh + u_sh
return z_sh
|
def select_share(alpha_sh, x_sh, y_sh):
"""Performs select share protocol
If the bit alpha_sh is 0, x_sh is returned
If the bit alpha_sh is 1, y_sh is returned
Args:
x_sh (AdditiveSharingTensor): the first share to select
y_sh (AdditiveSharingTensor): the second share to select
alpha_sh (AdditiveSharingTensor): the bit to choose between x_sh and y_sh
Return:
z_sh = (1 - alpha_sh) * x_sh + alpha_sh * y_sh
"""
assert alpha_sh.dtype == x_sh.dtype == y_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
alice, bob = alpha_sh.locations
crypto_provider = alpha_sh.crypto_provider
L = alpha_sh.field
dtype = get_dtype(L)
u_sh = _shares_of_zero(1, L, dtype, crypto_provider, alice, bob)
# 1)
w_sh = y_sh - x_sh
# 2)
c_sh = alpha_sh * w_sh
# 3)
z_sh = x_sh + c_sh + u_sh
return z_sh
|
https://github.com/OpenMined/PySyft/issues/2631
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-902fb9b77716> in <module>
12 x = th.tensor([1.1,2.0,3.2,4.0]).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
13 m=th.nn.Linear(4, 1).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
---> 14 o=m(x)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
90 @weak_script_method
91 def forward(self, input):
---> 92 return F.linear(input, self.weight, self.bias)
93
94 def extra_repr(self):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/hook/hook.py in overloaded_func(*args, **kwargs)
745 handle_func_command = TorchTensor.handle_func_command
746
--> 747 response = handle_func_command(command)
748
749 return response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
310 new_command = (cmd, None, new_args, new_kwargs)
311 # Send it to the appropriate class and get the response
--> 312 response = new_type.handle_func_command(new_command)
313 # Put back the wrappers where needed
314 response = syft.frameworks.torch.hook_args.hook_response(
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in handle_func_command(cls, command)
236 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
237 cmd = cls.rgetattr(cls, cmd)
--> 238 return cmd(*args, **kwargs)
239 except AttributeError:
240 pass
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in linear(*args)
201 Un-hook the function to have its detailed behaviour
202 """
--> 203 return torch.nn.functional.native_linear(*args)
204
205 module.linear = linear
/usr/local/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1406 ret = torch.addmm(bias, input, weight.t())
1407 else:
-> 1408 output = input.matmul(weight.t())
1409 if bias is not None:
1410 output += bias
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in method_with_grad(*args, **kwargs)
138 )
139
--> 140 result = getattr(new_self, name)(*new_args, **new_kwargs)
141
142 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/precision.py in matmul(self, *args, **kwargs)
414
415 # Send it to the appropriate class and get the response
--> 416 response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
417
418 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in matmul(self, other)
515 return self._public_mul(other, "matmul")
516
--> 517 return self._private_mul(other, "matmul")
518
519 def mm(self, *args, **kwargs):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in _private_mul(self, other, equation)
413 raise AttributeError("For multiplication a crypto_provider must be passed.")
414
--> 415 shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field)
416
417 return shares
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/crypto/spdz.py in spdz_mul(cmd, x_sh, y_sh, crypto_provider, field)
43 j = sy.MultiPointerTensor(children=[j1, j0])
44 else:
---> 45 j = sy.MultiPointerTensor(children=[j1] + j0.child.values())
46
47 delta_b = cmd(delta, b)
TypeError: can only concatenate list (not "dict_values") to list
|
TypeError
|
def private_compare(x_bit_sh, r, beta, L):
"""
Perform privately x > r
args:
x (AdditiveSharedTensor): the private tensor
r (MultiPointerTensor): the threshold commonly held by the workers
beta (MultiPointerTensor): a boolean commonly held by the workers to
hide the result of computation for the crypto provider
L(int): field size for r
return:
β′ = β ⊕ (x > r).
"""
assert isinstance(x_bit_sh, sy.AdditiveSharingTensor)
assert isinstance(r, sy.MultiPointerTensor)
assert isinstance(beta, sy.MultiPointerTensor)
# Would it be safer to have a different r/beta for each value in the tensor?
workers = x_bit_sh.locations
crypto_provider = x_bit_sh.crypto_provider
p = x_bit_sh.field
# the commented out numbers below correspond to the
# line numbers in Algorithm 3 of the SecureNN paper
# https://eprint.iacr.org/2018/442.pdf
# Common randomess
s = torch.randint(1, p, x_bit_sh.shape).send(*workers, **no_wrap)
u = torch.randint(1, p, x_bit_sh.shape).send(*workers, **no_wrap)
perm = torch.randperm(x_bit_sh.shape[-1]).send(*workers, **no_wrap)
j = sy.MultiPointerTensor(
children=[
torch.tensor([int(i == 0)]).send(w, **no_wrap)
for i, w in enumerate(workers)
]
)
# 1)
t = r + 1
t_bit = decompose(t, L)
r_bit = decompose(r, L)
# if beta == 0
# 5)
w = x_bit_sh + (j * r_bit) - (2 * r_bit * x_bit_sh)
# 6)
wc = w.flip(-1).cumsum(-1).flip(-1) - w
c_beta0 = -x_bit_sh + (j * r_bit) + j + wc
# elif beta == 1 AND r != 2^l- 1
# 8)
w = x_bit_sh + (j * t_bit) - (2 * t_bit * x_bit_sh)
# 9)
wc = w.flip(-1).cumsum(-1).flip(-1) - w
c_beta1 = x_bit_sh + (-j * t_bit) + j + wc
# else
# 11)
c_igt1 = (1 - j) * (u + 1) - (j * u)
c_ie1 = (1 - 2 * j) * u
l1_mask = torch.zeros(x_bit_sh.shape).long()
l1_mask[..., 0] = 1
l1_mask = l1_mask.send(*workers, **no_wrap)
# c_else = if i == 1 c_ie1 else c_igt1
c_else = (l1_mask * c_ie1) + ((1 - l1_mask) * c_igt1)
# Mask for the case r == 2^l −1
r_mask = (r == get_r_mask(L)).long()
r_mask = r_mask.unsqueeze(-1)
# Mask combination to execute the if / else statements of 4), 7), 10)
c = (
(1 - beta) * c_beta0
+ (beta * (1 - r_mask)) * c_beta1
+ (beta * r_mask) * c_else
)
# 14)
# Hide c values
mask = s * c
# Permute the mask
# I have to create idx because Ellipsis are still not supported
# (I would like to do permuted_mask = mask[..., perm])
idx = [slice(None)] * (len(x_bit_sh.shape) - 1) + [perm]
permuted_mask = mask[idx]
# Send it to another worker
# We do this because we can't allow the local worker to get and see permuted_mask
# because otherwise it can inverse the permutation and remove s to get c.
# So opening the permuted_mask should be made by a worker which doesn't have access to the randomness
remote_mask = permuted_mask.wrap().send(crypto_provider, **no_wrap)
# 15)
d_ptr = remote_mask.remote_get()
beta_prime = (d_ptr == 0).sum(-1)
# Get result back
res = beta_prime.get()
return res
|
def private_compare(x_bit_sh, r, beta, L):
"""
Perform privately x > r
args:
x (AdditiveSharedTensor): the private tensor
r (MultiPointerTensor): the threshold commonly held by alice and bob
beta (MultiPointerTensor): a boolean commonly held by alice and bob to
hide the result of computation for the crypto provider
L(int): field size for r
return:
β′ = β ⊕ (x > r).
"""
assert isinstance(x_bit_sh, sy.AdditiveSharingTensor)
assert isinstance(r, sy.MultiPointerTensor)
assert isinstance(beta, sy.MultiPointerTensor)
# Would it be safer to have a different r/beta for each value in the tensor?
alice, bob = x_bit_sh.locations
crypto_provider = x_bit_sh.crypto_provider
p = x_bit_sh.field
# the commented out numbers below correspond to the
# line numbers in Algorithm 3 of the SecureNN paper
# https://eprint.iacr.org/2018/442.pdf
# Common randomess
s = torch.randint(1, p, x_bit_sh.shape).send(alice, bob, **no_wrap)
u = torch.randint(1, p, x_bit_sh.shape).send(alice, bob, **no_wrap)
perm = torch.randperm(x_bit_sh.shape[-1]).send(alice, bob, **no_wrap)
j = sy.MultiPointerTensor(
children=[
torch.tensor([0]).send(alice, **no_wrap),
torch.tensor([1]).send(bob, **no_wrap),
]
)
# 1)
t = r + 1
t_bit = decompose(t, L)
r_bit = decompose(r, L)
# if beta == 0
# 5)
w = x_bit_sh + (j * r_bit) - (2 * r_bit * x_bit_sh)
# 6)
wc = w.flip(-1).cumsum(-1).flip(-1) - w
c_beta0 = -x_bit_sh + (j * r_bit) + j + wc
# elif beta == 1 AND r != 2^l- 1
# 8)
w = x_bit_sh + (j * t_bit) - (2 * t_bit * x_bit_sh)
# 9)
wc = w.flip(-1).cumsum(-1).flip(-1) - w
c_beta1 = x_bit_sh + (-j * t_bit) + j + wc
# else
# 11)
c_igt1 = (1 - j) * (u + 1) - (j * u)
c_ie1 = (1 - 2 * j) * u
l1_mask = torch.zeros(x_bit_sh.shape).long()
l1_mask[..., 0] = 1
l1_mask = l1_mask.send(alice, bob, **no_wrap)
# c_else = if i == 1 c_ie1 else c_igt1
c_else = (l1_mask * c_ie1) + ((1 - l1_mask) * c_igt1)
# Mask for the case r == 2^l −1
r_mask = (r == get_r_mask(L)).long()
r_mask = r_mask.unsqueeze(-1)
# Mask combination to execute the if / else statements of 4), 7), 10)
c = (
(1 - beta) * c_beta0
+ (beta * (1 - r_mask)) * c_beta1
+ (beta * r_mask) * c_else
)
# 14)
# Hide c values
mask = s * c
# Permute the mask
# I have to create idx because Ellipsis are still not supported
# (I would like to do permuted_mask = mask[..., perm])
idx = [slice(None)] * (len(x_bit_sh.shape) - 1) + [perm]
permuted_mask = mask[idx]
# Send it to another worker
# We do this because we can't allow the local worker to get and see permuted_mask
# because otherwise it can inverse the permutation and remove s to get c.
# So opening the permuted_mask should be made by a worker which doesn't have access to the randomness
remote_mask = permuted_mask.wrap().send(crypto_provider, **no_wrap)
# 15)
d_ptr = remote_mask.remote_get()
beta_prime = (d_ptr == 0).sum(-1)
# Get result back
res = beta_prime.get()
return res
|
https://github.com/OpenMined/PySyft/issues/2631
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-902fb9b77716> in <module>
12 x = th.tensor([1.1,2.0,3.2,4.0]).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
13 m=th.nn.Linear(4, 1).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
---> 14 o=m(x)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
90 @weak_script_method
91 def forward(self, input):
---> 92 return F.linear(input, self.weight, self.bias)
93
94 def extra_repr(self):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/hook/hook.py in overloaded_func(*args, **kwargs)
745 handle_func_command = TorchTensor.handle_func_command
746
--> 747 response = handle_func_command(command)
748
749 return response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
310 new_command = (cmd, None, new_args, new_kwargs)
311 # Send it to the appropriate class and get the response
--> 312 response = new_type.handle_func_command(new_command)
313 # Put back the wrappers where needed
314 response = syft.frameworks.torch.hook_args.hook_response(
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in handle_func_command(cls, command)
236 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
237 cmd = cls.rgetattr(cls, cmd)
--> 238 return cmd(*args, **kwargs)
239 except AttributeError:
240 pass
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in linear(*args)
201 Un-hook the function to have its detailed behaviour
202 """
--> 203 return torch.nn.functional.native_linear(*args)
204
205 module.linear = linear
/usr/local/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1406 ret = torch.addmm(bias, input, weight.t())
1407 else:
-> 1408 output = input.matmul(weight.t())
1409 if bias is not None:
1410 output += bias
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in method_with_grad(*args, **kwargs)
138 )
139
--> 140 result = getattr(new_self, name)(*new_args, **new_kwargs)
141
142 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/precision.py in matmul(self, *args, **kwargs)
414
415 # Send it to the appropriate class and get the response
--> 416 response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
417
418 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in matmul(self, other)
515 return self._public_mul(other, "matmul")
516
--> 517 return self._private_mul(other, "matmul")
518
519 def mm(self, *args, **kwargs):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in _private_mul(self, other, equation)
413 raise AttributeError("For multiplication a crypto_provider must be passed.")
414
--> 415 shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field)
416
417 return shares
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/crypto/spdz.py in spdz_mul(cmd, x_sh, y_sh, crypto_provider, field)
43 j = sy.MultiPointerTensor(children=[j1, j0])
44 else:
---> 45 j = sy.MultiPointerTensor(children=[j1] + j0.child.values())
46
47 delta_b = cmd(delta, b)
TypeError: can only concatenate list (not "dict_values") to list
|
TypeError
|
def msb(a_sh):
"""
Compute the most significant bit in a_sh, this is an implementation of the
SecureNN paper https://eprint.iacr.org/2018/442.pdf
Args:
a_sh (AdditiveSharingTensor): the tensor of study
Return:
the most significant bit
"""
workers = a_sh.locations
crypto_provider = a_sh.crypto_provider
L = a_sh.field + 1 # field of a is L - 1
dtype = get_dtype(L)
input_shape = a_sh.shape
a_sh = a_sh.view(-1)
# the commented out numbers below correspond to the
# line numbers in Table 5 of the SecureNN paper
# https://eprint.iacr.org/2018/442.pdf
# Common Randomness
beta = _random_common_bit(*workers)
u = _shares_of_zero(1, L, dtype, crypto_provider, *workers)
# 1)
x = torch.tensor(a_sh.shape).random_(get_max_val_field(L - 1))
x_bit = decompose(x, L)
x_sh = x.share(
*workers,
field=L - 1,
dtype="custom",
crypto_provider=crypto_provider,
**no_wrap,
)
x_bit_0 = x_bit[..., 0]
x_bit_sh_0 = x_bit_0.share(
*workers, field=L, crypto_provider=crypto_provider, **no_wrap
)
x_bit_sh = x_bit.share(
*workers, field=p, dtype="custom", crypto_provider=crypto_provider, **no_wrap
)
# 2)
y_sh = a_sh * 2
r_sh = y_sh + x_sh
# 3)
r = r_sh.reconstruct() # convert an additive sharing in multi pointer Tensor
r_0 = decompose(r, L)[..., 0]
# 4)
beta_prime = private_compare(x_bit_sh, r, beta, L)
# 5)
beta_prime_sh = beta_prime.share(
*workers, field=L, dtype=dtype, crypto_provider=crypto_provider, **no_wrap
)
# 7)
j = sy.MultiPointerTensor(
children=[
torch.tensor([int(i == 0)]).send(w, **no_wrap)
for i, w in enumerate(workers)
]
)
gamma = beta_prime_sh + (j * beta) - (2 * beta * beta_prime_sh)
# 8)
delta = x_bit_sh_0 + (j * r_0) - (2 * r_0 * x_bit_sh_0)
# 9)
theta = gamma * delta
# 10)
a = gamma + delta - (theta * 2) + u
if len(input_shape):
return a.view(*list(input_shape))
else:
return a
|
def msb(a_sh):
"""
Compute the most significant bit in a_sh, this is an implementation of the
SecureNN paper https://eprint.iacr.org/2018/442.pdf
Args:
a_sh (AdditiveSharingTensor): the tensor of study
Return:
the most significant bit
"""
alice, bob = a_sh.locations
crypto_provider = a_sh.crypto_provider
L = a_sh.field + 1 # field of a is L - 1
dtype = get_dtype(L)
input_shape = a_sh.shape
a_sh = a_sh.view(-1)
# the commented out numbers below correspond to the
# line numbers in Table 5 of the SecureNN paper
# https://eprint.iacr.org/2018/442.pdf
# Common Randomness
beta = _random_common_bit(alice, bob)
u = _shares_of_zero(1, L, dtype, crypto_provider, alice, bob)
# 1)
x = torch.tensor(a_sh.shape).random_(get_max_val_field(L - 1))
x_bit = decompose(x, L)
x_sh = x.share(
bob,
alice,
field=L - 1,
dtype="custom",
crypto_provider=crypto_provider,
**no_wrap,
)
x_bit_0 = x_bit[..., 0]
x_bit_sh_0 = x_bit_0.share(
bob, alice, field=L, crypto_provider=crypto_provider, **no_wrap
)
x_bit_sh = x_bit.share(
bob, alice, field=p, dtype="custom", crypto_provider=crypto_provider, **no_wrap
)
# 2)
y_sh = a_sh * 2
r_sh = y_sh + x_sh
# 3)
r = r_sh.reconstruct() # convert an additive sharing in multi pointer Tensor
r_0 = decompose(r, L)[..., 0]
# 4)
beta_prime = private_compare(x_bit_sh, r, beta, L)
# 5)
beta_prime_sh = beta_prime.share(
bob, alice, field=L, dtype=dtype, crypto_provider=crypto_provider, **no_wrap
)
# 7)
j = sy.MultiPointerTensor(
children=[
torch.tensor([0]).send(alice, **no_wrap),
torch.tensor([1]).send(bob, **no_wrap),
]
)
gamma = beta_prime_sh + (j * beta) - (2 * beta * beta_prime_sh)
# 8)
delta = x_bit_sh_0 + (j * r_0) - (2 * r_0 * x_bit_sh_0)
# 9)
theta = gamma * delta
# 10)
a = gamma + delta - (theta * 2) + u
if len(input_shape):
return a.view(*list(input_shape))
else:
return a
|
https://github.com/OpenMined/PySyft/issues/2631
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-902fb9b77716> in <module>
12 x = th.tensor([1.1,2.0,3.2,4.0]).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
13 m=th.nn.Linear(4, 1).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
---> 14 o=m(x)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
90 @weak_script_method
91 def forward(self, input):
---> 92 return F.linear(input, self.weight, self.bias)
93
94 def extra_repr(self):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/hook/hook.py in overloaded_func(*args, **kwargs)
745 handle_func_command = TorchTensor.handle_func_command
746
--> 747 response = handle_func_command(command)
748
749 return response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
310 new_command = (cmd, None, new_args, new_kwargs)
311 # Send it to the appropriate class and get the response
--> 312 response = new_type.handle_func_command(new_command)
313 # Put back the wrappers where needed
314 response = syft.frameworks.torch.hook_args.hook_response(
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in handle_func_command(cls, command)
236 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
237 cmd = cls.rgetattr(cls, cmd)
--> 238 return cmd(*args, **kwargs)
239 except AttributeError:
240 pass
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in linear(*args)
201 Un-hook the function to have its detailed behaviour
202 """
--> 203 return torch.nn.functional.native_linear(*args)
204
205 module.linear = linear
/usr/local/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1406 ret = torch.addmm(bias, input, weight.t())
1407 else:
-> 1408 output = input.matmul(weight.t())
1409 if bias is not None:
1410 output += bias
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in method_with_grad(*args, **kwargs)
138 )
139
--> 140 result = getattr(new_self, name)(*new_args, **new_kwargs)
141
142 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/precision.py in matmul(self, *args, **kwargs)
414
415 # Send it to the appropriate class and get the response
--> 416 response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
417
418 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in matmul(self, other)
515 return self._public_mul(other, "matmul")
516
--> 517 return self._private_mul(other, "matmul")
518
519 def mm(self, *args, **kwargs):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in _private_mul(self, other, equation)
413 raise AttributeError("For multiplication a crypto_provider must be passed.")
414
--> 415 shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field)
416
417 return shares
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/crypto/spdz.py in spdz_mul(cmd, x_sh, y_sh, crypto_provider, field)
43 j = sy.MultiPointerTensor(children=[j1, j0])
44 else:
---> 45 j = sy.MultiPointerTensor(children=[j1] + j0.child.values())
46
47 delta_b = cmd(delta, b)
TypeError: can only concatenate list (not "dict_values") to list
|
TypeError
|
def share_convert(a_sh):
"""
Convert shares of a in field L to shares of a in field L - 1
Args:
a_sh (AdditiveSharingTensor): the additive sharing tensor who owns
the shares in field L to convert
Return:
An additive sharing tensor with shares in field L-1
"""
assert isinstance(a_sh, sy.AdditiveSharingTensor)
assert a_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
workers = a_sh.locations
crypto_provider = a_sh.crypto_provider
L = a_sh.field
# torch_dtype = get_torch_dtype(L)
# dtype = get_dtype(L)
# Common randomness
# eta_pp = _random_common_bit(*workers)
# r = _random_common_value(L, *workers)
# Share remotely r
# r_sh = (
# (r * 1)
# .child[workers[0].id]
# .share(*workers, field=L, dtype=dtype, crypto_provider=crypto_provider)
# .get()
# .child
# )
# r_shares = r_sh.child
# WORKS WITH N PARTIES, NEEDS BUGFIXING IN WRAP
# alpha0 = wrap(r_sh, L)
# alphas = [alpha0.copy().move(w) for w in workers[1:]]
# alpha = sy.MultiPointerTensor(children=[alpha0, *alphas])
# WORKS WITH 2 PARTIES
# alpha0 = (
# (
# (r_shares[workers[0].id] + r_shares[workers[1].id].copy().move(workers[0]))
# > get_max_val_field(L)
# )
# ).type(torch_dtype)
# alpha1 = alpha0.copy().move(workers[1])
# alpha = sy.MultiPointerTensor(children=[alpha0, alpha1])
u_sh = _shares_of_zero(1, L - 1, "custom", crypto_provider, *workers)
# 2)
# a_tilde_sh = a_sh + r_sh
# a_shares = a_sh.child
# beta0 = (
# ((a_shares[workers[0].id] + r_shares[workers[0].id]) > get_max_val_field(L))
# + ((a_shares[workers[0].id] + r_shares[workers[0].id]) < get_min_val_field(L))
# ).type(torch_dtype)
# beta1 = (
# ((a_shares[workers[1].id] + r_shares[workers[1].id]) > get_max_val_field(L))
# + ((a_shares[workers[1].id] + r_shares[workers[1].id]) < get_min_val_field(L))
# ).type(torch_dtype)
# beta = sy.MultiPointerTensor(children=[beta0, beta1])
# 4)
# a_tilde_shares = a_tilde_sh.child
# delta = a_tilde_shares[workers[0].id].copy().get() + a_tilde_shares[workers[1].id].copy().get()
# Check for both positive and negative overflows
# delta = ((delta > get_max_val_field(L)) + (delta < get_min_val_field(L))).type(torch_dtype)
# x = a_tilde_sh.get()
# 5)
# x_bit = decompose(x, L)
# x_bit_sh = x_bit.share(
# *workers, field=p, dtype="custom", crypto_provider=crypto_provider, **no_wrap
# )
# delta_sh = delta.share(
# *workers, field=L - 1, dtype="custom", crypto_provider=crypto_provider, **no_wrap
# )
# 6)
# eta_p = private_compare(x_bit_sh, r - 1, eta_pp, L)
# 7)
# eta_p_sh = eta_p.share(
# *workers, field=L - 1, dtype="custom", crypto_provider=crypto_provider, **no_wrap
# )
# 9)
# j = sy.MultiPointerTensor(
# children=[
# torch.tensor([int(i != 0)]).send(w, **no_wrap)
# for i, w in enumerate(workers)
# ]
# )
# eta_sh = eta_p_sh + (1 - j) * eta_pp - 2 * eta_pp * eta_p_sh
# 10)
# theta_sh = beta - (1 - j) * (alpha + 1) + delta_sh + eta_sh
# 11)
# NOTE:
# It seems simple operation with shares in L-1 field is enough to conver a_sh from L to L-1
# Conversion of shares is handled internally in AST ops for custom dtype
y_sh = u_sh + a_sh
return y_sh
|
def share_convert(a_sh):
"""
Convert shares of a in field L to shares of a in field L - 1
Args:
a_sh (AdditiveSharingTensor): the additive sharing tensor who owns
the shares in field L to convert
Return:
An additive sharing tensor with shares in field L-1
"""
assert isinstance(a_sh, sy.AdditiveSharingTensor)
assert a_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
workers = a_sh.locations
crypto_provider = a_sh.crypto_provider
L = a_sh.field
# torch_dtype = get_torch_dtype(L)
# dtype = get_dtype(L)
# Common randomness
# eta_pp = _random_common_bit(*workers)
# r = _random_common_value(L, *workers)
# Share remotely r
# r_sh = (
# (r * 1)
# .child[workers[0].id]
# .share(*workers, field=L, dtype=dtype, crypto_provider=crypto_provider)
# .get()
# .child
# )
# r_shares = r_sh.child
# alpha0 = (
# (
# (r_shares[workers[0].id] + r_shares[workers[1].id].copy().move(workers[0]))
# > get_max_val_field(L)
# )
# ).type(torch_dtype)
# alpha1 = alpha0.copy().move(workers[1])
# alpha = sy.MultiPointerTensor(children=[alpha0, alpha1])
u_sh = _shares_of_zero(1, L - 1, "custom", crypto_provider, *workers)
# 2)
# a_tilde_sh = a_sh + r_sh
# a_shares = a_sh.child
# beta0 = (
# ((a_shares[workers[0].id] + r_shares[workers[0].id]) > get_max_val_field(L))
# + ((a_shares[workers[0].id] + r_shares[workers[0].id]) < get_min_val_field(L))
# ).type(torch_dtype)
# beta1 = (
# ((a_shares[workers[1].id] + r_shares[workers[1].id]) > get_max_val_field(L))
# + ((a_shares[workers[1].id] + r_shares[workers[1].id]) < get_min_val_field(L))
# ).type(torch_dtype)
# beta = sy.MultiPointerTensor(children=[beta0, beta1])
# 4)
# a_tilde_shares = a_tilde_sh.child
# delta = a_tilde_shares[workers[0].id].copy().get() + a_tilde_shares[workers[1].id].copy().get()
# Check for both positive and negative overflows
# delta = ((delta > get_max_val_field(L)) + (delta < get_min_val_field(L))).type(torch_dtype)
# x = a_tilde_sh.get()
# 5)
# x_bit = decompose(x, L)
# x_bit_sh = x_bit.share(
# *workers, field=p, dtype="custom", crypto_provider=crypto_provider, **no_wrap
# )
# delta_sh = delta.share(
# *workers, field=L - 1, dtype="custom", crypto_provider=crypto_provider, **no_wrap
# )
# 6)
# eta_p = private_compare(x_bit_sh, r - 1, eta_pp, L)
# 7)
# eta_p_sh = eta_p.share(
# *workers, field=L - 1, dtype="custom", crypto_provider=crypto_provider, **no_wrap
# )
# 9)
# j = sy.MultiPointerTensor(
# children=[
# torch.tensor([0]).send(workers[0], **no_wrap),
# torch.tensor([1]).send(workers[1], **no_wrap),
# ]
# )
# eta_sh = eta_p_sh + (1 - j) * eta_pp - 2 * eta_pp * eta_p_sh
# 10)
# theta_sh = beta - (1 - j) * (alpha + 1) + delta_sh + eta_sh
# 11)
# NOTE:
# It seems simple operation with shares in L-1 field is enough to conver a_sh from L to L-1
# Conversion of shares is handled internally in AST ops for custom dtype
y_sh = u_sh + a_sh
return y_sh
|
https://github.com/OpenMined/PySyft/issues/2631
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-902fb9b77716> in <module>
12 x = th.tensor([1.1,2.0,3.2,4.0]).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
13 m=th.nn.Linear(4, 1).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
---> 14 o=m(x)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
90 @weak_script_method
91 def forward(self, input):
---> 92 return F.linear(input, self.weight, self.bias)
93
94 def extra_repr(self):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/hook/hook.py in overloaded_func(*args, **kwargs)
745 handle_func_command = TorchTensor.handle_func_command
746
--> 747 response = handle_func_command(command)
748
749 return response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
310 new_command = (cmd, None, new_args, new_kwargs)
311 # Send it to the appropriate class and get the response
--> 312 response = new_type.handle_func_command(new_command)
313 # Put back the wrappers where needed
314 response = syft.frameworks.torch.hook_args.hook_response(
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in handle_func_command(cls, command)
236 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
237 cmd = cls.rgetattr(cls, cmd)
--> 238 return cmd(*args, **kwargs)
239 except AttributeError:
240 pass
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in linear(*args)
201 Un-hook the function to have its detailed behaviour
202 """
--> 203 return torch.nn.functional.native_linear(*args)
204
205 module.linear = linear
/usr/local/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1406 ret = torch.addmm(bias, input, weight.t())
1407 else:
-> 1408 output = input.matmul(weight.t())
1409 if bias is not None:
1410 output += bias
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in method_with_grad(*args, **kwargs)
138 )
139
--> 140 result = getattr(new_self, name)(*new_args, **new_kwargs)
141
142 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/precision.py in matmul(self, *args, **kwargs)
414
415 # Send it to the appropriate class and get the response
--> 416 response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
417
418 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in matmul(self, other)
515 return self._public_mul(other, "matmul")
516
--> 517 return self._private_mul(other, "matmul")
518
519 def mm(self, *args, **kwargs):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in _private_mul(self, other, equation)
413 raise AttributeError("For multiplication a crypto_provider must be passed.")
414
--> 415 shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field)
416
417 return shares
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/crypto/spdz.py in spdz_mul(cmd, x_sh, y_sh, crypto_provider, field)
43 j = sy.MultiPointerTensor(children=[j1, j0])
44 else:
---> 45 j = sy.MultiPointerTensor(children=[j1] + j0.child.values())
46
47 delta_b = cmd(delta, b)
TypeError: can only concatenate list (not "dict_values") to list
|
TypeError
|
def relu_deriv(a_sh):
"""
Compute the derivative of Relu
Args:
a_sh (AdditiveSharingTensor): the private tensor on which the op applies
Returns:
0 if Dec(a_sh) < 0
1 if Dec(a_sh) > 0
encrypted in an AdditiveSharingTensor
"""
assert a_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
workers = a_sh.locations
crypto_provider = a_sh.crypto_provider
L = a_sh.field
dtype = get_dtype(L)
# Common randomness
u = _shares_of_zero(1, L, dtype, crypto_provider, *workers)
# 1)
y_sh = a_sh * 2
# 2) Not applicable with algebraic shares
y_sh = share_convert(y_sh)
# 3)
alpha_sh = msb(y_sh)
# 4)
j = sy.MultiPointerTensor(
children=[
torch.tensor([int(i == 0)]).send(w, **no_wrap)
for i, w in enumerate(workers)
]
)
gamma_sh = j - alpha_sh + u
return gamma_sh
|
def relu_deriv(a_sh):
"""
Compute the derivative of Relu
Args:
a_sh (AdditiveSharingTensor): the private tensor on which the op applies
Returns:
0 if Dec(a_sh) < 0
1 if Dec(a_sh) > 0
encrypted in an AdditiveSharingTensor
"""
assert a_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
alice, bob = a_sh.locations
crypto_provider = a_sh.crypto_provider
L = a_sh.field
dtype = get_dtype(L)
# Common randomness
u = _shares_of_zero(1, L, dtype, crypto_provider, alice, bob)
# 1)
y_sh = a_sh * 2
# 2) Not applicable with algebraic shares
y_sh = share_convert(y_sh)
# 3)
alpha_sh = msb(y_sh)
# 4)
j = sy.MultiPointerTensor(
children=[
torch.tensor([0]).send(alice, **no_wrap),
torch.tensor([1]).send(bob, **no_wrap),
]
)
gamma_sh = j - alpha_sh + u
return gamma_sh
|
https://github.com/OpenMined/PySyft/issues/2631
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-902fb9b77716> in <module>
12 x = th.tensor([1.1,2.0,3.2,4.0]).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
13 m=th.nn.Linear(4, 1).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
---> 14 o=m(x)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
90 @weak_script_method
91 def forward(self, input):
---> 92 return F.linear(input, self.weight, self.bias)
93
94 def extra_repr(self):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/hook/hook.py in overloaded_func(*args, **kwargs)
745 handle_func_command = TorchTensor.handle_func_command
746
--> 747 response = handle_func_command(command)
748
749 return response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
310 new_command = (cmd, None, new_args, new_kwargs)
311 # Send it to the appropriate class and get the response
--> 312 response = new_type.handle_func_command(new_command)
313 # Put back the wrappers where needed
314 response = syft.frameworks.torch.hook_args.hook_response(
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in handle_func_command(cls, command)
236 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
237 cmd = cls.rgetattr(cls, cmd)
--> 238 return cmd(*args, **kwargs)
239 except AttributeError:
240 pass
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in linear(*args)
201 Un-hook the function to have its detailed behaviour
202 """
--> 203 return torch.nn.functional.native_linear(*args)
204
205 module.linear = linear
/usr/local/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1406 ret = torch.addmm(bias, input, weight.t())
1407 else:
-> 1408 output = input.matmul(weight.t())
1409 if bias is not None:
1410 output += bias
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in method_with_grad(*args, **kwargs)
138 )
139
--> 140 result = getattr(new_self, name)(*new_args, **new_kwargs)
141
142 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/precision.py in matmul(self, *args, **kwargs)
414
415 # Send it to the appropriate class and get the response
--> 416 response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
417
418 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in matmul(self, other)
515 return self._public_mul(other, "matmul")
516
--> 517 return self._private_mul(other, "matmul")
518
519 def mm(self, *args, **kwargs):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in _private_mul(self, other, equation)
413 raise AttributeError("For multiplication a crypto_provider must be passed.")
414
--> 415 shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field)
416
417 return shares
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/crypto/spdz.py in spdz_mul(cmd, x_sh, y_sh, crypto_provider, field)
43 j = sy.MultiPointerTensor(children=[j1, j0])
44 else:
---> 45 j = sy.MultiPointerTensor(children=[j1] + j0.child.values())
46
47 delta_b = cmd(delta, b)
TypeError: can only concatenate list (not "dict_values") to list
|
TypeError
|
def relu(a_sh):
"""
Compute Relu
Args:
a_sh (AdditiveSharingTensor): the private tensor on which the op applies
Returns:
Dec(a_sh) > 0
encrypted in an AdditiveSharingTensor
"""
assert a_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
workers = a_sh.locations
crypto_provider = a_sh.crypto_provider
L = a_sh.field
dtype = get_dtype(L)
# Common Randomness
u = _shares_of_zero(1, L, dtype, crypto_provider, *workers)
return a_sh * relu_deriv(a_sh) + u
|
def relu(a_sh):
"""
Compute Relu
Args:
a_sh (AdditiveSharingTensor): the private tensor on which the op applies
Returns:
Dec(a_sh) > 0
encrypted in an AdditiveSharingTensor
"""
assert a_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
alice, bob = a_sh.locations
crypto_provider = a_sh.crypto_provider
L = a_sh.field
dtype = get_dtype(L)
# Common Randomness
u = _shares_of_zero(1, L, dtype, crypto_provider, alice, bob)
return a_sh * relu_deriv(a_sh) + u
|
https://github.com/OpenMined/PySyft/issues/2631
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-902fb9b77716> in <module>
12 x = th.tensor([1.1,2.0,3.2,4.0]).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
13 m=th.nn.Linear(4, 1).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
---> 14 o=m(x)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
90 @weak_script_method
91 def forward(self, input):
---> 92 return F.linear(input, self.weight, self.bias)
93
94 def extra_repr(self):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/hook/hook.py in overloaded_func(*args, **kwargs)
745 handle_func_command = TorchTensor.handle_func_command
746
--> 747 response = handle_func_command(command)
748
749 return response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
310 new_command = (cmd, None, new_args, new_kwargs)
311 # Send it to the appropriate class and get the response
--> 312 response = new_type.handle_func_command(new_command)
313 # Put back the wrappers where needed
314 response = syft.frameworks.torch.hook_args.hook_response(
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in handle_func_command(cls, command)
236 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
237 cmd = cls.rgetattr(cls, cmd)
--> 238 return cmd(*args, **kwargs)
239 except AttributeError:
240 pass
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in linear(*args)
201 Un-hook the function to have its detailed behaviour
202 """
--> 203 return torch.nn.functional.native_linear(*args)
204
205 module.linear = linear
/usr/local/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1406 ret = torch.addmm(bias, input, weight.t())
1407 else:
-> 1408 output = input.matmul(weight.t())
1409 if bias is not None:
1410 output += bias
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in method_with_grad(*args, **kwargs)
138 )
139
--> 140 result = getattr(new_self, name)(*new_args, **new_kwargs)
141
142 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/precision.py in matmul(self, *args, **kwargs)
414
415 # Send it to the appropriate class and get the response
--> 416 response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
417
418 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in matmul(self, other)
515 return self._public_mul(other, "matmul")
516
--> 517 return self._private_mul(other, "matmul")
518
519 def mm(self, *args, **kwargs):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in _private_mul(self, other, equation)
413 raise AttributeError("For multiplication a crypto_provider must be passed.")
414
--> 415 shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field)
416
417 return shares
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/crypto/spdz.py in spdz_mul(cmd, x_sh, y_sh, crypto_provider, field)
43 j = sy.MultiPointerTensor(children=[j1, j0])
44 else:
---> 45 j = sy.MultiPointerTensor(children=[j1] + j0.child.values())
46
47 delta_b = cmd(delta, b)
TypeError: can only concatenate list (not "dict_values") to list
|
TypeError
|
def division(x_sh, y_sh, bit_len_max=None):
"""Performs division of encrypted numbers
Args:
x_sh, y_sh (AdditiveSharingTensor): the private tensors on which the op applies
Returns:
element-wise integer division of x_sh by y_sh
"""
assert x_sh.dtype == y_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
workers = x_sh.locations
crypto_provider = x_sh.crypto_provider
L = x_sh.field
dtype = get_dtype(L)
if bit_len_max is None:
bit_len_max = Q_BITS(L) // 2
x_shape = x_sh.shape
y_shape = y_sh.shape
assert x_shape == y_shape or list(y_shape) == [1]
x_sh = x_sh.view(-1)
y_sh = y_sh.view(-1)
# Common Randomness
w_sh = _shares_of_zero(bit_len_max, L, dtype, crypto_provider, *workers)
s_sh = _shares_of_zero(1, L, dtype, crypto_provider, *workers)
u_sh = _shares_of_zero(1, L, dtype, crypto_provider, *workers)
ks = []
for i in range(bit_len_max - 1, -1, -1):
# 3)
z_sh = x_sh - u_sh - 2**i * y_sh + w_sh[i]
# 4)
beta_sh = relu_deriv(z_sh)
# 5)
v_sh = beta_sh * (2**i * y_sh)
# 6)
k_sh = beta_sh * 2**i
ks.append(k_sh)
# 7)
u_sh = u_sh + v_sh
# 9)
q = sum(ks) + s_sh
if len(x_shape):
return q.view(*x_shape)
else:
return q
|
def division(x_sh, y_sh, bit_len_max=None):
"""Performs division of encrypted numbers
Args:
x_sh, y_sh (AdditiveSharingTensor): the private tensors on which the op applies
Returns:
element-wise integer division of x_sh by y_sh
"""
assert x_sh.dtype == y_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
alice, bob = x_sh.locations
crypto_provider = x_sh.crypto_provider
L = x_sh.field
dtype = get_dtype(L)
if bit_len_max is None:
bit_len_max = Q_BITS(L) // 2
x_shape = x_sh.shape
y_shape = y_sh.shape
assert x_shape == y_shape or list(y_shape) == [1]
x_sh = x_sh.view(-1)
y_sh = y_sh.view(-1)
# Common Randomness
w_sh = _shares_of_zero(bit_len_max, L, dtype, crypto_provider, alice, bob)
s_sh = _shares_of_zero(1, L, dtype, crypto_provider, alice, bob)
u_sh = _shares_of_zero(1, L, dtype, crypto_provider, alice, bob)
ks = []
for i in range(bit_len_max - 1, -1, -1):
# 3)
z_sh = x_sh - u_sh - 2**i * y_sh + w_sh[i]
# 4)
beta_sh = relu_deriv(z_sh)
# 5)
v_sh = beta_sh * (2**i * y_sh)
# 6)
k_sh = beta_sh * 2**i
ks.append(k_sh)
# 7)
u_sh = u_sh + v_sh
# 9)
q = sum(ks) + s_sh
if len(x_shape):
return q.view(*x_shape)
else:
return q
|
https://github.com/OpenMined/PySyft/issues/2631
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-902fb9b77716> in <module>
12 x = th.tensor([1.1,2.0,3.2,4.0]).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
13 m=th.nn.Linear(4, 1).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
---> 14 o=m(x)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
90 @weak_script_method
91 def forward(self, input):
---> 92 return F.linear(input, self.weight, self.bias)
93
94 def extra_repr(self):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/hook/hook.py in overloaded_func(*args, **kwargs)
745 handle_func_command = TorchTensor.handle_func_command
746
--> 747 response = handle_func_command(command)
748
749 return response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
310 new_command = (cmd, None, new_args, new_kwargs)
311 # Send it to the appropriate class and get the response
--> 312 response = new_type.handle_func_command(new_command)
313 # Put back the wrappers where needed
314 response = syft.frameworks.torch.hook_args.hook_response(
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in handle_func_command(cls, command)
236 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
237 cmd = cls.rgetattr(cls, cmd)
--> 238 return cmd(*args, **kwargs)
239 except AttributeError:
240 pass
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in linear(*args)
201 Un-hook the function to have its detailed behaviour
202 """
--> 203 return torch.nn.functional.native_linear(*args)
204
205 module.linear = linear
/usr/local/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1406 ret = torch.addmm(bias, input, weight.t())
1407 else:
-> 1408 output = input.matmul(weight.t())
1409 if bias is not None:
1410 output += bias
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in method_with_grad(*args, **kwargs)
138 )
139
--> 140 result = getattr(new_self, name)(*new_args, **new_kwargs)
141
142 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/precision.py in matmul(self, *args, **kwargs)
414
415 # Send it to the appropriate class and get the response
--> 416 response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
417
418 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in matmul(self, other)
515 return self._public_mul(other, "matmul")
516
--> 517 return self._private_mul(other, "matmul")
518
519 def mm(self, *args, **kwargs):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in _private_mul(self, other, equation)
413 raise AttributeError("For multiplication a crypto_provider must be passed.")
414
--> 415 shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field)
416
417 return shares
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/crypto/spdz.py in spdz_mul(cmd, x_sh, y_sh, crypto_provider, field)
43 j = sy.MultiPointerTensor(children=[j1, j0])
44 else:
---> 45 j = sy.MultiPointerTensor(children=[j1] + j0.child.values())
46
47 delta_b = cmd(delta, b)
TypeError: can only concatenate list (not "dict_values") to list
|
TypeError
|
def maxpool(x_sh):
"""Compute MaxPool: returns fresh shares of the max value in the input tensor
and the index of this value in the flattened tensor
Args:
x_sh (AdditiveSharingTensor): the private tensor on which the op applies
Returns:
maximum value as an AdditiveSharingTensor
index of this value in the flattened tensor as an AdditiveSharingTensor
"""
assert x_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
if x_sh.is_wrapper:
x_sh = x_sh.child
workers = x_sh.locations
crypto_provider = x_sh.crypto_provider
L = x_sh.field
dtype = get_dtype(L)
x_sh = x_sh.contiguous().view(-1)
# Common Randomness
u_sh = _shares_of_zero(1, L, dtype, crypto_provider, *workers)
v_sh = _shares_of_zero(1, L, dtype, crypto_provider, *workers)
# 1)
max_sh = x_sh[0]
ind_sh = torch.tensor([0]).share(
*workers, field=L, dtype=dtype, crypto_provider=crypto_provider, **no_wrap
) # I did not manage to create an AST with 0 and 0 as shares
for i in range(1, len(x_sh)):
# 3)
w_sh = x_sh[i] - max_sh
# 4)
beta_sh = relu_deriv(w_sh)
# 5)
max_sh = select_share(beta_sh, max_sh, x_sh[i])
# 6)
k = torch.tensor([i]).share(
*workers, field=L, dtype=dtype, crypto_provider=crypto_provider, **no_wrap
) # I did not manage to create an AST with 0 and i as shares
# 7)
ind_sh = select_share(beta_sh, ind_sh, k)
return max_sh + u_sh, ind_sh + v_sh
|
def maxpool(x_sh):
"""Compute MaxPool: returns fresh shares of the max value in the input tensor
and the index of this value in the flattened tensor
Args:
x_sh (AdditiveSharingTensor): the private tensor on which the op applies
Returns:
maximum value as an AdditiveSharingTensor
index of this value in the flattened tensor as an AdditiveSharingTensor
"""
assert x_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
if x_sh.is_wrapper:
x_sh = x_sh.child
alice, bob = x_sh.locations
crypto_provider = x_sh.crypto_provider
L = x_sh.field
dtype = get_dtype(L)
x_sh = x_sh.contiguous().view(-1)
# Common Randomness
u_sh = _shares_of_zero(1, L, dtype, crypto_provider, alice, bob)
v_sh = _shares_of_zero(1, L, dtype, crypto_provider, alice, bob)
# 1)
max_sh = x_sh[0]
ind_sh = torch.tensor([0]).share(
alice, bob, field=L, dtype=dtype, crypto_provider=crypto_provider, **no_wrap
) # I did not manage to create an AST with 0 and 0 as shares
for i in range(1, len(x_sh)):
# 3)
w_sh = x_sh[i] - max_sh
# 4)
beta_sh = relu_deriv(w_sh)
# 5)
max_sh = select_share(beta_sh, max_sh, x_sh[i])
# 6)
k = torch.tensor([i]).share(
alice, bob, field=L, dtype=dtype, crypto_provider=crypto_provider, **no_wrap
) # I did not manage to create an AST with 0 and i as shares
# 7)
ind_sh = select_share(beta_sh, ind_sh, k)
return max_sh + u_sh, ind_sh + v_sh
|
https://github.com/OpenMined/PySyft/issues/2631
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-902fb9b77716> in <module>
12 x = th.tensor([1.1,2.0,3.2,4.0]).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
13 m=th.nn.Linear(4, 1).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
---> 14 o=m(x)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
90 @weak_script_method
91 def forward(self, input):
---> 92 return F.linear(input, self.weight, self.bias)
93
94 def extra_repr(self):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/hook/hook.py in overloaded_func(*args, **kwargs)
745 handle_func_command = TorchTensor.handle_func_command
746
--> 747 response = handle_func_command(command)
748
749 return response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
310 new_command = (cmd, None, new_args, new_kwargs)
311 # Send it to the appropriate class and get the response
--> 312 response = new_type.handle_func_command(new_command)
313 # Put back the wrappers where needed
314 response = syft.frameworks.torch.hook_args.hook_response(
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in handle_func_command(cls, command)
236 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
237 cmd = cls.rgetattr(cls, cmd)
--> 238 return cmd(*args, **kwargs)
239 except AttributeError:
240 pass
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in linear(*args)
201 Un-hook the function to have its detailed behaviour
202 """
--> 203 return torch.nn.functional.native_linear(*args)
204
205 module.linear = linear
/usr/local/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1406 ret = torch.addmm(bias, input, weight.t())
1407 else:
-> 1408 output = input.matmul(weight.t())
1409 if bias is not None:
1410 output += bias
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in method_with_grad(*args, **kwargs)
138 )
139
--> 140 result = getattr(new_self, name)(*new_args, **new_kwargs)
141
142 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/precision.py in matmul(self, *args, **kwargs)
414
415 # Send it to the appropriate class and get the response
--> 416 response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
417
418 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in matmul(self, other)
515 return self._public_mul(other, "matmul")
516
--> 517 return self._private_mul(other, "matmul")
518
519 def mm(self, *args, **kwargs):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in _private_mul(self, other, equation)
413 raise AttributeError("For multiplication a crypto_provider must be passed.")
414
--> 415 shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field)
416
417 return shares
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/crypto/spdz.py in spdz_mul(cmd, x_sh, y_sh, crypto_provider, field)
43 j = sy.MultiPointerTensor(children=[j1, j0])
44 else:
---> 45 j = sy.MultiPointerTensor(children=[j1] + j0.child.values())
46
47 delta_b = cmd(delta, b)
TypeError: can only concatenate list (not "dict_values") to list
|
TypeError
|
def maxpool_deriv(x_sh):
"""Compute derivative of MaxPool
Args:
x_sh (AdditiveSharingTensor): the private tensor on which the op applies
Returns:
an AdditiveSharingTensor of the same shape as x_sh full of zeros except for
a 1 at the position of the max value
"""
assert x_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
workers = x_sh.locations
crypto_provider = x_sh.crypto_provider
L = x_sh.field
dtype = get_dtype(L)
torch_dtype = get_torch_dtype(L)
n1, n2 = x_sh.shape
n = n1 * n2
assert L % n == 0
x_sh = x_sh.view(-1)
# Common Randomness
U_sh = _shares_of_zero(n, L, dtype, crypto_provider, *workers)
r = _random_common_value(L, *workers)
# 1)
_, ind_max_sh = maxpool(x_sh)
# 2)
j = sy.MultiPointerTensor(
children=[
torch.tensor([int(i == 0)]).send(w, **no_wrap)
for i, w in enumerate(workers)
]
)
k_sh = ind_max_sh + j * r
# 3)
t = k_sh.get()
k = t % n
E_k = torch.zeros(n, dtype=torch_dtype)
E_k[k] = 1
E_sh = E_k.share(*workers, field=L, dtype=dtype, **no_wrap)
# 4)
g = r % n
D_sh = torch.roll(E_sh, -g)
maxpool_d_sh = D_sh + U_sh
return maxpool_d_sh.view(n1, n2)
|
def maxpool_deriv(x_sh):
"""Compute derivative of MaxPool
Args:
x_sh (AdditiveSharingTensor): the private tensor on which the op applies
Returns:
an AdditiveSharingTensor of the same shape as x_sh full of zeros except for
a 1 at the position of the max value
"""
assert x_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
alice, bob = x_sh.locations
crypto_provider = x_sh.crypto_provider
L = x_sh.field
dtype = get_dtype(L)
torch_dtype = get_torch_dtype(L)
n1, n2 = x_sh.shape
n = n1 * n2
x_sh = x_sh.view(-1)
# Common Randomness
U_sh = _shares_of_zero(n, L, dtype, crypto_provider, alice, bob)
r = _random_common_value(L, alice, bob)
# 1)
_, ind_max_sh = maxpool(x_sh)
# 2)
j = sy.MultiPointerTensor(
children=[
torch.tensor([1]).send(alice, **no_wrap),
torch.tensor([0]).send(bob, **no_wrap),
]
)
k_sh = ind_max_sh + j * r
# 3)
t = k_sh.get()
k = t % n
E_k = torch.zeros(n, dtype=torch_dtype)
E_k[k] = 1
E_sh = E_k.share(alice, bob, field=L, dtype=dtype, **no_wrap)
# 4)
g = r % n
D_sh = torch.roll(E_sh, -g)
maxpool_d_sh = D_sh + U_sh
return maxpool_d_sh.view(n1, n2)
|
https://github.com/OpenMined/PySyft/issues/2631
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-902fb9b77716> in <module>
12 x = th.tensor([1.1,2.0,3.2,4.0]).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
13 m=th.nn.Linear(4, 1).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
---> 14 o=m(x)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
90 @weak_script_method
91 def forward(self, input):
---> 92 return F.linear(input, self.weight, self.bias)
93
94 def extra_repr(self):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/hook/hook.py in overloaded_func(*args, **kwargs)
745 handle_func_command = TorchTensor.handle_func_command
746
--> 747 response = handle_func_command(command)
748
749 return response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
310 new_command = (cmd, None, new_args, new_kwargs)
311 # Send it to the appropriate class and get the response
--> 312 response = new_type.handle_func_command(new_command)
313 # Put back the wrappers where needed
314 response = syft.frameworks.torch.hook_args.hook_response(
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in handle_func_command(cls, command)
236 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
237 cmd = cls.rgetattr(cls, cmd)
--> 238 return cmd(*args, **kwargs)
239 except AttributeError:
240 pass
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in linear(*args)
201 Un-hook the function to have its detailed behaviour
202 """
--> 203 return torch.nn.functional.native_linear(*args)
204
205 module.linear = linear
/usr/local/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1406 ret = torch.addmm(bias, input, weight.t())
1407 else:
-> 1408 output = input.matmul(weight.t())
1409 if bias is not None:
1410 output += bias
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in method_with_grad(*args, **kwargs)
138 )
139
--> 140 result = getattr(new_self, name)(*new_args, **new_kwargs)
141
142 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/precision.py in matmul(self, *args, **kwargs)
414
415 # Send it to the appropriate class and get the response
--> 416 response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
417
418 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in matmul(self, other)
515 return self._public_mul(other, "matmul")
516
--> 517 return self._private_mul(other, "matmul")
518
519 def mm(self, *args, **kwargs):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in _private_mul(self, other, equation)
413 raise AttributeError("For multiplication a crypto_provider must be passed.")
414
--> 415 shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field)
416
417 return shares
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/crypto/spdz.py in spdz_mul(cmd, x_sh, y_sh, crypto_provider, field)
43 j = sy.MultiPointerTensor(children=[j1, j0])
44 else:
---> 45 j = sy.MultiPointerTensor(children=[j1] + j0.child.values())
46
47 delta_b = cmd(delta, b)
TypeError: can only concatenate list (not "dict_values") to list
|
TypeError
|
def maxpool2d(a_sh, kernel_size: int = 1, stride: int = 1, padding: int = 0):
"""Applies a 2D max pooling over an input signal composed of several input planes.
This interface is similar to torch.nn.MaxPool2D.
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window
padding: implicit zero padding to be added on both sides
"""
assert a_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
assert len(a_sh.shape) == 4
# Change to tuple if not one
kernel = torch.nn.modules.utils._pair(kernel_size)
stride = torch.nn.modules.utils._pair(stride)
padding = torch.nn.modules.utils._pair(padding)
# TODO: support dilation.
dilation = torch.nn.modules.utils._pair(1)
# Extract a few useful values
batch_size, nb_channels, nb_rows_in, nb_cols_in = a_sh.shape
# Calculate output shapes
nb_rows_out = int(
(nb_rows_in + 2 * padding[0] - dilation[0] * (kernel[0] - 1) - 1) / stride[0]
+ 1
)
nb_cols_out = int(
(nb_cols_in + 2 * padding[1] - dilation[1] * (kernel[1] - 1) - 1) / stride[1]
+ 1
)
# Apply padding to the input
if padding != (0, 0):
a_sh = torch.nn.functional.pad(
a_sh, (padding[1], padding[1], padding[0], padding[0]), "constant"
)
# Update shape after padding
nb_rows_in += 2 * padding[0]
nb_cols_in += 2 * padding[1]
res = []
# TODO: make this operation more efficient in order to be used with cnn modules.
for batch in range(batch_size):
for channel in range(nb_channels):
for r_in in range(0, nb_rows_in - (kernel[0] - 1), stride[0]):
for c_in in range(0, nb_cols_in - (kernel[1] - 1), stride[1]):
m, _ = maxpool(
a_sh[
batch,
channel,
r_in : r_in + kernel[0],
c_in : c_in + kernel[1],
].child
)
res.append(m.wrap())
res = torch.stack(res).reshape(batch_size, nb_channels, nb_rows_out, nb_cols_out)
return res
|
def maxpool2d(a_sh, kernel_size: int = 1, stride: int = 1, padding: int = 0):
"""Applies a 2D max pooling over an input signal composed of several input planes.
This interface is similar to torch.nn.MaxPool2D.
Args:
kernel_size: the size of the window to take a max over
stride: the stride of the window
padding: implicit zero padding to be added on both sides
"""
assert a_sh.dtype != "custom", (
"`custom` dtype shares are unsupported in SecureNN, use dtype = `long` or `int` instead"
)
assert len(a_sh.shape) == 4
# Change to tuple if not one
kernel = torch.nn.modules.utils._pair(kernel_size)
stride = torch.nn.modules.utils._pair(stride)
padding = torch.nn.modules.utils._pair(padding)
# TODO: support dilation.
dilation = torch.nn.modules.utils._pair(1)
# Extract a few useful values
batch_size, nb_channels, nb_rows_in, nb_cols_in = a_sh.shape
# Calculate output shapes
nb_rows_out = int(
(nb_rows_in + 2 * padding[0] - dilation[0] * (kernel[0] - 1) - 1) / stride[0]
+ 1
)
nb_cols_out = int(
(nb_cols_in + 2 * padding[1] - dilation[1] * (kernel[1] - 1) - 1) / stride[1]
+ 1
)
# Apply padding to the input
if padding != (0, 0):
a_sh = torch.nn.functional.pad(
a_sh, (padding[1], padding[1], padding[0], padding[0]), "constant"
)
# Update shape after padding
nb_rows_in += 2 * padding[0]
nb_cols_in += 2 * padding[1]
res = []
# TODO: make this operation more efficient in order to be used with cnn modules.
for batch in range(batch_size):
for channel in range(nb_channels):
for r_in in range(0, nb_rows_in - (kernel[0] - 1), stride[0]):
for c_in in range(0, nb_cols_in - (kernel[1] - 1), stride[1]):
m, _ = maxpool(
a_sh[
batch,
channel,
r_in : r_in + kernel[0],
c_in : c_in + kernel[1],
].child
)
res.append(m.wrap())
res = torch.stack(res).reshape(batch_size, nb_channels, nb_rows_out, nb_cols_out)
return res
|
https://github.com/OpenMined/PySyft/issues/2631
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-7-902fb9b77716> in <module>
12 x = th.tensor([1.1,2.0,3.2,4.0]).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
13 m=th.nn.Linear(4, 1).fix_precision().share(*workers, crypto_provider=sw, requires_grad=True)
---> 14 o=m(x)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
491 result = self._slow_forward(*input, **kwargs)
492 else:
--> 493 result = self.forward(*input, **kwargs)
494 for hook in self._forward_hooks.values():
495 hook_result = hook(self, input, result)
/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
90 @weak_script_method
91 def forward(self, input):
---> 92 return F.linear(input, self.weight, self.bias)
93
94 def extra_repr(self):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/hook/hook.py in overloaded_func(*args, **kwargs)
745 handle_func_command = TorchTensor.handle_func_command
746
--> 747 response = handle_func_command(command)
748
749 return response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
310 new_command = (cmd, None, new_args, new_kwargs)
311 # Send it to the appropriate class and get the response
--> 312 response = new_type.handle_func_command(new_command)
313 # Put back the wrappers where needed
314 response = syft.frameworks.torch.hook_args.hook_response(
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in handle_func_command(cls, command)
236 # Try to get recursively the attributes in cmd = "<attr1>.<attr2>.<attr3>..."
237 cmd = cls.rgetattr(cls, cmd)
--> 238 return cmd(*args, **kwargs)
239 except AttributeError:
240 pass
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in linear(*args)
201 Un-hook the function to have its detailed behaviour
202 """
--> 203 return torch.nn.functional.native_linear(*args)
204
205 module.linear = linear
/usr/local/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
1406 ret = torch.addmm(bias, input, weight.t())
1407 else:
-> 1408 output = input.matmul(weight.t())
1409 if bias is not None:
1410 output += bias
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/autograd.py in method_with_grad(*args, **kwargs)
138 )
139
--> 140 result = getattr(new_self, name)(*new_args, **new_kwargs)
141
142 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/precision.py in matmul(self, *args, **kwargs)
414
415 # Send it to the appropriate class and get the response
--> 416 response = getattr(new_self, "matmul")(*new_args, **new_kwargs)
417
418 # Put back SyftTensor on the tensors found in the response
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in matmul(self, other)
515 return self._public_mul(other, "matmul")
516
--> 517 return self._private_mul(other, "matmul")
518
519 def mm(self, *args, **kwargs):
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/tensors/interpreters/additive_shared.py in _private_mul(self, other, equation)
413 raise AttributeError("For multiplication a crypto_provider must be passed.")
414
--> 415 shares = spdz.spdz_mul(cmd, self, other, self.crypto_provider, self.field)
416
417 return shares
/usr/local/lib/python3.7/site-packages/syft/frameworks/torch/crypto/spdz.py in spdz_mul(cmd, x_sh, y_sh, crypto_provider, field)
43 j = sy.MultiPointerTensor(children=[j1, j0])
44 else:
---> 45 j = sy.MultiPointerTensor(children=[j1] + j0.child.values())
46
47 delta_b = cmd(delta, b)
TypeError: can only concatenate list (not "dict_values") to list
|
TypeError
|
def create_wrapper(cls, wrapper_type):
# Note this overrides FrameworkHook.create_wrapper, so it must conform to
# that classmethod's signature
if wrapper_type is None or wrapper_type == torch.Tensor:
return torch.Tensor()
elif isinstance(wrapper_type, torch.dtype):
return torch.tensor([], dtype=wrapper_type)
else:
raise ValueError(
"Wrapper type should be None, torch.Tensor, or a torch.dtype like torch.long"
)
|
def create_wrapper(cls, wrapper_type):
# Note this overrides FrameworkHook.create_wrapper, so it must conform to
# that classmethod's signature
assert wrapper_type is None or wrapper_type == torch.Tensor, (
"TorchHook only uses torch.Tensor wrappers"
)
return torch.Tensor()
|
https://github.com/OpenMined/PySyft/issues/2498
|
WARNING: Logging before flag parsing goes to stderr.
W0813 08:46:34.190028 140520788572032 secure_random.py:26] Falling back to insecure randomness since the required custom op could not be found for the installed version of TensorFlow. Fix this by compiling custom ops. Missing file was '/usr/local/lib/python3.6/dist-packages/tf_encrypted/operations/secure_random/secure_random_module_tf_1.14.0.so'
W0813 08:46:34.211576 140520788572032 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tf_encrypted/session.py:26: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
0it [00:00, ?it/s]
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
170500096it [00:04, 40609204.86it/s]
---------------------------------------------------------------------------
PureTorchTensorFoundError Traceback (most recent call last)
/content/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
300 new_args, new_kwargs, new_type, args_type = syft.frameworks.torch.hook_args.unwrap_args_from_function(
--> 301 cmd, args, kwargs, return_args_type=True
302 )
16 frames
PureTorchTensorFoundError:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
1695 return torch.batch_norm(
1696 input, weight, bias, running_mean, running_var,
-> 1697 training, momentum, eps, torch.backends.cudnn.enabled
1698 )
1699
RuntimeError: running_mean should contain 3 elements not 32
|
PureTorchTensorFoundError
|
def _hook_module(self):
"""Overloading torch.nn.Module with PySyft functionality, the primary module
responsible for core ML functionality such as Neural network layers and
loss functions.
It is important to note that all the operations are actually in-place.
"""
self.element_iter_dict = {}
def register_element_iterator(name, func):
"""register an internal element buffer iterator"""
if name in self.element_iter_dict.keys():
return
self.element_iter_dict[name] = func
def tensor_iterator(nn_self):
"""adding relavant iterators for the tensor elements"""
iterators = [
"parameters",
"buffers",
] # all the element iterators from nn module should be listed here,
return [getattr(nn_self, iter) for iter in iterators]
def module_is_missing_grad(model):
"""Checks if all the parameters in the model have been assigned a gradient"""
for p in model.parameters():
if p.grad is None:
return True
return False
def create_grad_objects(model):
"""Assigns gradient to model parameters if not assigned"""
for p in model.parameters():
if p.requires_grad: # check if the object requires a grad object
o = p.sum()
o.backward()
if p.grad is not None:
p.grad -= p.grad
def module_send_(nn_self, *dest, force_send=False, **kwargs):
"""Overloads torch.nn instances so that they could be sent to other workers"""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for element_iter in tensor_iterator(nn_self):
for p in element_iter():
p.send_(*dest, **kwargs)
if isinstance(nn_self.forward, Plan):
nn_self.forward.send(*dest, force=force_send)
return nn_self
self.torch.nn.Module.send = module_send_
self.torch.nn.Module.send_ = module_send_
def module_move_(nn_self, destination):
params = list(nn_self.parameters())
for p in params:
p.move_(destination)
self.torch.nn.Module.move = module_move_
# def module_end_get_(nn_self):
# """Overloads send to remote for torch.nn.Module."""
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
#
# for p in nn_self.parameters():
# p.end_get()
#
# return nn_self
#
# self.torch.nn.Module.end_get = module_end_get_
#
# def module_move_(nn_self, dest):
# return nn_self.send(dest).end_get()
#
# self.torch.nn.Module.move = module_move_
def module_get_(nn_self):
"""overloads torch.nn instances with get method so that parameters could be sent back to owner"""
for element_iter in tensor_iterator(nn_self):
for p in element_iter():
p.get_()
if isinstance(nn_self.forward, Plan):
nn_self.forward.get()
return nn_self
self.torch.nn.Module.get_ = module_get_
self.torch.nn.Module.get = module_get_
def module_share_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for element_iter in tensor_iterator(nn_self):
for p in element_iter():
p.share_(*args, **kwargs)
return nn_self
self.torch.nn.Module.share_ = module_share_
self.torch.nn.Module.share = module_share_
def module_fix_precision_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for element_iter in tensor_iterator(nn_self):
for p in element_iter():
p.fix_precision_(*args, **kwargs)
return nn_self
self.torch.nn.Module.fix_precision_ = module_fix_precision_
self.torch.nn.Module.fix_precision = module_fix_precision_
self.torch.nn.Module.fix_prec = module_fix_precision_
def module_float_precision_(nn_self):
"""Overloads float_precision for torch.nn.Module, convert fix_precision
parameters to normal float parameters"""
# TODO: add .data and .grad to syft tensors
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
for element_iter in tensor_iterator(nn_self):
for p in element_iter():
p.float_precision_()
return nn_self
self.torch.nn.Module.float_precision_ = module_float_precision_
self.torch.nn.Module.float_precision = module_float_precision_
self.torch.nn.Module.float_prec = module_float_precision_
def module_copy(nn_self):
"""Returns a copy of a torch.nn.Module"""
return copy.deepcopy(nn_self)
self.torch.nn.Module.copy = module_copy
@property
def owner(nn_self):
for p in nn_self.parameters():
return p.owner
self.torch.nn.Module.owner = owner
@property
def location(nn_self):
try:
for p in nn_self.parameters():
return p.location
except AttributeError:
raise AttributeError(
"Module has no attribute location, did you already send it to some location?"
)
self.torch.nn.Module.location = location
# Make sure PySyft uses the PyTorch version
self.torch.nn.modules.rnn._rnn_impls["LSTM"] = self.torch.lstm
# Add support for GRUs
self.torch.nn.modules.rnn._rnn_impls["GRU"] = self.torch.gru
# Override _VF.LSTM_Cell and _VF.GRU_Cell with torch.LSTM_Cell and torch.GRU_Cell
# With the pytorch-based version
self.torch.nn.modules.rnn._VF = self.torch
|
def _hook_module(self):
"""Overloading torch.nn.Module with PySyft functionality, the primary module
responsible for core ML functionality such as Neural network layers and
loss functions.
It is important to note that all the operations are actually in-place.
"""
self.element_iter_dict = {}
def register_element_iterator(name, func):
"""register an internal element buffer iterator"""
if name in self.element_iter_dict.keys():
return
self.element_iter_dict[name] = func
def tensor_iterator(nn_self):
"""adding relavant iterators for the tensor elements"""
iterators = [
"parameters",
"buffers",
] # all the element iterators from nn module should be listed here,
return [getattr(nn_self, iter) for iter in iterators]
def module_is_missing_grad(model):
"""Checks if all the parameters in the model have been assigned a gradient"""
for p in model.parameters():
if p.grad is None:
return True
return False
def create_grad_objects(model):
"""Assigns gradient to model parameters if not assigned"""
for p in model.parameters():
if p.requires_grad: # check if the object requires a grad object
o = p.sum()
o.backward()
if p.grad is not None:
p.grad -= p.grad
def module_send_(nn_self, *dest, force_send=False, **kwargs):
"""Overloads torch.nn instances so that they could be sent to other workers"""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for element_iter in tensor_iterator(nn_self):
for p in element_iter():
p.send_(*dest, **kwargs)
if isinstance(nn_self.forward, Plan):
nn_self.forward.send(*dest, force=force_send)
return nn_self
self.torch.nn.Module.send = module_send_
self.torch.nn.Module.send_ = module_send_
def module_move_(nn_self, destination):
params = list(nn_self.parameters())
for p in params:
p.move_(destination)
self.torch.nn.Module.move = module_move_
# def module_end_get_(nn_self):
# """Overloads send to remote for torch.nn.Module."""
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
#
# for p in nn_self.parameters():
# p.end_get()
#
# return nn_self
#
# self.torch.nn.Module.end_get = module_end_get_
#
# def module_move_(nn_self, dest):
# return nn_self.send(dest).end_get()
#
# self.torch.nn.Module.move = module_move_
def module_get_(nn_self):
"""overloads torch.nn instances with get method so that parameters could be sent back to owner"""
for p in nn_self.parameters():
p.get_()
if isinstance(nn_self.forward, Plan):
nn_self.forward.get()
return nn_self
self.torch.nn.Module.get_ = module_get_
self.torch.nn.Module.get = module_get_
def module_share_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
# TODO: add .data and .grad to syft tensors
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.share_(*args, **kwargs)
return nn_self
self.torch.nn.Module.share_ = module_share_
self.torch.nn.Module.share = module_share_
def module_fix_precision_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.fix_precision_(*args, **kwargs)
return nn_self
self.torch.nn.Module.fix_precision_ = module_fix_precision_
self.torch.nn.Module.fix_precision = module_fix_precision_
self.torch.nn.Module.fix_prec = module_fix_precision_
def module_float_precision_(nn_self):
"""Overloads float_precision for torch.nn.Module, convert fix_precision
parameters to normal float parameters"""
# TODO: add .data and .grad to syft tensors
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
for p in nn_self.parameters():
p.float_precision_()
return nn_self
self.torch.nn.Module.float_precision_ = module_float_precision_
self.torch.nn.Module.float_precision = module_float_precision_
self.torch.nn.Module.float_prec = module_float_precision_
def module_copy(nn_self):
"""Returns a copy of a torch.nn.Module"""
return copy.deepcopy(nn_self)
self.torch.nn.Module.copy = module_copy
@property
def owner(nn_self):
for p in nn_self.parameters():
return p.owner
self.torch.nn.Module.owner = owner
@property
def location(nn_self):
try:
for p in nn_self.parameters():
return p.location
except AttributeError:
raise AttributeError(
"Module has no attribute location, did you already send it to some location?"
)
self.torch.nn.Module.location = location
# Make sure PySyft uses the PyTorch version
self.torch.nn.modules.rnn._rnn_impls["LSTM"] = self.torch.lstm
# Add support for GRUs
self.torch.nn.modules.rnn._rnn_impls["GRU"] = self.torch.gru
# Override _VF.LSTM_Cell and _VF.GRU_Cell with torch.LSTM_Cell and torch.GRU_Cell
# With the pytorch-based version
self.torch.nn.modules.rnn._VF = self.torch
|
https://github.com/OpenMined/PySyft/issues/2498
|
WARNING: Logging before flag parsing goes to stderr.
W0813 08:46:34.190028 140520788572032 secure_random.py:26] Falling back to insecure randomness since the required custom op could not be found for the installed version of TensorFlow. Fix this by compiling custom ops. Missing file was '/usr/local/lib/python3.6/dist-packages/tf_encrypted/operations/secure_random/secure_random_module_tf_1.14.0.so'
W0813 08:46:34.211576 140520788572032 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tf_encrypted/session.py:26: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
0it [00:00, ?it/s]
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
170500096it [00:04, 40609204.86it/s]
---------------------------------------------------------------------------
PureTorchTensorFoundError Traceback (most recent call last)
/content/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
300 new_args, new_kwargs, new_type, args_type = syft.frameworks.torch.hook_args.unwrap_args_from_function(
--> 301 cmd, args, kwargs, return_args_type=True
302 )
16 frames
PureTorchTensorFoundError:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
1695 return torch.batch_norm(
1696 input, weight, bias, running_mean, running_var,
-> 1697 training, momentum, eps, torch.backends.cudnn.enabled
1698 )
1699
RuntimeError: running_mean should contain 3 elements not 32
|
PureTorchTensorFoundError
|
def module_get_(nn_self):
"""overloads torch.nn instances with get method so that parameters could be sent back to owner"""
for element_iter in tensor_iterator(nn_self):
for p in element_iter():
p.get_()
if isinstance(nn_self.forward, Plan):
nn_self.forward.get()
return nn_self
|
def module_get_(nn_self):
"""overloads torch.nn instances with get method so that parameters could be sent back to owner"""
for p in nn_self.parameters():
p.get_()
if isinstance(nn_self.forward, Plan):
nn_self.forward.get()
return nn_self
|
https://github.com/OpenMined/PySyft/issues/2498
|
WARNING: Logging before flag parsing goes to stderr.
W0813 08:46:34.190028 140520788572032 secure_random.py:26] Falling back to insecure randomness since the required custom op could not be found for the installed version of TensorFlow. Fix this by compiling custom ops. Missing file was '/usr/local/lib/python3.6/dist-packages/tf_encrypted/operations/secure_random/secure_random_module_tf_1.14.0.so'
W0813 08:46:34.211576 140520788572032 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tf_encrypted/session.py:26: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
0it [00:00, ?it/s]
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
170500096it [00:04, 40609204.86it/s]
---------------------------------------------------------------------------
PureTorchTensorFoundError Traceback (most recent call last)
/content/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
300 new_args, new_kwargs, new_type, args_type = syft.frameworks.torch.hook_args.unwrap_args_from_function(
--> 301 cmd, args, kwargs, return_args_type=True
302 )
16 frames
PureTorchTensorFoundError:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
1695 return torch.batch_norm(
1696 input, weight, bias, running_mean, running_var,
-> 1697 training, momentum, eps, torch.backends.cudnn.enabled
1698 )
1699
RuntimeError: running_mean should contain 3 elements not 32
|
PureTorchTensorFoundError
|
def module_share_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for element_iter in tensor_iterator(nn_self):
for p in element_iter():
p.share_(*args, **kwargs)
return nn_self
|
def module_share_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
# TODO: add .data and .grad to syft tensors
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.share_(*args, **kwargs)
return nn_self
|
https://github.com/OpenMined/PySyft/issues/2498
|
WARNING: Logging before flag parsing goes to stderr.
W0813 08:46:34.190028 140520788572032 secure_random.py:26] Falling back to insecure randomness since the required custom op could not be found for the installed version of TensorFlow. Fix this by compiling custom ops. Missing file was '/usr/local/lib/python3.6/dist-packages/tf_encrypted/operations/secure_random/secure_random_module_tf_1.14.0.so'
W0813 08:46:34.211576 140520788572032 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tf_encrypted/session.py:26: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
0it [00:00, ?it/s]
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
170500096it [00:04, 40609204.86it/s]
---------------------------------------------------------------------------
PureTorchTensorFoundError Traceback (most recent call last)
/content/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
300 new_args, new_kwargs, new_type, args_type = syft.frameworks.torch.hook_args.unwrap_args_from_function(
--> 301 cmd, args, kwargs, return_args_type=True
302 )
16 frames
PureTorchTensorFoundError:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
1695 return torch.batch_norm(
1696 input, weight, bias, running_mean, running_var,
-> 1697 training, momentum, eps, torch.backends.cudnn.enabled
1698 )
1699
RuntimeError: running_mean should contain 3 elements not 32
|
PureTorchTensorFoundError
|
def module_fix_precision_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for element_iter in tensor_iterator(nn_self):
for p in element_iter():
p.fix_precision_(*args, **kwargs)
return nn_self
|
def module_fix_precision_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.fix_precision_(*args, **kwargs)
return nn_self
|
https://github.com/OpenMined/PySyft/issues/2498
|
WARNING: Logging before flag parsing goes to stderr.
W0813 08:46:34.190028 140520788572032 secure_random.py:26] Falling back to insecure randomness since the required custom op could not be found for the installed version of TensorFlow. Fix this by compiling custom ops. Missing file was '/usr/local/lib/python3.6/dist-packages/tf_encrypted/operations/secure_random/secure_random_module_tf_1.14.0.so'
W0813 08:46:34.211576 140520788572032 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tf_encrypted/session.py:26: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
0it [00:00, ?it/s]
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
170500096it [00:04, 40609204.86it/s]
---------------------------------------------------------------------------
PureTorchTensorFoundError Traceback (most recent call last)
/content/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
300 new_args, new_kwargs, new_type, args_type = syft.frameworks.torch.hook_args.unwrap_args_from_function(
--> 301 cmd, args, kwargs, return_args_type=True
302 )
16 frames
PureTorchTensorFoundError:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
1695 return torch.batch_norm(
1696 input, weight, bias, running_mean, running_var,
-> 1697 training, momentum, eps, torch.backends.cudnn.enabled
1698 )
1699
RuntimeError: running_mean should contain 3 elements not 32
|
PureTorchTensorFoundError
|
def module_float_precision_(nn_self):
"""Overloads float_precision for torch.nn.Module, convert fix_precision
parameters to normal float parameters"""
# TODO: add .data and .grad to syft tensors
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
for element_iter in tensor_iterator(nn_self):
for p in element_iter():
p.float_precision_()
return nn_self
|
def module_float_precision_(nn_self):
"""Overloads float_precision for torch.nn.Module, convert fix_precision
parameters to normal float parameters"""
# TODO: add .data and .grad to syft tensors
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
for p in nn_self.parameters():
p.float_precision_()
return nn_self
|
https://github.com/OpenMined/PySyft/issues/2498
|
WARNING: Logging before flag parsing goes to stderr.
W0813 08:46:34.190028 140520788572032 secure_random.py:26] Falling back to insecure randomness since the required custom op could not be found for the installed version of TensorFlow. Fix this by compiling custom ops. Missing file was '/usr/local/lib/python3.6/dist-packages/tf_encrypted/operations/secure_random/secure_random_module_tf_1.14.0.so'
W0813 08:46:34.211576 140520788572032 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tf_encrypted/session.py:26: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
0it [00:00, ?it/s]
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
170500096it [00:04, 40609204.86it/s]
---------------------------------------------------------------------------
PureTorchTensorFoundError Traceback (most recent call last)
/content/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
300 new_args, new_kwargs, new_type, args_type = syft.frameworks.torch.hook_args.unwrap_args_from_function(
--> 301 cmd, args, kwargs, return_args_type=True
302 )
16 frames
PureTorchTensorFoundError:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
1695 return torch.batch_norm(
1696 input, weight, bias, running_mean, running_var,
-> 1697 training, momentum, eps, torch.backends.cudnn.enabled
1698 )
1699
RuntimeError: running_mean should contain 3 elements not 32
|
PureTorchTensorFoundError
|
def create_pointer(
self,
location: BaseWorker = None,
id_at_location: (str or int) = None,
owner: BaseWorker = None,
ptr_id: (str or int) = None,
garbage_collect_data: bool = True,
shape=None,
**kwargs,
) -> PointerTensor:
"""Creates a pointer to the "self" torch.Tensor object.
Returns:
A PointerTensor pointer to self. Note that this
object will likely be wrapped by a torch.Tensor wrapper.
"""
if id_at_location is None:
id_at_location = self.id
if ptr_id is None:
if location is not None and location.id != self.owner.id:
ptr_id = self.id
else:
ptr_id = syft.ID_PROVIDER.pop()
if shape is None:
shape = self.shape
ptr = syft.PointerTensor.create_pointer(
self, location, id_at_location, owner, ptr_id, garbage_collect_data, shape
)
return ptr
|
def create_pointer(
self,
location: BaseWorker = None,
id_at_location: (str or int) = None,
register: bool = False,
owner: BaseWorker = None,
ptr_id: (str or int) = None,
garbage_collect_data: bool = True,
shape=None,
**kwargs,
) -> PointerTensor:
"""Creates a pointer to the "self" torch.Tensor object.
Returns:
A PointerTensor pointer to self. Note that this
object will likely be wrapped by a torch.Tensor wrapper.
"""
if id_at_location is None:
id_at_location = self.id
if ptr_id is None:
if location is not None and location.id != self.owner.id:
ptr_id = self.id
else:
ptr_id = syft.ID_PROVIDER.pop()
if shape is None:
shape = self.shape
ptr = syft.PointerTensor.create_pointer(
self,
location,
id_at_location,
register,
owner,
ptr_id,
garbage_collect_data,
shape,
)
return ptr
|
https://github.com/OpenMined/PySyft/issues/2498
|
WARNING: Logging before flag parsing goes to stderr.
W0813 08:46:34.190028 140520788572032 secure_random.py:26] Falling back to insecure randomness since the required custom op could not be found for the installed version of TensorFlow. Fix this by compiling custom ops. Missing file was '/usr/local/lib/python3.6/dist-packages/tf_encrypted/operations/secure_random/secure_random_module_tf_1.14.0.so'
W0813 08:46:34.211576 140520788572032 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tf_encrypted/session.py:26: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
0it [00:00, ?it/s]
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
170500096it [00:04, 40609204.86it/s]
---------------------------------------------------------------------------
PureTorchTensorFoundError Traceback (most recent call last)
/content/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
300 new_args, new_kwargs, new_type, args_type = syft.frameworks.torch.hook_args.unwrap_args_from_function(
--> 301 cmd, args, kwargs, return_args_type=True
302 )
16 frames
PureTorchTensorFoundError:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
1695 return torch.batch_norm(
1696 input, weight, bias, running_mean, running_var,
-> 1697 training, momentum, eps, torch.backends.cudnn.enabled
1698 )
1699
RuntimeError: running_mean should contain 3 elements not 32
|
PureTorchTensorFoundError
|
def _get_hooked_method(cls, tensor_type, method_name):
"""
Hook a method in order to replace all args/kwargs syft/torch tensors with
their child attribute if they exist
If so, forward this method with the new args and new self, get response
and "rebuild" the torch tensor wrapper upon all tensors found
If not, just execute the native torch methodn
Args:
attr (str): the method to hook
Return:
the hooked method
"""
@wraps(getattr(tensor_type, method_name))
def overloaded_native_method(self, *args, **kwargs):
"""
Operate the hooking
"""
if not hasattr(self, "child"): # means that it's not a wrapper
# if self is a natural tensor but the first argument isn't,
# wrap self with the appropriate type and re-run
if len(args) > 0 and hasattr(args[0], "child"):
# if we allow this for PointerTensors it opens the potential
# that we could accidentally serialize and send a tensor in the
# arguments
if not isinstance(args[0].child, PointerTensor):
self = type(args[0].child)().on(self, wrap=True)
args = [args[0]]
return overloaded_native_method(self, *args, **kwargs)
method = getattr(self, f"native_{method_name}")
# Run the native function with the new args
try:
response = method(*args, **kwargs)
except BaseException as e:
# we can make some errors more descriptive with this method
raise route_method_exception(e, self, args, kwargs)
else: # means that there is a wrapper to remove
try:
# Replace all torch tensor with their child attribute
new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
method_name, self, args, kwargs
)
except BaseException as e: # if there's a type mismatch, try to fix it!
try:
# if the first argument has no child (meaning it's probably raw data),
# try wrapping it with the type of self. We have to except PointerTensor
# because otherwise it can lead to inadvertently sending data to another
# machine
if not hasattr(args[0], "child") and not isinstance(
self.child, PointerTensor
):
# TODO: add check to make sure this isn't getting around a security class
_args = list()
_args.append(type(self)().on(args[0], wrap=False))
for a in args[1:]:
_args.append(a)
args = _args
# Replace all torch tensor with their child attribute
new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
method_name, self, args, kwargs
)
except BaseException as e:
# we can make some errors more descriptive with this method
raise route_method_exception(e, self, args, kwargs)
# Send the new command to the appropriate class and get the response
method = getattr(new_self, method_name)
response = method(*new_args, **new_kwargs)
# For inplace methods, just directly return self
if syft.framework.is_inplace_method(method_name):
return self
# Put back the wrappers where needed
response = hook_args.hook_response(
method_name,
response,
wrap_type=type(self),
new_self=self,
wrap_args=self.get_class_attributes(),
)
return response
return overloaded_native_method
|
def _get_hooked_method(cls, tensor_type, method_name):
"""
Hook a method in order to replace all args/kwargs syft/torch tensors with
their child attribute if they exist
If so, forward this method with the new args and new self, get response
and "rebuild" the torch tensor wrapper upon all tensors found
If not, just execute the native torch methodn
Args:
attr (str): the method to hook
Return:
the hooked method
"""
@wraps(getattr(tensor_type, method_name))
def overloaded_native_method(self, *args, **kwargs):
"""
Operate the hooking
"""
if not hasattr(self, "child"): # means that it's not a wrapper
# if self is a natural tensor but the first argument isn't,
# wrap self with the appropriate type and re-run
if len(args) > 0 and hasattr(args[0], "child"):
# if we allow this for PointerTensors it opens the potential
# that we could accidentally serialize and send a tensor in the
# arguments
if not isinstance(args[0].child, PointerTensor):
self = type(args[0].child)().on(self, wrap=True)
args = [args[0]]
return overloaded_native_method(self, *args, **kwargs)
method = getattr(self, f"native_{method_name}")
# Run the native function with the new args
try:
response = method(*args, **kwargs)
except BaseException as e:
# we can make some errors more descriptive with this method
raise route_method_exception(e, self, args, kwargs)
else: # means that there is a wrapper to remove
try:
# Replace all torch tensor with their child attribute
new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
method_name, self, args, kwargs
)
except BaseException as e: # if there's a type mismatch, try to fix it!
try:
# if the first argument has no child (meaning it's probably raw data),
# try wrapping it with the type of self. We have to except PointerTensor
# because otherwise it can lead to inadvertently sending data to another
# machine
if not hasattr(args[0], "child") and not isinstance(
self.child, PointerTensor
):
# TODO: add check to make sure this isn't getting around a security class
_args = list()
_args.append(type(self)().on(args[0], wrap=False))
for a in args[1:]:
_args.append(a)
args = _args
# Replace all torch tensor with their child attribute
new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
method_name, self, args, kwargs
)
except BaseException as e:
# we can make some errors more descriptive with this method
raise route_method_exception(e, self, args, kwargs)
# Send the new command to the appropriate class and get the response
method = getattr(new_self, method_name)
response = method(*new_args, **new_kwargs)
# For inplace methods, just directly return self
if syft.framework.is_inplace_method(method_name):
return self
# if object is a pointer of pointer, set register to False
if isinstance(self.child, PointerTensor):
wrap_args = {"register": False}
else:
wrap_args = {}
# Put back the wrappers where needed
response = hook_args.hook_response(
method_name,
response,
wrap_type=type(self),
new_self=self,
wrap_args=wrap_args,
)
return response
return overloaded_native_method
|
https://github.com/OpenMined/PySyft/issues/2498
|
WARNING: Logging before flag parsing goes to stderr.
W0813 08:46:34.190028 140520788572032 secure_random.py:26] Falling back to insecure randomness since the required custom op could not be found for the installed version of TensorFlow. Fix this by compiling custom ops. Missing file was '/usr/local/lib/python3.6/dist-packages/tf_encrypted/operations/secure_random/secure_random_module_tf_1.14.0.so'
W0813 08:46:34.211576 140520788572032 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tf_encrypted/session.py:26: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
0it [00:00, ?it/s]
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
170500096it [00:04, 40609204.86it/s]
---------------------------------------------------------------------------
PureTorchTensorFoundError Traceback (most recent call last)
/content/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
300 new_args, new_kwargs, new_type, args_type = syft.frameworks.torch.hook_args.unwrap_args_from_function(
--> 301 cmd, args, kwargs, return_args_type=True
302 )
16 frames
PureTorchTensorFoundError:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
1695 return torch.batch_norm(
1696 input, weight, bias, running_mean, running_var,
-> 1697 training, momentum, eps, torch.backends.cudnn.enabled
1698 )
1699
RuntimeError: running_mean should contain 3 elements not 32
|
PureTorchTensorFoundError
|
def overloaded_native_method(self, *args, **kwargs):
"""
Operate the hooking
"""
if not hasattr(self, "child"): # means that it's not a wrapper
# if self is a natural tensor but the first argument isn't,
# wrap self with the appropriate type and re-run
if len(args) > 0 and hasattr(args[0], "child"):
# if we allow this for PointerTensors it opens the potential
# that we could accidentally serialize and send a tensor in the
# arguments
if not isinstance(args[0].child, PointerTensor):
self = type(args[0].child)().on(self, wrap=True)
args = [args[0]]
return overloaded_native_method(self, *args, **kwargs)
method = getattr(self, f"native_{method_name}")
# Run the native function with the new args
try:
response = method(*args, **kwargs)
except BaseException as e:
# we can make some errors more descriptive with this method
raise route_method_exception(e, self, args, kwargs)
else: # means that there is a wrapper to remove
try:
# Replace all torch tensor with their child attribute
new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
method_name, self, args, kwargs
)
except BaseException as e: # if there's a type mismatch, try to fix it!
try:
# if the first argument has no child (meaning it's probably raw data),
# try wrapping it with the type of self. We have to except PointerTensor
# because otherwise it can lead to inadvertently sending data to another
# machine
if not hasattr(args[0], "child") and not isinstance(
self.child, PointerTensor
):
# TODO: add check to make sure this isn't getting around a security class
_args = list()
_args.append(type(self)().on(args[0], wrap=False))
for a in args[1:]:
_args.append(a)
args = _args
# Replace all torch tensor with their child attribute
new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
method_name, self, args, kwargs
)
except BaseException as e:
# we can make some errors more descriptive with this method
raise route_method_exception(e, self, args, kwargs)
# Send the new command to the appropriate class and get the response
method = getattr(new_self, method_name)
response = method(*new_args, **new_kwargs)
# For inplace methods, just directly return self
if syft.framework.is_inplace_method(method_name):
return self
# Put back the wrappers where needed
response = hook_args.hook_response(
method_name,
response,
wrap_type=type(self),
new_self=self,
wrap_args=self.get_class_attributes(),
)
return response
|
def overloaded_native_method(self, *args, **kwargs):
"""
Operate the hooking
"""
if not hasattr(self, "child"): # means that it's not a wrapper
# if self is a natural tensor but the first argument isn't,
# wrap self with the appropriate type and re-run
if len(args) > 0 and hasattr(args[0], "child"):
# if we allow this for PointerTensors it opens the potential
# that we could accidentally serialize and send a tensor in the
# arguments
if not isinstance(args[0].child, PointerTensor):
self = type(args[0].child)().on(self, wrap=True)
args = [args[0]]
return overloaded_native_method(self, *args, **kwargs)
method = getattr(self, f"native_{method_name}")
# Run the native function with the new args
try:
response = method(*args, **kwargs)
except BaseException as e:
# we can make some errors more descriptive with this method
raise route_method_exception(e, self, args, kwargs)
else: # means that there is a wrapper to remove
try:
# Replace all torch tensor with their child attribute
new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
method_name, self, args, kwargs
)
except BaseException as e: # if there's a type mismatch, try to fix it!
try:
# if the first argument has no child (meaning it's probably raw data),
# try wrapping it with the type of self. We have to except PointerTensor
# because otherwise it can lead to inadvertently sending data to another
# machine
if not hasattr(args[0], "child") and not isinstance(
self.child, PointerTensor
):
# TODO: add check to make sure this isn't getting around a security class
_args = list()
_args.append(type(self)().on(args[0], wrap=False))
for a in args[1:]:
_args.append(a)
args = _args
# Replace all torch tensor with their child attribute
new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(
method_name, self, args, kwargs
)
except BaseException as e:
# we can make some errors more descriptive with this method
raise route_method_exception(e, self, args, kwargs)
# Send the new command to the appropriate class and get the response
method = getattr(new_self, method_name)
response = method(*new_args, **new_kwargs)
# For inplace methods, just directly return self
if syft.framework.is_inplace_method(method_name):
return self
# if object is a pointer of pointer, set register to False
if isinstance(self.child, PointerTensor):
wrap_args = {"register": False}
else:
wrap_args = {}
# Put back the wrappers where needed
response = hook_args.hook_response(
method_name,
response,
wrap_type=type(self),
new_self=self,
wrap_args=wrap_args,
)
return response
|
https://github.com/OpenMined/PySyft/issues/2498
|
WARNING: Logging before flag parsing goes to stderr.
W0813 08:46:34.190028 140520788572032 secure_random.py:26] Falling back to insecure randomness since the required custom op could not be found for the installed version of TensorFlow. Fix this by compiling custom ops. Missing file was '/usr/local/lib/python3.6/dist-packages/tf_encrypted/operations/secure_random/secure_random_module_tf_1.14.0.so'
W0813 08:46:34.211576 140520788572032 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tf_encrypted/session.py:26: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
0it [00:00, ?it/s]
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
170500096it [00:04, 40609204.86it/s]
---------------------------------------------------------------------------
PureTorchTensorFoundError Traceback (most recent call last)
/content/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
300 new_args, new_kwargs, new_type, args_type = syft.frameworks.torch.hook_args.unwrap_args_from_function(
--> 301 cmd, args, kwargs, return_args_type=True
302 )
16 frames
PureTorchTensorFoundError:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
1695 return torch.batch_norm(
1696 input, weight, bias, running_mean, running_var,
-> 1697 training, momentum, eps, torch.backends.cudnn.enabled
1698 )
1699
RuntimeError: running_mean should contain 3 elements not 32
|
PureTorchTensorFoundError
|
def create_pointer(
tensor,
location: Union[AbstractWorker, str] = None,
id_at_location: (str or int) = None,
owner: Union[AbstractWorker, str] = None,
ptr_id: (str or int) = None,
garbage_collect_data=None,
shape=None,
) -> "PointerTensor":
"""Creates a pointer to the "self" FrameworkTensor object.
This method is called on a FrameworkTensor object, returning a pointer
to that object. This method is the CORRECT way to create a pointer,
and the parameters of this method give all possible attributes that
a pointer can be created with.
Args:
location: The AbstractWorker object which points to the worker on which
this pointer's object can be found. In nearly all cases, this
is self.owner and so this attribute can usually be left blank.
Very rarely you may know that you are about to move the Tensor
to another worker so you can pre-initialize the location
attribute of the pointer to some other worker, but this is a
rare exception.
id_at_location: A string or integer id of the tensor being pointed
to. Similar to location, this parameter is almost always
self.id and so you can leave this parameter to None. The only
exception is if you happen to know that the ID is going to be
something different than self.id, but again this is very rare
and most of the time, setting this means that you are probably
doing something you shouldn't.
owner: A AbstractWorker parameter to specify the worker on which the
pointer is located. It is also where the pointer is registered
if register is set to True.
ptr_id: A string or integer parameter to specify the id of the pointer
in case you wish to set it manually for any special reason.
Otherwise, it will be set randomly.
garbage_collect_data: If true (default), delete the remote tensor when the
pointer is deleted.
Returns:
A FrameworkTensor[PointerTensor] pointer to self. Note that this
object itself will likely be wrapped by a FrameworkTensor wrapper.
"""
if owner is None:
owner = tensor.owner
if location is None:
location = tensor.owner
owner = tensor.owner.get_worker(owner)
location = tensor.owner.get_worker(location)
# previous_pointer = owner.get_pointer_to(location, id_at_location)
previous_pointer = None
if previous_pointer is None:
ptr = PointerTensor(
location=location,
id_at_location=id_at_location,
owner=owner,
id=ptr_id,
garbage_collect_data=True
if garbage_collect_data is None
else garbage_collect_data,
shape=shape,
tags=tensor.tags,
description=tensor.description,
)
return ptr
|
def create_pointer(
tensor,
location: Union[AbstractWorker, str] = None,
id_at_location: (str or int) = None,
register: bool = False,
owner: Union[AbstractWorker, str] = None,
ptr_id: (str or int) = None,
garbage_collect_data=None,
shape=None,
) -> "PointerTensor":
"""Creates a pointer to the "self" FrameworkTensor object.
This method is called on a FrameworkTensor object, returning a pointer
to that object. This method is the CORRECT way to create a pointer,
and the parameters of this method give all possible attributes that
a pointer can be created with.
Args:
location: The AbstractWorker object which points to the worker on which
this pointer's object can be found. In nearly all cases, this
is self.owner and so this attribute can usually be left blank.
Very rarely you may know that you are about to move the Tensor
to another worker so you can pre-initialize the location
attribute of the pointer to some other worker, but this is a
rare exception.
id_at_location: A string or integer id of the tensor being pointed
to. Similar to location, this parameter is almost always
self.id and so you can leave this parameter to None. The only
exception is if you happen to know that the ID is going to be
something different than self.id, but again this is very rare
and most of the time, setting this means that you are probably
doing something you shouldn't.
register: A boolean parameter (default False) that determines
whether to register the new pointer that gets created. This is
set to false by default because most of the time a pointer is
initialized in this way so that it can be sent to someone else
(i.e., "Oh you need to point to my tensor? let me create a
pointer and send it to you" ). Thus, when a pointer gets
created, we want to skip being registered on the local worker
because the pointer is about to be sent elsewhere. However, if
you are initializing a pointer you intend to keep, then it is
probably a good idea to register it, especially if there is any
chance that someone else will initialize a pointer to your
pointer.
owner: A AbstractWorker parameter to specify the worker on which the
pointer is located. It is also where the pointer is registered
if register is set to True.
ptr_id: A string or integer parameter to specify the id of the pointer
in case you wish to set it manually for any special reason.
Otherwise, it will be set randomly.
garbage_collect_data: If true (default), delete the remote tensor when the
pointer is deleted.
Returns:
A FrameworkTensor[PointerTensor] pointer to self. Note that this
object itself will likely be wrapped by a FrameworkTensor wrapper.
"""
if owner is None:
owner = tensor.owner
if location is None:
location = tensor.owner
owner = tensor.owner.get_worker(owner)
location = tensor.owner.get_worker(location)
# previous_pointer = owner.get_pointer_to(location, id_at_location)
previous_pointer = None
if previous_pointer is None:
ptr = PointerTensor(
location=location,
id_at_location=id_at_location,
owner=owner,
id=ptr_id,
garbage_collect_data=True
if garbage_collect_data is None
else garbage_collect_data,
shape=shape,
tags=tensor.tags,
description=tensor.description,
)
return ptr
|
https://github.com/OpenMined/PySyft/issues/2498
|
WARNING: Logging before flag parsing goes to stderr.
W0813 08:46:34.190028 140520788572032 secure_random.py:26] Falling back to insecure randomness since the required custom op could not be found for the installed version of TensorFlow. Fix this by compiling custom ops. Missing file was '/usr/local/lib/python3.6/dist-packages/tf_encrypted/operations/secure_random/secure_random_module_tf_1.14.0.so'
W0813 08:46:34.211576 140520788572032 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/tf_encrypted/session.py:26: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
0it [00:00, ?it/s]
Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
170500096it [00:04, 40609204.86it/s]
---------------------------------------------------------------------------
PureTorchTensorFoundError Traceback (most recent call last)
/content/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in handle_func_command(cls, command)
300 new_args, new_kwargs, new_type, args_type = syft.frameworks.torch.hook_args.unwrap_args_from_function(
--> 301 cmd, args, kwargs, return_args_type=True
302 )
16 frames
PureTorchTensorFoundError:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in batch_norm(input, running_mean, running_var, weight, bias, training, momentum, eps)
1695 return torch.batch_norm(
1696 input, weight, bias, running_mean, running_var,
-> 1697 training, momentum, eps, torch.backends.cudnn.enabled
1698 )
1699
RuntimeError: running_mean should contain 3 elements not 32
|
PureTorchTensorFoundError
|
def _hook_module(self):
"""Overloading torch.nn.Module with PySyft functionality, the primary module
responsible for core ML functionality such as Neural network layers and
loss functions.
It is important to note that all the operations are actually in-place.
"""
def module_is_missing_grad(model):
"""Checks if all the parameters in the model have been assigned a gradient"""
for p in model.parameters():
if p.grad is None:
return True
return False
def create_grad_objects(model):
"""Assigns gradient to model parameters if not assigned"""
for p in model.parameters():
if p.requires_grad: # check if the object requires a grad object
o = p.sum()
o.backward()
if p.grad is not None:
p.grad -= p.grad
def module_send_(nn_self, *dest, force_send=False, **kwargs):
"""Overloads torch.nn instances so that they could be sent to other workers"""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.send_(*dest, **kwargs)
if isinstance(nn_self.forward, Plan):
nn_self.forward.send(*dest, force=force_send)
return nn_self
self.torch.nn.Module.send = module_send_
self.torch.nn.Module.send_ = module_send_
def module_move_(nn_self, destination):
params = list(nn_self.parameters())
for p in params:
p.move(destination)
self.torch.nn.Module.move = module_move_
# def module_end_get_(nn_self):
# """Overloads send to remote for torch.nn.Module."""
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
#
# for p in nn_self.parameters():
# p.end_get()
#
# return nn_self
#
# self.torch.nn.Module.end_get = module_end_get_
#
# def module_move_(nn_self, dest):
# return nn_self.send(dest).end_get()
#
# self.torch.nn.Module.move = module_move_
def module_get_(nn_self):
"""overloads torch.nn instances with get method so that parameters could be sent back to owner"""
for p in nn_self.parameters():
p.get_()
if isinstance(nn_self.forward, Plan):
nn_self.forward.get()
return nn_self
self.torch.nn.Module.get_ = module_get_
self.torch.nn.Module.get = module_get_
def module_share_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
# TODO: add .data and .grad to syft tensors
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.share_(*args, **kwargs)
return nn_self
self.torch.nn.Module.share_ = module_share_
self.torch.nn.Module.share = module_share_
def module_fix_precision_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.fix_precision_(*args, **kwargs)
return nn_self
self.torch.nn.Module.fix_precision_ = module_fix_precision_
self.torch.nn.Module.fix_precision = module_fix_precision_
self.torch.nn.Module.fix_prec = module_fix_precision_
def module_float_precision_(nn_self):
"""Overloads float_precision for torch.nn.Module, convert fix_precision
parameters to normal float parameters"""
# TODO: add .data and .grad to syft tensors
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
for p in nn_self.parameters():
p.float_precision_()
return nn_self
self.torch.nn.Module.float_precision_ = module_float_precision_
self.torch.nn.Module.float_precision = module_float_precision_
self.torch.nn.Module.float_prec = module_float_precision_
def module_copy(nn_self):
"""Returns a copy of a torch.nn.Module"""
return copy.deepcopy(nn_self)
self.torch.nn.Module.copy = module_copy
@property
def owner(nn_self):
for p in nn_self.parameters():
return p.owner
self.torch.nn.Module.owner = owner
@property
def location(nn_self):
try:
for p in nn_self.parameters():
return p.location
except AttributeError:
raise AttributeError(
"Module has no attribute location, did you already send it to some location?"
)
self.torch.nn.Module.location = location
# Make sure PySyft uses the PyTorch version
self.torch.nn.modules.rnn._rnn_impls["LSTM"] = self.torch.lstm
# Add support for GRUs
self.torch.nn.modules.rnn._rnn_impls["GRU"] = self.torch.gru
# Override _VF.LSTM_Cell and _VF.GRU_Cell with torch.LSTM_Cell and torch.GRU_Cell
# With the pytorch-based version
self.torch.nn.modules.rnn._VF = self.torch
|
def _hook_module(self):
"""Overloading torch.nn.Module with PySyft functionality, the primary module
responsible for core ML functionality such as Neural network layers and
loss functions.
It is important to note that all the operations are actually in-place.
"""
def module_is_missing_grad(model):
"""Checks if all the parameters in the model have been assigned a gradient"""
for p in model.parameters():
if p.grad is None:
return True
return False
def create_grad_objects(model):
"""Assigns gradient to model parameters if not assigned"""
for p in model.parameters():
o = p.sum()
o.backward()
if p.grad is not None:
p.grad -= p.grad
def module_send_(nn_self, *dest, force_send=False, **kwargs):
"""Overloads torch.nn instances so that they could be sent to other workers"""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.send_(*dest, **kwargs)
if isinstance(nn_self.forward, Plan):
nn_self.forward.send(*dest, force=force_send)
return nn_self
self.torch.nn.Module.send = module_send_
self.torch.nn.Module.send_ = module_send_
def module_move_(nn_self, destination):
params = list(nn_self.parameters())
for p in params:
p.move(destination)
self.torch.nn.Module.move = module_move_
# def module_end_get_(nn_self):
# """Overloads send to remote for torch.nn.Module."""
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
#
# for p in nn_self.parameters():
# p.end_get()
#
# return nn_self
#
# self.torch.nn.Module.end_get = module_end_get_
#
# def module_move_(nn_self, dest):
# return nn_self.send(dest).end_get()
#
# self.torch.nn.Module.move = module_move_
def module_get_(nn_self):
"""overloads torch.nn instances with get method so that parameters could be sent back to owner"""
for p in nn_self.parameters():
p.get_()
if isinstance(nn_self.forward, Plan):
nn_self.forward.get()
return nn_self
self.torch.nn.Module.get_ = module_get_
self.torch.nn.Module.get = module_get_
def module_share_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
# TODO: add .data and .grad to syft tensors
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.share_(*args, **kwargs)
return nn_self
self.torch.nn.Module.share_ = module_share_
self.torch.nn.Module.share = module_share_
def module_fix_precision_(nn_self, *args, **kwargs):
"""Overloads fix_precision for torch.nn.Module."""
if module_is_missing_grad(nn_self):
create_grad_objects(nn_self)
for p in nn_self.parameters():
p.fix_precision_(*args, **kwargs)
return nn_self
self.torch.nn.Module.fix_precision_ = module_fix_precision_
self.torch.nn.Module.fix_precision = module_fix_precision_
self.torch.nn.Module.fix_prec = module_fix_precision_
def module_float_precision_(nn_self):
"""Overloads float_precision for torch.nn.Module, convert fix_precision
parameters to normal float parameters"""
# TODO: add .data and .grad to syft tensors
# if module_is_missing_grad(nn_self):
# create_grad_objects(nn_self)
for p in nn_self.parameters():
p.float_precision_()
return nn_self
self.torch.nn.Module.float_precision_ = module_float_precision_
self.torch.nn.Module.float_precision = module_float_precision_
self.torch.nn.Module.float_prec = module_float_precision_
def module_copy(nn_self):
"""Returns a copy of a torch.nn.Module"""
return copy.deepcopy(nn_self)
self.torch.nn.Module.copy = module_copy
@property
def owner(nn_self):
for p in nn_self.parameters():
return p.owner
self.torch.nn.Module.owner = owner
@property
def location(nn_self):
try:
for p in nn_self.parameters():
return p.location
except AttributeError:
raise AttributeError(
"Module has no attribute location, did you already send it to some location?"
)
self.torch.nn.Module.location = location
# Make sure PySyft uses the PyTorch version
self.torch.nn.modules.rnn._rnn_impls["LSTM"] = self.torch.lstm
# Add support for GRUs
self.torch.nn.modules.rnn._rnn_impls["GRU"] = self.torch.gru
# Override _VF.LSTM_Cell and _VF.GRU_Cell with torch.LSTM_Cell and torch.GRU_Cell
# With the pytorch-based version
self.torch.nn.modules.rnn._VF = self.torch
|
https://github.com/OpenMined/PySyft/issues/3180
|
import syft
import torch
from torchvision import models
import torch.nn as nn
hook = syft.TorchHook(torch)
worker = syft.VirtualWorker(hook, id="worker")
model = models.alexnet(pretrained=True)
for param in model.parameters():
param.requires_grad=False
model.classifier[6] = nn.Linear(model.classifier[6].in_features, 3)
model.send(worker)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-15-a250859d9a13> in <module>
----> 1 model.send(worker)
~/implementation/PyGrid/gateway/src/syft/syft/frameworks/torch/hook/hook.py in module_send_(nn_self, force_send, *dest, **kwargs)
608
609 if module_is_missing_grad(nn_self):
--> 610 create_grad_objects(nn_self)
611
612 for p in nn_self.parameters():
~/implementation/PyGrid/gateway/src/syft/syft/frameworks/torch/hook/hook.py in create_grad_objects(model)
600 for p in model.parameters():
601 o = p.sum()
--> 602 o.backward()
603 if p.grad is not None:
604 p.grad -= p.grad
~/implementation/PyGrid/gateway/src/syft/syft/generic/frameworks/hook/trace.py in trace_wrapper(*args, **kwargs)
81 syft.hook.trace.logs.append((command, response))
82 else:
---> 83 response = func(*args, **kwargs)
84
85 return response
~/implementation/PyGrid/gateway/src/syft/syft/generic/frameworks/hook/hook.py in overloaded_native_method(self, *args, **kwargs)
436 except BaseException as e:
437 # we can make some errors more descriptive with this method
--> 438 raise route_method_exception(e, self, args, kwargs)
439
440 else: # means that there is a wrapper to remove
~/implementation/PyGrid/gateway/src/syft/syft/generic/frameworks/hook/hook.py in overloaded_native_method(self, *args, **kwargs)
432
433 try:
--> 434 response = method(*args, **kwargs)
435
436 except BaseException as e:
~/anaconda3/lib/python3.7/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
193 products. Defaults to ``False``.
194 """
--> 195 torch.autograd.backward(self, gradient, retain_graph, create_graph)
196
197 def register_hook(self, hook):
~/anaconda3/lib/python3.7/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
97 Variable._execution_engine.run_backward(
98 tensors, grad_tensors, retain_graph, create_graph,
---> 99 allow_unreachable=True) # allow_unreachable flag
100
101
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
|
RuntimeError
|
def create_grad_objects(model):
"""Assigns gradient to model parameters if not assigned"""
for p in model.parameters():
if p.requires_grad: # check if the object requires a grad object
o = p.sum()
o.backward()
if p.grad is not None:
p.grad -= p.grad
|
def create_grad_objects(model):
"""Assigns gradient to model parameters if not assigned"""
for p in model.parameters():
o = p.sum()
o.backward()
if p.grad is not None:
p.grad -= p.grad
|
https://github.com/OpenMined/PySyft/issues/3180
|
import syft
import torch
from torchvision import models
import torch.nn as nn
hook = syft.TorchHook(torch)
worker = syft.VirtualWorker(hook, id="worker")
model = models.alexnet(pretrained=True)
for param in model.parameters():
param.requires_grad=False
model.classifier[6] = nn.Linear(model.classifier[6].in_features, 3)
model.send(worker)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-15-a250859d9a13> in <module>
----> 1 model.send(worker)
~/implementation/PyGrid/gateway/src/syft/syft/frameworks/torch/hook/hook.py in module_send_(nn_self, force_send, *dest, **kwargs)
608
609 if module_is_missing_grad(nn_self):
--> 610 create_grad_objects(nn_self)
611
612 for p in nn_self.parameters():
~/implementation/PyGrid/gateway/src/syft/syft/frameworks/torch/hook/hook.py in create_grad_objects(model)
600 for p in model.parameters():
601 o = p.sum()
--> 602 o.backward()
603 if p.grad is not None:
604 p.grad -= p.grad
~/implementation/PyGrid/gateway/src/syft/syft/generic/frameworks/hook/trace.py in trace_wrapper(*args, **kwargs)
81 syft.hook.trace.logs.append((command, response))
82 else:
---> 83 response = func(*args, **kwargs)
84
85 return response
~/implementation/PyGrid/gateway/src/syft/syft/generic/frameworks/hook/hook.py in overloaded_native_method(self, *args, **kwargs)
436 except BaseException as e:
437 # we can make some errors more descriptive with this method
--> 438 raise route_method_exception(e, self, args, kwargs)
439
440 else: # means that there is a wrapper to remove
~/implementation/PyGrid/gateway/src/syft/syft/generic/frameworks/hook/hook.py in overloaded_native_method(self, *args, **kwargs)
432
433 try:
--> 434 response = method(*args, **kwargs)
435
436 except BaseException as e:
~/anaconda3/lib/python3.7/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
193 products. Defaults to ``False``.
194 """
--> 195 torch.autograd.backward(self, gradient, retain_graph, create_graph)
196
197 def register_hook(self, hook):
~/anaconda3/lib/python3.7/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
97 Variable._execution_engine.run_backward(
98 tensors, grad_tensors, retain_graph, create_graph,
---> 99 allow_unreachable=True) # allow_unreachable flag
100
101
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
|
RuntimeError
|
def get(self):
"""Just a pass through. This is most commonly used when calling .get() on a
AutogradTensor which has also been shared."""
tensor = self.child.get()
if isinstance(tensor, torch.Tensor):
# Remove the autograd node if a simple tensor is received
if not tensor.is_wrapper:
return tensor
# If it's a wrapper, then insert the autograd under the wrapper
else:
self.child = tensor.child
tensor.child = self
return tensor
self.child = tensor
return self
|
def get(self):
"""Just a pass through. This is most commonly used when calling .get() on a
AutogradTensor which has also been shared."""
self.child = self.child.get()
# Remove the autograd node if a simple tensor is received
if isinstance(self.child, torch.Tensor) and not self.child.is_wrapper:
return self.child
return self
|
https://github.com/OpenMined/PySyft/issues/2503
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-db6dbbeffaa2> in <module>()
100 # Breaks
101 federated()
--> 102 encrypted() # breaks here - something about loss2.backward() causes the federated() demo to break
103 else:
104 # Works fine
<ipython-input-1-db6dbbeffaa2> in encrypted()
84
85 # 4) figure out which weights caused us to miss
---> 86 loss2.backward()
87
88 # # 5) change those weights
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.22a1-py3.6.egg/syft/frameworks/torch/hook/hook.py in overloaded_native_method(self, *args, **kwargs)
683 # Put back the wrappers where needed
684 response = syft.frameworks.torch.hook_args.hook_response(
--> 685 method_name, response, wrap_type=type(self), new_self=self
686 )
687
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.22a1-py3.6.egg/syft/frameworks/torch/hook/hook_args.py in hook_response(attr, response, wrap_type, wrap_args, new_self)
243 response_hook_function = hook_method_response_functions[attr_id]
244 # Try running it
--> 245 new_response = response_hook_function(response)
246
247 except (IndexError, KeyError, AssertionError): # Update the function in cas of an error
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.22a1-py3.6.egg/syft/frameworks/torch/hook/hook_args.py in <lambda>(x)
502 f = many_fold
503
--> 504 return lambda x: f(lambdas, x)
505
506
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.22a1-py3.6.egg/syft/frameworks/torch/hook/hook_args.py in two_fold(lambdas, args, **kwargs)
520
521 def two_fold(lambdas, args, **kwargs):
--> 522 return lambdas[0](args[0], **kwargs), lambdas[1](args[1], **kwargs)
523
524
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.22a1-py3.6.egg/syft/frameworks/torch/hook/hook_args.py in <lambda>(i)
480 if isinstance(r, (list, tuple)) # if the rule is a list or tuple.
481 # Last if not, rule is probably == 1 so use type to return the right transformation.
--> 482 else lambda i: backward_func[wrap_type](i, **wrap_args)
483 for a, r in zip(response, rules) # And do this for all the responses / rules provided
484 ]
/Users/atrask/anaconda/lib/python3.6/site-packages/syft-0.1.22a1-py3.6.egg/syft/frameworks/torch/hook/hook_args.py in <lambda>(i)
73 backward_func = {
74 TorchTensor: lambda i: i.wrap(),
---> 75 torch.Tensor: lambda i: i.wrap(),
76 torch.nn.Parameter: lambda i: torch.nn.Parameter(data=i),
77 PointerTensor: lambda i: i,
AttributeError: 'NoneType' object has no attribute 'wrap'
|
AttributeError
|
def simplify(tensor: "AdditiveSharingTensor") -> tuple:
"""
This function takes the attributes of a AdditiveSharingTensor and saves them in a tuple
Args:
tensor (AdditiveSharingTensor): a AdditiveSharingTensor
Returns:
tuple: a tuple holding the unique attributes of the additive shared tensor
Examples:
data = simplify(tensor)
"""
chain = None
if hasattr(tensor, "child"):
chain = sy.serde._simplify(tensor.child)
# Don't delete the remote values of the shares at simplification
tensor.set_garbage_collect_data(False)
return (tensor.id, tensor.field, tensor.crypto_provider.id, chain)
|
def simplify(tensor: "AdditiveSharingTensor") -> tuple:
"""
This function takes the attributes of a AdditiveSharingTensor and saves them in a tuple
Args:
tensor (AdditiveSharingTensor): a AdditiveSharingTensor
Returns:
tuple: a tuple holding the unique attributes of the additive shared tensor
Examples:
data = simplify(tensor)
"""
chain = None
if hasattr(tensor, "child"):
chain = sy.serde._simplify(tensor.child)
return (tensor.id, tensor.field, tensor.crypto_provider.id, chain)
|
https://github.com/OpenMined/PySyft/issues/2352
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in register_response(attr, response, response_ids, owner)
656 # Load the utility function to register the response and transform tensors with pointers
--> 657 register_response_function = register_response_functions[attr_id]
658 # Try running it
KeyError: 'numpy'
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
<ipython-input-1-95fc1ed4192b> in <module>
13 x = torch.tensor([1.])
14 x_ptr = x.send(alice)
---> 15 x_fp = x_ptr.fix_prec()
~/code/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in fix_prec(self, *args, **kwargs)
666 prec_fractional = kwargs.get("precision_fractional", 3)
667 max_precision = _get_maximum_precision()
--> 668 if self._requires_large_precision(max_precision, base, prec_fractional):
669 return (
670 syft.LargePrecisionTensor(*args, **kwargs)
~/code/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in _requires_large_precision(self, max_precision, base, precision_fractional)
691 # We need to use NumPy here as log2 is not yet implemented for LongTensor PyTorch objects
692 return np.any(
--> 693 np.log2(np.abs(self.clone().detach().numpy()) + 1) + base_fractional > max_precision
694 )
695
~/code/PySyft/syft/frameworks/torch/hook/hook.py in overloaded_native_method(self, *args, **kwargs)
675 # Send the new command to the appropriate class and get the response
676 method = getattr(new_self, method_name)
--> 677 response = method(*new_args, **new_kwargs)
678
679 # For inplace methods, just directly return self
~/code/PySyft/syft/frameworks/torch/hook/hook.py in overloaded_pointer_method(self, *args, **kwargs)
511 command = (attr, self, args, kwargs)
512
--> 513 response = owner.send_command(location, command)
514
515 return response
~/code/PySyft/syft/workers/base.py in send_command(self, recipient, message, return_ids)
425
426 try:
--> 427 ret_val = self.send_msg(codes.MSGTYPE.CMD, message, location=recipient)
428 except ResponseSignatureError as e:
429 ret_val = None
~/code/PySyft/syft/workers/base.py in send_msg(self, msg_type, message, location)
221
222 # Step 2: send the message and wait for a response
--> 223 bin_response = self._send_msg(bin_message, location)
224
225 # Step 3: deserialize the response
~/code/PySyft/syft/workers/virtual.py in _send_msg(self, message, location)
8 class VirtualWorker(BaseWorker, FederatedClient):
9 def _send_msg(self, message: bin, location: BaseWorker) -> bin:
---> 10 return location._recv_msg(message)
11
12 def _recv_msg(self, message: bin) -> bin:
~/code/PySyft/syft/workers/virtual.py in _recv_msg(self, message)
11
12 def _recv_msg(self, message: bin) -> bin:
---> 13 return self.recv_msg(message)
14
15 @staticmethod
~/code/PySyft/syft/workers/base.py in recv_msg(self, bin_message)
252 print(f"worker {self} received {sy.codes.code2MSGTYPE[msg_type]} {contents}")
253 # Step 1: route message to appropriate function
--> 254 response = self._message_router[msg_type](contents)
255
256 # Step 2: Serialize the message to simple python objects
~/code/PySyft/syft/workers/base.py in execute_command(self, message)
391 try:
392 response = sy.frameworks.torch.hook_args.register_response(
--> 393 command_name, response, list(return_ids), self
394 )
395 return response
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in register_response(attr, response, response_ids, owner)
664 register_response_functions[attr_id] = register_response_function
665 # Run it
--> 666 new_response = register_response_function(response, response_ids=response_ids, owner=owner)
667
668 # Remove the artificial tuple
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in <lambda>(x, **kwargs)
757 f = many_fold
758
--> 759 return lambda x, **kwargs: f(lambdas, x, **kwargs)
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in two_fold(lambdas, args, **kwargs)
514
515 def two_fold(lambdas, args, **kwargs):
--> 516 return lambdas[0](args[0], **kwargs), lambdas[1](args[1], **kwargs)
517
518
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in <lambda>(i, **kwargs)
735 if isinstance(r, (list, tuple)) # if the rule is a list or tuple.
736 # Last if not, rule is probably == 1 so use type to return the right transformation.
--> 737 else lambda i, **kwargs: register_tensor(i, **kwargs)
738 for a, r in zip(response, rules) # And do this for all the responses / rules provided
739 ]
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in register_tensor(tensor, owner, response_ids)
706 and each id is pop out when needed.
707 """
--> 708 tensor.owner = owner
709 try:
710 tensor.id = response_ids.pop(-1)
AttributeError: 'numpy.ndarray' object has no attribute 'owner'
|
KeyError
|
def fix_prec(self, *args, **kwargs):
if self.is_wrapper:
self.child = self.child.fix_prec(*args, **kwargs)
return self
else:
base = kwargs.get("base", 10)
prec_fractional = kwargs.get("precision_fractional", 3)
max_precision = _get_maximum_precision()
if self._requires_large_precision(max_precision, base, prec_fractional):
return (
syft.LargePrecisionTensor(*args, **kwargs)
.on(self)
.child.fix_large_precision()
.wrap()
)
else:
return (
syft.FixedPrecisionTensor(*args, **kwargs)
.on(self)
.enc_fix_prec()
.wrap()
)
|
def fix_prec(self, *args, **kwargs):
base = kwargs.get("base", 10)
prec_fractional = kwargs.get("precision_fractional", 3)
max_precision = _get_maximum_precision()
if self._requires_large_precision(max_precision, base, prec_fractional):
return (
syft.LargePrecisionTensor(*args, **kwargs)
.on(self)
.child.fix_large_precision()
.wrap()
)
else:
return syft.FixedPrecisionTensor(*args, **kwargs).on(self).enc_fix_prec().wrap()
|
https://github.com/OpenMined/PySyft/issues/2352
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in register_response(attr, response, response_ids, owner)
656 # Load the utility function to register the response and transform tensors with pointers
--> 657 register_response_function = register_response_functions[attr_id]
658 # Try running it
KeyError: 'numpy'
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
<ipython-input-1-95fc1ed4192b> in <module>
13 x = torch.tensor([1.])
14 x_ptr = x.send(alice)
---> 15 x_fp = x_ptr.fix_prec()
~/code/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in fix_prec(self, *args, **kwargs)
666 prec_fractional = kwargs.get("precision_fractional", 3)
667 max_precision = _get_maximum_precision()
--> 668 if self._requires_large_precision(max_precision, base, prec_fractional):
669 return (
670 syft.LargePrecisionTensor(*args, **kwargs)
~/code/PySyft/syft/frameworks/torch/tensors/interpreters/native.py in _requires_large_precision(self, max_precision, base, precision_fractional)
691 # We need to use NumPy here as log2 is not yet implemented for LongTensor PyTorch objects
692 return np.any(
--> 693 np.log2(np.abs(self.clone().detach().numpy()) + 1) + base_fractional > max_precision
694 )
695
~/code/PySyft/syft/frameworks/torch/hook/hook.py in overloaded_native_method(self, *args, **kwargs)
675 # Send the new command to the appropriate class and get the response
676 method = getattr(new_self, method_name)
--> 677 response = method(*new_args, **new_kwargs)
678
679 # For inplace methods, just directly return self
~/code/PySyft/syft/frameworks/torch/hook/hook.py in overloaded_pointer_method(self, *args, **kwargs)
511 command = (attr, self, args, kwargs)
512
--> 513 response = owner.send_command(location, command)
514
515 return response
~/code/PySyft/syft/workers/base.py in send_command(self, recipient, message, return_ids)
425
426 try:
--> 427 ret_val = self.send_msg(codes.MSGTYPE.CMD, message, location=recipient)
428 except ResponseSignatureError as e:
429 ret_val = None
~/code/PySyft/syft/workers/base.py in send_msg(self, msg_type, message, location)
221
222 # Step 2: send the message and wait for a response
--> 223 bin_response = self._send_msg(bin_message, location)
224
225 # Step 3: deserialize the response
~/code/PySyft/syft/workers/virtual.py in _send_msg(self, message, location)
8 class VirtualWorker(BaseWorker, FederatedClient):
9 def _send_msg(self, message: bin, location: BaseWorker) -> bin:
---> 10 return location._recv_msg(message)
11
12 def _recv_msg(self, message: bin) -> bin:
~/code/PySyft/syft/workers/virtual.py in _recv_msg(self, message)
11
12 def _recv_msg(self, message: bin) -> bin:
---> 13 return self.recv_msg(message)
14
15 @staticmethod
~/code/PySyft/syft/workers/base.py in recv_msg(self, bin_message)
252 print(f"worker {self} received {sy.codes.code2MSGTYPE[msg_type]} {contents}")
253 # Step 1: route message to appropriate function
--> 254 response = self._message_router[msg_type](contents)
255
256 # Step 2: Serialize the message to simple python objects
~/code/PySyft/syft/workers/base.py in execute_command(self, message)
391 try:
392 response = sy.frameworks.torch.hook_args.register_response(
--> 393 command_name, response, list(return_ids), self
394 )
395 return response
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in register_response(attr, response, response_ids, owner)
664 register_response_functions[attr_id] = register_response_function
665 # Run it
--> 666 new_response = register_response_function(response, response_ids=response_ids, owner=owner)
667
668 # Remove the artificial tuple
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in <lambda>(x, **kwargs)
757 f = many_fold
758
--> 759 return lambda x, **kwargs: f(lambdas, x, **kwargs)
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in two_fold(lambdas, args, **kwargs)
514
515 def two_fold(lambdas, args, **kwargs):
--> 516 return lambdas[0](args[0], **kwargs), lambdas[1](args[1], **kwargs)
517
518
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in <lambda>(i, **kwargs)
735 if isinstance(r, (list, tuple)) # if the rule is a list or tuple.
736 # Last if not, rule is probably == 1 so use type to return the right transformation.
--> 737 else lambda i, **kwargs: register_tensor(i, **kwargs)
738 for a, r in zip(response, rules) # And do this for all the responses / rules provided
739 ]
~/code/PySyft/syft/frameworks/torch/hook/hook_args.py in register_tensor(tensor, owner, response_ids)
706 and each id is pop out when needed.
707 """
--> 708 tensor.owner = owner
709 try:
710 tensor.id = response_ids.pop(-1)
AttributeError: 'numpy.ndarray' object has no attribute 'owner'
|
KeyError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.