repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
talkincode/toughlib | toughlib/btforms/net.py | urlquote | python | def urlquote(val):
if val is None: return ''
if not isinstance(val, unicode): val = str(val)
else: val = val.encode('utf-8')
return urllib.quote(val) | Quotes a string for use in a URL.
>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD' | train | https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/btforms/net.py#L100-L114 | null | #!/usr/bin/env python
#coding:utf-8
__all__ = [
"validipaddr", "validipport", "validip", "validaddr",
"urlquote",
"httpdate", "parsehttpdate",
"htmlquote", "htmlunquote", "websafe",
]
import urllib, time
try: import datetime
except ImportError: pass
def validipaddr(address):
"""
Returns True if `address` is a valid IPv4 address.
>>> validipaddr('192.168.1.1')
True
>>> validipaddr('192.168.1.800')
False
>>> validipaddr('192.168.1')
False
"""
try:
octets = address.split('.')
if len(octets) != 4:
return False
for x in octets:
if not (0 <= int(x) <= 255):
return False
except ValueError:
return False
return True
def validipport(port):
"""
Returns True if `port` is a valid IPv4 port.
>>> validipport('9000')
True
>>> validipport('foo')
False
>>> validipport('1000000')
False
"""
try:
if not (0 <= int(port) <= 65535):
return False
except ValueError:
return False
return True
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
"""Returns `(ip_address, port)` from string `ip_addr_port`"""
addr = defaultaddr
port = defaultport
ip = ip.split(":", 1)
if len(ip) == 1:
if not ip[0]:
pass
elif validipaddr(ip[0]):
addr = ip[0]
elif validipport(ip[0]):
port = int(ip[0])
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
elif len(ip) == 2:
addr, port = ip
if not validipaddr(addr) and validipport(port):
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
port = int(port)
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
return (addr, port)
def validaddr(string_):
"""
Returns either (ip_address, port) or "/path/to/socket" from string_
>>> validaddr('/path/to/socket')
'/path/to/socket'
>>> validaddr('8000')
('0.0.0.0', 8000)
>>> validaddr('127.0.0.1')
('127.0.0.1', 8080)
>>> validaddr('127.0.0.1:8000')
('127.0.0.1', 8000)
>>> validaddr('fff')
Traceback (most recent call last):
...
ValueError: fff is not a valid IP address/port
"""
if '/' in string_:
return string_
else:
return validip(string_)
def urlquote(val):
"""
Quotes a string for use in a URL.
>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD'
"""
if val is None: return ''
if not isinstance(val, unicode): val = str(val)
else: val = val.encode('utf-8')
return urllib.quote(val)
def httpdate(date_obj):
"""
Formats a datetime object for use in HTTP headers.
>>> import datetime
>>> httpdate(datetime.datetime(1970, 1, 1, 1, 1, 1))
'Thu, 01 Jan 1970 01:01:01 GMT'
"""
return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
def parsehttpdate(string_):
"""
Parses an HTTP date into a datetime object.
>>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
datetime.datetime(1970, 1, 1, 1, 1, 1)
"""
try:
t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
return None
return datetime.datetime(*t[:6])
def htmlquote(text):
r"""
Encodes `text` for raw use in HTML.
>>> htmlquote(u"<'&\">")
u'<'&">'
"""
text = text.replace(u"&", u"&") # Must be done first!
text = text.replace(u"<", u"<")
text = text.replace(u">", u">")
text = text.replace(u"'", u"'")
text = text.replace(u'"', u""")
return text
def htmlunquote(text):
r"""
Decodes `text` that's HTML quoted.
>>> htmlunquote(u'<'&">')
u'<\'&">'
"""
text = text.replace(u""", u'"')
text = text.replace(u"'", u"'")
text = text.replace(u">", u">")
text = text.replace(u"<", u"<")
text = text.replace(u"&", u"&") # Must be done last!
return text
def websafe(val):
r"""Converts `val` so that it is safe for use in Unicode HTML.
>>> websafe("<'&\">")
u'<'&">'
>>> websafe(None)
u''
>>> websafe(u'\u203d')
u'\u203d'
>>> websafe('\xe2\x80\xbd')
u'\u203d'
"""
if val is None:
return u''
elif isinstance(val, str):
val = val.decode('utf-8')
elif not isinstance(val, unicode):
val = unicode(val)
return htmlquote(val)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
talkincode/toughlib | toughlib/btforms/net.py | parsehttpdate | python | def parsehttpdate(string_):
try:
t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
return None
return datetime.datetime(*t[:6]) | Parses an HTTP date into a datetime object.
>>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
datetime.datetime(1970, 1, 1, 1, 1, 1) | train | https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/btforms/net.py#L126-L137 | null | #!/usr/bin/env python
#coding:utf-8
__all__ = [
"validipaddr", "validipport", "validip", "validaddr",
"urlquote",
"httpdate", "parsehttpdate",
"htmlquote", "htmlunquote", "websafe",
]
import urllib, time
try: import datetime
except ImportError: pass
def validipaddr(address):
"""
Returns True if `address` is a valid IPv4 address.
>>> validipaddr('192.168.1.1')
True
>>> validipaddr('192.168.1.800')
False
>>> validipaddr('192.168.1')
False
"""
try:
octets = address.split('.')
if len(octets) != 4:
return False
for x in octets:
if not (0 <= int(x) <= 255):
return False
except ValueError:
return False
return True
def validipport(port):
"""
Returns True if `port` is a valid IPv4 port.
>>> validipport('9000')
True
>>> validipport('foo')
False
>>> validipport('1000000')
False
"""
try:
if not (0 <= int(port) <= 65535):
return False
except ValueError:
return False
return True
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
"""Returns `(ip_address, port)` from string `ip_addr_port`"""
addr = defaultaddr
port = defaultport
ip = ip.split(":", 1)
if len(ip) == 1:
if not ip[0]:
pass
elif validipaddr(ip[0]):
addr = ip[0]
elif validipport(ip[0]):
port = int(ip[0])
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
elif len(ip) == 2:
addr, port = ip
if not validipaddr(addr) and validipport(port):
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
port = int(port)
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
return (addr, port)
def validaddr(string_):
"""
Returns either (ip_address, port) or "/path/to/socket" from string_
>>> validaddr('/path/to/socket')
'/path/to/socket'
>>> validaddr('8000')
('0.0.0.0', 8000)
>>> validaddr('127.0.0.1')
('127.0.0.1', 8080)
>>> validaddr('127.0.0.1:8000')
('127.0.0.1', 8000)
>>> validaddr('fff')
Traceback (most recent call last):
...
ValueError: fff is not a valid IP address/port
"""
if '/' in string_:
return string_
else:
return validip(string_)
def urlquote(val):
"""
Quotes a string for use in a URL.
>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD'
"""
if val is None: return ''
if not isinstance(val, unicode): val = str(val)
else: val = val.encode('utf-8')
return urllib.quote(val)
def httpdate(date_obj):
"""
Formats a datetime object for use in HTTP headers.
>>> import datetime
>>> httpdate(datetime.datetime(1970, 1, 1, 1, 1, 1))
'Thu, 01 Jan 1970 01:01:01 GMT'
"""
return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
def htmlquote(text):
r"""
Encodes `text` for raw use in HTML.
>>> htmlquote(u"<'&\">")
u'<'&">'
"""
text = text.replace(u"&", u"&") # Must be done first!
text = text.replace(u"<", u"<")
text = text.replace(u">", u">")
text = text.replace(u"'", u"'")
text = text.replace(u'"', u""")
return text
def htmlunquote(text):
r"""
Decodes `text` that's HTML quoted.
>>> htmlunquote(u'<'&">')
u'<\'&">'
"""
text = text.replace(u""", u'"')
text = text.replace(u"'", u"'")
text = text.replace(u">", u">")
text = text.replace(u"<", u"<")
text = text.replace(u"&", u"&") # Must be done last!
return text
def websafe(val):
r"""Converts `val` so that it is safe for use in Unicode HTML.
>>> websafe("<'&\">")
u'<'&">'
>>> websafe(None)
u''
>>> websafe(u'\u203d')
u'\u203d'
>>> websafe('\xe2\x80\xbd')
u'\u203d'
"""
if val is None:
return u''
elif isinstance(val, str):
val = val.decode('utf-8')
elif not isinstance(val, unicode):
val = unicode(val)
return htmlquote(val)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
talkincode/toughlib | toughlib/btforms/net.py | htmlquote | python | def htmlquote(text):
r"""
Encodes `text` for raw use in HTML.
>>> htmlquote(u"<'&\">")
u'<'&">'
"""
text = text.replace(u"&", u"&") # Must be done first!
text = text.replace(u"<", u"<")
text = text.replace(u">", u">")
text = text.replace(u"'", u"'")
text = text.replace(u'"', u""")
return text | r"""
Encodes `text` for raw use in HTML.
>>> htmlquote(u"<'&\">")
u'<'&">' | train | https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/btforms/net.py#L139-L151 | null | #!/usr/bin/env python
#coding:utf-8
__all__ = [
"validipaddr", "validipport", "validip", "validaddr",
"urlquote",
"httpdate", "parsehttpdate",
"htmlquote", "htmlunquote", "websafe",
]
import urllib, time
try: import datetime
except ImportError: pass
def validipaddr(address):
"""
Returns True if `address` is a valid IPv4 address.
>>> validipaddr('192.168.1.1')
True
>>> validipaddr('192.168.1.800')
False
>>> validipaddr('192.168.1')
False
"""
try:
octets = address.split('.')
if len(octets) != 4:
return False
for x in octets:
if not (0 <= int(x) <= 255):
return False
except ValueError:
return False
return True
def validipport(port):
"""
Returns True if `port` is a valid IPv4 port.
>>> validipport('9000')
True
>>> validipport('foo')
False
>>> validipport('1000000')
False
"""
try:
if not (0 <= int(port) <= 65535):
return False
except ValueError:
return False
return True
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
"""Returns `(ip_address, port)` from string `ip_addr_port`"""
addr = defaultaddr
port = defaultport
ip = ip.split(":", 1)
if len(ip) == 1:
if not ip[0]:
pass
elif validipaddr(ip[0]):
addr = ip[0]
elif validipport(ip[0]):
port = int(ip[0])
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
elif len(ip) == 2:
addr, port = ip
if not validipaddr(addr) and validipport(port):
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
port = int(port)
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
return (addr, port)
def validaddr(string_):
"""
Returns either (ip_address, port) or "/path/to/socket" from string_
>>> validaddr('/path/to/socket')
'/path/to/socket'
>>> validaddr('8000')
('0.0.0.0', 8000)
>>> validaddr('127.0.0.1')
('127.0.0.1', 8080)
>>> validaddr('127.0.0.1:8000')
('127.0.0.1', 8000)
>>> validaddr('fff')
Traceback (most recent call last):
...
ValueError: fff is not a valid IP address/port
"""
if '/' in string_:
return string_
else:
return validip(string_)
def urlquote(val):
"""
Quotes a string for use in a URL.
>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD'
"""
if val is None: return ''
if not isinstance(val, unicode): val = str(val)
else: val = val.encode('utf-8')
return urllib.quote(val)
def httpdate(date_obj):
"""
Formats a datetime object for use in HTTP headers.
>>> import datetime
>>> httpdate(datetime.datetime(1970, 1, 1, 1, 1, 1))
'Thu, 01 Jan 1970 01:01:01 GMT'
"""
return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
def parsehttpdate(string_):
"""
Parses an HTTP date into a datetime object.
>>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
datetime.datetime(1970, 1, 1, 1, 1, 1)
"""
try:
t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
return None
return datetime.datetime(*t[:6])
def htmlquote(text):
r"""
Encodes `text` for raw use in HTML.
>>> htmlquote(u"<'&\">")
u'<'&">'
"""
text = text.replace(u"&", u"&") # Must be done first!
text = text.replace(u"<", u"<")
text = text.replace(u">", u">")
text = text.replace(u"'", u"'")
text = text.replace(u'"', u""")
return text
def htmlunquote(text):
r"""
Decodes `text` that's HTML quoted.
>>> htmlunquote(u'<'&">')
u'<\'&">'
"""
text = text.replace(u""", u'"')
text = text.replace(u"'", u"'")
text = text.replace(u">", u">")
text = text.replace(u"<", u"<")
text = text.replace(u"&", u"&") # Must be done last!
return text
def websafe(val):
r"""Converts `val` so that it is safe for use in Unicode HTML.
>>> websafe("<'&\">")
u'<'&">'
>>> websafe(None)
u''
>>> websafe(u'\u203d')
u'\u203d'
>>> websafe('\xe2\x80\xbd')
u'\u203d'
"""
if val is None:
return u''
elif isinstance(val, str):
val = val.decode('utf-8')
elif not isinstance(val, unicode):
val = unicode(val)
return htmlquote(val)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
talkincode/toughlib | toughlib/btforms/net.py | websafe | python | def websafe(val):
r"""Converts `val` so that it is safe for use in Unicode HTML.
>>> websafe("<'&\">")
u'<'&">'
>>> websafe(None)
u''
>>> websafe(u'\u203d')
u'\u203d'
>>> websafe('\xe2\x80\xbd')
u'\u203d'
"""
if val is None:
return u''
elif isinstance(val, str):
val = val.decode('utf-8')
elif not isinstance(val, unicode):
val = unicode(val)
return htmlquote(val) | r"""Converts `val` so that it is safe for use in Unicode HTML.
>>> websafe("<'&\">")
u'<'&">'
>>> websafe(None)
u''
>>> websafe(u'\u203d')
u'\u203d'
>>> websafe('\xe2\x80\xbd')
u'\u203d' | train | https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/btforms/net.py#L167-L186 | [
"def htmlquote(text):\n r\"\"\"\n Encodes `text` for raw use in HTML.\n\n >>> htmlquote(u\"<'&\\\">\")\n u'<'&">'\n \"\"\"\n text = text.replace(u\"&\", u\"&\") # Must be done first!\n text = text.replace(u\"<\", u\"<\")\n text = text.replace(u\">\", u\">... | #!/usr/bin/env python
#coding:utf-8
__all__ = [
"validipaddr", "validipport", "validip", "validaddr",
"urlquote",
"httpdate", "parsehttpdate",
"htmlquote", "htmlunquote", "websafe",
]
import urllib, time
try: import datetime
except ImportError: pass
def validipaddr(address):
"""
Returns True if `address` is a valid IPv4 address.
>>> validipaddr('192.168.1.1')
True
>>> validipaddr('192.168.1.800')
False
>>> validipaddr('192.168.1')
False
"""
try:
octets = address.split('.')
if len(octets) != 4:
return False
for x in octets:
if not (0 <= int(x) <= 255):
return False
except ValueError:
return False
return True
def validipport(port):
"""
Returns True if `port` is a valid IPv4 port.
>>> validipport('9000')
True
>>> validipport('foo')
False
>>> validipport('1000000')
False
"""
try:
if not (0 <= int(port) <= 65535):
return False
except ValueError:
return False
return True
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
"""Returns `(ip_address, port)` from string `ip_addr_port`"""
addr = defaultaddr
port = defaultport
ip = ip.split(":", 1)
if len(ip) == 1:
if not ip[0]:
pass
elif validipaddr(ip[0]):
addr = ip[0]
elif validipport(ip[0]):
port = int(ip[0])
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
elif len(ip) == 2:
addr, port = ip
if not validipaddr(addr) and validipport(port):
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
port = int(port)
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
return (addr, port)
def validaddr(string_):
"""
Returns either (ip_address, port) or "/path/to/socket" from string_
>>> validaddr('/path/to/socket')
'/path/to/socket'
>>> validaddr('8000')
('0.0.0.0', 8000)
>>> validaddr('127.0.0.1')
('127.0.0.1', 8080)
>>> validaddr('127.0.0.1:8000')
('127.0.0.1', 8000)
>>> validaddr('fff')
Traceback (most recent call last):
...
ValueError: fff is not a valid IP address/port
"""
if '/' in string_:
return string_
else:
return validip(string_)
def urlquote(val):
"""
Quotes a string for use in a URL.
>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD'
"""
if val is None: return ''
if not isinstance(val, unicode): val = str(val)
else: val = val.encode('utf-8')
return urllib.quote(val)
def httpdate(date_obj):
"""
Formats a datetime object for use in HTTP headers.
>>> import datetime
>>> httpdate(datetime.datetime(1970, 1, 1, 1, 1, 1))
'Thu, 01 Jan 1970 01:01:01 GMT'
"""
return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
def parsehttpdate(string_):
"""
Parses an HTTP date into a datetime object.
>>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
datetime.datetime(1970, 1, 1, 1, 1, 1)
"""
try:
t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
return None
return datetime.datetime(*t[:6])
def htmlquote(text):
r"""
Encodes `text` for raw use in HTML.
>>> htmlquote(u"<'&\">")
u'<'&">'
"""
text = text.replace(u"&", u"&") # Must be done first!
text = text.replace(u"<", u"<")
text = text.replace(u">", u">")
text = text.replace(u"'", u"'")
text = text.replace(u'"', u""")
return text
def htmlunquote(text):
r"""
Decodes `text` that's HTML quoted.
>>> htmlunquote(u'<'&">')
u'<\'&">'
"""
text = text.replace(u""", u'"')
text = text.replace(u"'", u"'")
text = text.replace(u">", u">")
text = text.replace(u"<", u"<")
text = text.replace(u"&", u"&") # Must be done last!
return text
def websafe(val):
r"""Converts `val` so that it is safe for use in Unicode HTML.
>>> websafe("<'&\">")
u'<'&">'
>>> websafe(None)
u''
>>> websafe(u'\u203d')
u'\u203d'
>>> websafe('\xe2\x80\xbd')
u'\u203d'
"""
if val is None:
return u''
elif isinstance(val, str):
val = val.decode('utf-8')
elif not isinstance(val, unicode):
val = unicode(val)
return htmlquote(val)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
talkincode/toughlib | toughlib/mail.py | ContextFactory.getContext | python | def getContext(self):
ctx = ClientContextFactory.getContext(self)
ctx.set_options(OP_NO_SSLv3)
return ctx | Get the parent context but disable SSLv3. | train | https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/mail.py#L18-L22 | null | class ContextFactory(ClientContextFactory):
|
talkincode/toughlib | toughlib/choosereactor.py | install_optimal_reactor | python | def install_optimal_reactor(verbose=False):
import sys
from twisted.python import reflect
## determine currently installed reactor, if any
##
if 'twisted.internet.reactor' in sys.modules:
current_reactor = reflect.qual(sys.modules['twisted.internet.reactor'].__class__).split('.')[-1]
else:
current_reactor = None
## depending on platform, install optimal reactor
##
if 'bsd' in sys.platform or sys.platform.startswith('darwin'):
## *BSD and MacOSX
##
if current_reactor != 'KQueueReactor':
try:
v = sys.version_info
if v[0] == 1 or (v[0] == 2 and v[1] < 6) or (v[0] == 2 and v[1] == 6 and v[2] < 5):
raise Exception("Python version too old ({0}) to use kqueue reactor".format(sys.version))
from twisted.internet import kqreactor
kqreactor.install()
except Exception as e:
print(
"WARNING: Running on *BSD or MacOSX, but cannot install kqueue Twisted reactor ({0}).".format(e))
else:
if verbose:
print("Running on *BSD or MacOSX and optimal reactor (kqueue) was installed.")
else:
if verbose:
print("Running on *BSD or MacOSX and optimal reactor (kqueue) already installed.")
elif sys.platform in ['win32']:
## Windows
##
if current_reactor != 'IOCPReactor':
try:
from twisted.internet.iocpreactor import reactor as iocpreactor
iocpreactor.install()
except Exception as e:
print("WARNING: Running on Windows, but cannot install IOCP Twisted reactor ({0}).".format(e))
else:
if verbose:
print("Running on Windows and optimal reactor (ICOP) was installed.")
else:
if verbose:
print("Running on Windows and optimal reactor (ICOP) already installed.")
elif sys.platform.startswith('linux'):
## Linux
##
if current_reactor != 'EPollReactor':
try:
from twisted.internet import epollreactor
epollreactor.install()
except Exception as e:
print("WARNING: Running on Linux, but cannot install Epoll Twisted reactor ({0}).".format(e))
else:
if verbose:
print("Running on Linux and optimal reactor (epoll) was installed.")
else:
if verbose:
print("Running on Linux and optimal reactor (epoll) already installed.")
else:
try:
from twisted.internet import default as defaultreactor
defaultreactor.install()
except Exception as e:
print("WARNING: Could not install default Twisted reactor for this platform ({0}).".format(e)) | Try to install the optimal Twisted reactor for platform.
:param verbose: If ``True``, print what happens.
:type verbose: bool | train | https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/choosereactor.py#L25-L104 | null | ###############################################################################
##
## Copyright (C) 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = (
'install_optimal_reactor',
'install_reactor'
)
def install_reactor(explicitReactor=None, verbose=False):
"""
Install Twisted reactor.
:param explicitReactor: If provided, install this reactor. Else, install optimal reactor.
:type explicitReactor: obj
:param verbose: If ``True``, print what happens.
:type verbose: bool
"""
import sys
if explicitReactor:
## install explicitly given reactor
##
from twisted.application.reactors import installReactor
print("Trying to install explicitly specified Twisted reactor '%s'" % explicitReactor)
try:
installReactor(explicitReactor)
except Exception as e:
print("Could not install Twisted reactor %s%s" % (explicitReactor, ' ["%s"]' % e if verbose else ''))
sys.exit(1)
else:
## automatically choose optimal reactor
##
if verbose:
print("Automatically choosing optimal Twisted reactor")
install_optimal_reactor(verbose)
## now the reactor is installed, import it
from twisted.internet import reactor
if verbose:
from twisted.python.reflect import qual
print("Running Twisted reactor %s" % qual(reactor.__class__))
return reactor
|
talkincode/toughlib | toughlib/choosereactor.py | install_reactor | python | def install_reactor(explicitReactor=None, verbose=False):
import sys
if explicitReactor:
## install explicitly given reactor
##
from twisted.application.reactors import installReactor
print("Trying to install explicitly specified Twisted reactor '%s'" % explicitReactor)
try:
installReactor(explicitReactor)
except Exception as e:
print("Could not install Twisted reactor %s%s" % (explicitReactor, ' ["%s"]' % e if verbose else ''))
sys.exit(1)
else:
## automatically choose optimal reactor
##
if verbose:
print("Automatically choosing optimal Twisted reactor")
install_optimal_reactor(verbose)
## now the reactor is installed, import it
from twisted.internet import reactor
if verbose:
from twisted.python.reflect import qual
print("Running Twisted reactor %s" % qual(reactor.__class__))
return reactor | Install Twisted reactor.
:param explicitReactor: If provided, install this reactor. Else, install optimal reactor.
:type explicitReactor: obj
:param verbose: If ``True``, print what happens.
:type verbose: bool | train | https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/choosereactor.py#L107-L142 | [
"def install_optimal_reactor(verbose=False):\n \"\"\"\n Try to install the optimal Twisted reactor for platform.\n\n :param verbose: If ``True``, print what happens.\n :type verbose: bool\n \"\"\"\n import sys\n from twisted.python import reflect\n\n ## determine currently installed reactor,... | ###############################################################################
##
## Copyright (C) 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = (
'install_optimal_reactor',
'install_reactor'
)
def install_optimal_reactor(verbose=False):
"""
Try to install the optimal Twisted reactor for platform.
:param verbose: If ``True``, print what happens.
:type verbose: bool
"""
import sys
from twisted.python import reflect
## determine currently installed reactor, if any
##
if 'twisted.internet.reactor' in sys.modules:
current_reactor = reflect.qual(sys.modules['twisted.internet.reactor'].__class__).split('.')[-1]
else:
current_reactor = None
## depending on platform, install optimal reactor
##
if 'bsd' in sys.platform or sys.platform.startswith('darwin'):
## *BSD and MacOSX
##
if current_reactor != 'KQueueReactor':
try:
v = sys.version_info
if v[0] == 1 or (v[0] == 2 and v[1] < 6) or (v[0] == 2 and v[1] == 6 and v[2] < 5):
raise Exception("Python version too old ({0}) to use kqueue reactor".format(sys.version))
from twisted.internet import kqreactor
kqreactor.install()
except Exception as e:
print(
"WARNING: Running on *BSD or MacOSX, but cannot install kqueue Twisted reactor ({0}).".format(e))
else:
if verbose:
print("Running on *BSD or MacOSX and optimal reactor (kqueue) was installed.")
else:
if verbose:
print("Running on *BSD or MacOSX and optimal reactor (kqueue) already installed.")
elif sys.platform in ['win32']:
## Windows
##
if current_reactor != 'IOCPReactor':
try:
from twisted.internet.iocpreactor import reactor as iocpreactor
iocpreactor.install()
except Exception as e:
print("WARNING: Running on Windows, but cannot install IOCP Twisted reactor ({0}).".format(e))
else:
if verbose:
print("Running on Windows and optimal reactor (ICOP) was installed.")
else:
if verbose:
print("Running on Windows and optimal reactor (ICOP) already installed.")
elif sys.platform.startswith('linux'):
## Linux
##
if current_reactor != 'EPollReactor':
try:
from twisted.internet import epollreactor
epollreactor.install()
except Exception as e:
print("WARNING: Running on Linux, but cannot install Epoll Twisted reactor ({0}).".format(e))
else:
if verbose:
print("Running on Linux and optimal reactor (epoll) was installed.")
else:
if verbose:
print("Running on Linux and optimal reactor (epoll) already installed.")
else:
try:
from twisted.internet import default as defaultreactor
defaultreactor.install()
except Exception as e:
print("WARNING: Could not install default Twisted reactor for this platform ({0}).".format(e))
|
talkincode/toughlib | toughlib/paginator.py | Paginator.render | python | def render(self, form_id=None):
'''
动态输出html内容
'''
page_bar = self.page_bars.get(int(self.page / 10))
if page_bar is None:
return ''
_htmls = []
if form_id:
_htmls.append(u'''<script>
function goto_page(form_id,page){
var form=document.getElementById(form_id);
var page_input = document.createElement("input");
page_input.type="hidden";
page_input.name="page";
page_input.value=page;
form.appendChild(page_input);
form.submit();
}</script>''')
_htmls.append('<ul class="pagination pull-right">')
_htmls.append(u'\t<li class="disabled"><a href="#">查询记录数 %s</a></li>' % self.total)
current_start = self.page
if current_start == 1:
_htmls.append(u'\t<li class="disabled"><a href="#">首页</a></li>')
_htmls.append(u'\t<li class="disabled"><a href="#">← 上一页</a></li>')
else:
_htmls.append(u'\t<li><a href="%s">首页</a></li>' % self.url_func(1,form_id))
_htmls.append(u'\t<li><a href="%s">← 上一页</a></li>' % self.url_func(current_start - 1,form_id))
for page in page_bar:
_page_url = self.url_func(page,form_id)
if page == self.page:
_htmls.append(u'\t<li class="active"><span>%s <span class="sr-only">{current}</span></span></li>' % page)
else:
_htmls.append(u'\t<li><a href="%s">%s</a></li>' % (_page_url, page))
current_end = self.page
if current_end == self.page_num:
_htmls.append(u'\t<li class="disabled"><a href="#">下一页 →</a></li>')
_htmls.append(u'\t<li class="disabled"><a href="#">尾页</a></li>')
else:
_htmls.append(u'\t<li><a href="%s">下一页 →</a></li>' % self.url_func(current_end + 1,form_id))
_htmls.append(u'\t<li><a href="%s">尾页</a></li>' % self.url_func(self.page_num,form_id))
_htmls.append('</ul>')
return '\r\n'.join(_htmls) | 动态输出html内容 | train | https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/paginator.py#L26-L75 | null | class Paginator():
"""系统查询分页工具
"""
def __init__(self, url_func, page=1, total=0, page_size=20):
self.url_func = url_func
self.page = 1 if page < 1 else page
self.total = total
self.page_size = page_size
self.page_num = int(math.ceil(self.total / self.page_size)) if self.total > 0 else 0
self.page_bars = {}
self.data = ()
for _page in range(1, self.page_num + 1):
_index = int(_page / 10)
if not self.page_bars.has_key(_index):
self.page_bars[_index] = set([_page])
else:
self.page_bars[_index].add(_page)
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.get_account_history | python | def get_account_history(self, account_id, **kwargs):
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs) | List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
] | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L91-L129 | [
"def _send_paginated_message(self, endpoint, params=None):\n \"\"\" Send API message that results in a paginated response.\n\n The paginated responses are abstracted away by making API requests on\n demand as the response is iterated over.\n\n Paginated API messages support 3 additional parameters: `bef... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
"""Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params)
def get_fills(self, product_id=None, order_id=None, **kwargs):
""" Get a list of recent fills.
As of 8/23/18 - Requests without either order_id or product_id
will be rejected
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Fees are recorded in two stages. Immediately after the matching
engine completes a match, the fill is inserted into our
datastore. Once the fill is recorded, a settlement process will
settle the fill and credit both trading counterparties.
The 'fee' field indicates the fees charged for this fill.
The 'liquidity' field indicates if the fill was the result of a
liquidity provider or liquidity taker. M indicates Maker and T
indicates Taker.
Args:
product_id (str): Limit list to this product_id
order_id (str): Limit list to this order_id
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on fills. Example::
[
{
"trade_id": 74,
"product_id": "BTC-USD",
"price": "10.00",
"size": "0.01",
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"created_at": "2014-11-07T22:19:28.578544Z",
"liquidity": "T",
"fee": "0.00025",
"settled": true,
"side": "buy"
},
{
...
}
]
"""
if (product_id is None) and (order_id is None):
raise ValueError('Either product_id or order_id must be specified.')
params = {}
if product_id:
params['product_id'] = product_id
if order_id:
params['order_id'] = order_id
params.update(kwargs)
return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params))
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
def get_payment_methods(self):
""" Get a list of your payment methods.
Returns:
list: Payment method details.
"""
return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
""" Get a list of your coinbase accounts.
Returns:
list: Coinbase account details.
"""
return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
""" Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
}
"""
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params))
def get_report(self, report_id):
""" Get report status.
Use to query a specific report once it has been requested.
Args:
report_id (str): Report ID
Returns:
dict: Report details, including file url once it is created.
"""
return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
""" Get your 30-day trailing volume for all products.
This is a cached value that's calculated every day at midnight UTC.
Returns:
list: 30-day trailing volumes. Example::
[
{
"product_id": "BTC-USD",
"exchange_volume": "11800.00000000",
"volume": "100.00000000",
"recorded_at": "1973-11-29T00:05:01.123456Z"
},
{
...
}
]
"""
return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.get_account_holds | python | def get_account_holds(self, account_id, **kwargs):
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs) | Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
] | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L131-L174 | [
"def _send_paginated_message(self, endpoint, params=None):\n \"\"\" Send API message that results in a paginated response.\n\n The paginated responses are abstracted away by making API requests on\n demand as the response is iterated over.\n\n Paginated API messages support 3 additional parameters: `bef... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
"""Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params)
def get_fills(self, product_id=None, order_id=None, **kwargs):
""" Get a list of recent fills.
As of 8/23/18 - Requests without either order_id or product_id
will be rejected
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Fees are recorded in two stages. Immediately after the matching
engine completes a match, the fill is inserted into our
datastore. Once the fill is recorded, a settlement process will
settle the fill and credit both trading counterparties.
The 'fee' field indicates the fees charged for this fill.
The 'liquidity' field indicates if the fill was the result of a
liquidity provider or liquidity taker. M indicates Maker and T
indicates Taker.
Args:
product_id (str): Limit list to this product_id
order_id (str): Limit list to this order_id
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on fills. Example::
[
{
"trade_id": 74,
"product_id": "BTC-USD",
"price": "10.00",
"size": "0.01",
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"created_at": "2014-11-07T22:19:28.578544Z",
"liquidity": "T",
"fee": "0.00025",
"settled": true,
"side": "buy"
},
{
...
}
]
"""
if (product_id is None) and (order_id is None):
raise ValueError('Either product_id or order_id must be specified.')
params = {}
if product_id:
params['product_id'] = product_id
if order_id:
params['order_id'] = order_id
params.update(kwargs)
return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params))
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
def get_payment_methods(self):
""" Get a list of your payment methods.
Returns:
list: Payment method details.
"""
return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
""" Get a list of your coinbase accounts.
Returns:
list: Coinbase account details.
"""
return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
""" Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
}
"""
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params))
def get_report(self, report_id):
""" Get report status.
Use to query a specific report once it has been requested.
Args:
report_id (str): Report ID
Returns:
dict: Report details, including file url once it is created.
"""
return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
""" Get your 30-day trailing volume for all products.
This is a cached value that's calculated every day at midnight UTC.
Returns:
list: 30-day trailing volumes. Example::
[
{
"product_id": "BTC-USD",
"exchange_volume": "11800.00000000",
"volume": "100.00000000",
"recorded_at": "1973-11-29T00:05:01.123456Z"
},
{
...
}
]
"""
return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.place_order | python | def place_order(self, product_id, side, order_type, **kwargs):
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params)) | Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
} | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L176-L256 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
"""Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params)
def get_fills(self, product_id=None, order_id=None, **kwargs):
""" Get a list of recent fills.
As of 8/23/18 - Requests without either order_id or product_id
will be rejected
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Fees are recorded in two stages. Immediately after the matching
engine completes a match, the fill is inserted into our
datastore. Once the fill is recorded, a settlement process will
settle the fill and credit both trading counterparties.
The 'fee' field indicates the fees charged for this fill.
The 'liquidity' field indicates if the fill was the result of a
liquidity provider or liquidity taker. M indicates Maker and T
indicates Taker.
Args:
product_id (str): Limit list to this product_id
order_id (str): Limit list to this order_id
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on fills. Example::
[
{
"trade_id": 74,
"product_id": "BTC-USD",
"price": "10.00",
"size": "0.01",
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"created_at": "2014-11-07T22:19:28.578544Z",
"liquidity": "T",
"fee": "0.00025",
"settled": true,
"side": "buy"
},
{
...
}
]
"""
if (product_id is None) and (order_id is None):
raise ValueError('Either product_id or order_id must be specified.')
params = {}
if product_id:
params['product_id'] = product_id
if order_id:
params['order_id'] = order_id
params.update(kwargs)
return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params))
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
def get_payment_methods(self):
""" Get a list of your payment methods.
Returns:
list: Payment method details.
"""
return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
""" Get a list of your coinbase accounts.
Returns:
list: Coinbase account details.
"""
return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
""" Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
}
"""
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params))
def get_report(self, report_id):
""" Get report status.
Use to query a specific report once it has been requested.
Args:
report_id (str): Report ID
Returns:
dict: Report details, including file url once it is created.
"""
return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
""" Get your 30-day trailing volume for all products.
This is a cached value that's calculated every day at midnight UTC.
Returns:
list: 30-day trailing volumes. Example::
[
{
"product_id": "BTC-USD",
"exchange_volume": "11800.00000000",
"volume": "100.00000000",
"recorded_at": "1973-11-29T00:05:01.123456Z"
},
{
...
}
]
"""
return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.buy | python | def buy(self, product_id, order_type, **kwargs):
return self.place_order(product_id, 'buy', order_type, **kwargs) | Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example. | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L258-L276 | [
"def place_order(self, product_id, side, order_type, **kwargs):\n \"\"\" Place an order.\n\n The three order types (limit, market, and stop) can be placed using this\n method. Specific methods are provided for each order type, but if a\n more generic interface is desired this method is available.\n\n ... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
"""Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
def get_orders(self, product_id=None, status=None, **kwargs):
    """List current open (un-settled) orders as a paginated generator.

    Args:
        product_id (Optional[str]): Only list orders for this product.
        status (Optional[list/str]): Restrict to these statuses; 'all'
            returns every status. Default server-side filter is
            ['open', 'pending', 'active'].
        kwargs: Additional HTTP request parameters.

    Returns:
        generator(list): Order records.
    """
    query = dict(kwargs)
    for key, value in (('product_id', product_id), ('status', status)):
        if value is not None:
            query[key] = value
    return self._send_paginated_message('/orders', params=query)
def get_fills(self, product_id=None, order_id=None, **kwargs):
    """List recent fills as a paginated generator.

    The API rejects requests that specify neither order_id nor
    product_id, so at least one is mandatory here as well.

    Args:
        product_id (str): Limit list to this product_id.
        order_id (str): Limit list to this order_id.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Fill records (price, size, fee, liquidity, ...).

    Raises:
        ValueError: if both product_id and order_id are omitted.
    """
    if product_id is None and order_id is None:
        raise ValueError('Either product_id or order_id must be specified.')
    query = {key: value
             for key, value in (('product_id', product_id),
                                ('order_id', order_id))
             if value}
    query.update(kwargs)
    return self._send_paginated_message('/fills', params=query)
def get_fundings(self, status=None, **kwargs):
    """List margin funding records as a paginated generator.

    Args:
        status (list/str): Limit records to these statuses
            ('outstanding', 'settled', 'rejected').
        kwargs (dict): Additional HTTP request parameters
            (a 'status' key here overrides the status argument).

    Returns:
        generator(list): Funding records.
    """
    query = {} if status is None else {'status': status}
    query.update(kwargs)
    return self._send_paginated_message('/funding', params=query)
def repay_funding(self, amount, currency):
    """Repay margin funding; oldest funding records are repaid first.

    Args:
        amount (int): Amount of currency to repay.
        currency (str): The currency, e.g. 'USD'.

    Returns:
        Not specified by cbpro.
    """
    payload = {'amount': amount, 'currency': currency}
    return self._send_message('post', '/funding/repay',
                              data=json.dumps(payload))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
                    amount):
    """Move funds between the standard profile and a margin profile.

    Args:
        margin_profile_id (str): Margin profile to deposit to / withdraw from.
        transfer_type (str): 'deposit' or 'withdraw'.
        currency (str): Currency to transfer (e.g. 'USD').
        amount (Decimal): Amount to transfer.

    Returns:
        dict: Transfer details (id, status, nonce, ...).
    """
    payload = {
        'margin_profile_id': margin_profile_id,
        'type': transfer_type,
        'currency': currency,
        'amount': amount,
    }
    return self._send_message('post', '/profiles/margin-transfer',
                              data=json.dumps(payload))
def get_position(self):
    """Return an overview of this margin profile.

    Returns:
        dict: Details about funding, accounts, and margin call.
    """
    endpoint = '/position'
    return self._send_message('get', endpoint)
def close_position(self, repay_only):
    """Close the margin position.

    Args:
        repay_only (bool): Undocumented by cbpro.

    Returns:
        Undocumented.
    """
    body = json.dumps({'repay_only': repay_only})
    return self._send_message('post', '/position/close', data=body)
def deposit(self, amount, currency, payment_method_id):
    """Deposit funds from a payment method.

    See get_payment_methods() for available payment methods.

    Args:
        amount (Decimal): The amount to deposit.
        currency (str): The type of currency.
        payment_method_id (str): ID of the payment method.

    Returns:
        dict: Deposit details (id, amount, currency, payout_at).
    """
    payload = {
        'amount': amount,
        'currency': currency,
        'payment_method_id': payment_method_id,
    }
    return self._send_message('post', '/deposits/payment-method',
                              data=json.dumps(payload))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
    """Deposit funds from a Coinbase account (instant and free,
    within daily limits).

    See get_coinbase_accounts() for account ids.

    Args:
        amount (Decimal): The amount to deposit.
        currency (str): The type of currency.
        coinbase_account_id (str): ID of the coinbase account.

    Returns:
        dict: Deposit details (id, amount, currency).
    """
    payload = {
        'amount': amount,
        'currency': currency,
        'coinbase_account_id': coinbase_account_id,
    }
    return self._send_message('post', '/deposits/coinbase-account',
                              data=json.dumps(payload))
def withdraw(self, amount, currency, payment_method_id):
    """Withdraw funds to a payment method.

    See get_payment_methods() for available payment methods.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): Currency type (e.g. 'BTC').
        payment_method_id (str): ID of the payment method.

    Returns:
        dict: Withdraw details (id, amount, currency, payout_at).
    """
    payload = {
        'amount': amount,
        'currency': currency,
        'payment_method_id': payment_method_id,
    }
    return self._send_message('post', '/withdrawals/payment-method',
                              data=json.dumps(payload))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
    """Withdraw funds to a Coinbase account (instant and free,
    within daily limits).

    See get_coinbase_accounts() for account ids.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): The type of currency (e.g. 'BTC').
        coinbase_account_id (str): ID of the coinbase account.

    Returns:
        dict: Withdraw details (id, amount, currency).
    """
    payload = {
        'amount': amount,
        'currency': currency,
        'coinbase_account_id': coinbase_account_id,
    }
    return self._send_message('post', '/withdrawals/coinbase-account',
                              data=json.dumps(payload))
def crypto_withdraw(self, amount, currency, crypto_address):
    """Withdraw funds to an external crypto address.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): The type of currency (e.g. 'BTC').
        crypto_address (str): Crypto address to withdraw to.

    Returns:
        dict: Withdraw details (id, amount, currency).
    """
    payload = {
        'amount': amount,
        'currency': currency,
        'crypto_address': crypto_address,
    }
    return self._send_message('post', '/withdrawals/crypto',
                              data=json.dumps(payload))
def get_payment_methods(self):
    """List the account's payment methods.

    Returns:
        list: Payment method details.
    """
    endpoint = '/payment-methods'
    return self._send_message('get', endpoint)
def get_coinbase_accounts(self):
    """List the user's linked Coinbase accounts.

    Returns:
        list: Coinbase account details.
    """
    endpoint = '/coinbase-accounts'
    return self._send_message('get', endpoint)
def create_report(self, report_type, start_date, end_date, product_id=None,
                  account_id=None, report_format='pdf', email=None):
    """Create a report of historic account information.

    The report is generated when resources are available; poll its
    status with `get_report(report_id)`.

    Args:
        report_type (str): 'fills' or 'account'.
        start_date (str): Starting date for the report in ISO 8601.
        end_date (str): Ending date for the report in ISO 8601.
        product_id (Optional[str]): Product to generate a fills report
            for. Required if report_type is 'fills'.
        account_id (Optional[str]): Account to generate an account
            report for. Required if report_type is 'account'.
        report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
        email (Optional[str]): Email address to send the report to.

    Returns:
        dict: Report details, including id, status and (once ready)
            file_url.

    Raises:
        ValueError: if the id required for the chosen report_type is
            missing.
    """
    # Fail fast on the documented per-type requirements instead of
    # letting the server reject the request (mirrors get_fills, which
    # also validates client-side).
    if report_type == 'fills' and product_id is None:
        raise ValueError("product_id is required for 'fills' reports.")
    if report_type == 'account' and account_id is None:
        raise ValueError("account_id is required for 'account' reports.")
    params = {'type': report_type,
              'start_date': start_date,
              'end_date': end_date,
              'format': report_format}
    if product_id is not None:
        params['product_id'] = product_id
    if account_id is not None:
        params['account_id'] = account_id
    if email is not None:
        params['email'] = email
    return self._send_message('post', '/reports',
                              data=json.dumps(params))
def get_report(self, report_id):
    """Query the status of a previously requested report.

    Args:
        report_id (str): Report ID.

    Returns:
        dict: Report details, including file url once it is created.
    """
    return self._send_message('get', '/reports/{}'.format(report_id))
def get_trailing_volume(self):
    """Get the 30-day trailing volume for all products.

    This is a cached value recalculated daily at midnight UTC.

    Returns:
        list: Per-product 30-day trailing volumes.
    """
    endpoint = '/users/self/trailing-volume'
    return self._send_message('get', endpoint)
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.sell | python | def sell(self, product_id, order_type, **kwargs):
return self.place_order(product_id, 'sell', order_type, **kwargs) | Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example. | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L278-L296 | [
"def place_order(self, product_id, side, order_type, **kwargs):\n \"\"\" Place an order.\n\n The three order types (limit, market, and stop) can be placed using this\n method. Specific methods are provided for each order type, but if a\n more generic interface is desired this method is available.\n\n ... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
             api_url="https://api.pro.coinbase.com"):
    """ Create an instance of the AuthenticatedClient class.
    Args:
        key (str): Your API key.
        b64secret (str): The secret key matching your API key.
        passphrase (str): Passphrase chosen when setting up key.
        api_url (Optional[str]): API URL. Defaults to cbpro API.
    """
    # Initialize the PublicClient base with the chosen API URL.
    super(AuthenticatedClient, self).__init__(api_url)
    # Auth handler that signs every request with key/secret/passphrase.
    self.auth = CBProAuth(key, b64secret, passphrase)
    # Persistent HTTP connection reused across requests.
    self.session = requests.Session()
def get_account(self, account_id):
    """Get information for a single trading account.

    Args:
        account_id (str): Account id for the account you want to get.

    Returns:
        dict: Account information (id, balance, holds, available,
            currency).
    """
    return self._send_message('get', '/accounts/{}'.format(account_id))
def get_accounts(self):
    """Get a list of all trading accounts.

    When you place an order, the funds for the order are placed on
    hold and cannot be used or withdrawn until the order is filled or
    canceled; each account's response includes its hold amount.

    Returns:
        list: Info about all accounts (margin accounts include
            additional fields).
    """
    # Call the documented '/accounts' endpoint directly rather than
    # the previous self.get_account('') hack, which reached the same
    # listing through '/accounts/' string concatenation.
    return self._send_message('get', '/accounts')
def get_account_history(self, account_id, **kwargs):
    """List account activity (ledger) as a paginated generator.

    Entry types: 'transfer' (Coinbase <-> cbpro), 'match' (trade),
    'fee', and 'rebate'. Trade-related entries carry order/trade
    details in their 'details' field.

    Args:
        account_id (str): Account id to get history of.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Ledger entries for the account.
    """
    return self._send_paginated_message(
        '/accounts/' + account_id + '/ledger', params=kwargs)
def get_account_holds(self, account_id, **kwargs):
    """List holds on an account as a paginated generator.

    Holds exist for active orders ('order' type) and pending
    withdrawals ('transfer' type); the 'ref' field names the order or
    transfer that created the hold. Holds are updated as orders fill
    and removed on cancel/completion.

    Args:
        account_id (str): Account id to get holds of.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Hold records for the account.
    """
    return self._send_paginated_message(
        '/accounts/' + account_id + '/holds', params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
    """Place an order of any supported type.

    Generic entry point for limit, market, and stop orders; the
    type-specific helpers (place_limit_order etc.) funnel into it.

    Args:
        product_id (str): Product to order (e.g. 'BTC-USD').
        side (str): 'buy' or 'sell'.
        order_type (str): 'limit', 'market', or 'stop'.
        **kwargs: Type-specific arguments (price, size, funds,
            time_in_force, post_only, client_oid, stp,
            overdraft_enabled, funding_amount, ...).

    Returns:
        dict: Order details as returned by the exchange.

    Raises:
        ValueError: on invalid argument combinations (see below).
    """
    # Margin funding may come from overdraft OR an explicit amount,
    # never both.
    if kwargs.get('overdraft_enabled') is not None \
            and kwargs.get('funding_amount') is not None:
        raise ValueError('Margin funding must be specified through use of '
                         'overdraft or by setting a funding amount, but not'
                         ' both')

    if order_type == 'limit':
        tif = kwargs.get('time_in_force')
        # cancel_after only makes sense for good-till-time orders.
        if kwargs.get('cancel_after') is not None and tif != 'GTT':
            raise ValueError('May only specify a cancel period when time '
                             'in_force is `GTT`')
        # Immediate-or-cancel / fill-or-kill orders cannot be post-only.
        if kwargs.get('post_only') is not None and tif in ('IOC', 'FOK'):
            raise ValueError('post_only is invalid when time in force is '
                             '`IOC` or `FOK`')

    if order_type in ('market', 'stop'):
        # Exactly one of size/funds must be given (xor).
        has_size = kwargs.get('size') is not None
        has_funds = kwargs.get('funds') is not None
        if has_size == has_funds:
            raise ValueError('Either `size` or `funds` must be specified '
                             'for market/stop orders (but not both).')

    order = {'product_id': product_id, 'side': side, 'type': order_type}
    order.update(kwargs)
    return self._send_message('post', '/orders', data=json.dumps(order))
def buy(self, product_id, order_type, **kwargs):
    """Place a buy order (backwards-compatible convenience wrapper).

    Prefer the type-specific methods (place_limit_order,
    place_market_order, place_stop_order) for richer signatures.

    Args:
        product_id (str): Product to order (e.g. 'BTC-USD').
        order_type (str): 'limit', 'market', or 'stop'.
        **kwargs: Type-specific order arguments.

    Returns:
        dict: Order details. See `place_order` for example.
    """
    return self.place_order(product_id, 'buy', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
                      client_oid=None,
                      stp=None,
                      time_in_force=None,
                      cancel_after=None,
                      post_only=None,
                      overdraft_enabled=None,
                      funding_amount=None):
    """Place a limit order.

    Args:
        product_id (str): Product to order (e.g. 'BTC-USD').
        side (str): 'buy' or 'sell'.
        price (Decimal): Price per cryptocurrency.
        size (Decimal): Amount of cryptocurrency to buy or sell.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag.
        time_in_force (Optional[str]): 'GTC', 'GTT', 'IOC', or 'FOK'.
        cancel_after (Optional[str]): 'min'/'hour'/'day' for 'GTT'.
        post_only (Optional[bool]): Maker-only flag.
        overdraft_enabled (Optional[bool]): Margin overdraft funding.
        funding_amount (Optional[Decimal]): Explicit margin funding;
            mutually exclusive with overdraft_enabled.

    Returns:
        dict: Order details. See `place_order` for example.
    """
    supplied = {'product_id': product_id,
                'side': side,
                'order_type': 'limit',
                'price': price,
                'size': size,
                'client_oid': client_oid,
                'stp': stp,
                'time_in_force': time_in_force,
                'cancel_after': cancel_after,
                'post_only': post_only,
                'overdraft_enabled': overdraft_enabled,
                'funding_amount': funding_amount}
    # Drop unspecified optionals before forwarding.
    return self.place_order(
        **{name: value for name, value in supplied.items()
           if value is not None})
def place_market_order(self, product_id, side, size=None, funds=None,
                       client_oid=None,
                       stp=None,
                       overdraft_enabled=None,
                       funding_amount=None):
    """Place a market order.

    Args:
        product_id (str): Product to order (e.g. 'BTC-USD').
        side (str): 'buy' or 'sell'.
        size (Optional[Decimal]): Amount in crypto (specify this or
            `funds`).
        funds (Optional[Decimal]): Amount of quote currency to use
            (specify this or `size`).
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag.
        overdraft_enabled (Optional[bool]): Margin overdraft funding.
        funding_amount (Optional[Decimal]): Explicit margin funding;
            mutually exclusive with overdraft_enabled.

    Returns:
        dict: Order details. See `place_order` for example.
    """
    supplied = {'product_id': product_id,
                'side': side,
                'order_type': 'market',
                'size': size,
                'funds': funds,
                'client_oid': client_oid,
                'stp': stp,
                'overdraft_enabled': overdraft_enabled,
                'funding_amount': funding_amount}
    # Drop unspecified optionals before forwarding.
    return self.place_order(
        **{name: value for name, value in supplied.items()
           if value is not None})
def place_stop_order(self, product_id, side, price, size=None, funds=None,
                     client_oid=None,
                     stp=None,
                     overdraft_enabled=None,
                     funding_amount=None):
    """Place a stop order.

    Args:
        product_id (str): Product to order (e.g. 'BTC-USD').
        side (str): 'buy' or 'sell'.
        price (Decimal): Price at which the stop order triggers.
        size (Optional[Decimal]): Amount in crypto (specify this or
            `funds`).
        funds (Optional[Decimal]): Amount of quote currency to use
            (specify this or `size`).
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag.
        overdraft_enabled (Optional[bool]): Margin overdraft funding.
        funding_amount (Optional[Decimal]): Explicit margin funding;
            mutually exclusive with overdraft_enabled.

    Returns:
        dict: Order details. See `place_order` for example.
    """
    supplied = {'product_id': product_id,
                'side': side,
                'price': price,
                'order_type': 'stop',
                'size': size,
                'funds': funds,
                'client_oid': client_oid,
                'stp': stp,
                'overdraft_enabled': overdraft_enabled,
                'funding_amount': funding_amount}
    # Drop unspecified optionals before forwarding.
    return self.place_order(
        **{name: value for name, value in supplied.items()
           if value is not None})
def cancel_order(self, order_id):
    """Cancel a previously placed order by its server-assigned id.

    If the order never matched, its record may be purged and
    get_order(order_id) will no longer find it. If it cannot be
    canceled (already filled/canceled), the error response explains
    why. Note: this takes the server order id, not client_oid.

    Args:
        order_id (str): The order_id of the order you want to cancel.

    Returns:
        list: Containing the order_id of the cancelled order.
    """
    return self._send_message('delete', '/orders/{}'.format(order_id))
def cancel_all(self, product_id=None):
    """Best-effort cancellation of every open order.

    Args:
        product_id (Optional[str]): Only cancel orders for this product.

    Returns:
        list: ids of the canceled orders.
    """
    query = None if product_id is None else {'product_id': product_id}
    return self._send_message('delete', '/orders', params=query)
def get_order(self, order_id):
    """Fetch a single order by its server-assigned id.

    A canceled order that never matched may return a 404. Open orders
    may change state between request and response.

    Args:
        order_id (str): The order to get information of.

    Returns:
        dict: Order details (price, size, status, fills, ...).
    """
    return self._send_message('get', '/orders/{}'.format(order_id))
def get_orders(self, product_id=None, status=None, **kwargs):
    """List current open (un-settled) orders as a paginated generator.

    Args:
        product_id (Optional[str]): Only list orders for this product.
        status (Optional[list/str]): Restrict to these statuses; 'all'
            returns every status. Default server-side filter is
            ['open', 'pending', 'active'].
        kwargs: Additional HTTP request parameters.

    Returns:
        generator(list): Order records.
    """
    query = dict(kwargs)
    if product_id is not None:
        query['product_id'] = product_id
    if status is not None:
        query['status'] = status
    return self._send_paginated_message('/orders', params=query)
def get_fills(self, product_id=None, order_id=None, **kwargs):
    """List recent fills as a paginated generator.

    The API rejects requests that specify neither order_id nor
    product_id, so at least one is mandatory here as well.

    Args:
        product_id (str): Limit list to this product_id.
        order_id (str): Limit list to this order_id.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Fill records (price, size, fee, liquidity, ...).

    Raises:
        ValueError: if both product_id and order_id are omitted.
    """
    if product_id is None and order_id is None:
        raise ValueError('Either product_id or order_id must be specified.')
    query = {}
    if product_id:
        query['product_id'] = product_id
    if order_id:
        query['order_id'] = order_id
    query.update(kwargs)
    return self._send_paginated_message('/fills', params=query)
def get_fundings(self, status=None, **kwargs):
    """List margin funding records as a paginated generator.

    Args:
        status (list/str): Limit records to these statuses
            ('outstanding', 'settled', 'rejected').
        kwargs (dict): Additional HTTP request parameters
            (a 'status' key here overrides the status argument).

    Returns:
        generator(list): Funding records.
    """
    query = {'status': status} if status is not None else {}
    query.update(kwargs)
    return self._send_paginated_message('/funding', params=query)
def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params))
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
def get_payment_methods(self):
""" Get a list of your payment methods.
Returns:
list: Payment method details.
"""
return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
""" Get a list of your coinbase accounts.
Returns:
list: Coinbase account details.
"""
return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
""" Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
}
"""
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params))
def get_report(self, report_id):
""" Get report status.
Use to query a specific report once it has been requested.
Args:
report_id (str): Report ID
Returns:
dict: Report details, including file url once it is created.
"""
return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
""" Get your 30-day trailing volume for all products.
This is a cached value that's calculated every day at midnight UTC.
Returns:
list: 30-day trailing volumes. Example::
[
{
"product_id": "BTC-USD",
"exchange_volume": "11800.00000000",
"volume": "100.00000000",
"recorded_at": "1973-11-29T00:05:01.123456Z"
},
{
...
}
]
"""
return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.place_limit_order | python | def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params) | Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example. | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L298-L352 | [
"def place_order(self, product_id, side, order_type, **kwargs):\n \"\"\" Place an order.\n\n The three order types (limit, market, and stop) can be placed using this\n method. Specific methods are provided for each order type, but if a\n more generic interface is desired this method is available.\n\n ... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params)
def get_fills(self, product_id=None, order_id=None, **kwargs):
""" Get a list of recent fills.
As of 8/23/18 - Requests without either order_id or product_id
will be rejected
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Fees are recorded in two stages. Immediately after the matching
engine completes a match, the fill is inserted into our
datastore. Once the fill is recorded, a settlement process will
settle the fill and credit both trading counterparties.
The 'fee' field indicates the fees charged for this fill.
The 'liquidity' field indicates if the fill was the result of a
liquidity provider or liquidity taker. M indicates Maker and T
indicates Taker.
Args:
product_id (str): Limit list to this product_id
order_id (str): Limit list to this order_id
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on fills. Example::
[
{
"trade_id": 74,
"product_id": "BTC-USD",
"price": "10.00",
"size": "0.01",
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"created_at": "2014-11-07T22:19:28.578544Z",
"liquidity": "T",
"fee": "0.00025",
"settled": true,
"side": "buy"
},
{
...
}
]
"""
if (product_id is None) and (order_id is None):
raise ValueError('Either product_id or order_id must be specified.')
params = {}
if product_id:
params['product_id'] = product_id
if order_id:
params['order_id'] = order_id
params.update(kwargs)
return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params))
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
    """ Withdraw funds to a coinbase account.

    You can move funds between your Coinbase accounts and your cbpro
    trading accounts within your daily limits. Moving funds between
    Coinbase and cbpro is instant and free.

    See AuthenticatedClient.get_coinbase_accounts() to receive
    information regarding your coinbase_accounts.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): The type of currency (eg. 'BTC')
        coinbase_account_id (str): ID of the coinbase account.

    Returns:
        dict: Information about the withdrawal. Example::
            {
                "id":"593533d2-ff31-46e0-b22e-ca754147a96a",
                "amount":"10.00",
                "currency": "BTC",
            }
    """
    params = {'amount': amount,
              'currency': currency,
              'coinbase_account_id': coinbase_account_id}
    return self._send_message('post', '/withdrawals/coinbase-account',
                              data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
    """Withdraw funds directly to a crypto address.

    Args:
        amount (Decimal): Amount to withdraw.
        currency (str): Currency code (eg. 'BTC').
        crypto_address (str): Destination on-chain address.

    Returns:
        dict: Withdrawal details (``id``, ``amount``, ``currency``).
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'crypto_address': crypto_address})
    return self._send_message('post', '/withdrawals/crypto', data=body)
def get_payment_methods(self):
    """Return the payment methods linked to this profile.

    Returns:
        list: One dict of details per payment method.
    """
    return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
    """Return the Coinbase (non-trading) accounts linked to this user.

    Returns:
        list: One dict of details per Coinbase account.
    """
    return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
                  account_id=None, report_format='pdf', email=None):
    """ Create report of historic information about your account.

    The report will be generated when resources are available. Report status
    can be queried via `get_report(report_id)`.

    Args:
        report_type (str): 'fills' or 'account'
        start_date (str): Starting date for the report in ISO 8601
        end_date (str): Ending date for the report in ISO 8601
        product_id (Optional[str]): ID of the product to generate a fills
            report for. Required if report_type is 'fills'.
        account_id (Optional[str]): ID of the account to generate an account
            report for. Required if report_type is 'account'.
        report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
        email (Optional[str]): Email address to send the report to.

    Returns:
        dict: Report details, including ``id``, ``status``,
        ``created_at``/``expires_at`` timestamps, and (once generated)
        ``file_url``.
    """
    params = {'type': report_type,
              'start_date': start_date,
              'end_date': end_date,
              'format': report_format}
    # Optional fields are only sent when provided.
    if product_id is not None:
        params['product_id'] = product_id
    if account_id is not None:
        params['account_id'] = account_id
    if email is not None:
        params['email'] = email
    return self._send_message('post', '/reports',
                              data=json.dumps(params))
def get_report(self, report_id):
    """Query the status of a previously requested report.

    Args:
        report_id (str): Report ID returned by ``create_report``.

    Returns:
        dict: Report details, including the file url once generated.
    """
    endpoint = '/reports/' + report_id
    return self._send_message('get', endpoint)
def get_trailing_volume(self):
    """Return your 30-day trailing volume for all products.

    The server recomputes this cached value daily at midnight UTC.

    Returns:
        list: One dict per product with ``product_id``,
        ``exchange_volume``, ``volume`` and ``recorded_at``.
    """
    return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.place_market_order | python | def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params) | Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example. | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L354-L393 | [
"def place_order(self, product_id, side, order_type, **kwargs):\n \"\"\" Place an order.\n\n The three order types (limit, market, and stop) can be placed using this\n method. Specific methods are provided for each order type, but if a\n more generic interface is desired this method is available.\n\n ... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
             api_url="https://api.pro.coinbase.com"):
    """Create an authenticated client bound to *api_url*.

    Args:
        key (str): Your API key.
        b64secret (str): Secret matching the API key.
        passphrase (str): Passphrase chosen when creating the key.
        api_url (Optional[str]): API endpoint; defaults to the live API.
    """
    super(AuthenticatedClient, self).__init__(api_url)
    # One persistent HTTP session; auth signs each request.
    self.session = requests.Session()
    self.auth = CBProAuth(key, b64secret, passphrase)
def get_account(self, account_id):
    """Fetch a single trading account by id.

    Args:
        account_id (str): Id of the account to fetch.

    Returns:
        dict: Account fields such as ``id``, ``balance``, ``holds``,
        ``available`` and ``currency``.
    """
    endpoint = '/accounts/' + account_id
    return self._send_message('get', endpoint)
def get_accounts(self):
    """List all trading accounts.

    Funds backing open orders or pending withdrawals are reported in
    each account's ``hold`` field and remain unavailable until the
    order fills or is canceled.

    Returns:
        list: One dict per account (``id``, ``currency``, ``balance``,
        ``available``, ``hold``, ``profile_id``; margin accounts carry
        extra fields).
    """
    # The list endpoint is the single-account endpoint with an empty id.
    return self.get_account('')
def get_account_history(self, account_id, **kwargs):
    """List activity (ledger entries) for an account.

    Entry ``type`` is one of 'transfer', 'match', 'fee', or 'rebate';
    trade-related entries carry extra info in ``details``.

    Args:
        account_id (str): Account to fetch the ledger for.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        list: Ledger entries (paginated generator; may issue multiple
        HTTP requests while iterated).
    """
    ledger_endpoint = '/accounts/{}/ledger'.format(account_id)
    return self._send_paginated_message(ledger_endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
    """List holds placed on an account.

    Holds back open orders ('order' type) or pending withdrawals
    ('transfer' type); ``ref`` is the id of the order/transfer that
    created the hold. Holds shrink as orders fill and disappear when
    the order is canceled or the withdrawal completes.

    Args:
        account_id (str): Account to fetch holds for.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Hold records (paginated; may issue multiple
        HTTP requests while iterated).
    """
    holds_endpoint = '/accounts/{}/holds'.format(account_id)
    return self._send_paginated_message(holds_endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
    """Place a limit, market, or stop order.

    Generic entry point behind the type-specific helpers
    (``place_limit_order``, ``place_market_order``,
    ``place_stop_order``).

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): 'buy' or 'sell'
        order_type (str): 'limit', 'market', or 'stop'
        **client_oid (str): Client-selected order UUID, broadcast in
            the public feed for `received` messages.
        **stp (str): Self-trade prevention flag: 'dc' (default), 'co',
            'cn', or 'cb'.
        **overdraft_enabled (Optional[bool]): Fund any overage from
            margin.
        **funding_amount (Optional[Decimal]): Explicit margin funding;
            mutually exclusive with `overdraft_enabled`.
        **kwargs: Further order-type-specific fields; see the
            limit/market/stop helper methods.

    Returns:
        dict: Order details as returned by the API.

    Raises:
        ValueError: On conflicting margin, limit, or market/stop
            parameter combinations.
    """
    # Margin funding comes from overdraft OR an explicit amount, never both.
    if kwargs.get('overdraft_enabled') is not None and \
            kwargs.get('funding_amount') is not None:
        raise ValueError('Margin funding must be specified through use of '
                         'overdraft or by setting a funding amount, but not'
                         ' both')
    if order_type == 'limit':
        if kwargs.get('cancel_after') is not None and \
                kwargs.get('time_in_force') != 'GTT':
            raise ValueError('May only specify a cancel period when time '
                             'in_force is `GTT`')
        if kwargs.get('post_only') is not None and \
                kwargs.get('time_in_force') in ('IOC', 'FOK'):
            raise ValueError('post_only is invalid when time in force is '
                             '`IOC` or `FOK`')
    if order_type in ('market', 'stop'):
        # Exactly one of size/funds must be supplied.
        if (kwargs.get('size') is None) == (kwargs.get('funds') is None):
            raise ValueError('Either `size` or `funds` must be specified '
                             'for market/stop orders (but not both).')
    payload = {'product_id': product_id,
               'side': side,
               'type': order_type}
    payload.update(kwargs)
    return self._send_message('post', '/orders', data=json.dumps(payload))
def buy(self, product_id, order_type, **kwargs):
    """Place a buy order (legacy convenience wrapper).

    Kept for backwards compatibility; prefer the order-type-specific
    methods (``place_limit_order`` etc.) for richer signatures.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        order_type (str): 'limit', 'market', or 'stop'
        **kwargs: Order-type-specific fields.

    Returns:
        dict: Order details. See ``place_order``.
    """
    return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
    """Place a sell order (legacy convenience wrapper).

    Kept for backwards compatibility; prefer the order-type-specific
    methods (``place_limit_order`` etc.) for richer signatures.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        order_type (str): 'limit', 'market', or 'stop'
        **kwargs: Order-type-specific fields.

    Returns:
        dict: Order details. See ``place_order``.
    """
    return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
                      client_oid=None,
                      stp=None,
                      time_in_force=None,
                      cancel_after=None,
                      post_only=None,
                      overdraft_enabled=None,
                      funding_amount=None):
    """Place a limit order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): 'buy' or 'sell'
        price (Decimal): Price per unit of cryptocurrency.
        size (Decimal): Amount of cryptocurrency to buy or sell.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag; see
            ``place_order``.
        time_in_force (Optional[str]): 'GTC', 'GTT', 'IOC', or 'FOK'.
        cancel_after (Optional[str]): 'min'/'hour'/'day'; only valid
            with time_in_force='GTT'.
        post_only (Optional[bool]): Reject the order instead of taking
            any liquidity.
        overdraft_enabled (Optional[bool]): Fund any overage from
            margin.
        funding_amount (Optional[Decimal]): Explicit margin funding;
            mutually exclusive with `overdraft_enabled`.

    Returns:
        dict: Order details. See ``place_order``.
    """
    spec = {'product_id': product_id,
            'side': side,
            'order_type': 'limit',
            'price': price,
            'size': size,
            'client_oid': client_oid,
            'stp': stp,
            'time_in_force': time_in_force,
            'cancel_after': cancel_after,
            'post_only': post_only,
            'overdraft_enabled': overdraft_enabled,
            'funding_amount': funding_amount}
    # Drop unset optionals so they are omitted from the request body.
    spec = {k: v for k, v in spec.items() if v is not None}
    return self.place_order(**spec)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
                     client_oid=None,
                     stp=None,
                     overdraft_enabled=None,
                     funding_amount=None):
    """Place a stop order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): 'buy' or 'sell'
        price (Decimal): Price at which the stop triggers.
        size (Optional[Decimal]): Amount in crypto; give this or
            `funds`.
        funds (Optional[Decimal]): Amount of quote currency; give this
            or `size`.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag; see
            ``place_order``.
        overdraft_enabled (Optional[bool]): Fund any overage from
            margin.
        funding_amount (Optional[Decimal]): Explicit margin funding;
            mutually exclusive with `overdraft_enabled`.

    Returns:
        dict: Order details. See ``place_order``.
    """
    spec = {'product_id': product_id,
            'side': side,
            'price': price,
            'order_type': 'stop',
            'size': size,
            'funds': funds,
            'client_oid': client_oid,
            'stp': stp,
            'overdraft_enabled': overdraft_enabled,
            'funding_amount': funding_amount}
    # Drop unset optionals so they are omitted from the request body.
    spec = {k: v for k, v in spec.items() if v is not None}
    return self.place_order(**spec)
def cancel_order(self, order_id):
    """Cancel a previously placed order.

    Orders with no matches may be purged after cancellation, so
    ``get_order`` can 404 for them afterwards. A failure to cancel
    (already filled, already canceled, ...) is reported in the error
    response's message field.

    Caution: pass the server-assigned order id, not ``client_oid``.

    Args:
        order_id (str): Server-assigned id of the order to cancel.

    Returns:
        list: Containing the id of the canceled order.
    """
    endpoint = '/orders/' + order_id
    return self._send_message('delete', endpoint)
def cancel_all(self, product_id=None):
    """Best-effort cancellation of all open orders.

    Args:
        product_id (Optional[str]): Restrict cancellation to this
            product.

    Returns:
        list: Ids of the canceled orders.
    """
    query = {'product_id': product_id} if product_id is not None else None
    return self._send_message('delete', '/orders', params=query)
def get_order(self, order_id):
    """Fetch a single order by server-assigned id.

    Canceled orders with no matches may have been purged, yielding a
    404. Open orders can change state between request and response.

    Args:
        order_id (str): Order to fetch.

    Returns:
        dict: Order fields such as ``price``, ``size``, ``status``,
        ``filled_size`` and timestamps.
    """
    endpoint = '/orders/' + order_id
    return self._send_message('get', endpoint)
def get_orders(self, product_id=None, status=None, **kwargs):
    """List your current open orders (paginated generator).

    Only open/un-settled orders appear by default; once an order is
    done and settled it drops out of the default listing. For
    high-volume trading, keep your own order book updated from the
    streaming feeds and use this endpoint only to seed initial state.

    Args:
        product_id (Optional[str]): Restrict to this product.
        status (Optional[list/str]): Status(es) to include: 'open',
            'pending', 'active', 'done', 'settled', or 'all'.
            Default: ['open', 'pending', 'active'].

    Returns:
        list: Order records (may issue multiple HTTP requests while
        iterated).
    """
    query = dict(kwargs)
    if product_id is not None:
        query['product_id'] = product_id
    if status is not None:
        query['status'] = status
    return self._send_paginated_message('/orders', params=query)
def get_fills(self, product_id=None, order_id=None, **kwargs):
    """List recent fills (paginated generator).

    The API rejects requests without either ``order_id`` or
    ``product_id``. Each fill reports its ``fee`` and a ``liquidity``
    flag ('M' maker / 'T' taker); settlement happens after matching.

    Args:
        product_id (str): Restrict to this product.
        order_id (str): Restrict to this order.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        list: Fill records (may issue multiple HTTP requests while
        iterated).

    Raises:
        ValueError: If neither product_id nor order_id is given.
    """
    if product_id is None and order_id is None:
        raise ValueError('Either product_id or order_id must be specified.')
    query = {}
    if product_id:
        query['product_id'] = product_id
    if order_id:
        query['order_id'] = order_id
    query.update(kwargs)
    return self._send_paginated_message('/fills', params=query)
def get_fundings(self, status=None, **kwargs):
    """List margin funding records (paginated generator).

    Every margin order that draws funding creates a record.

    Args:
        status (list/str): Restrict to these statuses:
            'outstanding', 'settled', 'rejected'.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        list: Funding records (may issue multiple HTTP requests while
        iterated).
    """
    query = {}
    if status is not None:
        query['status'] = status
    query.update(kwargs)
    return self._send_paginated_message('/funding', params=query)
def repay_funding(self, amount, currency):
    """Repay margin funding; the oldest funding records settle first.

    Args:
        amount (int): Amount of currency to repay.
        currency (str): Currency code, e.g. 'USD'.

    Returns:
        Response payload (not documented by cbpro).
    """
    body = {'amount': amount, 'currency': currency}
    return self._send_message('post', '/funding/repay',
                              data=json.dumps(body))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
                    amount):
    """Move funds between the standard profile and a margin profile.

    Args:
        margin_profile_id (str): Margin profile to deposit to or
            withdraw from.
        transfer_type (str): 'deposit' or 'withdraw'.
        currency (str): Currency to transfer (eg. 'USD').
        amount (Decimal): Amount to transfer.

    Returns:
        dict: Transfer details (ids, amount, status, nonce, ...).
    """
    body = {'margin_profile_id': margin_profile_id,
            'type': transfer_type,
            'currency': currency,
            'amount': amount}
    return self._send_message('post', '/profiles/margin-transfer',
                              data=json.dumps(body))
def get_position(self):
    """Return an overview of your margin profile.

    Returns:
        dict: Funding, account, and margin-call details.
    """
    return self._send_message('get', '/position')
def close_position(self, repay_only):
    """Close the margin position.

    Args:
        repay_only (bool): Undocumented by cbpro.

    Returns:
        Response payload (undocumented).
    """
    body = json.dumps({'repay_only': repay_only})
    return self._send_message('post', '/position/close', data=body)
def deposit(self, amount, currency, payment_method_id):
    """ Deposit funds from a payment method.

    See AuthenticatedClient.get_payment_methods() to receive
    information regarding payment methods.

    Args:
        amount (Decimal): The amount to deposit.
        currency (str): The type of currency.
        payment_method_id (str): ID of the payment method.

    Returns:
        dict: Information about the deposit. Example::
            {
                "id": "593533d2-ff31-46e0-b22e-ca754147a96a",
                "amount": "10.00",
                "currency": "USD",
                "payout_at": "2016-08-20T00:31:09Z"
            }
    """
    params = {'amount': amount,
              'currency': currency,
              'payment_method_id': payment_method_id}
    return self._send_message('post', '/deposits/payment-method',
                              data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
    """Deposit funds from a Coinbase wallet into this trading account.

    Transfers between Coinbase and cbpro are instant and free, within
    your daily limits; see ``get_coinbase_accounts()`` for account ids.

    Args:
        amount (Decimal): Amount to deposit.
        currency (str): Currency code.
        coinbase_account_id (str): Source Coinbase account id.

    Returns:
        dict: Deposit details (``id``, ``amount``, ``currency``).
    """
    body = {'amount': amount,
            'currency': currency,
            'coinbase_account_id': coinbase_account_id}
    return self._send_message(
        'post', '/deposits/coinbase-account', data=json.dumps(body))
def withdraw(self, amount, currency, payment_method_id):
    """Withdraw funds to an external payment method.

    See ``get_payment_methods()`` for available payment methods.

    Args:
        amount (Decimal): Amount to withdraw.
        currency (str): Currency code (eg. 'BTC').
        payment_method_id (str): Destination payment method id.

    Returns:
        dict: Withdrawal details, including ``id`` and ``payout_at``.
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'payment_method_id': payment_method_id})
    return self._send_message('post', '/withdrawals/payment-method',
                              data=body)
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
    """ Withdraw funds to a coinbase account.

    You can move funds between your Coinbase accounts and your cbpro
    trading accounts within your daily limits. Moving funds between
    Coinbase and cbpro is instant and free.

    See AuthenticatedClient.get_coinbase_accounts() to receive
    information regarding your coinbase_accounts.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): The type of currency (eg. 'BTC')
        coinbase_account_id (str): ID of the coinbase account.

    Returns:
        dict: Information about the withdrawal. Example::
            {
                "id":"593533d2-ff31-46e0-b22e-ca754147a96a",
                "amount":"10.00",
                "currency": "BTC",
            }
    """
    params = {'amount': amount,
              'currency': currency,
              'coinbase_account_id': coinbase_account_id}
    return self._send_message('post', '/withdrawals/coinbase-account',
                              data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
    """Withdraw funds directly to an on-chain crypto address.

    Args:
        amount (Decimal): Amount to withdraw.
        currency (str): Currency code (eg. 'BTC').
        crypto_address (str): Destination address.

    Returns:
        dict: Withdrawal details (``id``, ``amount``, ``currency``).
    """
    body = {'amount': amount,
            'currency': currency,
            'crypto_address': crypto_address}
    return self._send_message(
        'post', '/withdrawals/crypto', data=json.dumps(body))
def get_payment_methods(self):
    """List the payment methods linked to this profile.

    Returns:
        list: One dict of details per payment method.
    """
    return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
    """List the Coinbase (non-trading) accounts linked to this user.

    Returns:
        list: One dict of details per Coinbase account.
    """
    return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
                  account_id=None, report_format='pdf', email=None):
    """ Create report of historic information about your account.

    The report will be generated when resources are available. Report status
    can be queried via `get_report(report_id)`.

    Args:
        report_type (str): 'fills' or 'account'
        start_date (str): Starting date for the report in ISO 8601
        end_date (str): Ending date for the report in ISO 8601
        product_id (Optional[str]): ID of the product to generate a fills
            report for. Required if report_type is 'fills'.
        account_id (Optional[str]): ID of the account to generate an account
            report for. Required if report_type is 'account'.
        report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
        email (Optional[str]): Email address to send the report to.

    Returns:
        dict: Report details, including ``id``, ``status``,
        ``created_at``/``expires_at`` timestamps, and (once generated)
        ``file_url``.
    """
    params = {'type': report_type,
              'start_date': start_date,
              'end_date': end_date,
              'format': report_format}
    # Optional fields are only sent when provided.
    if product_id is not None:
        params['product_id'] = product_id
    if account_id is not None:
        params['account_id'] = account_id
    if email is not None:
        params['email'] = email
    return self._send_message('post', '/reports',
                              data=json.dumps(params))
def get_report(self, report_id):
    """Query the status of a previously requested report.

    Args:
        report_id (str): Report ID returned by ``create_report``.

    Returns:
        dict: Report details, including the file url once generated.
    """
    return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
    """Return your 30-day trailing volume for every product.

    The value is cached server-side and recomputed daily at midnight
    UTC.

    Returns:
        list: One dict per product with ``product_id``,
        ``exchange_volume``, ``volume`` and ``recorded_at``.
    """
    return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.cancel_all | python | def cancel_all(self, product_id=None):
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params) | With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
] | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L460-L482 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
"""Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params)
def get_fills(self, product_id=None, order_id=None, **kwargs):
""" Get a list of recent fills.
As of 8/23/18 - Requests without either order_id or product_id
will be rejected
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Fees are recorded in two stages. Immediately after the matching
engine completes a match, the fill is inserted into our
datastore. Once the fill is recorded, a settlement process will
settle the fill and credit both trading counterparties.
The 'fee' field indicates the fees charged for this fill.
The 'liquidity' field indicates if the fill was the result of a
liquidity provider or liquidity taker. M indicates Maker and T
indicates Taker.
Args:
product_id (str): Limit list to this product_id
order_id (str): Limit list to this order_id
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on fills. Example::
[
{
"trade_id": 74,
"product_id": "BTC-USD",
"price": "10.00",
"size": "0.01",
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"created_at": "2014-11-07T22:19:28.578544Z",
"liquidity": "T",
"fee": "0.00025",
"settled": true,
"side": "buy"
},
{
...
}
]
"""
if (product_id is None) and (order_id is None):
raise ValueError('Either product_id or order_id must be specified.')
params = {}
if product_id:
params['product_id'] = product_id
if order_id:
params['order_id'] = order_id
params.update(kwargs)
return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params))
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
def get_payment_methods(self):
""" Get a list of your payment methods.
Returns:
list: Payment method details.
"""
return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
""" Get a list of your coinbase accounts.
Returns:
list: Coinbase account details.
"""
return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
""" Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
}
"""
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params))
def get_report(self, report_id):
""" Get report status.
Use to query a specific report once it has been requested.
Args:
report_id (str): Report ID
Returns:
dict: Report details, including file url once it is created.
"""
return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
""" Get your 30-day trailing volume for all products.
This is a cached value that's calculated every day at midnight UTC.
Returns:
list: 30-day trailing volumes. Example::
[
{
"product_id": "BTC-USD",
"exchange_volume": "11800.00000000",
"volume": "100.00000000",
"recorded_at": "1973-11-29T00:05:01.123456Z"
},
{
...
}
]
"""
return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.get_orders | python | def get_orders(self, product_id=None, status=None, **kwargs):
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params) | List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
] | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L519-L582 | [
"def _send_paginated_message(self, endpoint, params=None):\n \"\"\" Send API message that results in a paginated response.\n\n The paginated responses are abstracted away by making API requests on\n demand as the response is iterated over.\n\n Paginated API messages support 3 additional parameters: `bef... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
    """List holds placed on an account.

    Holds exist for active orders ('order' type) and pending withdrawals
    ('transfer' type); the ``ref`` field carries the id of the order or
    transfer that created the hold. Holds shrink as orders fill and are
    removed on cancel or once a withdrawal completes.

    Returns a generator that may issue several HTTP requests as the
    paginated response is iterated.

    Args:
        account_id (str): Account whose holds should be listed.
        kwargs (dict): Extra HTTP query parameters (e.g. pagination).

    Returns:
        generator(list): Hold records for the account.
    """
    holds_path = '/accounts/{}/holds'.format(account_id)
    return self._send_paginated_message(holds_path, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
    """Place an order of any supported type.

    Generic entry point behind the limit/market/stop helpers. Keyword
    arguments are validated per order type and then forwarded verbatim
    to the API.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        order_type (str): Order type ('limit', 'market', or 'stop')
        **kwargs: Order-type specific parameters; see the dedicated
            order methods (place_limit_order etc.) for the full list.

    Returns:
        dict: Order details as returned by the API.

    Raises:
        ValueError: On mutually exclusive or inconsistent parameters.
    """
    # Margin funding may be requested one way or the other, never both.
    if kwargs.get('overdraft_enabled') is not None and \
            kwargs.get('funding_amount') is not None:
        raise ValueError('Margin funding must be specified through use of '
                         'overdraft or by setting a funding amount, but not'
                         ' both')

    if order_type == 'limit':
        tif = kwargs.get('time_in_force')
        # A cancel period only makes sense for good-till-time orders.
        if kwargs.get('cancel_after') is not None and tif != 'GTT':
            raise ValueError('May only specify a cancel period when time '
                             'in_force is `GTT`')
        # Post-only conflicts with immediate execution policies.
        if kwargs.get('post_only') is not None and tif in ['IOC', 'FOK']:
            raise ValueError('post_only is invalid when time in force is '
                             '`IOC` or `FOK`')
    elif order_type in ('market', 'stop'):
        # Exactly one of size/funds must be supplied for these types.
        if (kwargs.get('size') is None) == (kwargs.get('funds') is None):
            raise ValueError('Either `size` or `funds` must be specified '
                             'for market/stop orders (but not both).')

    # Caller-supplied kwargs take precedence over the positional fields,
    # matching the historical update() behavior.
    order = dict({'product_id': product_id,
                  'side': side,
                  'type': order_type}, **kwargs)
    return self._send_message('post', '/orders', data=json.dumps(order))
def buy(self, product_id, order_type, **kwargs):
    """Place a buy order (legacy convenience wrapper).

    Retained for backwards compatibility with older cbpro-Python
    releases; the order-type specific methods (place_limit_order,
    place_market_order, place_stop_order) offer richer signatures.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        order_type (str): Order type ('limit', 'market', or 'stop')
        **kwargs: Forwarded unchanged to `place_order`.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    side = 'buy'
    return self.place_order(product_id, side, order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
    """Place a sell order (legacy convenience wrapper).

    Retained for backwards compatibility with older cbpro-Python
    releases; the order-type specific methods (place_limit_order,
    place_market_order, place_stop_order) offer richer signatures.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        order_type (str): Order type ('limit', 'market', or 'stop')
        **kwargs: Forwarded unchanged to `place_order`.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    side = 'sell'
    return self.place_order(product_id, side, order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
                      client_oid=None,
                      stp=None,
                      time_in_force=None,
                      cancel_after=None,
                      post_only=None,
                      overdraft_enabled=None,
                      funding_amount=None):
    """Place a limit order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        price (Decimal): Price per unit of cryptocurrency.
        size (Decimal): Amount of cryptocurrency to buy or sell.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag; see
            `place_order`.
        time_in_force (Optional[str]): 'GTC', 'GTT', 'IOC', or 'FOK'.
        cancel_after (Optional[str]): Cancel period for 'GTT' orders:
            'min', 'hour', or 'day'.
        post_only (Optional[bool]): Maker-only flag; the order is
            rejected if any part of it would take liquidity.
        overdraft_enabled (Optional[bool]): Fund shortfalls via margin.
        funding_amount (Optional[Decimal]): Explicit margin funding
            amount; mutually exclusive with `overdraft_enabled`.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    order = {'product_id': product_id,
             'side': side,
             'order_type': 'limit',
             'price': price,
             'size': size,
             'client_oid': client_oid,
             'stp': stp,
             'time_in_force': time_in_force,
             'cancel_after': cancel_after,
             'post_only': post_only,
             'overdraft_enabled': overdraft_enabled,
             'funding_amount': funding_amount}
    # Drop options the caller left unset so they are not sent at all.
    return self.place_order(**{k: v for k, v in order.items()
                               if v is not None})
def place_market_order(self, product_id, side, size=None, funds=None,
                       client_oid=None,
                       stp=None,
                       overdraft_enabled=None,
                       funding_amount=None):
    """Place a market order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        size (Optional[Decimal]): Desired amount in crypto. Supply this
            or `funds`, not both.
        funds (Optional[Decimal]): Desired amount of quote currency.
            Supply this or `size`, not both.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag; see
            `place_order`.
        overdraft_enabled (Optional[bool]): Fund shortfalls via margin.
        funding_amount (Optional[Decimal]): Explicit margin funding
            amount; mutually exclusive with `overdraft_enabled`.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    order = {'product_id': product_id,
             'side': side,
             'order_type': 'market',
             'size': size,
             'funds': funds,
             'client_oid': client_oid,
             'stp': stp,
             'overdraft_enabled': overdraft_enabled,
             'funding_amount': funding_amount}
    # Drop options the caller left unset so they are not sent at all.
    return self.place_order(**{k: v for k, v in order.items()
                               if v is not None})
def place_stop_order(self, product_id, side, price, size=None, funds=None,
                     client_oid=None,
                     stp=None,
                     overdraft_enabled=None,
                     funding_amount=None):
    """Place a stop order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        price (Decimal): Price at which the stop order triggers.
        size (Optional[Decimal]): Desired amount in crypto. Supply this
            or `funds`, not both.
        funds (Optional[Decimal]): Desired amount of quote currency.
            Supply this or `size`, not both.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag; see
            `place_order`.
        overdraft_enabled (Optional[bool]): Fund shortfalls via margin.
        funding_amount (Optional[Decimal]): Explicit margin funding
            amount; mutually exclusive with `overdraft_enabled`.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    order = {'product_id': product_id,
             'side': side,
             'price': price,
             'order_type': 'stop',
             'size': size,
             'funds': funds,
             'client_oid': client_oid,
             'stp': stp,
             'overdraft_enabled': overdraft_enabled,
             'funding_amount': funding_amount}
    # Drop options the caller left unset so they are not sent at all.
    return self.place_order(**{k: v for k, v in order.items()
                               if v is not None})
def cancel_order(self, order_id):
    """Cancel a previously placed order by its server-assigned id.

    Orders with no matches may be purged after cancellation, in which
    case `get_order` will no longer find them. If the order cannot be
    canceled (already filled, already canceled, etc.) the API responds
    with an error message explaining why.

    **Caution**: `order_id` is the server-assigned id, not the optional
    client_oid.

    Args:
        order_id (str): Id of the order to cancel.

    Returns:
        list: Containing the id of the canceled order. Example::

            [ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
    """
    endpoint = '/orders/' + order_id
    return self._send_message('delete', endpoint)
def cancel_all(self, product_id=None):
    """Cancel all open orders, best effort.

    Args:
        product_id (Optional[str]): Restrict cancellation to orders for
            this product.

    Returns:
        list: Ids of the canceled orders.
    """
    # Only send a filter when the caller asked for one.
    query = None if product_id is None else {'product_id': product_id}
    return self._send_message('delete', '/orders', params=query)
def get_order(self, order_id):
    """Fetch a single order by its server-assigned id.

    A canceled order that never matched may have been purged, in which
    case the API responds with status 404.

    **Caution**: an open order's state may change between the request
    and the response as the market moves.

    Args:
        order_id (str): Id of the order to look up.

    Returns:
        dict: Order information (price, size, status, fills, ...).
    """
    endpoint = '/orders/' + order_id
    return self._send_message('get', endpoint)
def get_fills(self, product_id=None, order_id=None, **kwargs):
    """List recent fills for a product or an order.

    At least one of `product_id` / `order_id` is required (the API has
    rejected unfiltered requests since 2018-08-23). Returns a generator
    that may issue several HTTP requests as the paginated response is
    iterated.

    The 'fee' field gives the fee charged for the fill; 'liquidity' is
    'M' (maker) or 'T' (taker). Fills settle shortly after matching.

    Args:
        product_id (Optional[str]): Limit the listing to this product.
        order_id (Optional[str]): Limit the listing to this order.
        kwargs (dict): Extra HTTP query parameters (e.g. pagination).

    Returns:
        generator(list): Fill records.

    Raises:
        ValueError: If neither `product_id` nor `order_id` is given.
    """
    if product_id is None and order_id is None:
        raise ValueError('Either product_id or order_id must be specified.')
    query = dict(kwargs)
    if product_id:
        query['product_id'] = product_id
    if order_id:
        query['order_id'] = order_id
    return self._send_paginated_message('/fills', params=query)
def get_fundings(self, status=None, **kwargs):
    """List margin funding records.

    Every order placed with a margin profile that draws funding creates
    a funding record. Returns a generator that may issue several HTTP
    requests as the paginated response is iterated.

    Args:
        status (Optional[list/str]): Restrict to these statuses; one or
            more of 'outstanding', 'settled', 'rejected'.
        kwargs (dict): Extra HTTP query parameters (e.g. pagination).

    Returns:
        generator(list): Funding records.
    """
    query = dict(kwargs)
    if status is not None:
        query['status'] = status
    return self._send_paginated_message('/funding', params=query)
def repay_funding(self, amount, currency):
    """Repay margin funding, oldest records first.

    Args:
        amount (int): Amount of currency to repay.
        currency (str): Currency of the repayment (eg. 'USD').

    Returns:
        Response body; not documented by cbpro.
    """
    body = {'amount': amount, 'currency': currency}
    return self._send_message('post', '/funding/repay',
                              data=json.dumps(body))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
                    amount):
    """Move funds between the standard profile and a margin profile.

    Args:
        margin_profile_id (str): Margin profile to deposit to or
            withdraw from.
        transfer_type (str): 'deposit' or 'withdraw'.
        currency (str): Currency to transfer (eg. 'USD').
        amount (Decimal): Amount to transfer.

    Returns:
        dict: Transfer details (id, profiles, status, nonce, ...).
    """
    body = {'margin_profile_id': margin_profile_id,
            'type': transfer_type,
            'currency': currency,
            'amount': amount}
    return self._send_message('post', '/profiles/margin-transfer',
                              data=json.dumps(body))
def get_position(self):
    """Fetch an overview of the margin profile.

    Returns:
        dict: Details about funding, accounts, and margin call status.
    """
    endpoint = '/position'
    return self._send_message('get', endpoint)
def close_position(self, repay_only):
    """Close the margin position.

    Args:
        repay_only (bool): Flag passed through to the API; semantics
            undocumented by cbpro.

    Returns:
        Response body; undocumented by cbpro.
    """
    body = {'repay_only': repay_only}
    return self._send_message('post', '/position/close',
                              data=json.dumps(body))
def deposit(self, amount, currency, payment_method_id):
    """Deposit funds from a stored payment method.

    Use `get_payment_methods` to discover available payment method ids.

    Args:
        amount (Decimal): Amount to deposit.
        currency (str): Currency of the deposit (eg. 'USD').
        payment_method_id (str): Id of the payment method to draw from.

    Returns:
        dict: Deposit details (id, amount, currency, payout_at).
    """
    body = {'amount': amount,
            'currency': currency,
            'payment_method_id': payment_method_id}
    return self._send_message('post', '/deposits/payment-method',
                              data=json.dumps(body))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
    """Deposit funds from a linked Coinbase account.

    Transfers between Coinbase and cbpro accounts are instant, free,
    and subject to your daily limits. Use `get_coinbase_accounts` to
    discover account ids.

    Args:
        amount (Decimal): Amount to deposit.
        currency (str): Currency of the deposit (eg. 'BTC').
        coinbase_account_id (str): Id of the source Coinbase account.

    Returns:
        dict: Deposit details (id, amount, currency).
    """
    body = {'amount': amount,
            'currency': currency,
            'coinbase_account_id': coinbase_account_id}
    return self._send_message('post', '/deposits/coinbase-account',
                              data=json.dumps(body))
def withdraw(self, amount, currency, payment_method_id):
    """Withdraw funds to a stored payment method.

    Use `get_payment_methods` to discover available payment method ids.

    Args:
        amount (Decimal): Amount to withdraw.
        currency (str): Currency of the withdrawal (eg. 'USD').
        payment_method_id (str): Id of the destination payment method.

    Returns:
        dict: Withdrawal details (id, amount, currency, payout_at).
    """
    body = {'amount': amount,
            'currency': currency,
            'payment_method_id': payment_method_id}
    return self._send_message('post', '/withdrawals/payment-method',
                              data=json.dumps(body))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
    """Withdraw funds to a linked Coinbase account.

    Transfers between Coinbase and cbpro accounts are instant, free,
    and subject to your daily limits. Use `get_coinbase_accounts` to
    discover account ids.

    Args:
        amount (Decimal): Amount to withdraw.
        currency (str): Currency of the withdrawal (eg. 'BTC').
        coinbase_account_id (str): Id of the destination Coinbase
            account.

    Returns:
        dict: Withdrawal details (id, amount, currency).
    """
    body = {'amount': amount,
            'currency': currency,
            'coinbase_account_id': coinbase_account_id}
    return self._send_message('post', '/withdrawals/coinbase-account',
                              data=json.dumps(body))
def crypto_withdraw(self, amount, currency, crypto_address):
    """Withdraw funds to an external crypto address.

    Args:
        amount (Decimal): Amount to withdraw.
        currency (str): Currency of the withdrawal (eg. 'BTC').
        crypto_address (str): Destination address on the currency's
            network.

    Returns:
        dict: Withdrawal details (id, amount, currency).
    """
    body = {'amount': amount,
            'currency': currency,
            'crypto_address': crypto_address}
    return self._send_message('post', '/withdrawals/crypto',
                              data=json.dumps(body))
def get_payment_methods(self):
    """List the account's stored payment methods.

    Returns:
        list: Payment method details.
    """
    endpoint = '/payment-methods'
    return self._send_message('get', endpoint)
def get_coinbase_accounts(self):
    """List the linked Coinbase accounts.

    Returns:
        list: Coinbase account details.
    """
    endpoint = '/coinbase-accounts'
    return self._send_message('get', endpoint)
def create_report(self, report_type, start_date, end_date, product_id=None,
                  account_id=None, report_format='pdf', email=None):
    """ Request generation of a historical report for your account.

    The report is generated asynchronously when resources are
    available; poll its status with `get_report(report_id)`.

    Args:
        report_type (str): 'fills' or 'account'
        start_date (str): Starting date for the report in ISO 8601
        end_date (str): Ending date for the report in ISO 8601
        product_id (Optional[str]): ID of the product to generate a fills
            report for. Required if report_type is 'fills'.
        account_id (Optional[str]): ID of the account to generate an
            account report for. Required if report_type is 'account'.
        report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
        email (Optional[str]): Email address to send the report to.

    Returns:
        dict: Report details, including id, status, and (once
            generated) a file url.

    Raises:
        ValueError: If the id required by `report_type` is missing.
    """
    # Validate the documented per-type requirements up front instead of
    # letting the server reject the request.
    if report_type == 'fills' and product_id is None:
        raise ValueError('product_id is required for fills reports')
    if report_type == 'account' and account_id is None:
        raise ValueError('account_id is required for account reports')
    params = {'type': report_type,
              'start_date': start_date,
              'end_date': end_date,
              'format': report_format}
    # Optional fields are only sent when supplied.
    if product_id is not None:
        params['product_id'] = product_id
    if account_id is not None:
        params['account_id'] = account_id
    if email is not None:
        params['email'] = email
    return self._send_message('post', '/reports',
                              data=json.dumps(params))
def get_report(self, report_id):
    """Query the status of a previously requested report.

    Args:
        report_id (str): Id returned by `create_report`.

    Returns:
        dict: Report details, including the file url once generated.
    """
    endpoint = '/reports/' + report_id
    return self._send_message('get', endpoint)
def get_trailing_volume(self):
    """Fetch the 30-day trailing volume for all products.

    The value is cached server-side and recalculated daily at midnight
    UTC.

    Returns:
        list: Per-product trailing volume records (product_id,
            exchange_volume, volume, recorded_at).
    """
    endpoint = '/users/self/trailing-volume'
    return self._send_message('get', endpoint)
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.get_fills | python | def get_fills(self, product_id=None, order_id=None, **kwargs):
if (product_id is None) and (order_id is None):
raise ValueError('Either product_id or order_id must be specified.')
params = {}
if product_id:
params['product_id'] = product_id
if order_id:
params['order_id'] = order_id
params.update(kwargs)
return self._send_paginated_message('/fills', params=params) | Get a list of recent fills.
As of 8/23/18 - Requests without either order_id or product_id
will be rejected
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Fees are recorded in two stages. Immediately after the matching
engine completes a match, the fill is inserted into our
datastore. Once the fill is recorded, a settlement process will
settle the fill and credit both trading counterparties.
The 'fee' field indicates the fees charged for this fill.
The 'liquidity' field indicates if the fill was the result of a
liquidity provider or liquidity taker. M indicates Maker and T
indicates Taker.
Args:
product_id (str): Limit list to this product_id
order_id (str): Limit list to this order_id
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on fills. Example::
[
{
"trade_id": 74,
"product_id": "BTC-USD",
"price": "10.00",
"size": "0.01",
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"created_at": "2014-11-07T22:19:28.578544Z",
"liquidity": "T",
"fee": "0.00025",
"settled": true,
"side": "buy"
},
{
...
}
] | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L584-L640 | [
"def _send_paginated_message(self, endpoint, params=None):\n \"\"\" Send API message that results in a paginated response.\n\n The paginated responses are abstracted away by making API requests on\n demand as the response is iterated over.\n\n Paginated API messages support 3 additional parameters: `bef... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
                      client_oid=None,
                      stp=None,
                      time_in_force=None,
                      cancel_after=None,
                      post_only=None,
                      overdraft_enabled=None,
                      funding_amount=None):
    """Place a limit order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        price (Decimal): Price per cryptocurrency
        size (Decimal): Amount of cryptocurrency to buy or sell
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag. See
            `place_order` for details.
        time_in_force (Optional[str]): Time in force. Options:
            'GTC' Good till canceled
            'GTT' Good till time (set by `cancel_after`)
            'IOC' Immediate or cancel
            'FOK' Fill or kill
        cancel_after (Optional[str]): Cancel period for 'GTT' orders
            ('min', 'hour', or 'day').
        post_only (Optional[bool]): Maker-only flag; if any part of the
            order would take liquidity, the whole order is rejected.
        overdraft_enabled (Optional[bool]): If true, funding beyond the
            account balance is provided by margin. Mutually exclusive
            with `funding_amount`.
        funding_amount (Optional[Decimal]): Amount of margin funding to
            provide for the order.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    params = {'product_id': product_id,
              'side': side,
              'order_type': 'limit',
              'price': price,
              'size': size,
              'client_oid': client_oid,
              'stp': stp,
              'time_in_force': time_in_force,
              'cancel_after': cancel_after,
              'post_only': post_only,
              'overdraft_enabled': overdraft_enabled,
              'funding_amount': funding_amount}
    # Drop optionals the caller left unset so the payload only carries
    # explicit fields; `place_order` performs the remaining validation
    # (e.g. cancel_after requires GTT).
    params = {k: v for k, v in params.items() if v is not None}
    return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
                       client_oid=None,
                       stp=None,
                       overdraft_enabled=None,
                       funding_amount=None):
    """Place a market order.

    Exactly one of `size` (amount in crypto) or `funds` (amount of
    quote currency) should be supplied; `place_order` enforces this.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        size (Optional[Decimal]): Desired amount in crypto.
        funds (Optional[Decimal]): Desired amount of quote currency.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag. See
            `place_order` for details.
        overdraft_enabled (Optional[bool]): Fund overdraft via margin.
            Mutually exclusive with `funding_amount`.
        funding_amount (Optional[Decimal]): Amount of margin funding to
            provide for the order.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    candidates = (('product_id', product_id),
                  ('side', side),
                  ('order_type', 'market'),
                  ('size', size),
                  ('funds', funds),
                  ('client_oid', client_oid),
                  ('stp', stp),
                  ('overdraft_enabled', overdraft_enabled),
                  ('funding_amount', funding_amount))
    # Forward only fields that were actually supplied.
    params = {name: value for name, value in candidates if value is not None}
    return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
                     client_oid=None,
                     stp=None,
                     overdraft_enabled=None,
                     funding_amount=None):
    """Place a stop order.

    Exactly one of `size` (amount in crypto) or `funds` (amount of
    quote currency) should be supplied; `place_order` enforces this.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        price (Decimal): Price at which the stop order triggers.
        size (Optional[Decimal]): Desired amount in crypto.
        funds (Optional[Decimal]): Desired amount of quote currency.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag. See
            `place_order` for details.
        overdraft_enabled (Optional[bool]): Fund overdraft via margin.
            Mutually exclusive with `funding_amount`.
        funding_amount (Optional[Decimal]): Amount of margin funding to
            provide for the order.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    supplied = (('product_id', product_id),
                ('side', side),
                ('price', price),
                ('order_type', 'stop'),
                ('size', size),
                ('funds', funds),
                ('client_oid', client_oid),
                ('stp', stp),
                ('overdraft_enabled', overdraft_enabled),
                ('funding_amount', funding_amount))
    # Keep only fields the caller actually set.
    params = {}
    for name, value in supplied:
        if value is not None:
            params[name] = value
    return self.place_order(**params)
def cancel_order(self, order_id):
    """Cancel a previously placed order.

    Note that `order_id` is the server-assigned id, not the optional
    client_oid. If the order had no matches during its lifetime its
    record may already have been purged; orders that are filled or
    already canceled produce an error response explaining why.

    Args:
        order_id (str): The order_id of the order to cancel.

    Returns:
        list: Containing the order_id of the canceled order.
    """
    endpoint = '/orders/' + order_id
    return self._send_message('delete', endpoint)
def cancel_all(self, product_id=None):
    """Cancel all open orders with best effort.

    Args:
        product_id (Optional[str]): Restrict cancellation to orders for
            this product.

    Returns:
        list: Ids of the canceled orders.
    """
    # Only attach query parameters when a product filter was requested.
    params = {'product_id': product_id} if product_id is not None else None
    return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
    """Fetch a single order by server-assigned order id.

    Canceled orders that had no matches may answer with HTTP 404.
    Open orders can change state between the request and the response
    depending on market conditions.

    Args:
        order_id (str): The order to look up.

    Returns:
        dict: Order information (id, price, size, status, ...).
    """
    endpoint = '/orders/' + order_id
    return self._send_message('get', endpoint)
def get_orders(self, product_id=None, status=None, **kwargs):
    """List your current open (or selected-status) orders.

    Returns a generator that may issue several HTTP requests while
    being iterated (paginated endpoint). By default only 'open',
    'pending', and 'active' orders are returned; pass `status='all'`
    for orders of every status. For high-volume trading, maintain your
    own order list via the streaming feeds and use this endpoint only
    to seed the initial state.

    Args:
        product_id (Optional[str]): Restrict listing to this product.
        status (Optional[list/str]): Status or statuses to include
            ('open', 'pending', 'active', 'done', 'settled', or 'all').
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Order dicts as returned by the API.
    """
    # Copy so the pagination params never alias the caller-visible dict.
    params = dict(kwargs)
    if product_id is not None:
        params['product_id'] = product_id
    if status is not None:
        params['status'] = status
    return self._send_paginated_message('/orders', params=params)
def get_fundings(self, status=None, **kwargs):
    """List margin funding records.

    Every order placed with a margin profile that draws funding creates
    a funding record. Returns a generator that may issue several HTTP
    requests while being iterated (paginated endpoint).

    Args:
        status (Optional[list/str]): Restrict records to these statuses
            ('outstanding', 'settled', 'rejected').
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Funding record dicts as returned by the API.
    """
    # kwargs is applied last so explicit request params win over `status`.
    params = {} if status is None else {'status': status}
    params.update(kwargs)
    return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
    """Repay margin funding; the oldest funding records are repaid first.

    Args:
        amount (int): Amount of currency to repay.
        currency (str): Currency of the repayment (eg. 'USD').

    Returns:
        Response payload (not documented by cbpro).
    """
    payload = json.dumps({'amount': amount,
                          'currency': currency})
    return self._send_message('post', '/funding/repay', data=payload)
def margin_transfer(self, margin_profile_id, transfer_type, currency,
                    amount):
    """Transfer funds between the standard profile and a margin profile.

    Args:
        margin_profile_id (str): Margin profile to deposit to or
            withdraw from.
        transfer_type (str): 'deposit' or 'withdraw'
        currency (str): Currency to transfer (eg. 'USD')
        amount (Decimal): Amount to transfer.

    Returns:
        dict: Transfer details (id, type, amount, status, ...).
    """
    payload = json.dumps({'margin_profile_id': margin_profile_id,
                          'type': transfer_type,
                          'currency': currency,
                          'amount': amount})
    return self._send_message('post', '/profiles/margin-transfer',
                              data=payload)
def get_position(self):
    """Return an overview of your margin profile.

    Returns:
        dict: Details about funding, accounts, and margin call.
    """
    return self._send_message('get', '/position')
def close_position(self, repay_only):
    """Close the margin position.

    Args:
        repay_only (bool): Undocumented by cbpro.

    Returns:
        Response payload (undocumented by cbpro).
    """
    payload = json.dumps({'repay_only': repay_only})
    return self._send_message('post', '/position/close', data=payload)
def deposit(self, amount, currency, payment_method_id):
    """Deposit funds from a payment method.

    Use `get_payment_methods()` to look up payment method ids.

    Args:
        amount (Decimal): The amount to deposit.
        currency (str): The type of currency.
        payment_method_id (str): ID of the payment method.

    Returns:
        dict: Deposit details (id, amount, currency, payout_at).
    """
    payload = json.dumps({'amount': amount,
                          'currency': currency,
                          'payment_method_id': payment_method_id})
    return self._send_message('post', '/deposits/payment-method',
                              data=payload)
def coinbase_deposit(self, amount, currency, coinbase_account_id):
    """Deposit funds from a Coinbase account.

    Funds can move between Coinbase accounts and cbpro trading accounts
    instantly and free, within daily limits. Use
    `get_coinbase_accounts()` to look up account ids.

    Args:
        amount (Decimal): The amount to deposit.
        currency (str): The type of currency.
        coinbase_account_id (str): ID of the Coinbase account.

    Returns:
        dict: Deposit details (id, amount, currency).
    """
    payload = json.dumps({'amount': amount,
                          'currency': currency,
                          'coinbase_account_id': coinbase_account_id})
    return self._send_message('post', '/deposits/coinbase-account',
                              data=payload)
def withdraw(self, amount, currency, payment_method_id):
    """Withdraw funds to a payment method.

    Use `get_payment_methods()` to look up payment method ids.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): Currency type (eg. 'BTC').
        payment_method_id (str): ID of the payment method.

    Returns:
        dict: Withdrawal details (id, amount, currency, payout_at).
    """
    payload = json.dumps({'amount': amount,
                          'currency': currency,
                          'payment_method_id': payment_method_id})
    return self._send_message('post', '/withdrawals/payment-method',
                              data=payload)
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
    """Withdraw funds to a Coinbase account.

    Funds can move between Coinbase accounts and cbpro trading accounts
    instantly and free, within daily limits. Use
    `get_coinbase_accounts()` to look up account ids.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): The type of currency (eg. 'BTC').
        coinbase_account_id (str): ID of the Coinbase account.

    Returns:
        dict: Withdrawal details (id, amount, currency).
    """
    payload = json.dumps({'amount': amount,
                          'currency': currency,
                          'coinbase_account_id': coinbase_account_id})
    return self._send_message('post', '/withdrawals/coinbase-account',
                              data=payload)
def crypto_withdraw(self, amount, currency, crypto_address):
    """Withdraw funds to an external crypto address.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): The type of currency (eg. 'BTC').
        crypto_address (str): Destination crypto address.

    Returns:
        dict: Withdrawal details (id, amount, currency).
    """
    payload = json.dumps({'amount': amount,
                          'currency': currency,
                          'crypto_address': crypto_address})
    return self._send_message('post', '/withdrawals/crypto',
                              data=payload)
def get_payment_methods(self):
    """List the payment methods attached to this account.

    Returns:
        list: Payment method details.
    """
    return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
    """List the Coinbase accounts linked to this profile.

    Returns:
        list: Coinbase account details.
    """
    return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
                  account_id=None, report_format='pdf', email=None):
    """ Create report of historic information about your account.

    The report is generated when resources become available; poll its
    status afterwards with `get_report(report_id)`.

    Args:
        report_type (str): 'fills' or 'account'
        start_date (str): Starting date for the report in ISO 8601
        end_date (str): Ending date for the report in ISO 8601
        product_id (Optional[str]): ID of the product to generate a fills
            report for. Required if report_type is 'fills'.
        account_id (Optional[str]): ID of the account to generate an
            account report for. Required if report_type is 'account'.
        report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
        email (Optional[str]): Email address to send the report to.

    Returns:
        dict: Report details (id, type, status, expires_at, ...).

    Raises:
        ValueError: If the id required by `report_type` is missing.
    """
    # Fail fast on the documented requirements instead of round-tripping
    # an invalid request to the server (mirrors place_order's checks).
    if report_type == 'fills' and product_id is None:
        raise ValueError('`product_id` is required for fills reports')
    if report_type == 'account' and account_id is None:
        raise ValueError('`account_id` is required for account reports')
    params = {'type': report_type,
              'start_date': start_date,
              'end_date': end_date,
              'format': report_format}
    if product_id is not None:
        params['product_id'] = product_id
    if account_id is not None:
        params['account_id'] = account_id
    if email is not None:
        params['email'] = email
    return self._send_message('post', '/reports',
                              data=json.dumps(params))
def get_report(self, report_id):
    """Query the status of a previously requested report.

    Args:
        report_id (str): Report ID.

    Returns:
        dict: Report details, including the file url once generated.
    """
    endpoint = '/reports/' + report_id
    return self._send_message('get', endpoint)
def get_trailing_volume(self):
    """Return your 30-day trailing volume for all products.

    The value is cached server-side and recalculated daily at midnight
    UTC.

    Returns:
        list: One dict per product with product_id, exchange_volume,
            volume, and recorded_at fields.
    """
    return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.get_fundings | python | def get_fundings(self, status=None, **kwargs):
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params) | Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
] | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L642-L679 | [
"def _send_paginated_message(self, endpoint, params=None):\n \"\"\" Send API message that results in a paginated response.\n\n The paginated responses are abstracted away by making API requests on\n demand as the response is iterated over.\n\n Paginated API messages support 3 additional parameters: `bef... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
"""Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def cancel_all(self, product_id=None):
    """Cancel all open orders on a best-effort basis.

    Args:
        product_id (Optional[str]): Restrict cancellation to this product.

    Returns:
        list: Ids of the orders that were canceled.
    """
    params = {'product_id': product_id} if product_id is not None else None
    return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
    """Fetch a single order by its server-assigned id.

    A canceled order that never matched may return a 404. Open orders can
    change state between the request and the response.

    Args:
        order_id (str): Id of the order to look up.

    Returns:
        dict: Order fields such as id, price, size, product_id, side,
            type, status, filled_size, executed_value, created_at, etc.
    """
    endpoint = '/orders/' + order_id
    return self._send_message('get', endpoint)
def get_orders(self, product_id=None, status=None, **kwargs):
    """List your current open orders.

    Returns a generator that may issue several paginated HTTP requests as
    it is consumed. By default only open/un-settled orders appear; pass
    ``status='all'`` (or a list of statuses among 'open', 'pending',
    'active', 'done', 'settled') to widen the selection.

    Args:
        product_id (Optional[str]): Restrict listing to this product.
        status (Optional[list/str]): Status filter as described above.
        kwargs (dict): Extra HTTP request parameters.

    Returns:
        generator(dict): Per-order details (id, price, size, side, type,
            time_in_force, status, ...).
    """
    query = dict(kwargs)
    for key, value in (('product_id', product_id), ('status', status)):
        if value is not None:
            query[key] = value
    return self._send_paginated_message('/orders', params=query)
def get_fills(self, product_id=None, order_id=None, **kwargs):
    """List recent fills for an order or product.

    At least one of `product_id` / `order_id` is mandatory (the exchange
    rejects unfiltered requests). Returns a generator that may issue
    several paginated HTTP requests while being consumed. Each fill's
    'fee' field holds the fee charged; 'liquidity' is 'M' (maker) or
    'T' (taker). Fills settle shortly after matching.

    Args:
        product_id (str): Restrict listing to this product.
        order_id (str): Restrict listing to this order.
        kwargs (dict): Extra HTTP request parameters.

    Returns:
        generator(dict): Fill records (trade_id, price, size, fee,
            liquidity, settled, side, ...).

    Raises:
        ValueError: If neither `product_id` nor `order_id` is given.
    """
    if product_id is None and order_id is None:
        raise ValueError('Either product_id or order_id must be specified.')
    params = {key: value
              for key, value in (('product_id', product_id),
                                 ('order_id', order_id))
              if value}
    params.update(kwargs)
    return self._send_paginated_message('/fills', params=params)
def repay_funding(self, amount, currency):
    """Repay margin funding; the oldest funding records are repaid first.

    Args:
        amount (int): Amount of currency to repay.
        currency (str): Currency code, e.g. 'USD'.

    Returns:
        Response body; not documented by cbpro.
    """
    payload = {
        'amount': amount,
        'currency': currency  # example: USD
    }
    return self._send_message('post', '/funding/repay',
                              data=json.dumps(payload))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
                    amount):
    """Move funds between your standard profile and a margin profile.

    Args:
        margin_profile_id (str): Margin profile to deposit to or withdraw
            from.
        transfer_type (str): 'deposit' or 'withdraw'.
        currency (str): Currency to transfer, e.g. 'USD'.
        amount (Decimal): Amount to transfer.

    Returns:
        dict: Transfer details (id, type, amount, currency, status,
            nonce, profile/account ids, created_at, ...).
    """
    payload = {'margin_profile_id': margin_profile_id,
               'type': transfer_type,
               'currency': currency,  # example: USD
               'amount': amount}
    return self._send_message('post', '/profiles/margin-transfer',
                              data=json.dumps(payload))
def get_position(self):
    """Retrieve an overview of your margin profile.

    Returns:
        dict: Details about funding, accounts, and margin call.
    """
    endpoint = '/position'
    return self._send_message('get', endpoint)
def close_position(self, repay_only):
    """Close your margin position.

    Args:
        repay_only (bool): Undocumented by cbpro.

    Returns:
        Response body; undocumented by cbpro.
    """
    payload = {'repay_only': repay_only}
    return self._send_message('post', '/position/close',
                              data=json.dumps(payload))
def deposit(self, amount, currency, payment_method_id):
    """Deposit funds from a payment method.

    Use `get_payment_methods()` to discover available payment methods.

    Args:
        amount (Decimal): Amount to deposit.
        currency (str): Currency type, e.g. 'USD'.
        payment_method_id (str): Id of the payment method to draw from.

    Returns:
        dict: Deposit details (id, amount, currency, payout_at).
    """
    payload = {'amount': amount,
               'currency': currency,
               'payment_method_id': payment_method_id}
    return self._send_message('post', '/deposits/payment-method',
                              data=json.dumps(payload))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
    """Deposit funds from a Coinbase account.

    Transfers between Coinbase and cbpro are instant and free, within
    your daily limits. Use `get_coinbase_accounts()` to list your
    Coinbase accounts.

    Args:
        amount (Decimal): Amount to deposit.
        currency (str): Currency type, e.g. 'BTC'.
        coinbase_account_id (str): Id of the Coinbase account.

    Returns:
        dict: Deposit details (id, amount, currency).
    """
    payload = {'amount': amount,
               'currency': currency,
               'coinbase_account_id': coinbase_account_id}
    return self._send_message('post', '/deposits/coinbase-account',
                              data=json.dumps(payload))
def withdraw(self, amount, currency, payment_method_id):
    """Withdraw funds to a payment method.

    Use `get_payment_methods()` to discover available payment methods.

    Args:
        amount (Decimal): Amount to withdraw.
        currency (str): Currency type, e.g. 'BTC'.
        payment_method_id (str): Id of the payment method to pay out to.

    Returns:
        dict: Withdrawal details (id, amount, currency, payout_at).
    """
    payload = {'amount': amount,
               'currency': currency,
               'payment_method_id': payment_method_id}
    return self._send_message('post', '/withdrawals/payment-method',
                              data=json.dumps(payload))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
    """Withdraw funds to a Coinbase account.

    Transfers between Coinbase and cbpro are instant and free, within
    your daily limits. Use `get_coinbase_accounts()` to list your
    Coinbase accounts.

    Args:
        amount (Decimal): Amount to withdraw.
        currency (str): Currency type, e.g. 'BTC'.
        coinbase_account_id (str): Id of the Coinbase account.

    Returns:
        dict: Withdrawal details (id, amount, currency).
    """
    payload = {'amount': amount,
               'currency': currency,
               'coinbase_account_id': coinbase_account_id}
    return self._send_message('post', '/withdrawals/coinbase-account',
                              data=json.dumps(payload))
def crypto_withdraw(self, amount, currency, crypto_address):
    """Withdraw funds to an external crypto address.

    Args:
        amount (Decimal): Amount to withdraw.
        currency (str): Currency type, e.g. 'BTC'.
        crypto_address (str): Destination crypto address.

    Returns:
        dict: Withdrawal details (id, amount, currency).
    """
    payload = {'amount': amount,
               'currency': currency,
               'crypto_address': crypto_address}
    return self._send_message('post', '/withdrawals/crypto',
                              data=json.dumps(payload))
def get_payment_methods(self):
    """List your payment methods.

    Returns:
        list: Payment method details.
    """
    endpoint = '/payment-methods'
    return self._send_message('get', endpoint)
def get_coinbase_accounts(self):
    """List your Coinbase accounts.

    Returns:
        list: Coinbase account details.
    """
    endpoint = '/coinbase-accounts'
    return self._send_message('get', endpoint)
def create_report(self, report_type, start_date, end_date, product_id=None,
                  account_id=None, report_format='pdf', email=None):
    """ Create report of historic information about your account.

    The report is generated asynchronously when resources are available;
    poll its status with `get_report(report_id)`.

    Args:
        report_type (str): 'fills' or 'account'
        start_date (str): Starting date for the report in ISO 8601
        end_date (str): Ending date for the report in ISO 8601
        product_id (Optional[str]): ID of the product to generate a fills
            report for. Required if report_type is 'fills'.
        account_id (Optional[str]): ID of the account to generate an
            account report for. Required if report_type is 'account'.
        report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
        email (Optional[str]): Email address to send the report to.

    Returns:
        dict: Report details, including id, type, status, created_at,
            expires_at, file_url (once ready), and the request params.

    Raises:
        ValueError: If the id required by `report_type` is missing
            (product_id for 'fills', account_id for 'account').
    """
    # Enforce the documented requirements locally instead of letting the
    # server reject the request with a less specific error.
    if report_type == 'fills' and product_id is None:
        raise ValueError("product_id is required for 'fills' reports.")
    if report_type == 'account' and account_id is None:
        raise ValueError("account_id is required for 'account' reports.")
    params = {'type': report_type,
              'start_date': start_date,
              'end_date': end_date,
              'format': report_format}
    if product_id is not None:
        params['product_id'] = product_id
    if account_id is not None:
        params['account_id'] = account_id
    if email is not None:
        params['email'] = email
    return self._send_message('post', '/reports',
                              data=json.dumps(params))
def get_report(self, report_id):
    """Query the status of a previously requested report.

    Args:
        report_id (str): Id returned by `create_report`.

    Returns:
        dict: Report details, including the file url once it is created.
    """
    endpoint = '/reports/' + report_id
    return self._send_message('get', endpoint)
def get_trailing_volume(self):
    """Fetch your 30-day trailing volume for all products.

    The exchange caches this value, recalculating it daily at midnight
    UTC.

    Returns:
        list: Per-product entries with product_id, exchange_volume,
            volume, and recorded_at.
    """
    endpoint = '/users/self/trailing-volume'
    return self._send_message('get', endpoint)
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.repay_funding | python | def repay_funding(self, amount, currency):
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params)) | Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro. | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L681-L697 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
             api_url="https://api.pro.coinbase.com"):
    """Create an authenticated cbpro API client.

    Args:
        key (str): Your API key.
        b64secret (str): Secret matching the API key.
        passphrase (str): Passphrase chosen when the key was created.
        api_url (Optional[str]): Base API URL; defaults to the live API.
    """
    super(AuthenticatedClient, self).__init__(api_url)
    self.session = requests.Session()
    self.auth = CBProAuth(key, b64secret, passphrase)
def get_account(self, account_id):
    """Fetch information for a single account by id.

    Args:
        account_id (str): Id of the account to fetch.

    Returns:
        dict: Account information (id, balance, holds, available,
            currency).
    """
    endpoint = '/accounts/' + account_id
    return self._send_message('get', endpoint)
def get_accounts(self):
    """List all of your trading accounts.

    Funds backing an open order are placed on hold until the order is
    filled or canceled; each account entry reports its held amount.
    Margin accounts include additional fields.

    Returns:
        list: Per-account info (id, currency, balance, available, hold,
            profile_id, ...).
    """
    # Listing all accounts is the single-account endpoint with an
    # empty account id.
    return self.get_account(account_id='')
def get_account_history(self, account_id, **kwargs):
    """List activity (ledger entries) for an account.

    Entry types: 'transfer' (Coinbase <-> cbpro), 'match' (trade),
    'fee' (trade fee), 'rebate' (fee rebate). Trade-related entries
    carry order/trade/product details in their 'details' field.

    Args:
        account_id (str): Account whose history to fetch.
        kwargs (dict): Extra HTTP request parameters.

    Returns:
        generator(dict): Ledger entries (id, created_at, amount,
            balance, type, details).
    """
    return self._send_paginated_message(
        '/accounts/{}/ledger'.format(account_id), params=kwargs)
def get_account_holds(self, account_id, **kwargs):
    """List holds on an account.

    Returns a generator that may issue several paginated HTTP requests
    as it is consumed. Holds exist for open orders ('order' type) and
    pending withdrawals ('transfer' type); the 'ref' field carries the
    id of the order or transfer that created the hold. Holds shrink as
    orders fill and disappear on cancel/completion.

    Args:
        account_id (str): Account whose holds to fetch.
        kwargs (dict): Extra HTTP request parameters.

    Returns:
        generator(dict): Hold records (id, account_id, created_at,
            updated_at, amount, type, ref).
    """
    return self._send_paginated_message(
        '/accounts/{}/holds'.format(account_id), params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
    """Place an order of any supported type.

    The type-specific wrappers (`place_limit_order`,
    `place_market_order`, `place_stop_order`) offer friendlier
    signatures; this is the generic entry point they all funnel into.

    Args:
        product_id (str): Product to order, e.g. 'BTC-USD'.
        side (str): 'buy' or 'sell'.
        order_type (str): 'limit', 'market', or 'stop'.
        **client_oid (str): Client-chosen order id (UUID), echoed in the
            public feed's `received` messages.
        **stp (str): Self-trade prevention flag: 'dc' (decrease and
            cancel, default), 'co' (cancel oldest), 'cn' (cancel
            newest), 'cb' (cancel both).
        **overdraft_enabled (Optional[bool]): Fund any shortfall with
            margin.
        **funding_amount (Optional[Decimal]): Explicit margin funding
            amount; mutually exclusive with `overdraft_enabled`.
        **kwargs: Further type-specific arguments; see the wrapper
            methods for details.

    Returns:
        dict: Order details (id, price, size, product_id, side, stp,
            type, time_in_force, status, ...).

    Raises:
        ValueError: On mutually inconsistent margin/limit/market
            arguments (checked locally before any request is sent).
    """
    # Margin funding may be expressed one way only.
    if kwargs.get('overdraft_enabled') is not None and \
            kwargs.get('funding_amount') is not None:
        raise ValueError('Margin funding must be specified through use of '
                         'overdraft or by setting a funding amount, but not'
                         ' both')
    # Limit-order constraint checks.
    if order_type == 'limit':
        tif = kwargs.get('time_in_force')
        if kwargs.get('cancel_after') is not None and tif != 'GTT':
            raise ValueError('May only specify a cancel period when time '
                             'in_force is `GTT`')
        if kwargs.get('post_only') is not None and tif in ['IOC', 'FOK']:
            raise ValueError('post_only is invalid when time in force is '
                             '`IOC` or `FOK`')
    # Market/stop orders need exactly one of size / funds.
    if order_type in ('market', 'stop'):
        if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
            raise ValueError('Either `size` or `funds` must be specified '
                             'for market/stop orders (but not both).')
    params = {'product_id': product_id,
              'side': side,
              'type': order_type}
    params.update(kwargs)
    return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
    """Place a buy order (backwards-compatible shorthand).

    Kept for older cbpro-python callers; prefer the order-type-specific
    methods (`place_limit_order`, `place_market_order`,
    `place_stop_order`) for richer signatures.

    Args:
        product_id (str): Product to order, e.g. 'BTC-USD'.
        order_type (str): 'limit', 'market', or 'stop'.
        **kwargs: Type-specific order arguments.

    Returns:
        dict: Order details (see `place_order`).
    """
    side = 'buy'
    return self.place_order(product_id, side, order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
    """Place a sell order (backwards-compatible shorthand).

    Kept for older cbpro-python callers; prefer the order-type-specific
    methods (`place_limit_order`, `place_market_order`,
    `place_stop_order`) for richer signatures.

    Args:
        product_id (str): Product to order, e.g. 'BTC-USD'.
        order_type (str): 'limit', 'market', or 'stop'.
        **kwargs: Type-specific order arguments.

    Returns:
        dict: Order details (see `place_order`).
    """
    side = 'sell'
    return self.place_order(product_id, side, order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
                      client_oid=None,
                      stp=None,
                      time_in_force=None,
                      cancel_after=None,
                      post_only=None,
                      overdraft_enabled=None,
                      funding_amount=None):
    """Submit a limit order.

    Args:
        product_id (str): Product to order, e.g. 'BTC-USD'.
        side (str): 'buy' or 'sell'.
        price (Decimal): Price per unit of cryptocurrency.
        size (Decimal): Amount of cryptocurrency to buy or sell.
        client_oid (Optional[str]): Client-chosen order id.
        stp (Optional[str]): Self-trade prevention flag (see
            `place_order`).
        time_in_force (Optional[str]): 'GTC' (good till canceled),
            'GTT' (good till time, see `cancel_after`), 'IOC'
            (immediate or cancel), or 'FOK' (fill or kill).
        cancel_after (Optional[str]): Expiry for 'GTT' orders: 'min',
            'hour', or 'day'.
        post_only (Optional[bool]): Only provide liquidity; the order is
            rejected if any part would take liquidity.
        overdraft_enabled (Optional[bool]): Fund any shortfall with
            margin.
        funding_amount (Optional[Decimal]): Explicit margin funding
            amount; mutually exclusive with `overdraft_enabled`.

    Returns:
        dict: Order details (see `place_order`).
    """
    supplied = {'product_id': product_id,
                'side': side,
                'order_type': 'limit',
                'price': price,
                'size': size,
                'client_oid': client_oid,
                'stp': stp,
                'time_in_force': time_in_force,
                'cancel_after': cancel_after,
                'post_only': post_only,
                'overdraft_enabled': overdraft_enabled,
                'funding_amount': funding_amount}
    # Forward only the arguments the caller actually provided.
    return self.place_order(**{k: v for k, v in supplied.items()
                               if v is not None})
def place_market_order(self, product_id, side, size=None, funds=None,
                       client_oid=None,
                       stp=None,
                       overdraft_enabled=None,
                       funding_amount=None):
    """Submit a market order.

    Args:
        product_id (str): Product to order, e.g. 'BTC-USD'.
        side (str): 'buy' or 'sell'.
        size (Optional[Decimal]): Amount in crypto; give this or
            `funds`.
        funds (Optional[Decimal]): Amount of quote currency; give this
            or `size`.
        client_oid (Optional[str]): Client-chosen order id.
        stp (Optional[str]): Self-trade prevention flag (see
            `place_order`).
        overdraft_enabled (Optional[bool]): Fund any shortfall with
            margin.
        funding_amount (Optional[Decimal]): Explicit margin funding
            amount; mutually exclusive with `overdraft_enabled`.

    Returns:
        dict: Order details (see `place_order`).
    """
    supplied = {'product_id': product_id,
                'side': side,
                'order_type': 'market',
                'size': size,
                'funds': funds,
                'client_oid': client_oid,
                'stp': stp,
                'overdraft_enabled': overdraft_enabled,
                'funding_amount': funding_amount}
    # Forward only the arguments the caller actually provided.
    return self.place_order(**{k: v for k, v in supplied.items()
                               if v is not None})
def place_stop_order(self, product_id, side, price, size=None, funds=None,
                     client_oid=None,
                     stp=None,
                     overdraft_enabled=None,
                     funding_amount=None):
    """Submit a stop order.

    Args:
        product_id (str): Product to order, e.g. 'BTC-USD'.
        side (str): 'buy' or 'sell'.
        price (Decimal): Price at which the stop order triggers.
        size (Optional[Decimal]): Amount in crypto; give this or
            `funds`.
        funds (Optional[Decimal]): Amount of quote currency; give this
            or `size`.
        client_oid (Optional[str]): Client-chosen order id.
        stp (Optional[str]): Self-trade prevention flag (see
            `place_order`).
        overdraft_enabled (Optional[bool]): Fund any shortfall with
            margin.
        funding_amount (Optional[Decimal]): Explicit margin funding
            amount; mutually exclusive with `overdraft_enabled`.

    Returns:
        dict: Order details (see `place_order`).
    """
    supplied = {'product_id': product_id,
                'side': side,
                'price': price,
                'order_type': 'stop',
                'size': size,
                'funds': funds,
                'client_oid': client_oid,
                'stp': stp,
                'overdraft_enabled': overdraft_enabled,
                'funding_amount': funding_amount}
    # Forward only the arguments the caller actually provided.
    return self.place_order(**{k: v for k, v in supplied.items()
                               if v is not None})
def cancel_order(self, order_id):
    """Cancel a previously placed order.

    Note that `order_id` is the server-assigned id, not the optional
    client_oid. If the order never matched, its record may be purged
    and it will no longer be visible via `get_order`. If it could not
    be canceled (already filled, already canceled, ...) the error
    response explains why in its message field.

    Args:
        order_id (str): Server-assigned id of the order to cancel.

    Returns:
        list: The id of the canceled order, e.g.
            ``["c5ab5eae-76be-480e-8961-00792dc7e138"]``.
    """
    endpoint = '/orders/' + order_id
    return self._send_message('delete', endpoint)
def cancel_all(self, product_id=None):
    """Cancel all open orders on a best-effort basis.

    Args:
        product_id (Optional[str]): Restrict cancellation to this
            product.

    Returns:
        list: Ids of the orders that were canceled.
    """
    params = {'product_id': product_id} if product_id is not None else None
    return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
    """Fetch a single order by its server-assigned id.

    A canceled order that never matched may return a 404. Open orders
    can change state between the request and the response.

    Args:
        order_id (str): Id of the order to look up.

    Returns:
        dict: Order fields such as id, price, size, product_id, side,
            type, status, filled_size, executed_value, created_at, etc.
    """
    endpoint = '/orders/' + order_id
    return self._send_message('get', endpoint)
def get_orders(self, product_id=None, status=None, **kwargs):
    """List your current open orders.

    Returns a generator that may issue several paginated HTTP requests
    as it is consumed. By default only open/un-settled orders appear;
    pass ``status='all'`` (or a list of statuses among 'open',
    'pending', 'active', 'done', 'settled') to widen the selection.
    High-volume traders should track open orders locally via the
    streaming feeds rather than polling this endpoint.

    Args:
        product_id (Optional[str]): Restrict listing to this product.
        status (Optional[list/str]): Status filter as described above.
        kwargs (dict): Extra HTTP request parameters.

    Returns:
        generator(dict): Per-order details (id, price, size, side,
            type, time_in_force, status, ...).
    """
    query = dict(kwargs)
    for key, value in (('product_id', product_id), ('status', status)):
        if value is not None:
            query[key] = value
    return self._send_paginated_message('/orders', params=query)
def get_fills(self, product_id=None, order_id=None, **kwargs):
    """List recent fills for an order or product.

    At least one of `product_id` / `order_id` is mandatory (the
    exchange rejects unfiltered requests). Returns a generator that may
    issue several paginated HTTP requests while being consumed. Each
    fill's 'fee' field holds the fee charged; 'liquidity' is 'M'
    (maker) or 'T' (taker). Fills settle shortly after matching.

    Args:
        product_id (str): Restrict listing to this product.
        order_id (str): Restrict listing to this order.
        kwargs (dict): Extra HTTP request parameters.

    Returns:
        generator(dict): Fill records (trade_id, price, size, fee,
            liquidity, settled, side, ...).

    Raises:
        ValueError: If neither `product_id` nor `order_id` is given.
    """
    if product_id is None and order_id is None:
        raise ValueError('Either product_id or order_id must be specified.')
    params = {key: value
              for key, value in (('product_id', product_id),
                                 ('order_id', order_id))
              if value}
    params.update(kwargs)
    return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params)
def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params))
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
def get_payment_methods(self):
""" Get a list of your payment methods.
Returns:
list: Payment method details.
"""
return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
""" Get a list of your coinbase accounts.
Returns:
list: Coinbase account details.
"""
return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
""" Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
}
"""
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params))
def get_report(self, report_id):
""" Get report status.
Use to query a specific report once it has been requested.
Args:
report_id (str): Report ID
Returns:
dict: Report details, including file url once it is created.
"""
return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
""" Get your 30-day trailing volume for all products.
This is a cached value that's calculated every day at midnight UTC.
Returns:
list: 30-day trailing volumes. Example::
[
{
"product_id": "BTC-USD",
"exchange_volume": "11800.00000000",
"volume": "100.00000000",
"recorded_at": "1973-11-29T00:05:01.123456Z"
},
{
...
}
]
"""
return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.margin_transfer | python | def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params)) | Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
} | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L699-L734 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
    def __init__(self, key, b64secret, passphrase,
                 api_url="https://api.pro.coinbase.com"):
        """ Create an instance of the AuthenticatedClient class.

        Args:
            key (str): Your API key.
            b64secret (str): The secret key matching your API key.
            passphrase (str): Passphrase chosen when setting up key.
            api_url (Optional[str]): API URL. Defaults to cbpro API.
        """
        super(AuthenticatedClient, self).__init__(api_url)
        # Auth handler that signs each request with the API credentials.
        self.auth = CBProAuth(key, b64secret, passphrase)
        # Persistent HTTP connection reused across requests.
        self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
"""Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params)
def get_fills(self, product_id=None, order_id=None, **kwargs):
""" Get a list of recent fills.
As of 8/23/18 - Requests without either order_id or product_id
will be rejected
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Fees are recorded in two stages. Immediately after the matching
engine completes a match, the fill is inserted into our
datastore. Once the fill is recorded, a settlement process will
settle the fill and credit both trading counterparties.
The 'fee' field indicates the fees charged for this fill.
The 'liquidity' field indicates if the fill was the result of a
liquidity provider or liquidity taker. M indicates Maker and T
indicates Taker.
Args:
product_id (str): Limit list to this product_id
order_id (str): Limit list to this order_id
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on fills. Example::
[
{
"trade_id": 74,
"product_id": "BTC-USD",
"price": "10.00",
"size": "0.01",
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"created_at": "2014-11-07T22:19:28.578544Z",
"liquidity": "T",
"fee": "0.00025",
"settled": true,
"side": "buy"
},
{
...
}
]
"""
if (product_id is None) and (order_id is None):
raise ValueError('Either product_id or order_id must be specified.')
params = {}
if product_id:
params['product_id'] = product_id
if order_id:
params['order_id'] = order_id
params.update(kwargs)
return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params))
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
def get_payment_methods(self):
""" Get a list of your payment methods.
Returns:
list: Payment method details.
"""
return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
""" Get a list of your coinbase accounts.
Returns:
list: Coinbase account details.
"""
return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
""" Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
}
"""
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params))
def get_report(self, report_id):
""" Get report status.
Use to query a specific report once it has been requested.
Args:
report_id (str): Report ID
Returns:
dict: Report details, including file url once it is created.
"""
return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
""" Get your 30-day trailing volume for all products.
This is a cached value that's calculated every day at midnight UTC.
Returns:
list: 30-day trailing volumes. Example::
[
{
"product_id": "BTC-USD",
"exchange_volume": "11800.00000000",
"volume": "100.00000000",
"recorded_at": "1973-11-29T00:05:01.123456Z"
},
{
...
}
]
"""
return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.close_position | python | def close_position(self, repay_only):
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params)) | Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L745-L757 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
"""Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params)
def get_fills(self, product_id=None, order_id=None, **kwargs):
""" Get a list of recent fills.
As of 8/23/18 - Requests without either order_id or product_id
will be rejected
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Fees are recorded in two stages. Immediately after the matching
engine completes a match, the fill is inserted into our
datastore. Once the fill is recorded, a settlement process will
settle the fill and credit both trading counterparties.
The 'fee' field indicates the fees charged for this fill.
The 'liquidity' field indicates if the fill was the result of a
liquidity provider or liquidity taker. M indicates Maker and T
indicates Taker.
Args:
product_id (str): Limit list to this product_id
order_id (str): Limit list to this order_id
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on fills. Example::
[
{
"trade_id": 74,
"product_id": "BTC-USD",
"price": "10.00",
"size": "0.01",
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"created_at": "2014-11-07T22:19:28.578544Z",
"liquidity": "T",
"fee": "0.00025",
"settled": true,
"side": "buy"
},
{
...
}
]
"""
if (product_id is None) and (order_id is None):
raise ValueError('Either product_id or order_id must be specified.')
params = {}
if product_id:
params['product_id'] = product_id
if order_id:
params['order_id'] = order_id
params.update(kwargs)
return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
def get_payment_methods(self):
""" Get a list of your payment methods.
Returns:
list: Payment method details.
"""
return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
""" Get a list of your coinbase accounts.
Returns:
list: Coinbase account details.
"""
return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
""" Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
}
"""
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params))
def get_report(self, report_id):
""" Get report status.
Use to query a specific report once it has been requested.
Args:
report_id (str): Report ID
Returns:
dict: Report details, including file url once it is created.
"""
return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
""" Get your 30-day trailing volume for all products.
This is a cached value that's calculated every day at midnight UTC.
Returns:
list: 30-day trailing volumes. Example::
[
{
"product_id": "BTC-USD",
"exchange_volume": "11800.00000000",
"volume": "100.00000000",
"recorded_at": "1973-11-29T00:05:01.123456Z"
},
{
...
}
]
"""
return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.withdraw | python | def withdraw(self, amount, currency, payment_method_id):
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params)) | Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
} | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L816-L841 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
"""Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params)
def get_fills(self, product_id=None, order_id=None, **kwargs):
""" Get a list of recent fills.
As of 8/23/18 - Requests without either order_id or product_id
will be rejected
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Fees are recorded in two stages. Immediately after the matching
engine completes a match, the fill is inserted into our
datastore. Once the fill is recorded, a settlement process will
settle the fill and credit both trading counterparties.
The 'fee' field indicates the fees charged for this fill.
The 'liquidity' field indicates if the fill was the result of a
liquidity provider or liquidity taker. M indicates Maker and T
indicates Taker.
Args:
product_id (str): Limit list to this product_id
order_id (str): Limit list to this order_id
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on fills. Example::
[
{
"trade_id": 74,
"product_id": "BTC-USD",
"price": "10.00",
"size": "0.01",
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"created_at": "2014-11-07T22:19:28.578544Z",
"liquidity": "T",
"fee": "0.00025",
"settled": true,
"side": "buy"
},
{
...
}
]
"""
if (product_id is None) and (order_id is None):
raise ValueError('Either product_id or order_id must be specified.')
params = {}
if product_id:
params['product_id'] = product_id
if order_id:
params['order_id'] = order_id
params.update(kwargs)
return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params))
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
""" Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
def get_payment_methods(self):
""" Get a list of your payment methods.
Returns:
list: Payment method details.
"""
return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
""" Get a list of your coinbase accounts.
Returns:
list: Coinbase account details.
"""
return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
""" Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
}
"""
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params))
def get_report(self, report_id):
""" Get report status.
Use to query a specific report once it has been requested.
Args:
report_id (str): Report ID
Returns:
dict: Report details, including file url once it is created.
"""
return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
""" Get your 30-day trailing volume for all products.
This is a cached value that's calculated every day at midnight UTC.
Returns:
list: 30-day trailing volumes. Example::
[
{
"product_id": "BTC-USD",
"exchange_volume": "11800.00000000",
"volume": "100.00000000",
"recorded_at": "1973-11-29T00:05:01.123456Z"
},
{
...
}
]
"""
return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.coinbase_withdraw | python | def coinbase_withdraw(self, amount, currency, coinbase_account_id):
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/withdrawals/coinbase-account',
data=json.dumps(params)) | Withdraw funds to a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to withdraw.
currency (str): The type of currency (eg. 'BTC')
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
} | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L843-L871 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
"""Place a sell order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
client_oid=None,
stp=None,
time_in_force=None,
cancel_after=None,
post_only=None,
overdraft_enabled=None,
funding_amount=None):
"""Place a limit order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Price per cryptocurrency
size (Decimal): Amount of cryptocurrency to buy or sell
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
time_in_force (Optional[str]): Time in force. Options:
'GTC' Good till canceled
'GTT' Good till time (set by `cancel_after`)
'IOC' Immediate or cancel
'FOK' Fill or kill
cancel_after (Optional[str]): Cancel after this period for 'GTT'
orders. Options are 'min', 'hour', or 'day'.
post_only (Optional[bool]): Indicates that the order should only
make liquidity. If any part of the order results in taking
liquidity, the order will be rejected and no part of it will
execute.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'limit',
'price': price,
'size': size,
'client_oid': client_oid,
'stp': stp,
'time_in_force': time_in_force,
'cancel_after': cancel_after,
'post_only': post_only,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_market_order(self, product_id, side, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place market order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'order_type': 'market',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
client_oid=None,
stp=None,
overdraft_enabled=None,
funding_amount=None):
""" Place stop order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
price (Decimal): Desired price at which the stop order triggers.
size (Optional[Decimal]): Desired amount in crypto. Specify this or
`funds`.
funds (Optional[Decimal]): Desired amount of quote currency to use.
Specify this or `size`.
client_oid (Optional[str]): User-specified Order ID
stp (Optional[str]): Self-trade prevention flag. See `place_order`
for details.
overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
Returns:
dict: Order details. See `place_order` for example.
"""
params = {'product_id': product_id,
'side': side,
'price': price,
'order_type': 'stop',
'size': size,
'funds': funds,
'client_oid': client_oid,
'stp': stp,
'overdraft_enabled': overdraft_enabled,
'funding_amount': funding_amount}
params = dict((k, v) for k, v in params.items() if v is not None)
return self.place_order(**params)
def cancel_order(self, order_id):
""" Cancel a previously placed order.
If the order had no matches during its lifetime its record may
be purged. This means the order details will not be available
with get_order(order_id). If the order could not be canceled
(already filled or previously canceled, etc), then an error
response will indicate the reason in the message field.
**Caution**: The order id is the server-assigned order id and
not the optional client_oid.
Args:
order_id (str): The order_id of the order you want to cancel
Returns:
list: Containing the order_id of cancelled order. Example::
[ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
"""
return self._send_message('delete', '/orders/' + order_id)
def cancel_all(self, product_id=None):
""" With best effort, cancel all open orders.
Args:
product_id (Optional[str]): Only cancel orders for this
product_id
Returns:
list: A list of ids of the canceled orders. Example::
[
"144c6f8e-713f-4682-8435-5280fbe8b2b4",
"debe4907-95dc-442f-af3b-cec12f42ebda",
"cf7aceee-7b08-4227-a76c-3858144323ab",
"dfc5ae27-cadb-4c0c-beef-8994936fde8a",
"34fecfbf-de33-4273-b2c6-baf8e8948be4"
]
"""
if product_id is not None:
params = {'product_id': product_id}
else:
params = None
return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
""" Get a single order by order id.
If the order is canceled the response may have status code 404
if the order had no matches.
**Caution**: Open orders may change state between the request
and the response depending on market conditions.
Args:
order_id (str): The order to get information of.
Returns:
dict: Containing information on order. Example::
{
"created_at": "2017-06-18T00:27:42.920136Z",
"executed_value": "0.0000000000000000",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"id": "9456f388-67a9-4316-bad1-330c5353804f",
"post_only": true,
"price": "1.00000000",
"product_id": "BTC-USD",
"settled": false,
"side": "buy",
"size": "1.00000000",
"status": "pending",
"stp": "dc",
"time_in_force": "GTC",
"type": "limit"
}
"""
return self._send_message('get', '/orders/' + order_id)
def get_orders(self, product_id=None, status=None, **kwargs):
""" List your current open orders.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Only open or un-settled orders are returned. As soon as an
order is no longer open and settled, it will no longer appear
in the default request.
Orders which are no longer resting on the order book, will be
marked with the 'done' status. There is a small window between
an order being 'done' and 'settled'. An order is 'settled' when
all of the fills have settled and the remaining holds (if any)
have been removed.
For high-volume trading it is strongly recommended that you
maintain your own list of open orders and use one of the
streaming market data feeds to keep it updated. You should poll
the open orders endpoint once when you start trading to obtain
the current state of any open orders.
Args:
product_id (Optional[str]): Only list orders for this
product
status (Optional[list/str]): Limit list of orders to
this status or statuses. Passing 'all' returns orders
of all statuses.
** Options: 'open', 'pending', 'active', 'done',
'settled'
** default: ['open', 'pending', 'active']
Returns:
list: Containing information on orders. Example::
[
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "open",
"settled": false
},
{
...
}
]
"""
params = kwargs
if product_id is not None:
params['product_id'] = product_id
if status is not None:
params['status'] = status
return self._send_paginated_message('/orders', params=params)
def get_fills(self, product_id=None, order_id=None, **kwargs):
""" Get a list of recent fills.
As of 8/23/18 - Requests without either order_id or product_id
will be rejected
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Fees are recorded in two stages. Immediately after the matching
engine completes a match, the fill is inserted into our
datastore. Once the fill is recorded, a settlement process will
settle the fill and credit both trading counterparties.
The 'fee' field indicates the fees charged for this fill.
The 'liquidity' field indicates if the fill was the result of a
liquidity provider or liquidity taker. M indicates Maker and T
indicates Taker.
Args:
product_id (str): Limit list to this product_id
order_id (str): Limit list to this order_id
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on fills. Example::
[
{
"trade_id": 74,
"product_id": "BTC-USD",
"price": "10.00",
"size": "0.01",
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"created_at": "2014-11-07T22:19:28.578544Z",
"liquidity": "T",
"fee": "0.00025",
"settled": true,
"side": "buy"
},
{
...
}
]
"""
if (product_id is None) and (order_id is None):
raise ValueError('Either product_id or order_id must be specified.')
params = {}
if product_id:
params['product_id'] = product_id
if order_id:
params['order_id'] = order_id
params.update(kwargs)
return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
""" Every order placed with a margin profile that draws funding
will create a funding record.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
status (list/str): Limit funding records to these statuses.
** Options: 'outstanding', 'settled', 'rejected'
kwargs (dict): Additional HTTP request parameters.
Returns:
list: Containing information on margin funding. Example::
[
{
"id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
"profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
"amount": "1057.6519956381537500",
"status": "settled",
"created_at": "2017-03-17T23:46:16.663397Z",
"currency": "USD",
"repaid_amount": "1057.6519956381537500",
"default_amount": "0",
"repaid_default": false
},
{
...
}
]
"""
params = {}
if status is not None:
params['status'] = status
params.update(kwargs)
return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
""" Repay funding. Repays the older funding records first.
Args:
amount (int): Amount of currency to repay
currency (str): The currency, example USD
Returns:
Not specified by cbpro.
"""
params = {
'amount': amount,
'currency': currency # example: USD
}
return self._send_message('post', '/funding/repay',
data=json.dumps(params))
def margin_transfer(self, margin_profile_id, transfer_type, currency,
amount):
""" Transfer funds between your standard profile and a margin profile.
Args:
margin_profile_id (str): Margin profile ID to withdraw or deposit
from.
transfer_type (str): 'deposit' or 'withdraw'
currency (str): Currency to transfer (eg. 'USD')
amount (Decimal): Amount to transfer
Returns:
dict: Transfer details. Example::
{
"created_at": "2017-01-25T19:06:23.415126Z",
"id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
"user_id": "521c20b3d4ab09621f000011",
"profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
"margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
"type": "deposit",
"amount": "2",
"currency": "USD",
"account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
"margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
"margin_product_id": "BTC-USD",
"status": "completed",
"nonce": 25
}
"""
params = {'margin_profile_id': margin_profile_id,
'type': transfer_type,
'currency': currency, # example: USD
'amount': amount}
return self._send_message('post', '/profiles/margin-transfer',
data=json.dumps(params))
def get_position(self):
""" Get An overview of your margin profile.
Returns:
dict: Details about funding, accounts, and margin call.
"""
return self._send_message('get', '/position')
def close_position(self, repay_only):
""" Close position.
Args:
repay_only (bool): Undocumented by cbpro.
Returns:
Undocumented
"""
params = {'repay_only': repay_only}
return self._send_message('post', '/position/close',
data=json.dumps(params))
def deposit(self, amount, currency, payment_method_id):
""" Deposit funds from a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decmial): The amount to deposit.
currency (str): The type of currency.
payment_method_id (str): ID of the payment method.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/deposits/payment-method',
data=json.dumps(params))
def coinbase_deposit(self, amount, currency, coinbase_account_id):
""" Deposit funds from a coinbase account.
You can move funds between your Coinbase accounts and your cbpro
trading accounts within your daily limits. Moving funds between
Coinbase and cbpro is instant and free.
See AuthenticatedClient.get_coinbase_accounts() to receive
information regarding your coinbase_accounts.
Args:
amount (Decimal): The amount to deposit.
currency (str): The type of currency.
coinbase_account_id (str): ID of the coinbase account.
Returns:
dict: Information about the deposit. Example::
{
"id": "593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'coinbase_account_id': coinbase_account_id}
return self._send_message('post', '/deposits/coinbase-account',
data=json.dumps(params))
def withdraw(self, amount, currency, payment_method_id):
""" Withdraw funds to a payment method.
See AuthenticatedClient.get_payment_methods() to receive
information regarding payment methods.
Args:
amount (Decimal): The amount to withdraw.
currency (str): Currency type (eg. 'BTC')
payment_method_id (str): ID of the payment method.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount": "10.00",
"currency": "USD",
"payout_at": "2016-08-20T00:31:09Z"
}
"""
params = {'amount': amount,
'currency': currency,
'payment_method_id': payment_method_id}
return self._send_message('post', '/withdrawals/payment-method',
data=json.dumps(params))
def crypto_withdraw(self, amount, currency, crypto_address):
""" Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
}
"""
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params))
def get_payment_methods(self):
""" Get a list of your payment methods.
Returns:
list: Payment method details.
"""
return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
""" Get a list of your coinbase accounts.
Returns:
list: Coinbase account details.
"""
return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
""" Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
}
"""
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params))
def get_report(self, report_id):
""" Get report status.
Use to query a specific report once it has been requested.
Args:
report_id (str): Report ID
Returns:
dict: Report details, including file url once it is created.
"""
return self._send_message('get', '/reports/' + report_id)
def get_trailing_volume(self):
""" Get your 30-day trailing volume for all products.
This is a cached value that's calculated every day at midnight UTC.
Returns:
list: 30-day trailing volumes. Example::
[
{
"product_id": "BTC-USD",
"exchange_volume": "11800.00000000",
"volume": "100.00000000",
"recorded_at": "1973-11-29T00:05:01.123456Z"
},
{
...
}
]
"""
return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.crypto_withdraw | python | def crypto_withdraw(self, amount, currency, crypto_address):
params = {'amount': amount,
'currency': currency,
'crypto_address': crypto_address}
return self._send_message('post', '/withdrawals/crypto',
data=json.dumps(params)) | Withdraw funds to a crypto address.
Args:
amount (Decimal): The amount to withdraw
currency (str): The type of currency (eg. 'BTC')
crypto_address (str): Crypto address to withdraw to.
Returns:
dict: Withdraw details. Example::
{
"id":"593533d2-ff31-46e0-b22e-ca754147a96a",
"amount":"10.00",
"currency": "BTC",
} | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L873-L894 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
api_url="https://api.pro.coinbase.com"):
""" Create an instance of the AuthenticatedClient class.
Args:
key (str): Your API key.
b64secret (str): The secret key matching your API key.
passphrase (str): Passphrase chosen when setting up key.
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
super(AuthenticatedClient, self).__init__(api_url)
self.auth = CBProAuth(key, b64secret, passphrase)
self.session = requests.Session()
def get_account(self, account_id):
""" Get information for a single account.
Use this endpoint when you know the account_id.
Args:
account_id (str): Account id for account you want to get.
Returns:
dict: Account information. Example::
{
"id": "a1b2c3d4",
"balance": "1.100",
"holds": "0.100",
"available": "1.00",
"currency": "USD"
}
"""
return self._send_message('get', '/accounts/' + account_id)
def get_accounts(self):
""" Get a list of trading all accounts.
When you place an order, the funds for the order are placed on
hold. They cannot be used for other orders or withdrawn. Funds
will remain on hold until the order is filled or canceled. The
funds on hold for each account will be specified.
Returns:
list: Info about all accounts. Example::
[
{
"id": "71452118-efc7-4cc4-8780-a5e22d4baa53",
"currency": "BTC",
"balance": "0.0000000000000000",
"available": "0.0000000000000000",
"hold": "0.0000000000000000",
"profile_id": "75da88c5-05bf-4f54-bc85-5c775bd68254"
},
{
...
}
]
* Additional info included in response for margin accounts.
"""
return self.get_account('')
def get_account_history(self, account_id, **kwargs):
""" List account activity. Account activity either increases or
decreases your account balance.
Entry type indicates the reason for the account change.
* transfer: Funds moved to/from Coinbase to cbpro
* match: Funds moved as a result of a trade
* fee: Fee as a result of a trade
* rebate: Fee rebate as per our fee schedule
If an entry is the result of a trade (match, fee), the details
field will contain additional information about the trade.
Args:
account_id (str): Account id to get history of.
kwargs (dict): Additional HTTP request parameters.
Returns:
list: History information for the account. Example::
[
{
"id": "100",
"created_at": "2014-11-07T08:19:27.028459Z",
"amount": "0.001",
"balance": "239.669",
"type": "fee",
"details": {
"order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
"trade_id": "74",
"product_id": "BTC-USD"
}
},
{
...
}
]
"""
endpoint = '/accounts/{}/ledger'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def get_account_holds(self, account_id, **kwargs):
""" Get holds on an account.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Holds are placed on an account for active orders or
pending withdraw requests.
As an order is filled, the hold amount is updated. If an order
is canceled, any remaining hold is removed. For a withdraw, once
it is completed, the hold is removed.
The `type` field will indicate why the hold exists. The hold
type is 'order' for holds related to open orders and 'transfer'
for holds related to a withdraw.
The `ref` field contains the id of the order or transfer which
created the hold.
Args:
account_id (str): Account id to get holds of.
kwargs (dict): Additional HTTP request parameters.
Returns:
generator(list): Hold information for the account. Example::
[
{
"id": "82dcd140-c3c7-4507-8de4-2c529cd1a28f",
"account_id": "e0b3f39a-183d-453e-b754-0c13e5bab0b3",
"created_at": "2014-11-06T10:34:47.123456Z",
"updated_at": "2014-11-06T10:40:47.123456Z",
"amount": "4.23",
"type": "order",
"ref": "0a205de4-dd35-4370-a285-fe8fc375a273",
},
{
...
}
]
"""
endpoint = '/accounts/{}/holds'.format(account_id)
return self._send_paginated_message(endpoint, params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
""" Place an order.
The three order types (limit, market, and stop) can be placed using this
method. Specific methods are provided for each order type, but if a
more generic interface is desired this method is available.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
side (str): Order side ('buy' or 'sell)
order_type (str): Order type ('limit', 'market', or 'stop')
**client_oid (str): Order ID selected by you to identify your order.
This should be a UUID, which will be broadcast in the public
feed for `received` messages.
**stp (str): Self-trade prevention flag. cbpro doesn't allow self-
trading. This behavior can be modified with this flag.
Options:
'dc' Decrease and Cancel (default)
'co' Cancel oldest
'cn' Cancel newest
'cb' Cancel both
**overdraft_enabled (Optional[bool]): If true funding above and
beyond the account balance will be provided by margin, as
necessary.
**funding_amount (Optional[Decimal]): Amount of margin funding to be
provided for the order. Mutually exclusive with
`overdraft_enabled`.
**kwargs: Additional arguments can be specified for different order
types. See the limit/market/stop order methods for details.
Returns:
dict: Order details. Example::
{
"id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
"price": "0.10000000",
"size": "0.01000000",
"product_id": "BTC-USD",
"side": "buy",
"stp": "dc",
"type": "limit",
"time_in_force": "GTC",
"post_only": false,
"created_at": "2016-12-08T20:02:28.53864Z",
"fill_fees": "0.0000000000000000",
"filled_size": "0.00000000",
"executed_value": "0.0000000000000000",
"status": "pending",
"settled": false
}
"""
# Margin parameter checks
if kwargs.get('overdraft_enabled') is not None and \
kwargs.get('funding_amount') is not None:
raise ValueError('Margin funding must be specified through use of '
'overdraft or by setting a funding amount, but not'
' both')
# Limit order checks
if order_type == 'limit':
if kwargs.get('cancel_after') is not None and \
kwargs.get('time_in_force') != 'GTT':
raise ValueError('May only specify a cancel period when time '
'in_force is `GTT`')
if kwargs.get('post_only') is not None and kwargs.get('time_in_force') in \
['IOC', 'FOK']:
raise ValueError('post_only is invalid when time in force is '
'`IOC` or `FOK`')
# Market and stop order checks
if order_type == 'market' or order_type == 'stop':
if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
raise ValueError('Either `size` or `funds` must be specified '
'for market/stop orders (but not both).')
# Build params dict
params = {'product_id': product_id,
'side': side,
'type': order_type}
params.update(kwargs)
return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
"""Place a buy order.
This is included to maintain backwards compatibility with older versions
of cbpro-Python. For maximum support from docstrings and function
signatures see the order type-specific functions place_limit_order,
place_market_order, and place_stop_order.
Args:
product_id (str): Product to order (eg. 'BTC-USD')
order_type (str): Order type ('limit', 'market', or 'stop')
**kwargs: Additional arguments can be specified for different order
types.
Returns:
dict: Order details. See `place_order` for example.
"""
return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
    """Place a sell order (legacy convenience wrapper).

    Kept for backwards compatibility with older cbpro-Python releases;
    prefer the order-type specific helpers (place_limit_order,
    place_market_order, place_stop_order) for richer signatures.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        order_type (str): Order type ('limit', 'market', or 'stop')
        **kwargs: Order-type specific arguments.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    # Delegate with the side pinned to 'sell'.
    return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
                      client_oid=None,
                      stp=None,
                      time_in_force=None,
                      cancel_after=None,
                      post_only=None,
                      overdraft_enabled=None,
                      funding_amount=None):
    """Place a limit order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        price (Decimal): Price per cryptocurrency.
        size (Decimal): Amount of cryptocurrency to buy or sell.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag; see `place_order`.
        time_in_force (Optional[str]): 'GTC', 'GTT', 'IOC', or 'FOK'.
        cancel_after (Optional[str]): Cancel period for 'GTT' orders
            ('min', 'hour', or 'day').
        post_only (Optional[bool]): Reject the order if any part of it
            would take liquidity.
        overdraft_enabled (Optional[bool]): Fund on margin as needed.
            Mutually exclusive with `funding_amount`.
        funding_amount (Optional[Decimal]): Explicit margin funding amount.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    order = {'product_id': product_id,
             'side': side,
             'order_type': 'limit'}
    # Only forward optional fields the caller actually supplied.
    for key, value in (('price', price),
                       ('size', size),
                       ('client_oid', client_oid),
                       ('stp', stp),
                       ('time_in_force', time_in_force),
                       ('cancel_after', cancel_after),
                       ('post_only', post_only),
                       ('overdraft_enabled', overdraft_enabled),
                       ('funding_amount', funding_amount)):
        if value is not None:
            order[key] = value
    return self.place_order(**order)
def place_market_order(self, product_id, side, size=None, funds=None,
                       client_oid=None,
                       stp=None,
                       overdraft_enabled=None,
                       funding_amount=None):
    """Place a market order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        size (Optional[Decimal]): Amount in crypto; specify this or `funds`.
        funds (Optional[Decimal]): Amount of quote currency; specify this
            or `size`.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag; see `place_order`.
        overdraft_enabled (Optional[bool]): Fund on margin as needed.
            Mutually exclusive with `funding_amount`.
        funding_amount (Optional[Decimal]): Explicit margin funding amount.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    order = {'product_id': product_id,
             'side': side,
             'order_type': 'market'}
    # Only forward optional fields the caller actually supplied.
    for key, value in (('size', size),
                       ('funds', funds),
                       ('client_oid', client_oid),
                       ('stp', stp),
                       ('overdraft_enabled', overdraft_enabled),
                       ('funding_amount', funding_amount)):
        if value is not None:
            order[key] = value
    return self.place_order(**order)
def place_stop_order(self, product_id, side, price, size=None, funds=None,
                     client_oid=None,
                     stp=None,
                     overdraft_enabled=None,
                     funding_amount=None):
    """Place a stop order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        price (Decimal): Price at which the stop order triggers.
        size (Optional[Decimal]): Amount in crypto; specify this or `funds`.
        funds (Optional[Decimal]): Amount of quote currency; specify this
            or `size`.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag; see `place_order`.
        overdraft_enabled (Optional[bool]): Fund on margin as needed.
            Mutually exclusive with `funding_amount`.
        funding_amount (Optional[Decimal]): Explicit margin funding amount.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    order = {'product_id': product_id,
             'side': side,
             'order_type': 'stop'}
    # Only forward optional fields the caller actually supplied.
    for key, value in (('price', price),
                       ('size', size),
                       ('funds', funds),
                       ('client_oid', client_oid),
                       ('stp', stp),
                       ('overdraft_enabled', overdraft_enabled),
                       ('funding_amount', funding_amount)):
        if value is not None:
            order[key] = value
    return self.place_order(**order)
def cancel_order(self, order_id):
    """Cancel a previously placed order.

    If the order had no matches during its lifetime its record may be
    purged, in which case get_order(order_id) will no longer return it.
    If the order could not be canceled (already filled, previously
    canceled, etc.) the error response explains why in its message field.

    Note:
        `order_id` is the server-assigned id, not the optional client_oid.

    Args:
        order_id (str): The id of the order to cancel.

    Returns:
        list: The order_id of the cancelled order, e.g.
            ["c5ab5eae-76be-480e-8961-00792dc7e138"]
    """
    return self._send_message('delete', '/orders/{}'.format(order_id))
def cancel_all(self, product_id=None):
    """Cancel all open orders on a best-effort basis.

    Args:
        product_id (Optional[str]): Only cancel orders for this product.

    Returns:
        list: Ids of the canceled orders.
    """
    # Omit the params entirely when no product filter was requested.
    params = {'product_id': product_id} if product_id is not None else None
    return self._send_message('delete', '/orders', params=params)
def get_order(self, order_id):
    """Fetch a single order by its server-assigned id.

    A canceled order that never matched may return HTTP 404.  Open
    orders can change state between the request and the response.

    Args:
        order_id (str): Id of the order to fetch.

    Returns:
        dict: Order information (price, size, status, fills, ...).
    """
    return self._send_message('get', '/orders/{}'.format(order_id))
def get_orders(self, product_id=None, status=None, **kwargs):
    """List your current open orders.

    Returns a generator that may issue multiple HTTP requests while
    being iterated.  By default only open/un-settled orders appear;
    for high-volume trading keep your own order book via the websocket
    feed and use this endpoint only to seed it.

    Args:
        product_id (Optional[str]): Only list orders for this product.
        status (Optional[list/str]): Limit to these statuses ('open',
            'pending', 'active', 'done', 'settled', or 'all').
            Default: ['open', 'pending', 'active'].
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Order information dicts.
    """
    params = dict(kwargs)
    if product_id is not None:
        params['product_id'] = product_id
    if status is not None:
        params['status'] = status
    return self._send_paginated_message('/orders', params=params)
def get_fills(self, product_id=None, order_id=None, **kwargs):
    """List recent fills for an order or product.

    Returns a generator that may issue multiple HTTP requests while
    being iterated.  The API rejects requests without either
    `order_id` or `product_id` (as of 8/23/18), so at least one must
    be given.  The 'liquidity' field is 'M' (maker) or 'T' (taker);
    'fee' holds the fees charged for the fill.

    Args:
        product_id (str): Limit list to this product_id.
        order_id (str): Limit list to this order_id.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Fill information dicts.

    Raises:
        ValueError: If neither product_id nor order_id is supplied.
    """
    if (product_id is None) and (order_id is None):
        raise ValueError('Either product_id or order_id must be specified.')
    params = dict(kwargs)
    # Truthiness check matches the API: empty-string filters are dropped.
    if product_id:
        params['product_id'] = product_id
    if order_id:
        params['order_id'] = order_id
    return self._send_paginated_message('/fills', params=params)
def get_fundings(self, status=None, **kwargs):
    """List margin funding records.

    Every order placed with a margin profile that draws funding creates
    a funding record.  Returns a generator that may issue multiple HTTP
    requests while being iterated.

    Args:
        status (list/str): Limit records to these statuses
            ('outstanding', 'settled', 'rejected').
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Margin funding record dicts.
    """
    params = dict(kwargs)
    if status is not None:
        params['status'] = status
    return self._send_paginated_message('/funding', params=params)
def repay_funding(self, amount, currency):
    """Repay margin funding, oldest records first.

    Args:
        amount (int): Amount of currency to repay.
        currency (str): Currency code, e.g. 'USD'.

    Returns:
        Not specified by cbpro.
    """
    body = json.dumps({'amount': amount,
                       'currency': currency})
    return self._send_message('post', '/funding/repay', data=body)
def margin_transfer(self, margin_profile_id, transfer_type, currency,
                    amount):
    """Transfer funds between a standard profile and a margin profile.

    Args:
        margin_profile_id (str): Margin profile to deposit to or
            withdraw from.
        transfer_type (str): 'deposit' or 'withdraw'.
        currency (str): Currency to transfer (eg. 'USD').
        amount (Decimal): Amount to transfer.

    Returns:
        dict: Transfer details (id, type, amount, status, ...).
    """
    body = json.dumps({'margin_profile_id': margin_profile_id,
                       'type': transfer_type,
                       'currency': currency,
                       'amount': amount})
    return self._send_message('post', '/profiles/margin-transfer',
                              data=body)
def get_position(self):
    """Fetch an overview of your margin profile.

    Returns:
        dict: Funding, account, and margin-call details.
    """
    return self._send_message('get', '/position')
def close_position(self, repay_only):
    """Close your margin position.

    Args:
        repay_only (bool): Undocumented by cbpro.

    Returns:
        Undocumented by cbpro.
    """
    body = json.dumps({'repay_only': repay_only})
    return self._send_message('post', '/position/close', data=body)
def deposit(self, amount, currency, payment_method_id):
    """Deposit funds from a payment method.

    Use get_payment_methods() to look up payment method ids.

    Args:
        amount (Decimal): The amount to deposit.
        currency (str): The type of currency.
        payment_method_id (str): Id of the payment method.

    Returns:
        dict: Deposit details (id, amount, currency, payout_at).
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'payment_method_id': payment_method_id})
    return self._send_message('post', '/deposits/payment-method',
                              data=body)
def coinbase_deposit(self, amount, currency, coinbase_account_id):
    """Deposit funds from a Coinbase account.

    Transfers between Coinbase and cbpro are instant and free, within
    your daily limits.  Use get_coinbase_accounts() to look up account
    ids.

    Args:
        amount (Decimal): The amount to deposit.
        currency (str): The type of currency.
        coinbase_account_id (str): Id of the Coinbase account.

    Returns:
        dict: Deposit details (id, amount, currency).
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'coinbase_account_id': coinbase_account_id})
    return self._send_message('post', '/deposits/coinbase-account',
                              data=body)
def withdraw(self, amount, currency, payment_method_id):
    """Withdraw funds to a payment method.

    Use get_payment_methods() to look up payment method ids.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): Currency type (eg. 'BTC').
        payment_method_id (str): Id of the payment method.

    Returns:
        dict: Withdrawal details (id, amount, currency, payout_at).
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'payment_method_id': payment_method_id})
    return self._send_message('post', '/withdrawals/payment-method',
                              data=body)
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
    """Withdraw funds to a Coinbase account.

    Transfers between Coinbase and cbpro are instant and free, within
    your daily limits.  Use get_coinbase_accounts() to look up account
    ids.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): The type of currency (eg. 'BTC').
        coinbase_account_id (str): Id of the Coinbase account.

    Returns:
        dict: Withdrawal details (id, amount, currency).
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'coinbase_account_id': coinbase_account_id})
    return self._send_message('post', '/withdrawals/coinbase-account',
                              data=body)
def get_payment_methods(self):
    """List your payment methods.

    Returns:
        list: Payment method details.
    """
    return self._send_message('get', '/payment-methods')
def get_coinbase_accounts(self):
    """List your Coinbase accounts.

    Returns:
        list: Coinbase account details.
    """
    return self._send_message('get', '/coinbase-accounts')
def create_report(self, report_type, start_date, end_date, product_id=None,
                  account_id=None, report_format='pdf', email=None):
    """Request generation of a historic account/fills report.

    The report is generated when resources are available; poll its
    status with `get_report(report_id)`.

    Args:
        report_type (str): 'fills' or 'account'.
        start_date (str): Report start date, ISO 8601.
        end_date (str): Report end date, ISO 8601.
        product_id (Optional[str]): Product for a 'fills' report
            (required by the API for that type).
        account_id (Optional[str]): Account for an 'account' report
            (required by the API for that type).
        report_format (Optional[str]): 'pdf' (default) or 'csv'.
        email (Optional[str]): Email address to send the report to.

    Returns:
        dict: Report details, including id and status.
    """
    params = {'type': report_type,
              'start_date': start_date,
              'end_date': end_date,
              'format': report_format}
    # Append the optional fields only when supplied.
    for key, value in (('product_id', product_id),
                       ('account_id', account_id),
                       ('email', email)):
        if value is not None:
            params[key] = value
    return self._send_message('post', '/reports',
                              data=json.dumps(params))
def get_report(self, report_id):
    """Query the status of a previously requested report.

    Args:
        report_id (str): Report id returned by `create_report`.

    Returns:
        dict: Report details, including the file url once generated.
    """
    return self._send_message('get', '/reports/{}'.format(report_id))
def get_trailing_volume(self):
    """Fetch your 30-day trailing volume for all products.

    The value is cached server-side and recalculated daily at
    midnight UTC.

    Returns:
        list: Per-product 30-day trailing volume dicts.
    """
    return self._send_message('get', '/users/self/trailing-volume')
|
danpaquin/coinbasepro-python | cbpro/authenticated_client.py | AuthenticatedClient.create_report | python | def create_report(self, report_type, start_date, end_date, product_id=None,
account_id=None, report_format='pdf', email=None):
params = {'type': report_type,
'start_date': start_date,
'end_date': end_date,
'format': report_format}
if product_id is not None:
params['product_id'] = product_id
if account_id is not None:
params['account_id'] = account_id
if email is not None:
params['email'] = email
return self._send_message('post', '/reports',
data=json.dumps(params)) | Create report of historic information about your account.
The report will be generated when resources are available. Report status
can be queried via `get_report(report_id)`.
Args:
report_type (str): 'fills' or 'account'
start_date (str): Starting date for the report in ISO 8601
end_date (str): Ending date for the report in ISO 8601
product_id (Optional[str]): ID of the product to generate a fills
report for. Required if account_type is 'fills'
account_id (Optional[str]): ID of the account to generate an account
report for. Required if report_type is 'account'.
report_format (Optional[str]): 'pdf' or 'csv'. Default is 'pdf'.
email (Optional[str]): Email address to send the report to.
Returns:
dict: Report details. Example::
{
"id": "0428b97b-bec1-429e-a94c-59232926778d",
"type": "fills",
"status": "pending",
"created_at": "2015-01-06T10:34:47.000Z",
"completed_at": undefined,
"expires_at": "2015-01-13T10:35:47.000Z",
"file_url": undefined,
"params": {
"start_date": "2014-11-01T00:00:00.000Z",
"end_date": "2014-11-30T23:59:59.000Z"
}
} | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/authenticated_client.py#L914-L961 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class AuthenticatedClient(PublicClient):
""" Provides access to Private Endpoints on the cbpro API.
All requests default to the live `api_url`: 'https://api.pro.coinbase.com'.
To test your application using the sandbox modify the `api_url`.
Attributes:
url (str): The api url for this client instance to use.
auth (CBProAuth): Custom authentication handler for each request.
session (requests.Session): Persistent HTTP connection object.
"""
def __init__(self, key, b64secret, passphrase,
             api_url="https://api.pro.coinbase.com"):
    """Create an AuthenticatedClient.

    Args:
        key (str): Your API key.
        b64secret (str): The secret key matching your API key.
        passphrase (str): Passphrase chosen when setting up the key.
        api_url (Optional[str]): API URL. Defaults to the cbpro API.
    """
    super(AuthenticatedClient, self).__init__(api_url)
    # Persistent HTTP connection plus per-request signing handler.
    self.session = requests.Session()
    self.auth = CBProAuth(key, b64secret, passphrase)
def get_account(self, account_id):
    """Fetch information for a single account.

    Use this endpoint when you already know the account id.

    Args:
        account_id (str): Id of the account to fetch.

    Returns:
        dict: Account information (id, balance, holds, available,
            currency).
    """
    return self._send_message('get', '/accounts/{}'.format(account_id))
def get_accounts(self):
    """List all trading accounts.

    Funds backing open orders are placed on hold until the order fills
    or is canceled; each account reports its held amount separately
    from its available balance.  Margin accounts include extra fields.

    Returns:
        list: Info for every account.
    """
    # An empty account id hits the collection endpoint (/accounts/).
    return self.get_account('')
def get_account_history(self, account_id, **kwargs):
    """List ledger activity for an account.

    Returns a generator that may issue multiple HTTP requests while
    being iterated.  Entry types: 'transfer' (Coinbase <-> cbpro),
    'match' (trade), 'fee', 'rebate'.  Trade-related entries carry
    extra detail in the 'details' field.

    Args:
        account_id (str): Account to fetch history for.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Ledger entry dicts.
    """
    return self._send_paginated_message(
        '/accounts/{}/ledger'.format(account_id), params=kwargs)
def get_account_holds(self, account_id, **kwargs):
    """List holds on an account.

    Returns a generator that may issue multiple HTTP requests while
    being iterated.  Holds exist for active orders ('order' type) and
    pending withdrawals ('transfer' type); the 'ref' field holds the id
    of the order or transfer that created the hold.  Holds shrink as
    orders fill and disappear when the order or withdrawal completes.

    Args:
        account_id (str): Account to fetch holds for.
        kwargs (dict): Additional HTTP request parameters.

    Returns:
        generator(list): Hold information dicts.
    """
    return self._send_paginated_message(
        '/accounts/{}/holds'.format(account_id), params=kwargs)
def place_order(self, product_id, side, order_type, **kwargs):
    """Place an order of any supported type.

    Generic entry point for limit, market, and stop orders.  The
    order-type specific helpers (place_limit_order, place_market_order,
    place_stop_order) wrap this method with explicit signatures.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        order_type (str): Order type ('limit', 'market', or 'stop')
        **client_oid (str): User-selected UUID, broadcast in the public
            feed for `received` messages.
        **stp (str): Self-trade prevention flag:
            'dc' Decrease and Cancel (default), 'co' Cancel oldest,
            'cn' Cancel newest, 'cb' Cancel both.
        **overdraft_enabled (Optional[bool]): Fund the order on margin
            as needed. Mutually exclusive with `funding_amount`.
        **funding_amount (Optional[Decimal]): Explicit margin funding
            amount. Mutually exclusive with `overdraft_enabled`.
        **kwargs: Additional order-type specific arguments; see the
            limit/market/stop helpers for details.

    Returns:
        dict: Order details as returned by the API.

    Raises:
        ValueError: On an invalid parameter combination (margin, limit,
            or market/stop checks below).
    """
    # Margin funding may be requested by overdraft OR by an explicit
    # amount, never both.
    if kwargs.get('overdraft_enabled') is not None and \
            kwargs.get('funding_amount') is not None:
        raise ValueError('Margin funding must be specified through use of '
                         'overdraft or by setting a funding amount, but not'
                         ' both')
    # Limit order checks
    if order_type == 'limit':
        if kwargs.get('cancel_after') is not None and \
                kwargs.get('time_in_force') != 'GTT':
            raise ValueError('May only specify a cancel period when time '
                             'in_force is `GTT`')
        # BUG FIX: only reject when post_only is actually requested.
        # Previously an explicit post_only=False (a harmless no-op) was
        # rejected alongside post_only=True for IOC/FOK orders.
        if kwargs.get('post_only') and kwargs.get('time_in_force') in \
                ('IOC', 'FOK'):
            raise ValueError('post_only is invalid when time in force is '
                             '`IOC` or `FOK`')
    # Market and stop orders require exactly one of size/funds.
    if order_type in ('market', 'stop'):
        if not (kwargs.get('size') is None) ^ (kwargs.get('funds') is None):
            raise ValueError('Either `size` or `funds` must be specified '
                             'for market/stop orders (but not both).')
    # Build the request body; kwargs are forwarded verbatim.
    params = {'product_id': product_id,
              'side': side,
              'type': order_type}
    params.update(kwargs)
    return self._send_message('post', '/orders', data=json.dumps(params))
def buy(self, product_id, order_type, **kwargs):
    """Place a buy order (backwards-compatible wrapper).

    Prefer place_limit_order / place_market_order / place_stop_order
    for explicit signatures; this simply forwards to `place_order`.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        order_type (str): Order type ('limit', 'market', or 'stop')
        **kwargs: Order-type specific arguments.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    return self.place_order(product_id, 'buy', order_type, **kwargs)
def sell(self, product_id, order_type, **kwargs):
    """Place a sell order (backwards-compatible wrapper).

    Prefer place_limit_order / place_market_order / place_stop_order
    for explicit signatures; this simply forwards to `place_order`.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        order_type (str): Order type ('limit', 'market', or 'stop')
        **kwargs: Order-type specific arguments.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    return self.place_order(product_id, 'sell', order_type, **kwargs)
def place_limit_order(self, product_id, side, price, size,
                      client_oid=None,
                      stp=None,
                      time_in_force=None,
                      cancel_after=None,
                      post_only=None,
                      overdraft_enabled=None,
                      funding_amount=None):
    """Place a limit order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        price (Decimal): Price per cryptocurrency.
        size (Decimal): Amount of cryptocurrency to buy or sell.
        client_oid (Optional[str]): User-specified order id.
        stp (Optional[str]): Self-trade prevention flag; see `place_order`.
        time_in_force (Optional[str]): 'GTC', 'GTT', 'IOC', or 'FOK'.
        cancel_after (Optional[str]): Cancel period for 'GTT' orders
            ('min', 'hour', or 'day').
        post_only (Optional[bool]): Reject the order if any part of it
            would take liquidity.
        overdraft_enabled (Optional[bool]): Fund on margin as needed.
            Mutually exclusive with `funding_amount`.
        funding_amount (Optional[Decimal]): Explicit margin funding amount.

    Returns:
        dict: Order details. See `place_order` for an example.
    """
    supplied = {'product_id': product_id,
                'side': side,
                'order_type': 'limit',
                'price': price,
                'size': size,
                'client_oid': client_oid,
                'stp': stp,
                'time_in_force': time_in_force,
                'cancel_after': cancel_after,
                'post_only': post_only,
                'overdraft_enabled': overdraft_enabled,
                'funding_amount': funding_amount}
    # Drop unset optionals so they are not sent to the API.
    return self.place_order(**{k: v for k, v in supplied.items()
                               if v is not None})
def place_market_order(self, product_id, side, size=None, funds=None,
                       client_oid=None,
                       stp=None,
                       overdraft_enabled=None,
                       funding_amount=None):
    """Place a market order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        size (Optional[Decimal]): Desired amount in crypto; specify this
            or `funds`
        funds (Optional[Decimal]): Desired amount of quote currency to
            use; specify this or `size`
        client_oid (Optional[str]): User-specified order ID
        stp (Optional[str]): Self-trade prevention flag; see `place_order`
        overdraft_enabled (Optional[bool]): Fund any shortfall from margin;
            mutually exclusive with `funding_amount`
        funding_amount (Optional[Decimal]): Explicit amount of margin
            funding for the order

    Returns:
        dict: Order details; see `place_order` for an example.
    """
    candidate_params = (('product_id', product_id),
                        ('side', side),
                        ('order_type', 'market'),
                        ('size', size),
                        ('funds', funds),
                        ('client_oid', client_oid),
                        ('stp', stp),
                        ('overdraft_enabled', overdraft_enabled),
                        ('funding_amount', funding_amount))
    # Forward only the parameters that were actually supplied.
    return self.place_order(**{name: value
                               for name, value in candidate_params
                               if value is not None})
def place_stop_order(self, product_id, side, price, size=None, funds=None,
                     client_oid=None,
                     stp=None,
                     overdraft_enabled=None,
                     funding_amount=None):
    """Place a stop order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD')
        side (str): Order side ('buy' or 'sell')
        price (Decimal): Price at which the stop order triggers
        size (Optional[Decimal]): Desired amount in crypto; specify this
            or `funds`
        funds (Optional[Decimal]): Desired amount of quote currency to
            use; specify this or `size`
        client_oid (Optional[str]): User-specified order ID
        stp (Optional[str]): Self-trade prevention flag; see `place_order`
        overdraft_enabled (Optional[bool]): Fund any shortfall from margin;
            mutually exclusive with `funding_amount`
        funding_amount (Optional[Decimal]): Explicit amount of margin
            funding for the order

    Returns:
        dict: Order details; see `place_order` for an example.
    """
    stop_params = {'product_id': product_id,
                   'side': side,
                   'price': price,
                   'order_type': 'stop',
                   'size': size,
                   'funds': funds,
                   'client_oid': client_oid,
                   'stp': stp,
                   'overdraft_enabled': overdraft_enabled,
                   'funding_amount': funding_amount}
    # Keep only parameters with explicit values.
    supplied = {}
    for name, value in stop_params.items():
        if value is not None:
            supplied[name] = value
    return self.place_order(**supplied)
def cancel_order(self, order_id):
    """Cancel a previously placed order.

    If the order had no matches during its lifetime its record may be
    purged, in which case the details will no longer be available via
    `get_order(order_id)`. If the order could not be canceled (already
    filled, previously canceled, etc.) the error response carries the
    reason in its message field.

    **Caution**: this takes the server-assigned order id, not the
    optional client_oid.

    Args:
        order_id (str): The order_id of the order to cancel

    Returns:
        list: Containing the order_id of the canceled order. Example::
            [ "c5ab5eae-76be-480e-8961-00792dc7e138" ]
    """
    return self._send_message('delete', '/orders/{}'.format(order_id))
def cancel_all(self, product_id=None):
    """Cancel all open orders on a best-effort basis.

    Args:
        product_id (Optional[str]): Restrict cancellation to orders for
            this product

    Returns:
        list: Ids of the canceled orders. Example::
            [
                "144c6f8e-713f-4682-8435-5280fbe8b2b4",
                "debe4907-95dc-442f-af3b-cec12f42ebda"
            ]
    """
    # Only send a query filter when a product was specified.
    query = {'product_id': product_id} if product_id is not None else None
    return self._send_message('delete', '/orders', params=query)
def get_order(self, order_id):
    """Get a single order by its server-assigned order id.

    If the order is canceled and had no matches, the response may have
    status code 404.

    **Caution**: open orders may change state between the request and
    the response depending on market conditions.

    Args:
        order_id (str): The order to fetch

    Returns:
        dict: Order information. Example::
            {
                "created_at": "2017-06-18T00:27:42.920136Z",
                "executed_value": "0.0000000000000000",
                "fill_fees": "0.0000000000000000",
                "filled_size": "0.00000000",
                "id": "9456f388-67a9-4316-bad1-330c5353804f",
                "post_only": true,
                "price": "1.00000000",
                "product_id": "BTC-USD",
                "settled": false,
                "side": "buy",
                "size": "1.00000000",
                "status": "pending",
                "stp": "dc",
                "time_in_force": "GTC",
                "type": "limit"
            }
    """
    return self._send_message('get', '/orders/{}'.format(order_id))
def get_orders(self, product_id=None, status=None, **kwargs):
    """List your current open orders.

    Returns a generator that may issue multiple HTTP requests while
    being iterated.

    Only open or un-settled orders are returned by default. Orders no
    longer resting on the book carry the 'done' status; there is a small
    window between 'done' and 'settled' while fills settle and holds are
    removed.

    For high-volume trading, maintain your own list of open orders fed
    by a streaming market-data feed, and poll this endpoint once at
    startup to seed it.

    Args:
        product_id (Optional[str]): Only list orders for this product
        status (Optional[list/str]): Limit the list to this status or
            statuses ('open', 'pending', 'active', 'done', 'settled');
            'all' returns every status. Default: ['open', 'pending',
            'active']
        kwargs (dict): Additional HTTP request parameters

    Returns:
        list: Order information. Example::
            [
                {
                    "id": "d0c5340b-6d6c-49d9-b567-48c4bfca13d2",
                    "price": "0.10000000",
                    "size": "0.01000000",
                    "product_id": "BTC-USD",
                    "side": "buy",
                    "stp": "dc",
                    "type": "limit",
                    "time_in_force": "GTC",
                    "post_only": false,
                    "created_at": "2016-12-08T20:02:28.53864Z",
                    "fill_fees": "0.0000000000000000",
                    "filled_size": "0.00000000",
                    "executed_value": "0.0000000000000000",
                    "status": "open",
                    "settled": false
                },
                ...
            ]
    """
    query = dict(kwargs)
    # Explicit arguments take precedence over anything passed in kwargs.
    for key, value in (('product_id', product_id), ('status', status)):
        if value is not None:
            query[key] = value
    return self._send_paginated_message('/orders', params=query)
def get_fills(self, product_id=None, order_id=None, **kwargs):
    """Get a list of recent fills.

    As of 8/23/18 requests without either order_id or product_id are
    rejected by the exchange, so one of the two is required here.

    Returns a generator that may issue multiple HTTP requests while
    being iterated.

    The 'fee' field holds the fees charged for a fill; 'liquidity' is
    'M' when the fill made liquidity and 'T' when it took it.

    Args:
        product_id (str): Limit the list to this product_id
        order_id (str): Limit the list to this order_id
        kwargs (dict): Additional HTTP request parameters

    Returns:
        list: Fill information. Example::
            [
                {
                    "trade_id": 74,
                    "product_id": "BTC-USD",
                    "price": "10.00",
                    "size": "0.01",
                    "order_id": "d50ec984-77a8-460a-b958-66f114b0de9b",
                    "created_at": "2014-11-07T22:19:28.578544Z",
                    "liquidity": "T",
                    "fee": "0.00025",
                    "settled": true,
                    "side": "buy"
                },
                ...
            ]

    Raises:
        ValueError: If neither product_id nor order_id is given.
    """
    if product_id is None and order_id is None:
        raise ValueError('Either product_id or order_id must be specified.')
    query = {}
    for key, value in (('product_id', product_id), ('order_id', order_id)):
        if value:
            query[key] = value
    # kwargs intentionally applied last so callers can override filters.
    query.update(kwargs)
    return self._send_paginated_message('/fills', params=query)
def get_fundings(self, status=None, **kwargs):
    """List margin funding records.

    Every order placed with a margin profile that draws funding creates
    a funding record. Returns a generator that may issue multiple HTTP
    requests while being iterated.

    Args:
        status (list/str): Limit funding records to these statuses
            ('outstanding', 'settled', 'rejected')
        kwargs (dict): Additional HTTP request parameters

    Returns:
        list: Margin funding information. Example::
            [
                {
                    "id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
                    "order_id": "b93d26cd-7193-4c8d-bfcc-446b2fe18f71",
                    "profile_id": "d881e5a6-58eb-47cd-b8e2-8d9f2e3ec6f6",
                    "amount": "1057.6519956381537500",
                    "status": "settled",
                    "created_at": "2017-03-17T23:46:16.663397Z",
                    "currency": "USD",
                    "repaid_amount": "1057.6519956381537500",
                    "default_amount": "0",
                    "repaid_default": false
                },
                ...
            ]
    """
    query = {} if status is None else {'status': status}
    # kwargs intentionally applied last so callers can override filters.
    query.update(kwargs)
    return self._send_paginated_message('/funding', params=query)
def repay_funding(self, amount, currency):
    """Repay margin funding, oldest funding records first.

    Args:
        amount (int): Amount of currency to repay
        currency (str): The currency, example USD

    Returns:
        Not specified by cbpro.
    """
    body = json.dumps({'amount': amount,
                       'currency': currency})  # example: USD
    return self._send_message('post', '/funding/repay', data=body)
def margin_transfer(self, margin_profile_id, transfer_type, currency,
                    amount):
    """Transfer funds between your standard profile and a margin profile.

    Args:
        margin_profile_id (str): Margin profile ID to withdraw from or
            deposit to
        transfer_type (str): 'deposit' or 'withdraw'
        currency (str): Currency to transfer (eg. 'USD')
        amount (Decimal): Amount to transfer

    Returns:
        dict: Transfer details. Example::
            {
                "created_at": "2017-01-25T19:06:23.415126Z",
                "id": "80bc6b74-8b1f-4c60-a089-c61f9810d4ab",
                "user_id": "521c20b3d4ab09621f000011",
                "profile_id": "cda95996-ac59-45a3-a42e-30daeb061867",
                "margin_profile_id": "45fa9e3b-00ba-4631-b907-8a98cbdf21be",
                "type": "deposit",
                "amount": "2",
                "currency": "USD",
                "account_id": "23035fc7-0707-4b59-b0d2-95d0c035f8f5",
                "margin_account_id": "e1d9862c-a259-4e83-96cd-376352a9d24d",
                "margin_product_id": "BTC-USD",
                "status": "completed",
                "nonce": 25
            }
    """
    body = json.dumps({'margin_profile_id': margin_profile_id,
                       'type': transfer_type,
                       'currency': currency,  # example: USD
                       'amount': amount})
    return self._send_message('post', '/profiles/margin-transfer',
                              data=body)
def get_position(self):
    """Get an overview of your margin profile.

    Returns:
        dict: Details about funding, accounts, and margin call.
    """
    endpoint = '/position'
    return self._send_message('get', endpoint)
def close_position(self, repay_only):
    """Close a margin position.

    Args:
        repay_only (bool): Undocumented by cbpro.

    Returns:
        Undocumented
    """
    return self._send_message('post', '/position/close',
                              data=json.dumps({'repay_only': repay_only}))
def deposit(self, amount, currency, payment_method_id):
    """Deposit funds from a payment method.

    See `get_payment_methods()` for information about payment methods.

    Args:
        amount (Decimal): The amount to deposit
        currency (str): The type of currency
        payment_method_id (str): ID of the payment method

    Returns:
        dict: Information about the deposit. Example::
            {
                "id": "593533d2-ff31-46e0-b22e-ca754147a96a",
                "amount": "10.00",
                "currency": "USD",
                "payout_at": "2016-08-20T00:31:09Z"
            }
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'payment_method_id': payment_method_id})
    return self._send_message('post', '/deposits/payment-method',
                              data=body)
def coinbase_deposit(self, amount, currency, coinbase_account_id):
    """Deposit funds from a Coinbase account.

    Funds can move between Coinbase accounts and cbpro trading accounts
    within your daily limits; the transfer is instant and free. See
    `get_coinbase_accounts()` for information about Coinbase accounts.

    Args:
        amount (Decimal): The amount to deposit
        currency (str): The type of currency
        coinbase_account_id (str): ID of the Coinbase account

    Returns:
        dict: Information about the deposit. Example::
            {
                "id": "593533d2-ff31-46e0-b22e-ca754147a96a",
                "amount": "10.00",
                "currency": "BTC",
            }
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'coinbase_account_id': coinbase_account_id})
    return self._send_message('post', '/deposits/coinbase-account',
                              data=body)
def withdraw(self, amount, currency, payment_method_id):
    """Withdraw funds to a payment method.

    See `get_payment_methods()` for information about payment methods.

    Args:
        amount (Decimal): The amount to withdraw
        currency (str): Currency type (eg. 'BTC')
        payment_method_id (str): ID of the payment method

    Returns:
        dict: Withdraw details. Example::
            {
                "id":"593533d2-ff31-46e0-b22e-ca754147a96a",
                "amount": "10.00",
                "currency": "USD",
                "payout_at": "2016-08-20T00:31:09Z"
            }
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'payment_method_id': payment_method_id})
    return self._send_message('post', '/withdrawals/payment-method',
                              data=body)
def coinbase_withdraw(self, amount, currency, coinbase_account_id):
    """Withdraw funds to a Coinbase account.

    Funds can move between Coinbase accounts and cbpro trading accounts
    within your daily limits; the transfer is instant and free. See
    `get_coinbase_accounts()` for information about Coinbase accounts.

    Args:
        amount (Decimal): The amount to withdraw
        currency (str): The type of currency (eg. 'BTC')
        coinbase_account_id (str): ID of the Coinbase account

    Returns:
        dict: Information about the withdrawal. Example::
            {
                "id":"593533d2-ff31-46e0-b22e-ca754147a96a",
                "amount":"10.00",
                "currency": "BTC",
            }
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'coinbase_account_id': coinbase_account_id})
    return self._send_message('post', '/withdrawals/coinbase-account',
                              data=body)
def crypto_withdraw(self, amount, currency, crypto_address):
    """Withdraw funds to a crypto address.

    Args:
        amount (Decimal): The amount to withdraw
        currency (str): The type of currency (eg. 'BTC')
        crypto_address (str): Crypto address to withdraw to

    Returns:
        dict: Withdraw details. Example::
            {
                "id":"593533d2-ff31-46e0-b22e-ca754147a96a",
                "amount":"10.00",
                "currency": "BTC",
            }
    """
    body = json.dumps({'amount': amount,
                       'currency': currency,
                       'crypto_address': crypto_address})
    return self._send_message('post', '/withdrawals/crypto',
                              data=body)
def get_payment_methods(self):
    """List your payment methods.

    Returns:
        list: Payment method details.
    """
    endpoint = '/payment-methods'
    return self._send_message('get', endpoint)
def get_coinbase_accounts(self):
    """List your Coinbase accounts.

    Returns:
        list: Coinbase account details.
    """
    endpoint = '/coinbase-accounts'
    return self._send_message('get', endpoint)
def get_report(self, report_id):
    """Get report status.

    Use to query a specific report once it has been requested.

    Args:
        report_id (str): Report ID

    Returns:
        dict: Report details, including the file url once it is created.
    """
    return self._send_message('get', '/reports/{}'.format(report_id))
def get_trailing_volume(self):
    """Get your 30-day trailing volume for all products.

    This is a cached value, recalculated every day at midnight UTC.

    Returns:
        list: 30-day trailing volumes. Example::
            [
                {
                    "product_id": "BTC-USD",
                    "exchange_volume": "11800.00000000",
                    "volume": "100.00000000",
                    "recorded_at": "1973-11-29T00:05:01.123456Z"
                },
                ...
            ]
    """
    endpoint = '/users/self/trailing-volume'
    return self._send_message('get', endpoint)
|
danpaquin/coinbasepro-python | cbpro/public_client.py | PublicClient.get_product_order_book | python | def get_product_order_book(self, product_id, level=1):
params = {'level': level}
return self._send_message('get',
'/products/{}/book'.format(product_id),
params=params) | Get a list of open orders for a product.
The amount of detail shown can be customized with the `level`
parameter:
* 1: Only the best bid and ask
* 2: Top 50 bids and asks (aggregated)
* 3: Full order book (non aggregated)
Level 1 and Level 2 are recommended for polling. For the most
up-to-date data, consider using the websocket stream.
**Caution**: Level 3 is only recommended for users wishing to
maintain a full real-time order book using the websocket
stream. Abuse of Level 3 via polling will cause your access to
be limited or blocked.
Args:
product_id (str): Product
level (Optional[int]): Order book level (1, 2, or 3).
Default is 1.
Returns:
dict: Order book. Example for level 1::
{
"sequence": "3",
"bids": [
[ price, size, num-orders ],
],
"asks": [
[ price, size, num-orders ],
]
} | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/public_client.py#L52-L90 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class PublicClient(object):
"""cbpro public client API.
All requests default to the `product_id` specified at object
creation if not otherwise specified.
Attributes:
url (Optional[str]): API URL. Defaults to cbpro API.
"""
def __init__(self, api_url='https://api.pro.coinbase.com', timeout=30):
"""Create cbpro API public client.
Args:
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
self.url = api_url.rstrip('/')
self.auth = None
self.session = requests.Session()
def get_products(self):
"""Get a list of available currency pairs for trading.
Returns:
list: Info about all currency pairs. Example::
[
{
"id": "BTC-USD",
"display_name": "BTC/USD",
"base_currency": "BTC",
"quote_currency": "USD",
"base_min_size": "0.01",
"base_max_size": "10000.00",
"quote_increment": "0.01"
}
]
"""
return self._send_message('get', '/products')
def get_product_ticker(self, product_id):
"""Snapshot about the last trade (tick), best bid/ask and 24h volume.
**Caution**: Polling is discouraged in favor of connecting via
the websocket stream and listening for match messages.
Args:
product_id (str): Product
Returns:
dict: Ticker info. Example::
{
"trade_id": 4729088,
"price": "333.99",
"size": "0.193",
"bid": "333.98",
"ask": "333.99",
"volume": "5957.11914015",
"time": "2015-11-14T20:46:03.511254Z"
}
"""
return self._send_message('get',
'/products/{}/ticker'.format(product_id))
def get_product_trades(self, product_id, before='', after='', limit=None, result=None):
"""List the latest trades for a product.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
product_id (str): Product
before (Optional[str]): start time in ISO 8601
after (Optional[str]): end time in ISO 8601
limit (Optional[int]): the desired number of trades (can be more than 100,
automatically paginated)
results (Optional[list]): list of results that is used for the pagination
Returns:
list: Latest trades. Example::
[{
"time": "2014-11-07T22:19:28.578544Z",
"trade_id": 74,
"price": "10.00000000",
"size": "0.01000000",
"side": "buy"
}, {
"time": "2014-11-07T01:08:43.642366Z",
"trade_id": 73,
"price": "100.00000000",
"size": "0.01000000",
"side": "sell"
}]
"""
return self._send_paginated_message('/products/{}/trades'
.format(product_id))
def get_product_historic_rates(self, product_id, start=None, end=None,
granularity=None):
"""Historic rates for a product.
Rates are returned in grouped buckets based on requested
`granularity`. If start, end, and granularity aren't provided,
the exchange will assume some (currently unknown) default values.
Historical rate data may be incomplete. No data is published for
intervals where there are no ticks.
**Caution**: Historical rates should not be polled frequently.
If you need real-time information, use the trade and book
endpoints along with the websocket feed.
The maximum number of data points for a single request is 200
candles. If your selection of start/end time and granularity
will result in more than 200 data points, your request will be
rejected. If you wish to retrieve fine granularity data over a
larger time range, you will need to make multiple requests with
new start/end ranges.
Args:
product_id (str): Product
start (Optional[str]): Start time in ISO 8601
end (Optional[str]): End time in ISO 8601
granularity (Optional[int]): Desired time slice in seconds
Returns:
list: Historic candle data. Example:
[
[ time, low, high, open, close, volume ],
[ 1415398768, 0.32, 4.2, 0.35, 4.2, 12.3 ],
...
]
"""
params = {}
if start is not None:
params['start'] = start
if end is not None:
params['end'] = end
if granularity is not None:
acceptedGrans = [60, 300, 900, 3600, 21600, 86400]
if granularity not in acceptedGrans:
raise ValueError( 'Specified granularity is {}, must be in approved values: {}'.format(
granularity, acceptedGrans) )
params['granularity'] = granularity
return self._send_message('get',
'/products/{}/candles'.format(product_id),
params=params)
def get_product_24hr_stats(self, product_id):
"""Get 24 hr stats for the product.
Args:
product_id (str): Product
Returns:
dict: 24 hour stats. Volume is in base currency units.
Open, high, low are in quote currency units. Example::
{
"open": "34.19000000",
"high": "95.70000000",
"low": "7.06000000",
"volume": "2.41000000"
}
"""
return self._send_message('get',
'/products/{}/stats'.format(product_id))
def get_currencies(self):
"""List known currencies.
Returns:
list: List of currencies. Example::
[{
"id": "BTC",
"name": "Bitcoin",
"min_size": "0.00000001"
}, {
"id": "USD",
"name": "United States Dollar",
"min_size": "0.01000000"
}]
"""
return self._send_message('get', '/currencies')
def get_time(self):
"""Get the API server time.
Returns:
dict: Server time in ISO and epoch format (decimal seconds
since Unix epoch). Example::
{
"iso": "2015-01-07T23:47:25.201Z",
"epoch": 1420674445.201
}
"""
return self._send_message('get', '/time')
def _send_message(self, method, endpoint, params=None, data=None):
"""Send API request.
Args:
method (str): HTTP method (get, post, delete, etc.)
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
data (Optional[str]): JSON-encoded string payload for POST
Returns:
dict/list: JSON response
"""
url = self.url + endpoint
r = self.session.request(method, url, params=params, data=data,
auth=self.auth, timeout=30)
return r.json()
def _send_paginated_message(self, endpoint, params=None):
""" Send API message that results in a paginated response.
The paginated responses are abstracted away by making API requests on
demand as the response is iterated over.
Paginated API messages support 3 additional parameters: `before`,
`after`, and `limit`. `before` and `after` are mutually exclusive. To
use them, supply an index value for that endpoint (the field used for
indexing varies by endpoint - get_fills() uses 'trade_id', for example).
`before`: Only get data that occurs more recently than index
`after`: Only get data that occurs further in the past than index
`limit`: Set amount of data per HTTP response. Default (and
maximum) of 100.
Args:
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
Yields:
dict: API response objects
"""
if params is None:
params = dict()
url = self.url + endpoint
while True:
r = self.session.get(url, params=params, auth=self.auth, timeout=30)
results = r.json()
for result in results:
yield result
# If there are no more pages, we're done. Otherwise update `after`
# param to get next page.
# If this request included `before` don't get any more pages - the
# cbpro API doesn't support multiple pages in that case.
if not r.headers.get('cb-after') or \
params.get('before') is not None:
break
else:
params['after'] = r.headers['cb-after']
|
danpaquin/coinbasepro-python | cbpro/public_client.py | PublicClient.get_product_trades | python | def get_product_trades(self, product_id, before='', after='', limit=None, result=None):
return self._send_paginated_message('/products/{}/trades'
.format(product_id)) | List the latest trades for a product.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
product_id (str): Product
before (Optional[str]): start time in ISO 8601
after (Optional[str]): end time in ISO 8601
limit (Optional[int]): the desired number of trades (can be more than 100,
automatically paginated)
results (Optional[list]): list of results that is used for the pagination
Returns:
list: Latest trades. Example::
[{
"time": "2014-11-07T22:19:28.578544Z",
"trade_id": 74,
"price": "10.00000000",
"size": "0.01000000",
"side": "buy"
}, {
"time": "2014-11-07T01:08:43.642366Z",
"trade_id": 73,
"price": "100.00000000",
"size": "0.01000000",
"side": "sell"
}] | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/public_client.py#L117-L147 | [
"def _send_paginated_message(self, endpoint, params=None):\n \"\"\" Send API message that results in a paginated response.\n\n The paginated responses are abstracted away by making API requests on\n demand as the response is iterated over.\n\n Paginated API messages support 3 additional parameters: `bef... | class PublicClient(object):
"""cbpro public client API.
All requests default to the `product_id` specified at object
creation if not otherwise specified.
Attributes:
url (Optional[str]): API URL. Defaults to cbpro API.
"""
def __init__(self, api_url='https://api.pro.coinbase.com', timeout=30):
"""Create cbpro API public client.
Args:
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
self.url = api_url.rstrip('/')
self.auth = None
self.session = requests.Session()
def get_products(self):
"""Get a list of available currency pairs for trading.
Returns:
list: Info about all currency pairs. Example::
[
{
"id": "BTC-USD",
"display_name": "BTC/USD",
"base_currency": "BTC",
"quote_currency": "USD",
"base_min_size": "0.01",
"base_max_size": "10000.00",
"quote_increment": "0.01"
}
]
"""
return self._send_message('get', '/products')
def get_product_order_book(self, product_id, level=1):
"""Get a list of open orders for a product.
The amount of detail shown can be customized with the `level`
parameter:
* 1: Only the best bid and ask
* 2: Top 50 bids and asks (aggregated)
* 3: Full order book (non aggregated)
Level 1 and Level 2 are recommended for polling. For the most
up-to-date data, consider using the websocket stream.
**Caution**: Level 3 is only recommended for users wishing to
maintain a full real-time order book using the websocket
stream. Abuse of Level 3 via polling will cause your access to
be limited or blocked.
Args:
product_id (str): Product
level (Optional[int]): Order book level (1, 2, or 3).
Default is 1.
Returns:
dict: Order book. Example for level 1::
{
"sequence": "3",
"bids": [
[ price, size, num-orders ],
],
"asks": [
[ price, size, num-orders ],
]
}
"""
params = {'level': level}
return self._send_message('get',
'/products/{}/book'.format(product_id),
params=params)
def get_product_ticker(self, product_id):
"""Snapshot about the last trade (tick), best bid/ask and 24h volume.
**Caution**: Polling is discouraged in favor of connecting via
the websocket stream and listening for match messages.
Args:
product_id (str): Product
Returns:
dict: Ticker info. Example::
{
"trade_id": 4729088,
"price": "333.99",
"size": "0.193",
"bid": "333.98",
"ask": "333.99",
"volume": "5957.11914015",
"time": "2015-11-14T20:46:03.511254Z"
}
"""
return self._send_message('get',
'/products/{}/ticker'.format(product_id))
def get_product_historic_rates(self, product_id, start=None, end=None,
granularity=None):
"""Historic rates for a product.
Rates are returned in grouped buckets based on requested
`granularity`. If start, end, and granularity aren't provided,
the exchange will assume some (currently unknown) default values.
Historical rate data may be incomplete. No data is published for
intervals where there are no ticks.
**Caution**: Historical rates should not be polled frequently.
If you need real-time information, use the trade and book
endpoints along with the websocket feed.
The maximum number of data points for a single request is 200
candles. If your selection of start/end time and granularity
will result in more than 200 data points, your request will be
rejected. If you wish to retrieve fine granularity data over a
larger time range, you will need to make multiple requests with
new start/end ranges.
Args:
product_id (str): Product
start (Optional[str]): Start time in ISO 8601
end (Optional[str]): End time in ISO 8601
granularity (Optional[int]): Desired time slice in seconds
Returns:
list: Historic candle data. Example:
[
[ time, low, high, open, close, volume ],
[ 1415398768, 0.32, 4.2, 0.35, 4.2, 12.3 ],
...
]
"""
params = {}
if start is not None:
params['start'] = start
if end is not None:
params['end'] = end
if granularity is not None:
acceptedGrans = [60, 300, 900, 3600, 21600, 86400]
if granularity not in acceptedGrans:
raise ValueError( 'Specified granularity is {}, must be in approved values: {}'.format(
granularity, acceptedGrans) )
params['granularity'] = granularity
return self._send_message('get',
'/products/{}/candles'.format(product_id),
params=params)
def get_product_24hr_stats(self, product_id):
"""Get 24 hr stats for the product.
Args:
product_id (str): Product
Returns:
dict: 24 hour stats. Volume is in base currency units.
Open, high, low are in quote currency units. Example::
{
"open": "34.19000000",
"high": "95.70000000",
"low": "7.06000000",
"volume": "2.41000000"
}
"""
return self._send_message('get',
'/products/{}/stats'.format(product_id))
def get_currencies(self):
"""List known currencies.
Returns:
list: List of currencies. Example::
[{
"id": "BTC",
"name": "Bitcoin",
"min_size": "0.00000001"
}, {
"id": "USD",
"name": "United States Dollar",
"min_size": "0.01000000"
}]
"""
return self._send_message('get', '/currencies')
def get_time(self):
"""Get the API server time.
Returns:
dict: Server time in ISO and epoch format (decimal seconds
since Unix epoch). Example::
{
"iso": "2015-01-07T23:47:25.201Z",
"epoch": 1420674445.201
}
"""
return self._send_message('get', '/time')
def _send_message(self, method, endpoint, params=None, data=None):
"""Send API request.
Args:
method (str): HTTP method (get, post, delete, etc.)
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
data (Optional[str]): JSON-encoded string payload for POST
Returns:
dict/list: JSON response
"""
url = self.url + endpoint
r = self.session.request(method, url, params=params, data=data,
auth=self.auth, timeout=30)
return r.json()
def _send_paginated_message(self, endpoint, params=None):
""" Send API message that results in a paginated response.
The paginated responses are abstracted away by making API requests on
demand as the response is iterated over.
Paginated API messages support 3 additional parameters: `before`,
`after`, and `limit`. `before` and `after` are mutually exclusive. To
use them, supply an index value for that endpoint (the field used for
indexing varies by endpoint - get_fills() uses 'trade_id', for example).
`before`: Only get data that occurs more recently than index
`after`: Only get data that occurs further in the past than index
`limit`: Set amount of data per HTTP response. Default (and
maximum) of 100.
Args:
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
Yields:
dict: API response objects
"""
if params is None:
params = dict()
url = self.url + endpoint
while True:
r = self.session.get(url, params=params, auth=self.auth, timeout=30)
results = r.json()
for result in results:
yield result
# If there are no more pages, we're done. Otherwise update `after`
# param to get next page.
# If this request included `before` don't get any more pages - the
# cbpro API doesn't support multiple pages in that case.
if not r.headers.get('cb-after') or \
params.get('before') is not None:
break
else:
params['after'] = r.headers['cb-after']
|
danpaquin/coinbasepro-python | cbpro/public_client.py | PublicClient.get_product_historic_rates | python | def get_product_historic_rates(self, product_id, start=None, end=None,
granularity=None):
params = {}
if start is not None:
params['start'] = start
if end is not None:
params['end'] = end
if granularity is not None:
acceptedGrans = [60, 300, 900, 3600, 21600, 86400]
if granularity not in acceptedGrans:
raise ValueError( 'Specified granularity is {}, must be in approved values: {}'.format(
granularity, acceptedGrans) )
params['granularity'] = granularity
return self._send_message('get',
'/products/{}/candles'.format(product_id),
params=params) | Historic rates for a product.
Rates are returned in grouped buckets based on requested
`granularity`. If start, end, and granularity aren't provided,
the exchange will assume some (currently unknown) default values.
Historical rate data may be incomplete. No data is published for
intervals where there are no ticks.
**Caution**: Historical rates should not be polled frequently.
If you need real-time information, use the trade and book
endpoints along with the websocket feed.
The maximum number of data points for a single request is 200
candles. If your selection of start/end time and granularity
will result in more than 200 data points, your request will be
rejected. If you wish to retrieve fine granularity data over a
larger time range, you will need to make multiple requests with
new start/end ranges.
Args:
product_id (str): Product
start (Optional[str]): Start time in ISO 8601
end (Optional[str]): End time in ISO 8601
granularity (Optional[int]): Desired time slice in seconds
Returns:
list: Historic candle data. Example:
[
[ time, low, high, open, close, volume ],
[ 1415398768, 0.32, 4.2, 0.35, 4.2, 12.3 ],
...
] | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/public_client.py#L149-L200 | [
"def _send_message(self, method, endpoint, params=None, data=None):\n \"\"\"Send API request.\n\n Args:\n method (str): HTTP method (get, post, delete, etc.)\n endpoint (str): Endpoint (to be added to base URL)\n params (Optional[dict]): HTTP request parameters\n data (Optional[str... | class PublicClient(object):
"""cbpro public client API.
All requests default to the `product_id` specified at object
creation if not otherwise specified.
Attributes:
url (Optional[str]): API URL. Defaults to cbpro API.
"""
def __init__(self, api_url='https://api.pro.coinbase.com', timeout=30):
"""Create cbpro API public client.
Args:
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
self.url = api_url.rstrip('/')
self.auth = None
self.session = requests.Session()
def get_products(self):
"""Get a list of available currency pairs for trading.
Returns:
list: Info about all currency pairs. Example::
[
{
"id": "BTC-USD",
"display_name": "BTC/USD",
"base_currency": "BTC",
"quote_currency": "USD",
"base_min_size": "0.01",
"base_max_size": "10000.00",
"quote_increment": "0.01"
}
]
"""
return self._send_message('get', '/products')
def get_product_order_book(self, product_id, level=1):
"""Get a list of open orders for a product.
The amount of detail shown can be customized with the `level`
parameter:
* 1: Only the best bid and ask
* 2: Top 50 bids and asks (aggregated)
* 3: Full order book (non aggregated)
Level 1 and Level 2 are recommended for polling. For the most
up-to-date data, consider using the websocket stream.
**Caution**: Level 3 is only recommended for users wishing to
maintain a full real-time order book using the websocket
stream. Abuse of Level 3 via polling will cause your access to
be limited or blocked.
Args:
product_id (str): Product
level (Optional[int]): Order book level (1, 2, or 3).
Default is 1.
Returns:
dict: Order book. Example for level 1::
{
"sequence": "3",
"bids": [
[ price, size, num-orders ],
],
"asks": [
[ price, size, num-orders ],
]
}
"""
params = {'level': level}
return self._send_message('get',
'/products/{}/book'.format(product_id),
params=params)
def get_product_ticker(self, product_id):
"""Snapshot about the last trade (tick), best bid/ask and 24h volume.
**Caution**: Polling is discouraged in favor of connecting via
the websocket stream and listening for match messages.
Args:
product_id (str): Product
Returns:
dict: Ticker info. Example::
{
"trade_id": 4729088,
"price": "333.99",
"size": "0.193",
"bid": "333.98",
"ask": "333.99",
"volume": "5957.11914015",
"time": "2015-11-14T20:46:03.511254Z"
}
"""
return self._send_message('get',
'/products/{}/ticker'.format(product_id))
def get_product_trades(self, product_id, before='', after='', limit=None, result=None):
"""List the latest trades for a product.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
product_id (str): Product
before (Optional[str]): start time in ISO 8601
after (Optional[str]): end time in ISO 8601
limit (Optional[int]): the desired number of trades (can be more than 100,
automatically paginated)
results (Optional[list]): list of results that is used for the pagination
Returns:
list: Latest trades. Example::
[{
"time": "2014-11-07T22:19:28.578544Z",
"trade_id": 74,
"price": "10.00000000",
"size": "0.01000000",
"side": "buy"
}, {
"time": "2014-11-07T01:08:43.642366Z",
"trade_id": 73,
"price": "100.00000000",
"size": "0.01000000",
"side": "sell"
}]
"""
return self._send_paginated_message('/products/{}/trades'
.format(product_id))
def get_product_24hr_stats(self, product_id):
"""Get 24 hr stats for the product.
Args:
product_id (str): Product
Returns:
dict: 24 hour stats. Volume is in base currency units.
Open, high, low are in quote currency units. Example::
{
"open": "34.19000000",
"high": "95.70000000",
"low": "7.06000000",
"volume": "2.41000000"
}
"""
return self._send_message('get',
'/products/{}/stats'.format(product_id))
def get_currencies(self):
"""List known currencies.
Returns:
list: List of currencies. Example::
[{
"id": "BTC",
"name": "Bitcoin",
"min_size": "0.00000001"
}, {
"id": "USD",
"name": "United States Dollar",
"min_size": "0.01000000"
}]
"""
return self._send_message('get', '/currencies')
def get_time(self):
"""Get the API server time.
Returns:
dict: Server time in ISO and epoch format (decimal seconds
since Unix epoch). Example::
{
"iso": "2015-01-07T23:47:25.201Z",
"epoch": 1420674445.201
}
"""
return self._send_message('get', '/time')
def _send_message(self, method, endpoint, params=None, data=None):
"""Send API request.
Args:
method (str): HTTP method (get, post, delete, etc.)
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
data (Optional[str]): JSON-encoded string payload for POST
Returns:
dict/list: JSON response
"""
url = self.url + endpoint
r = self.session.request(method, url, params=params, data=data,
auth=self.auth, timeout=30)
return r.json()
def _send_paginated_message(self, endpoint, params=None):
""" Send API message that results in a paginated response.
The paginated responses are abstracted away by making API requests on
demand as the response is iterated over.
Paginated API messages support 3 additional parameters: `before`,
`after`, and `limit`. `before` and `after` are mutually exclusive. To
use them, supply an index value for that endpoint (the field used for
indexing varies by endpoint - get_fills() uses 'trade_id', for example).
`before`: Only get data that occurs more recently than index
`after`: Only get data that occurs further in the past than index
`limit`: Set amount of data per HTTP response. Default (and
maximum) of 100.
Args:
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
Yields:
dict: API response objects
"""
if params is None:
params = dict()
url = self.url + endpoint
while True:
r = self.session.get(url, params=params, auth=self.auth, timeout=30)
results = r.json()
for result in results:
yield result
# If there are no more pages, we're done. Otherwise update `after`
# param to get next page.
# If this request included `before` don't get any more pages - the
# cbpro API doesn't support multiple pages in that case.
if not r.headers.get('cb-after') or \
params.get('before') is not None:
break
else:
params['after'] = r.headers['cb-after']
|
danpaquin/coinbasepro-python | cbpro/public_client.py | PublicClient._send_message | python | def _send_message(self, method, endpoint, params=None, data=None):
url = self.url + endpoint
r = self.session.request(method, url, params=params, data=data,
auth=self.auth, timeout=30)
return r.json() | Send API request.
Args:
method (str): HTTP method (get, post, delete, etc.)
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
data (Optional[str]): JSON-encoded string payload for POST
Returns:
dict/list: JSON response | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/public_client.py#L254-L270 | null | class PublicClient(object):
"""cbpro public client API.
All requests default to the `product_id` specified at object
creation if not otherwise specified.
Attributes:
url (Optional[str]): API URL. Defaults to cbpro API.
"""
def __init__(self, api_url='https://api.pro.coinbase.com', timeout=30):
"""Create cbpro API public client.
Args:
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
self.url = api_url.rstrip('/')
self.auth = None
self.session = requests.Session()
def get_products(self):
"""Get a list of available currency pairs for trading.
Returns:
list: Info about all currency pairs. Example::
[
{
"id": "BTC-USD",
"display_name": "BTC/USD",
"base_currency": "BTC",
"quote_currency": "USD",
"base_min_size": "0.01",
"base_max_size": "10000.00",
"quote_increment": "0.01"
}
]
"""
return self._send_message('get', '/products')
def get_product_order_book(self, product_id, level=1):
"""Get a list of open orders for a product.
The amount of detail shown can be customized with the `level`
parameter:
* 1: Only the best bid and ask
* 2: Top 50 bids and asks (aggregated)
* 3: Full order book (non aggregated)
Level 1 and Level 2 are recommended for polling. For the most
up-to-date data, consider using the websocket stream.
**Caution**: Level 3 is only recommended for users wishing to
maintain a full real-time order book using the websocket
stream. Abuse of Level 3 via polling will cause your access to
be limited or blocked.
Args:
product_id (str): Product
level (Optional[int]): Order book level (1, 2, or 3).
Default is 1.
Returns:
dict: Order book. Example for level 1::
{
"sequence": "3",
"bids": [
[ price, size, num-orders ],
],
"asks": [
[ price, size, num-orders ],
]
}
"""
params = {'level': level}
return self._send_message('get',
'/products/{}/book'.format(product_id),
params=params)
def get_product_ticker(self, product_id):
"""Snapshot about the last trade (tick), best bid/ask and 24h volume.
**Caution**: Polling is discouraged in favor of connecting via
the websocket stream and listening for match messages.
Args:
product_id (str): Product
Returns:
dict: Ticker info. Example::
{
"trade_id": 4729088,
"price": "333.99",
"size": "0.193",
"bid": "333.98",
"ask": "333.99",
"volume": "5957.11914015",
"time": "2015-11-14T20:46:03.511254Z"
}
"""
return self._send_message('get',
'/products/{}/ticker'.format(product_id))
def get_product_trades(self, product_id, before='', after='', limit=None, result=None):
"""List the latest trades for a product.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
product_id (str): Product
before (Optional[str]): start time in ISO 8601
after (Optional[str]): end time in ISO 8601
limit (Optional[int]): the desired number of trades (can be more than 100,
automatically paginated)
results (Optional[list]): list of results that is used for the pagination
Returns:
list: Latest trades. Example::
[{
"time": "2014-11-07T22:19:28.578544Z",
"trade_id": 74,
"price": "10.00000000",
"size": "0.01000000",
"side": "buy"
}, {
"time": "2014-11-07T01:08:43.642366Z",
"trade_id": 73,
"price": "100.00000000",
"size": "0.01000000",
"side": "sell"
}]
"""
return self._send_paginated_message('/products/{}/trades'
.format(product_id))
def get_product_historic_rates(self, product_id, start=None, end=None,
granularity=None):
"""Historic rates for a product.
Rates are returned in grouped buckets based on requested
`granularity`. If start, end, and granularity aren't provided,
the exchange will assume some (currently unknown) default values.
Historical rate data may be incomplete. No data is published for
intervals where there are no ticks.
**Caution**: Historical rates should not be polled frequently.
If you need real-time information, use the trade and book
endpoints along with the websocket feed.
The maximum number of data points for a single request is 200
candles. If your selection of start/end time and granularity
will result in more than 200 data points, your request will be
rejected. If you wish to retrieve fine granularity data over a
larger time range, you will need to make multiple requests with
new start/end ranges.
Args:
product_id (str): Product
start (Optional[str]): Start time in ISO 8601
end (Optional[str]): End time in ISO 8601
granularity (Optional[int]): Desired time slice in seconds
Returns:
list: Historic candle data. Example:
[
[ time, low, high, open, close, volume ],
[ 1415398768, 0.32, 4.2, 0.35, 4.2, 12.3 ],
...
]
"""
params = {}
if start is not None:
params['start'] = start
if end is not None:
params['end'] = end
if granularity is not None:
acceptedGrans = [60, 300, 900, 3600, 21600, 86400]
if granularity not in acceptedGrans:
raise ValueError( 'Specified granularity is {}, must be in approved values: {}'.format(
granularity, acceptedGrans) )
params['granularity'] = granularity
return self._send_message('get',
'/products/{}/candles'.format(product_id),
params=params)
def get_product_24hr_stats(self, product_id):
"""Get 24 hr stats for the product.
Args:
product_id (str): Product
Returns:
dict: 24 hour stats. Volume is in base currency units.
Open, high, low are in quote currency units. Example::
{
"open": "34.19000000",
"high": "95.70000000",
"low": "7.06000000",
"volume": "2.41000000"
}
"""
return self._send_message('get',
'/products/{}/stats'.format(product_id))
def get_currencies(self):
"""List known currencies.
Returns:
list: List of currencies. Example::
[{
"id": "BTC",
"name": "Bitcoin",
"min_size": "0.00000001"
}, {
"id": "USD",
"name": "United States Dollar",
"min_size": "0.01000000"
}]
"""
return self._send_message('get', '/currencies')
def get_time(self):
"""Get the API server time.
Returns:
dict: Server time in ISO and epoch format (decimal seconds
since Unix epoch). Example::
{
"iso": "2015-01-07T23:47:25.201Z",
"epoch": 1420674445.201
}
"""
return self._send_message('get', '/time')
def _send_paginated_message(self, endpoint, params=None):
""" Send API message that results in a paginated response.
The paginated responses are abstracted away by making API requests on
demand as the response is iterated over.
Paginated API messages support 3 additional parameters: `before`,
`after`, and `limit`. `before` and `after` are mutually exclusive. To
use them, supply an index value for that endpoint (the field used for
indexing varies by endpoint - get_fills() uses 'trade_id', for example).
`before`: Only get data that occurs more recently than index
`after`: Only get data that occurs further in the past than index
`limit`: Set amount of data per HTTP response. Default (and
maximum) of 100.
Args:
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
Yields:
dict: API response objects
"""
if params is None:
params = dict()
url = self.url + endpoint
while True:
r = self.session.get(url, params=params, auth=self.auth, timeout=30)
results = r.json()
for result in results:
yield result
# If there are no more pages, we're done. Otherwise update `after`
# param to get next page.
# If this request included `before` don't get any more pages - the
# cbpro API doesn't support multiple pages in that case.
if not r.headers.get('cb-after') or \
params.get('before') is not None:
break
else:
params['after'] = r.headers['cb-after']
|
danpaquin/coinbasepro-python | cbpro/public_client.py | PublicClient._send_paginated_message | python | def _send_paginated_message(self, endpoint, params=None):
if params is None:
params = dict()
url = self.url + endpoint
while True:
r = self.session.get(url, params=params, auth=self.auth, timeout=30)
results = r.json()
for result in results:
yield result
# If there are no more pages, we're done. Otherwise update `after`
# param to get next page.
# If this request included `before` don't get any more pages - the
# cbpro API doesn't support multiple pages in that case.
if not r.headers.get('cb-after') or \
params.get('before') is not None:
break
else:
params['after'] = r.headers['cb-after'] | Send API message that results in a paginated response.
The paginated responses are abstracted away by making API requests on
demand as the response is iterated over.
Paginated API messages support 3 additional parameters: `before`,
`after`, and `limit`. `before` and `after` are mutually exclusive. To
use them, supply an index value for that endpoint (the field used for
indexing varies by endpoint - get_fills() uses 'trade_id', for example).
`before`: Only get data that occurs more recently than index
`after`: Only get data that occurs further in the past than index
`limit`: Set amount of data per HTTP response. Default (and
maximum) of 100.
Args:
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
Yields:
dict: API response objects | train | https://github.com/danpaquin/coinbasepro-python/blob/0a9dbd86a25ae266d0e0eefeb112368c284b7dcc/cbpro/public_client.py#L272-L311 | null | class PublicClient(object):
"""cbpro public client API.
All requests default to the `product_id` specified at object
creation if not otherwise specified.
Attributes:
url (Optional[str]): API URL. Defaults to cbpro API.
"""
def __init__(self, api_url='https://api.pro.coinbase.com', timeout=30):
"""Create cbpro API public client.
Args:
api_url (Optional[str]): API URL. Defaults to cbpro API.
"""
self.url = api_url.rstrip('/')
self.auth = None
self.session = requests.Session()
def get_products(self):
"""Get a list of available currency pairs for trading.
Returns:
list: Info about all currency pairs. Example::
[
{
"id": "BTC-USD",
"display_name": "BTC/USD",
"base_currency": "BTC",
"quote_currency": "USD",
"base_min_size": "0.01",
"base_max_size": "10000.00",
"quote_increment": "0.01"
}
]
"""
return self._send_message('get', '/products')
def get_product_order_book(self, product_id, level=1):
"""Get a list of open orders for a product.
The amount of detail shown can be customized with the `level`
parameter:
* 1: Only the best bid and ask
* 2: Top 50 bids and asks (aggregated)
* 3: Full order book (non aggregated)
Level 1 and Level 2 are recommended for polling. For the most
up-to-date data, consider using the websocket stream.
**Caution**: Level 3 is only recommended for users wishing to
maintain a full real-time order book using the websocket
stream. Abuse of Level 3 via polling will cause your access to
be limited or blocked.
Args:
product_id (str): Product
level (Optional[int]): Order book level (1, 2, or 3).
Default is 1.
Returns:
dict: Order book. Example for level 1::
{
"sequence": "3",
"bids": [
[ price, size, num-orders ],
],
"asks": [
[ price, size, num-orders ],
]
}
"""
params = {'level': level}
return self._send_message('get',
'/products/{}/book'.format(product_id),
params=params)
def get_product_ticker(self, product_id):
"""Snapshot about the last trade (tick), best bid/ask and 24h volume.
**Caution**: Polling is discouraged in favor of connecting via
the websocket stream and listening for match messages.
Args:
product_id (str): Product
Returns:
dict: Ticker info. Example::
{
"trade_id": 4729088,
"price": "333.99",
"size": "0.193",
"bid": "333.98",
"ask": "333.99",
"volume": "5957.11914015",
"time": "2015-11-14T20:46:03.511254Z"
}
"""
return self._send_message('get',
'/products/{}/ticker'.format(product_id))
def get_product_trades(self, product_id, before='', after='', limit=None, result=None):
"""List the latest trades for a product.
This method returns a generator which may make multiple HTTP requests
while iterating through it.
Args:
product_id (str): Product
before (Optional[str]): start time in ISO 8601
after (Optional[str]): end time in ISO 8601
limit (Optional[int]): the desired number of trades (can be more than 100,
automatically paginated)
results (Optional[list]): list of results that is used for the pagination
Returns:
list: Latest trades. Example::
[{
"time": "2014-11-07T22:19:28.578544Z",
"trade_id": 74,
"price": "10.00000000",
"size": "0.01000000",
"side": "buy"
}, {
"time": "2014-11-07T01:08:43.642366Z",
"trade_id": 73,
"price": "100.00000000",
"size": "0.01000000",
"side": "sell"
}]
"""
return self._send_paginated_message('/products/{}/trades'
.format(product_id))
def get_product_historic_rates(self, product_id, start=None, end=None,
granularity=None):
"""Historic rates for a product.
Rates are returned in grouped buckets based on requested
`granularity`. If start, end, and granularity aren't provided,
the exchange will assume some (currently unknown) default values.
Historical rate data may be incomplete. No data is published for
intervals where there are no ticks.
**Caution**: Historical rates should not be polled frequently.
If you need real-time information, use the trade and book
endpoints along with the websocket feed.
The maximum number of data points for a single request is 200
candles. If your selection of start/end time and granularity
will result in more than 200 data points, your request will be
rejected. If you wish to retrieve fine granularity data over a
larger time range, you will need to make multiple requests with
new start/end ranges.
Args:
product_id (str): Product
start (Optional[str]): Start time in ISO 8601
end (Optional[str]): End time in ISO 8601
granularity (Optional[int]): Desired time slice in seconds
Returns:
list: Historic candle data. Example:
[
[ time, low, high, open, close, volume ],
[ 1415398768, 0.32, 4.2, 0.35, 4.2, 12.3 ],
...
]
"""
params = {}
if start is not None:
params['start'] = start
if end is not None:
params['end'] = end
if granularity is not None:
acceptedGrans = [60, 300, 900, 3600, 21600, 86400]
if granularity not in acceptedGrans:
raise ValueError( 'Specified granularity is {}, must be in approved values: {}'.format(
granularity, acceptedGrans) )
params['granularity'] = granularity
return self._send_message('get',
'/products/{}/candles'.format(product_id),
params=params)
def get_product_24hr_stats(self, product_id):
"""Get 24 hr stats for the product.
Args:
product_id (str): Product
Returns:
dict: 24 hour stats. Volume is in base currency units.
Open, high, low are in quote currency units. Example::
{
"open": "34.19000000",
"high": "95.70000000",
"low": "7.06000000",
"volume": "2.41000000"
}
"""
return self._send_message('get',
'/products/{}/stats'.format(product_id))
def get_currencies(self):
"""List known currencies.
Returns:
list: List of currencies. Example::
[{
"id": "BTC",
"name": "Bitcoin",
"min_size": "0.00000001"
}, {
"id": "USD",
"name": "United States Dollar",
"min_size": "0.01000000"
}]
"""
return self._send_message('get', '/currencies')
def get_time(self):
"""Get the API server time.
Returns:
dict: Server time in ISO and epoch format (decimal seconds
since Unix epoch). Example::
{
"iso": "2015-01-07T23:47:25.201Z",
"epoch": 1420674445.201
}
"""
return self._send_message('get', '/time')
def _send_message(self, method, endpoint, params=None, data=None):
"""Send API request.
Args:
method (str): HTTP method (get, post, delete, etc.)
endpoint (str): Endpoint (to be added to base URL)
params (Optional[dict]): HTTP request parameters
data (Optional[str]): JSON-encoded string payload for POST
Returns:
dict/list: JSON response
"""
url = self.url + endpoint
r = self.session.request(method, url, params=params, data=data,
auth=self.auth, timeout=30)
return r.json()
|
scikit-tda/kepler-mapper | kmapper/kmapper.py | KeplerMapper.project | python | def project(
self,
X,
projection="sum",
scaler=preprocessing.MinMaxScaler(),
distance_matrix=None,
):
# Sae original values off so they can be referenced by later functions in the pipeline
self.inverse = X
self.scaler = scaler
self.projection = str(projection)
self.distance_matrix = distance_matrix
if self.verbose > 0:
print("..Projecting on data shaped %s" % (str(X.shape)))
# If distance_matrix is a scipy.spatial.pdist string, we create a square distance matrix
# from the vectors, before applying a projection.
if self.distance_matrix in [
"braycurtis",
"canberra",
"chebyshev",
"cityblock",
"correlation",
"cosine",
"dice",
"euclidean",
"hamming",
"jaccard",
"kulsinski",
"mahalanobis",
"matching",
"minkowski",
"rogerstanimoto",
"russellrao",
"seuclidean",
"sokalmichener",
"sokalsneath",
"sqeuclidean",
"yule",
]:
X = distance.squareform(distance.pdist(X, metric=distance_matrix))
if self.verbose > 0:
print(
"Created distance matrix, shape: %s, with distance metric `%s`"
% (X.shape, distance_matrix)
)
# Detect if projection is a class (for scikit-learn)
try:
p = projection.get_params() # fail quickly
reducer = projection
if self.verbose > 0:
try:
projection.set_params(**{"verbose": self.verbose})
except:
pass
print("\n..Projecting data using: \n\t%s\n" % str(projection))
X = reducer.fit_transform(X)
except:
pass
# What is this used for?
if isinstance(projection, tuple):
X = self._process_projection_tuple(projection)
# Detect if projection is a string (for standard functions)
# TODO: test each one of these projections
if isinstance(projection, str):
if self.verbose > 0:
print("\n..Projecting data using: %s" % (projection))
def dist_mean(X, axis=1):
X_mean = np.mean(X, axis=0)
X = np.sum(np.sqrt((X - X_mean) ** 2), axis=1)
return X
projection_funcs = {
"sum": np.sum,
"mean": np.mean,
"median": np.median,
"max": np.max,
"min": np.min,
"std": np.std,
"l2norm": np.linalg.norm,
"dist_mean": dist_mean,
}
if projection in projection_funcs.keys():
X = projection_funcs[projection](X, axis=1).reshape((X.shape[0], 1))
if "knn_distance_" in projection:
n_neighbors = int(projection.split("_")[2])
if (
self.distance_matrix
): # We use the distance matrix for finding neighbors
X = np.sum(np.sort(X, axis=1)[:, :n_neighbors], axis=1).reshape(
(X.shape[0], 1)
)
else:
from sklearn import neighbors
nn = neighbors.NearestNeighbors(n_neighbors=n_neighbors)
nn.fit(X)
X = np.sum(
nn.kneighbors(X, n_neighbors=n_neighbors, return_distance=True)[
0
],
axis=1,
).reshape((X.shape[0], 1))
# Detect if projection is a list (with dimension indices)
if isinstance(projection, list):
if self.verbose > 0:
print("\n..Projecting data using: %s" % (str(projection)))
X = X[:, np.array(projection)]
# If projection produced sparse output, turn into a dense array
if issparse(X):
X = X.toarray()
if self.verbose > 0:
print("\n..Created projection shaped %s" % (str(X.shape)))
# Scaling
if scaler is not None:
if self.verbose > 0:
print("\n..Scaling with: %s\n" % str(scaler))
X = scaler.fit_transform(X)
return X | Creates the projection/lens from a dataset. Input the data set. Specify a projection/lens type. Output the projected data/lens.
Parameters
----------
X : Numpy Array
The data to fit a projection/lens to.
projection :
Projection parameter is either a string, a Scikit-learn class with fit_transform, like manifold.TSNE(), or a list of dimension indices. A string from ["sum", "mean", "median", "max", "min", "std", "dist_mean", "l2norm", "knn_distance_n"]. If using knn_distance_n write the number of desired neighbors in place of n: knn_distance_5 for summed distances to 5 nearest neighbors. Default = "sum".
scaler : Scikit-Learn API compatible scaler.
Scaler of the data applied after mapping. Use None for no scaling. Default = preprocessing.MinMaxScaler() if None, do no scaling, else apply scaling to the projection. Default: Min-Max scaling
distance_matrix : Either str or None
If not None, then any of ["braycurtis", "canberra", "chebyshev", "cityblock", "correlation", "cosine", "dice", "euclidean", "hamming", "jaccard", "kulsinski", "mahalanobis", "matching", "minkowski", "rogerstanimoto", "russellrao", "seuclidean", "sokalmichener", "sokalsneath", "sqeuclidean", "yule"].
If False do nothing, else create a squared distance matrix with the chosen metric, before applying the projection.
Returns
-------
lens : Numpy Array
projected data.
Examples
--------
>>> # Project by taking the first dimension and third dimension
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection=[0,2]
>>> )
>>> # Project by taking the sum of row values
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection="sum"
>>> )
>>> # Do not scale the projection (default is minmax-scaling)
>>> X_projected = mapper.project(
>>> X_inverse,
>>> scaler=None
>>> )
>>> # Project by standard-scaled summed distance to 5 nearest neighbors
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection="knn_distance_5",
>>> scaler=sklearn.preprocessing.StandardScaler()
>>> )
>>> # Project by first two PCA components
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection=sklearn.decomposition.PCA()
>>> )
>>> # Project by first three UMAP components
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection=umap.UMAP(n_components=3)
>>> )
>>> # Project by L2-norm on squared Pearson distance matrix
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection="l2norm",
>>> distance_matrix="pearson"
>>> )
>>> # Mix and match different projections
>>> X_projected = np.c_[
>>> mapper.project(X_inverse, projection=sklearn.decomposition.PCA()),
>>> mapper.project(X_inverse, projection="knn_distance_5")
>>> ]
>>> # Stack / chain projections. You could do this manually,
>>> # or pipeline with `.fit_transform()`. Works the same as `.project()`,
>>> # but accepts lists. f(raw text) -> f(tfidf) -> f(isomap 100d) -> f(umap 2d)
>>> projected_X = mapper.fit_transform(
>>> X,
>>> projections=[TfidfVectorizer(analyzer="char",
>>> ngram_range=(1,6),
>>> max_df=0.93,
>>> min_df=0.03),
>>> manifold.Isomap(n_components=100,
>>> n_jobs=-1),
>>> umap.UMAP(n_components=2,
>>> random_state=1)],
>>> scalers=[None,
>>> None,
>>> preprocessing.MinMaxScaler()],
>>> distance_matrices=[False,
>>> False,
>>> False]) | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/kmapper.py#L78-L303 | [
"def _process_projection_tuple(self, projection):\n # Detect if projection is a tuple (for prediction functions)\n # TODO: multi-label models\n # TODO: infer binary classification and select positive class preds\n # TODO: turn into smaller functions for better tests and complexity\n\n # TODO: this se... | class KeplerMapper(object):
"""With this class you can build topological networks from (high-dimensional) data.
1) Fit a projection/lens/function to a dataset and transform it.
For instance "mean_of_row(x) for x in X"
2) Map this projection with overlapping intervals/hypercubes.
Cluster the points inside the interval
(Note: we cluster on the inverse image/original data to lessen projection loss).
If two clusters/nodes have the same members (due to the overlap), then:
connect these with an edge.
3) Visualize the network using HTML and D3.js.
KM has a number of nice features, some which get forgotten.
- ``project``: Some projections it makes sense to use a distance matrix, such as knn_distance_#. Using ``distance_matrix = <metric>`` for a custom metric.
- ``fit_transform``: Applies a sequence of projections. Currently, this API is a little confusing and might be changed in the future.
"""
def __init__(self, verbose=0):
"""Constructor for KeplerMapper class.
Parameters
===========
verbose: int, default is 0
Logging level. Currently 3 levels (0,1,2) are supported. For no logging, set `verbose=0`. For some logging, set `verbose=1`. For complete logging, set `verbose=2`.
"""
# TODO: move as many of the arguments from fit_transform and map into here.
self.verbose = verbose
self.projection = None
self.scaler = None
self.cover = None
if verbose > 0:
print(self)
def __repr__(self):
return "KeplerMapper(verbose={})".format(self.verbose)
def fit_transform(
self,
X,
projection="sum",
scaler=preprocessing.MinMaxScaler(),
distance_matrix=False,
):
"""Same as .project() but accepts lists for arguments so you can chain.
"""
projections = projection
scalers = scaler
distance_matrices = distance_matrix
# Turn single projection arguments into a pipeline
if isinstance(projection, list) and isinstance(projection[0], int):
projections = [projection]
if not isinstance(projection, list):
projections = [projection]
# Turn single scaler arguments into a pipeline
if not isinstance(scaler, list):
scalers = [scaler]
# Turn single distance matrix arguments into a pipeline
if not isinstance(distance_matrix, list):
distance_matrices = [distance_matrix]
# set defaults to first list item, if not (correctly) set by the user
if len(scalers) != len(projections):
scalers = [scalers[0]] * len(projections)
if len(distance_matrices) != len(projections):
distance_matrices = [distance_matrices[0]] * len(projections)
if self.verbose > 0:
print("..Composing projection pipeline of length %s:" % (len(projections)))
print("\tProjections: %s" % ("\n\t\t".join(map(str, projections))))
print("\tDistance matrices: %s" % ("\n".join(map(str, distance_matrices))))
print("\tScalers: %s" % ("\n".join(map(str, scalers))))
# Pipeline Stack the projection functions
lens = X
for projection, scaler, distance_matrix in zip(
projections, scalers, distance_matrices
):
lens = self.project(
lens,
projection=projection,
scaler=scaler,
distance_matrix=distance_matrix,
)
return lens
    def map(
        self,
        lens,
        X=None,
        clusterer=cluster.DBSCAN(eps=0.5, min_samples=3),
        cover=Cover(n_cubes=10, perc_overlap=0.1),
        nerve=GraphNerve(),
        precomputed=False,
        remove_duplicate_nodes=False,
        # These arguments are all deprecated
        overlap_perc=None,
        nr_cubes=None
    ):
        """Apply Mapper algorithm on this projection and build a simplicial complex. Returns a dictionary with nodes and links.

        Parameters
        ----------
        lens: Numpy Array
            Lower dimensional representation of data. In general will be output of `fit_transform`.
        X: Numpy Array
            Original data or data to run clustering on. If `None`, then use `lens` as default. X can be a SciPy sparse matrix.
        clusterer: Default: DBSCAN
            Scikit-learn API compatible clustering algorithm. Must provide `fit` and `predict`.
        cover: kmapper.Cover
            Cover scheme for lens. Instance of kmapper.cover providing methods `fit` and `transform`.
        nerve: kmapper.Nerve
            Nerve builder implementing `__call__(nodes)` API
        precomputed : Boolean
            Tell Mapper whether the data that you are clustering on is a precomputed distance matrix. If set to
            `True`, the assumption is that you are also telling your `clusterer` that `metric='precomputed'` (which
            is an argument for DBSCAN among others), which
            will then cause the clusterer to expect a square distance matrix for each hypercube. `precomputed=True` will give a square matrix
            to the clusterer to fit on for each hypercube.
        remove_duplicate_nodes: Boolean
            Removes duplicate nodes before edges are determined. A node is considered to be duplicate
            if it has exactly the same set of points as another node.
        nr_cubes: Int
            .. deprecated:: 1.1.6
                define Cover explicitly in future versions
            The number of intervals/hypercubes to create. Default = 10.
        overlap_perc: Float
            .. deprecated:: 1.1.6
                define Cover explicitly in future versions
            The percentage of overlap "between" the intervals/hypercubes. Default = 0.1.

        Returns
        =======
        simplicial_complex : dict
            A dictionary with "nodes", "links" and "meta" information.

        Examples
        ========

        >>> # Default mapping.
        >>> graph = mapper.map(X_projected, X_inverse)

        >>> # Apply clustering on the projection instead of on inverse X
        >>> graph = mapper.map(X_projected)

        >>> # Use 20 cubes/intervals per projection dimension, with a 50% overlap
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    cover=kmapper.Cover(n_cubes=20, perc_overlap=0.5))

        >>> # Use multiple different cubes/intervals per projection dimension,
        >>> # And vary the overlap
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    cover=km.Cover(n_cubes=[10,20,5],
        >>>                                   perc_overlap=[0.1,0.2,0.5]))

        >>> # Use KMeans with 2 clusters
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    clusterer=sklearn.cluster.KMeans(2))

        >>> # Use DBSCAN with "cosine"-distance
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    clusterer=sklearn.cluster.DBSCAN(metric="cosine"))

        >>> # Use HDBSCAN as the clusterer
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    clusterer=hdbscan.HDBSCAN())

        >>> # Parametrize the nerve of the covering
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    nerve=km.GraphNerve(min_intersection=3))

        """
        # NOTE(review): clusterer/cover/nerve defaults are mutable instances
        # created once at class-definition time and shared across all calls —
        # TODO confirm no caller relies on per-call default state.
        start = datetime.now()

        nodes = defaultdict(list)
        meta = defaultdict(list)
        graph = {}

        # If inverse image is not provided, we use the projection as the inverse image (suffer projection loss)
        if X is None:
            X = lens

        # Deprecation warnings
        if nr_cubes is not None or overlap_perc is not None:
            warnings.warn(
                "Deprecation Warning: Please supply km.Cover object. Explicitly passing in n_cubes/nr_cubes and overlap_perc will be deprecated in future releases. ",
                DeprecationWarning,
            )

        # If user supplied nr_cubes, overlap_perc, or coverer, opt for those
        # TODO: remove this conditional after release in 1.2
        if nr_cubes is not None or overlap_perc is not None:
            n_cubes = nr_cubes if nr_cubes else 10
            overlap_perc = overlap_perc if overlap_perc else 0.1

            self.cover = Cover(n_cubes=n_cubes, perc_overlap=overlap_perc)
        else:
            self.cover = cover

        if self.verbose > 0:
            print(
                "Mapping on data shaped %s using lens shaped %s\n"
                % (str(X.shape), str(lens.shape))
            )

        # Prefix the data with an ID column (the row index) so cluster members
        # can be traced back to rows of the original data.
        ids = np.array([x for x in range(lens.shape[0])])
        lens = np.c_[ids, lens]

        if issparse(X):
            # hstack keeps X sparse; the id column is prepended as the first column.
            X = hstack([ids[np.newaxis].T, X], format='csr')
        else:
            X = np.c_[ids, X]

        # Cover scheme defines a list of elements
        bins = self.cover.fit(lens)

        # Algo's like K-Means, have a set number of clusters. We need this number
        # to adjust for the minimal number of samples inside an interval before
        # we consider clustering or skipping it.
        cluster_params = clusterer.get_params()
        min_cluster_samples = cluster_params.get(
            "n_clusters",
            cluster_params.get(
                "min_cluster_size", cluster_params.get("min_samples", 1)
            ),
        )

        if self.verbose > 1:
            print(
                "Minimal points in hypercube before clustering: %d"
                % (min_cluster_samples)
            )

        # Subdivide the projected data X in intervals/hypercubes with overlap
        if self.verbose > 0:
            bins = list(bins)  # extract list from generator
            total_bins = len(bins)
            print("Creating %s hypercubes." % total_bins)

        for i, hypercube in enumerate(self.cover.transform(lens)):

            # If at least min_cluster_samples samples inside the hypercube
            if hypercube.shape[0] >= min_cluster_samples:
                # Cluster the data point(s) in the cube, skipping the id-column
                # Note that we apply clustering on the inverse image (original data samples) that fall inside the cube.
                ids = [int(nn) for nn in hypercube[:, 0]]
                X_cube = X[ids]

                fit_data = X_cube[:, 1:]
                if precomputed:
                    # Select the matching columns as well, so the clusterer
                    # receives a square distance submatrix for this cube.
                    fit_data = fit_data[:, ids]

                cluster_predictions = clusterer.fit_predict(fit_data)

                if self.verbose > 1:
                    print(
                        " > Found %s clusters in hypercube %s."
                        % (
                            np.unique(
                                cluster_predictions[cluster_predictions > -1]
                            ).shape[0], i
                        )
                    )

                for pred in np.unique(cluster_predictions):
                    # if not predicted as noise (-1 / NaN labels are skipped)
                    if pred != -1 and not np.isnan(pred):
                        cluster_id = "cube{}_cluster{}".format(i, int(pred))

                        # Members are stored as original-row indices.
                        nodes[cluster_id] = hypercube[:, 0][cluster_predictions == pred].astype(int).tolist()
            elif self.verbose > 1:
                print("Cube_%s is empty.\n" % (i))

        if remove_duplicate_nodes:
            nodes = self._remove_duplicate_nodes(nodes)

        links, simplices = nerve.compute(nodes)

        graph["nodes"] = nodes
        graph["links"] = links
        graph["simplices"] = simplices
        graph["meta_data"] = {
            "projection": self.projection if self.projection else "custom",
            "n_cubes": self.cover.n_cubes,
            "perc_overlap": self.cover.perc_overlap,
            "clusterer": str(clusterer),
            "scaler": str(self.scaler),
        }
        graph["meta_nodes"] = meta

        if self.verbose > 0:
            self._summary(graph, str(datetime.now() - start))

        return graph
def _remove_duplicate_nodes(self, nodes):
# invert node list and merge duplicate nodes
deduped_items = defaultdict(list)
for node_id, items in nodes.items():
deduped_items[frozenset(items)].append(node_id)
deduped_nodes = {
"|".join(node_id_list): list(frozen_items)
for frozen_items, node_id_list in deduped_items.items()
}
if self.verbose > 0:
total_merged = len(nodes) - len(deduped_items)
if total_merged:
print("Merged {} duplicate nodes.\n".format(total_merged))
print(
"Number of nodes before merger: {}; after merger: {}\n".format(
len(nodes), len(deduped_nodes)
)
)
else:
print("No duplicate nodes found to remove.\n")
return deduped_nodes
def _summary(self, graph, time):
# TODO: this summary is dependant on the type of Nerve being built.
links = graph["links"]
nodes = graph["nodes"]
nr_links = sum(len(v) for k, v in links.items())
print("\nCreated %s edges and %s nodes in %s." % (nr_links, len(nodes), time))
def visualize(
self,
graph,
color_function=None,
custom_tooltips=None,
custom_meta=None,
path_html="mapper_visualization_output.html",
title="Kepler Mapper",
save_file=True,
X=None,
X_names=None,
lens=None,
lens_names=None,
show_tooltips=True,
nbins=10,
):
"""Generate a visualization of the simplicial complex mapper output. Turns the complex dictionary into a HTML/D3.js visualization
Parameters
----------
graph : dict
Simplicial complex output from the `map` method.
color_function : list or 1d array
A 1d vector with length equal to number of data points used to build Mapper. Each value should correspond to a value for each data point and color of node is computed as the average value for members in a node.
path_html : String
file name for outputing the resulting html.
custom_meta: dict
Render (key, value) in the Mapper Summary pane.
custom_tooltip: list or array like
Value to display for each entry in the node. The cluster data pane will display entry for all values in the node. Default is index of data.
save_file: bool, default is True
Save file to `path_html`.
X: numpy arraylike
If supplied, compute statistics information about the original data source with respect to each node.
X_names: list of strings
Names of each variable in `X` to be displayed. If None, then display names by index.
lens: numpy arraylike
If supplied, compute statistics of each node based on the projection/lens
lens_name: list of strings
Names of each variable in `lens` to be displayed. In None, then display names by index.
show_tooltips: bool, default is True.
If false, completely disable tooltips. This is useful when using output in space-tight pages or will display node data in custom ways.
nbins: int, default is 10
Number of bins shown in histogram of tooltip color distributions.
Returns
--------
html: string
Returns the same html that is normally output to `path_html`. Complete graph and data ready for viewing.
Examples
---------
>>> # Basic creation of a `.html` file at `kepler-mapper-output.html`
>>> html = mapper.visualize(graph, path_html="kepler-mapper-output.html")
>>> # Jupyter Notebook support
>>> from kmapper import jupyter
>>> html = mapper.visualize(graph, path_html="kepler-mapper-output.html")
>>> jupyter.display(path_html="kepler-mapper-output.html")
>>> # Customizing the output text
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_meta={"Description":"A short description.",
>>> "Cluster": "HBSCAN()"}
>>> )
>>> # Custom coloring function based on your 1d lens
>>> html = mapper.visualize(
>>> graph,
>>> color_function=lens
>>> )
>>> # Custom coloring function based on the first variable
>>> cf = mapper.project(X, projection=[0])
>>> html = mapper.visualize(
>>> graph,
>>> color_function=cf
>>> )
>>> # Customizing the tooltips with binary target variables
>>> X, y = split_data(df)
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_tooltips=y
>>> )
>>> # Customizing the tooltips with html-strings: locally stored images of an image dataset
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_tooltips=np.array(
>>> ["<img src='img/%s.jpg'>"%i for i in range(inverse_X.shape[0])]
>>> )
>>> )
"""
# TODO:
# - Make color functions more intuitive. How do they even work?
# - Allow multiple color functions that can be toggled on and off.
if not len(graph["nodes"]) > 0:
raise Exception(
"Visualize requires a mapper with more than 0 nodes. \nIt is possible that the constructed mapper could have been constructed with bad parameters. This can occasionally happens when using the default clustering algorithm. Try changing `eps` or `min_samples` in the DBSCAN clustering algorithm."
)
# Find the module absolute path and locate templates
module_root = os.path.join(os.path.dirname(__file__), "templates")
env = Environment(loader=FileSystemLoader(module_root))
# Color function is a vector of colors?
color_function = init_color_function(graph, color_function)
if X_names is None:
X_names = []
if lens_names is None:
lens_names = []
mapper_data = format_mapper_data(
graph,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
env,
nbins,
)
colorscale = colorscale_default
histogram = graph_data_distribution(graph, color_function, colorscale)
mapper_summary = format_meta(graph, custom_meta)
# Find the absolute module path and the static files
js_path = os.path.join(os.path.dirname(__file__), "static", "kmapper.js")
with open(js_path, "r") as f:
js_text = f.read()
css_path = os.path.join(os.path.dirname(__file__), "static", "style.css")
with open(css_path, "r") as f:
css_text = f.read()
# Render the Jinja template, filling fields as appropriate
template = env.get_template("base.html").render(
title=title,
mapper_summary=mapper_summary,
histogram=histogram,
dist_label="Node",
mapper_data=mapper_data,
colorscale=colorscale,
js_text=js_text,
css_text=css_text,
show_tooltips=True,
)
if save_file:
with open(path_html, "wb") as outfile:
if self.verbose > 0:
print("Wrote visualization to: %s" % (path_html))
outfile.write(template.encode("utf-8"))
return template
def data_from_cluster_id(self, cluster_id, graph, data):
"""Returns the original data of each cluster member for a given cluster ID
Parameters
----------
cluster_id : String
ID of the cluster.
graph : dict
The resulting dictionary after applying map()
data : Numpy Array
Original dataset. Accepts both 1-D and 2-D array.
Returns
-------
entries:
rows of cluster member data as Numpy array.
"""
if cluster_id in graph["nodes"]:
cluster_members = graph["nodes"][cluster_id]
cluster_members_data = data[cluster_members]
return cluster_members_data
else:
return np.array([])
def _process_projection_tuple(self, projection):
# Detect if projection is a tuple (for prediction functions)
# TODO: multi-label models
# TODO: infer binary classification and select positive class preds
# TODO: turn into smaller functions for better tests and complexity
# TODO: this seems like outside the purview of mapper. Can we add something like Mapper utils that can do this?
def blend(X_blend, pred_fun, folder, X_data, y):
for train_index, test_index in folder.split(X_data, y):
fold_X_train = X_data[train_index]
fold_y_train = y[train_index]
fold_X_test = X_data[test_index]
fold_y_test = y[test_index]
model.fit(fold_X_train, fold_y_train)
fold_preds = pred_fun(fold_X_test)
X_blend[test_index] = fold_preds
return X_blend
# If projection was passed without ground truth
# assume we are predicting a fitted model on a test set
if len(projection) == 2:
model, X_data = projection
# Are we dealing with a classifier or a regressor?
estimator_type = getattr(model, "_estimator_type", None)
if estimator_type == "classifier":
# classifier probabilities
X_blend = model.predict_proba(X_data)
elif estimator_type == "regressor":
X_blend = model.predict(X_data)
else:
warnings.warn("Unknown estimator type for: %s" % (model))
# If projection is passed with ground truth do 5-fold stratified
# cross-validation, saving the out-of-fold predictions.
# this is called "Stacked Generalization" (see: Wolpert 1992)
elif len(projection) == 3:
model, X_data, y = projection
estimator_type = getattr(model, "_estimator_type", None)
if estimator_type == "classifier":
X_blend = np.zeros((X_data.shape[0], np.unique(y).shape[0]))
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1729)
blend(X_blend, model.predict_proba, skf, X_data, y)
elif estimator_type == "regressor":
X_blend = np.zeros(X_data.shape[0])
kf = KFold(n_splits=5, shuffle=True, random_state=1729)
blend(X_blend, model.predict, kf, X_data, y)
else:
warnings.warn("Unknown estimator type for: %s" % (model))
else:
# Warn for malformed input and provide help to avoid it.
warnings.warn(
"Passing a model function should be"
+ "(model, X) or (model, X, y)."
+ "Instead got %s" % (str(projection))
)
# Reshape 1-D arrays (regressor outputs) to 2-D arrays
if X_blend.ndim == 1:
X_blend = X_blend.reshape((X_blend.shape[0], 1))
X = X_blend
return X
|
scikit-tda/kepler-mapper | kmapper/kmapper.py | KeplerMapper.fit_transform | python | def fit_transform(
self,
X,
projection="sum",
scaler=preprocessing.MinMaxScaler(),
distance_matrix=False,
):
projections = projection
scalers = scaler
distance_matrices = distance_matrix
# Turn single projection arguments into a pipeline
if isinstance(projection, list) and isinstance(projection[0], int):
projections = [projection]
if not isinstance(projection, list):
projections = [projection]
# Turn single scaler arguments into a pipeline
if not isinstance(scaler, list):
scalers = [scaler]
# Turn single distance matrix arguments into a pipeline
if not isinstance(distance_matrix, list):
distance_matrices = [distance_matrix]
# set defaults to first list item, if not (correctly) set by the user
if len(scalers) != len(projections):
scalers = [scalers[0]] * len(projections)
if len(distance_matrices) != len(projections):
distance_matrices = [distance_matrices[0]] * len(projections)
if self.verbose > 0:
print("..Composing projection pipeline of length %s:" % (len(projections)))
print("\tProjections: %s" % ("\n\t\t".join(map(str, projections))))
print("\tDistance matrices: %s" % ("\n".join(map(str, distance_matrices))))
print("\tScalers: %s" % ("\n".join(map(str, scalers))))
# Pipeline Stack the projection functions
lens = X
for projection, scaler, distance_matrix in zip(
projections, scalers, distance_matrices
):
lens = self.project(
lens,
projection=projection,
scaler=scaler,
distance_matrix=distance_matrix,
)
return lens | Same as .project() but accepts lists for arguments so you can chain. | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/kmapper.py#L305-L360 | [
"def project(\n self,\n X,\n projection=\"sum\",\n scaler=preprocessing.MinMaxScaler(),\n distance_matrix=None,\n):\n \"\"\"Creates the projection/lens from a dataset. Input the data set. Specify a projection/lens type. Output the projected data/lens.\n\n Parameters\n ----------\n\n X : N... | class KeplerMapper(object):
"""With this class you can build topological networks from (high-dimensional) data.
1) Fit a projection/lens/function to a dataset and transform it.
For instance "mean_of_row(x) for x in X"
2) Map this projection with overlapping intervals/hypercubes.
Cluster the points inside the interval
(Note: we cluster on the inverse image/original data to lessen projection loss).
If two clusters/nodes have the same members (due to the overlap), then:
connect these with an edge.
3) Visualize the network using HTML and D3.js.
KM has a number of nice features, some which get forgotten.
- ``project``: Some projections it makes sense to use a distance matrix, such as knn_distance_#. Using ``distance_matrix = <metric>`` for a custom metric.
- ``fit_transform``: Applies a sequence of projections. Currently, this API is a little confusing and might be changed in the future.
"""
def __init__(self, verbose=0):
"""Constructor for KeplerMapper class.
Parameters
===========
verbose: int, default is 0
Logging level. Currently 3 levels (0,1,2) are supported. For no logging, set `verbose=0`. For some logging, set `verbose=1`. For complete logging, set `verbose=2`.
"""
# TODO: move as many of the arguments from fit_transform and map into here.
self.verbose = verbose
self.projection = None
self.scaler = None
self.cover = None
if verbose > 0:
print(self)
def __repr__(self):
return "KeplerMapper(verbose={})".format(self.verbose)
    def project(
        self,
        X,
        projection="sum",
        scaler=preprocessing.MinMaxScaler(),
        distance_matrix=None,
    ):
        """Creates the projection/lens from a dataset. Input the data set. Specify a projection/lens type. Output the projected data/lens.

        Parameters
        ----------

        X : Numpy Array
            The data to fit a projection/lens to.

        projection :
            Projection parameter is either a string, a Scikit-learn class with fit_transform, like manifold.TSNE(), or a list of dimension indices. A string from ["sum", "mean", "median", "max", "min", "std", "dist_mean", "l2norm", "knn_distance_n"]. If using knn_distance_n write the number of desired neighbors in place of n: knn_distance_5 for summed distances to 5 nearest neighbors. Default = "sum".

        scaler : Scikit-Learn API compatible scaler.
            Scaler of the data applied after mapping. Use None for no scaling. Default = preprocessing.MinMaxScaler() if None, do no scaling, else apply scaling to the projection. Default: Min-Max scaling

        distance_matrix : Either str or None
            If not None, then any of ["braycurtis", "canberra", "chebyshev", "cityblock", "correlation", "cosine", "dice", "euclidean", "hamming", "jaccard", "kulsinski", "mahalanobis", "matching", "minkowski", "rogerstanimoto", "russellrao", "seuclidean", "sokalmichener", "sokalsneath", "sqeuclidean", "yule"].
            If False do nothing, else create a squared distance matrix with the chosen metric, before applying the projection.

        Returns
        -------
        lens : Numpy Array
            projected data.

        Examples
        --------
        >>> # Project by taking the first dimension and third dimension
        >>> X_projected = mapper.project(
        >>>     X_inverse,
        >>>     projection=[0,2]
        >>> )

        >>> # Project by taking the sum of row values
        >>> X_projected = mapper.project(
        >>>     X_inverse,
        >>>     projection="sum"
        >>> )

        >>> # Do not scale the projection (default is minmax-scaling)
        >>> X_projected = mapper.project(
        >>>     X_inverse,
        >>>     scaler=None
        >>> )

        >>> # Project by standard-scaled summed distance to 5 nearest neighbors
        >>> X_projected = mapper.project(
        >>>     X_inverse,
        >>>     projection="knn_distance_5",
        >>>     scaler=sklearn.preprocessing.StandardScaler()
        >>> )

        >>> # Project by first two PCA components
        >>> X_projected = mapper.project(
        >>>     X_inverse,
        >>>     projection=sklearn.decomposition.PCA()
        >>> )

        >>> # Project by first three UMAP components
        >>> X_projected = mapper.project(
        >>>     X_inverse,
        >>>     projection=umap.UMAP(n_components=3)
        >>> )

        >>> # Project by L2-norm on squared Pearson distance matrix
        >>> X_projected = mapper.project(
        >>>     X_inverse,
        >>>     projection="l2norm",
        >>>     distance_matrix="pearson"
        >>> )

        >>> # Mix and match different projections
        >>> X_projected = np.c_[
        >>>     mapper.project(X_inverse, projection=sklearn.decomposition.PCA()),
        >>>     mapper.project(X_inverse, projection="knn_distance_5")
        >>> ]

        >>> # Stack / chain projections. You could do this manually,
        >>> # or pipeline with `.fit_transform()`. Works the same as `.project()`,
        >>> # but accepts lists. f(raw text) -> f(tfidf) -> f(isomap 100d) -> f(umap 2d)
        >>> projected_X = mapper.fit_transform(
        >>>     X,
        >>>     projections=[TfidfVectorizer(analyzer="char",
        >>>                                  ngram_range=(1,6),
        >>>                                  max_df=0.93,
        >>>                                  min_df=0.03),
        >>>                  manifold.Isomap(n_components=100,
        >>>                                  n_jobs=-1),
        >>>                  umap.UMAP(n_components=2,
        >>>                            random_state=1)],
        >>>     scalers=[None,
        >>>              None,
        >>>              preprocessing.MinMaxScaler()],
        >>>     distance_matrices=[False,
        >>>                        False,
        >>>                        False])

        """
        # Save original values off so they can be referenced by later functions in the pipeline
        self.inverse = X
        self.scaler = scaler
        self.projection = str(projection)
        self.distance_matrix = distance_matrix

        if self.verbose > 0:
            print("..Projecting on data shaped %s" % (str(X.shape)))

        # If distance_matrix is a scipy.spatial.pdist string, we create a square distance matrix
        # from the vectors, before applying a projection.
        if self.distance_matrix in [
            "braycurtis",
            "canberra",
            "chebyshev",
            "cityblock",
            "correlation",
            "cosine",
            "dice",
            "euclidean",
            "hamming",
            "jaccard",
            "kulsinski",
            "mahalanobis",
            "matching",
            "minkowski",
            "rogerstanimoto",
            "russellrao",
            "seuclidean",
            "sokalmichener",
            "sokalsneath",
            "sqeuclidean",
            "yule",
        ]:
            X = distance.squareform(distance.pdist(X, metric=distance_matrix))
            if self.verbose > 0:
                print(
                    "Created distance matrix, shape: %s, with distance metric `%s`"
                    % (X.shape, distance_matrix)
                )

        # Detect if projection is a class (for scikit-learn):
        # get_params() raising tells us it is not an estimator.
        # NOTE(review): this bare except also swallows any error raised inside
        # reducer.fit_transform, so a failing estimator falls through silently
        # to the string/list branches — consider narrowing to AttributeError
        # around get_params() only.
        try:
            p = projection.get_params()  # fail quickly
            reducer = projection

            if self.verbose > 0:
                try:
                    # Best effort: propagate our verbosity into the estimator.
                    projection.set_params(**{"verbose": self.verbose})
                except:
                    pass
                print("\n..Projecting data using: \n\t%s\n" % str(projection))
            X = reducer.fit_transform(X)
        except:
            pass

        # Tuples are (model, X[, y]) prediction lenses.
        if isinstance(projection, tuple):
            X = self._process_projection_tuple(projection)

        # Detect if projection is a string (for standard functions)
        # TODO: test each one of these projections
        if isinstance(projection, str):
            if self.verbose > 0:
                print("\n..Projecting data using: %s" % (projection))

            def dist_mean(X, axis=1):
                # Per-row summed absolute deviation from the column means.
                X_mean = np.mean(X, axis=0)
                X = np.sum(np.sqrt((X - X_mean) ** 2), axis=1)
                return X

            projection_funcs = {
                "sum": np.sum,
                "mean": np.mean,
                "median": np.median,
                "max": np.max,
                "min": np.min,
                "std": np.std,
                "l2norm": np.linalg.norm,
                "dist_mean": dist_mean,
            }

            if projection in projection_funcs.keys():
                # All of these reduce each row to a single value -> column vector.
                X = projection_funcs[projection](X, axis=1).reshape((X.shape[0], 1))

            if "knn_distance_" in projection:
                # e.g. "knn_distance_5" -> summed distance to 5 nearest neighbors.
                n_neighbors = int(projection.split("_")[2])
                if (
                    self.distance_matrix
                ):  # We use the distance matrix for finding neighbors
                    X = np.sum(np.sort(X, axis=1)[:, :n_neighbors], axis=1).reshape(
                        (X.shape[0], 1)
                    )
                else:
                    from sklearn import neighbors

                    nn = neighbors.NearestNeighbors(n_neighbors=n_neighbors)
                    nn.fit(X)
                    X = np.sum(
                        nn.kneighbors(X, n_neighbors=n_neighbors, return_distance=True)[
                            0
                        ],
                        axis=1,
                    ).reshape((X.shape[0], 1))

        # Detect if projection is a list (with dimension indices)
        if isinstance(projection, list):
            if self.verbose > 0:
                print("\n..Projecting data using: %s" % (str(projection)))
            X = X[:, np.array(projection)]

        # If projection produced sparse output, turn into a dense array
        if issparse(X):
            X = X.toarray()

        if self.verbose > 0:
            print("\n..Created projection shaped %s" % (str(X.shape)))

        # Scaling
        if scaler is not None:
            if self.verbose > 0:
                print("\n..Scaling with: %s\n" % str(scaler))
            X = scaler.fit_transform(X)

        return X
    def map(
        self,
        lens,
        X=None,
        clusterer=cluster.DBSCAN(eps=0.5, min_samples=3),
        cover=Cover(n_cubes=10, perc_overlap=0.1),
        nerve=GraphNerve(),
        precomputed=False,
        remove_duplicate_nodes=False,
        # These arguments are all deprecated
        overlap_perc=None,
        nr_cubes=None
    ):
        """Apply Mapper algorithm on this projection and build a simplicial complex. Returns a dictionary with nodes and links.

        Parameters
        ----------
        lens: Numpy Array
            Lower dimensional representation of data. In general will be output of `fit_transform`.
        X: Numpy Array
            Original data or data to run clustering on. If `None`, then use `lens` as default. X can be a SciPy sparse matrix.
        clusterer: Default: DBSCAN
            Scikit-learn API compatible clustering algorithm. Must provide `fit` and `predict`.
        cover: kmapper.Cover
            Cover scheme for lens. Instance of kmapper.cover providing methods `fit` and `transform`.
        nerve: kmapper.Nerve
            Nerve builder implementing `__call__(nodes)` API
        precomputed : Boolean
            Tell Mapper whether the data that you are clustering on is a precomputed distance matrix. If set to
            `True`, the assumption is that you are also telling your `clusterer` that `metric='precomputed'` (which
            is an argument for DBSCAN among others), which
            will then cause the clusterer to expect a square distance matrix for each hypercube. `precomputed=True` will give a square matrix
            to the clusterer to fit on for each hypercube.
        remove_duplicate_nodes: Boolean
            Removes duplicate nodes before edges are determined. A node is considered to be duplicate
            if it has exactly the same set of points as another node.
        nr_cubes: Int
            .. deprecated:: 1.1.6
                define Cover explicitly in future versions
            The number of intervals/hypercubes to create. Default = 10.
        overlap_perc: Float
            .. deprecated:: 1.1.6
                define Cover explicitly in future versions
            The percentage of overlap "between" the intervals/hypercubes. Default = 0.1.

        Returns
        =======
        simplicial_complex : dict
            A dictionary with "nodes", "links" and "meta" information.

        Examples
        ========

        >>> # Default mapping.
        >>> graph = mapper.map(X_projected, X_inverse)

        >>> # Apply clustering on the projection instead of on inverse X
        >>> graph = mapper.map(X_projected)

        >>> # Use 20 cubes/intervals per projection dimension, with a 50% overlap
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    cover=kmapper.Cover(n_cubes=20, perc_overlap=0.5))

        >>> # Use multiple different cubes/intervals per projection dimension,
        >>> # And vary the overlap
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    cover=km.Cover(n_cubes=[10,20,5],
        >>>                                   perc_overlap=[0.1,0.2,0.5]))

        >>> # Use KMeans with 2 clusters
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    clusterer=sklearn.cluster.KMeans(2))

        >>> # Use DBSCAN with "cosine"-distance
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    clusterer=sklearn.cluster.DBSCAN(metric="cosine"))

        >>> # Use HDBSCAN as the clusterer
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    clusterer=hdbscan.HDBSCAN())

        >>> # Parametrize the nerve of the covering
        >>> graph = mapper.map(X_projected, X_inverse,
        >>>                    nerve=km.GraphNerve(min_intersection=3))

        """
        # NOTE(review): clusterer/cover/nerve defaults are mutable instances
        # created once at class-definition time and shared across all calls —
        # TODO confirm no caller relies on per-call default state.
        start = datetime.now()

        nodes = defaultdict(list)
        meta = defaultdict(list)
        graph = {}

        # If inverse image is not provided, we use the projection as the inverse image (suffer projection loss)
        if X is None:
            X = lens

        # Deprecation warnings
        if nr_cubes is not None or overlap_perc is not None:
            warnings.warn(
                "Deprecation Warning: Please supply km.Cover object. Explicitly passing in n_cubes/nr_cubes and overlap_perc will be deprecated in future releases. ",
                DeprecationWarning,
            )

        # If user supplied nr_cubes, overlap_perc, or coverer, opt for those
        # TODO: remove this conditional after release in 1.2
        if nr_cubes is not None or overlap_perc is not None:
            n_cubes = nr_cubes if nr_cubes else 10
            overlap_perc = overlap_perc if overlap_perc else 0.1

            self.cover = Cover(n_cubes=n_cubes, perc_overlap=overlap_perc)
        else:
            self.cover = cover

        if self.verbose > 0:
            print(
                "Mapping on data shaped %s using lens shaped %s\n"
                % (str(X.shape), str(lens.shape))
            )

        # Prefix the data with an ID column (the row index) so cluster members
        # can be traced back to rows of the original data.
        ids = np.array([x for x in range(lens.shape[0])])
        lens = np.c_[ids, lens]

        if issparse(X):
            # hstack keeps X sparse; the id column is prepended as the first column.
            X = hstack([ids[np.newaxis].T, X], format='csr')
        else:
            X = np.c_[ids, X]

        # Cover scheme defines a list of elements
        bins = self.cover.fit(lens)

        # Algo's like K-Means, have a set number of clusters. We need this number
        # to adjust for the minimal number of samples inside an interval before
        # we consider clustering or skipping it.
        cluster_params = clusterer.get_params()
        min_cluster_samples = cluster_params.get(
            "n_clusters",
            cluster_params.get(
                "min_cluster_size", cluster_params.get("min_samples", 1)
            ),
        )

        if self.verbose > 1:
            print(
                "Minimal points in hypercube before clustering: %d"
                % (min_cluster_samples)
            )

        # Subdivide the projected data X in intervals/hypercubes with overlap
        if self.verbose > 0:
            bins = list(bins)  # extract list from generator
            total_bins = len(bins)
            print("Creating %s hypercubes." % total_bins)

        for i, hypercube in enumerate(self.cover.transform(lens)):

            # If at least min_cluster_samples samples inside the hypercube
            if hypercube.shape[0] >= min_cluster_samples:
                # Cluster the data point(s) in the cube, skipping the id-column
                # Note that we apply clustering on the inverse image (original data samples) that fall inside the cube.
                ids = [int(nn) for nn in hypercube[:, 0]]
                X_cube = X[ids]

                fit_data = X_cube[:, 1:]
                if precomputed:
                    # Select the matching columns as well, so the clusterer
                    # receives a square distance submatrix for this cube.
                    fit_data = fit_data[:, ids]

                cluster_predictions = clusterer.fit_predict(fit_data)

                if self.verbose > 1:
                    print(
                        " > Found %s clusters in hypercube %s."
                        % (
                            np.unique(
                                cluster_predictions[cluster_predictions > -1]
                            ).shape[0], i
                        )
                    )

                for pred in np.unique(cluster_predictions):
                    # if not predicted as noise (-1 / NaN labels are skipped)
                    if pred != -1 and not np.isnan(pred):
                        cluster_id = "cube{}_cluster{}".format(i, int(pred))

                        # Members are stored as original-row indices.
                        nodes[cluster_id] = hypercube[:, 0][cluster_predictions == pred].astype(int).tolist()
            elif self.verbose > 1:
                print("Cube_%s is empty.\n" % (i))

        if remove_duplicate_nodes:
            nodes = self._remove_duplicate_nodes(nodes)

        links, simplices = nerve.compute(nodes)

        graph["nodes"] = nodes
        graph["links"] = links
        graph["simplices"] = simplices
        graph["meta_data"] = {
            "projection": self.projection if self.projection else "custom",
            "n_cubes": self.cover.n_cubes,
            "perc_overlap": self.cover.perc_overlap,
            "clusterer": str(clusterer),
            "scaler": str(self.scaler),
        }
        graph["meta_nodes"] = meta

        if self.verbose > 0:
            self._summary(graph, str(datetime.now() - start))

        return graph
def _remove_duplicate_nodes(self, nodes):
    """Merge nodes that contain exactly the same member set.

    The ids of merged duplicates are joined with ``|`` to form the new
    node id; the member list is preserved.
    """
    # Group node ids by their (hashable) membership set.
    groups = defaultdict(list)
    for node_id, members in nodes.items():
        groups[frozenset(members)].append(node_id)

    merged = {}
    for members, node_ids in groups.items():
        merged["|".join(node_ids)] = list(members)

    if self.verbose > 0:
        n_merged = len(nodes) - len(groups)
        if n_merged:
            print("Merged {} duplicate nodes.\n".format(n_merged))
            print(
                "Number of nodes before merger: {}; after merger: {}\n".format(
                    len(nodes), len(merged)
                )
            )
        else:
            print("No duplicate nodes found to remove.\n")

    return merged
def _summary(self, graph, time):
    # TODO: this summary is dependent on the type of Nerve being built.
    edge_count = 0
    for link_targets in graph["links"].values():
        edge_count += len(link_targets)
    node_count = len(graph["nodes"])
    print("\nCreated %s edges and %s nodes in %s." % (edge_count, node_count, time))
def visualize(
    self,
    graph,
    color_function=None,
    custom_tooltips=None,
    custom_meta=None,
    path_html="mapper_visualization_output.html",
    title="Kepler Mapper",
    save_file=True,
    X=None,
    X_names=None,
    lens=None,
    lens_names=None,
    show_tooltips=True,
    nbins=10,
):
    """Generate a visualization of the simplicial complex mapper output. Turns the complex dictionary into a HTML/D3.js visualization

    Parameters
    ----------
    graph : dict
        Simplicial complex output from the `map` method.
    color_function : list or 1d array
        A 1d vector with length equal to number of data points used to build Mapper. Each value should correspond to a value for each data point and color of node is computed as the average value for members in a node.
    path_html : String
        file name for outputting the resulting html.
    custom_meta: dict
        Render (key, value) in the Mapper Summary pane.
    custom_tooltips: list or array like
        Value to display for each entry in the node. The cluster data pane will display entry for all values in the node. Default is index of data.
    save_file: bool, default is True
        Save file to `path_html`.
    X: numpy arraylike
        If supplied, compute statistics information about the original data source with respect to each node.
    X_names: list of strings
        Names of each variable in `X` to be displayed. If None, then display names by index.
    lens: numpy arraylike
        If supplied, compute statistics of each node based on the projection/lens
    lens_names: list of strings
        Names of each variable in `lens` to be displayed. If None, then display names by index.
    show_tooltips: bool, default is True.
        If false, completely disable tooltips. This is useful when using output in space-tight pages or will display node data in custom ways.
    nbins: int, default is 10
        Number of bins shown in histogram of tooltip color distributions.

    Returns
    --------
    html: string
        Returns the same html that is normally output to `path_html`. Complete graph and data ready for viewing.

    Examples
    ---------

    >>> # Basic creation of a `.html` file at `kepler-mapper-output.html`
    >>> html = mapper.visualize(graph, path_html="kepler-mapper-output.html")

    >>> # Jupyter Notebook support
    >>> from kmapper import jupyter
    >>> html = mapper.visualize(graph, path_html="kepler-mapper-output.html")
    >>> jupyter.display(path_html="kepler-mapper-output.html")

    >>> # Customizing the output text
    >>> html = mapper.visualize(
    >>>     graph,
    >>>     path_html="kepler-mapper-output.html",
    >>>     title="Fashion MNIST with UMAP",
    >>>     custom_meta={"Description":"A short description.",
    >>>                  "Cluster": "HBSCAN()"}
    >>> )

    >>> # Custom coloring function based on your 1d lens
    >>> html = mapper.visualize(
    >>>     graph,
    >>>     color_function=lens
    >>> )

    >>> # Custom coloring function based on the first variable
    >>> cf = mapper.project(X, projection=[0])
    >>> html = mapper.visualize(
    >>>     graph,
    >>>     color_function=cf
    >>> )

    >>> # Customizing the tooltips with binary target variables
    >>> X, y = split_data(df)
    >>> html = mapper.visualize(
    >>>     graph,
    >>>     path_html="kepler-mapper-output.html",
    >>>     title="Fashion MNIST with UMAP",
    >>>     custom_tooltips=y
    >>> )

    >>> # Customizing the tooltips with html-strings: locally stored images of an image dataset
    >>> html = mapper.visualize(
    >>>     graph,
    >>>     path_html="kepler-mapper-output.html",
    >>>     title="Fashion MNIST with UMAP",
    >>>     custom_tooltips=np.array(
    >>>             ["<img src='img/%s.jpg'>"%i for i in range(inverse_X.shape[0])]
    >>>     )
    >>> )

    """
    # TODO:
    #   - Make color functions more intuitive. How do they even work?
    #   - Allow multiple color functions that can be toggled on and off.

    if not len(graph["nodes"]) > 0:
        raise Exception(
            "Visualize requires a mapper with more than 0 nodes. \nIt is possible that the constructed mapper could have been constructed with bad parameters. This can occasionally happens when using the default clustering algorithm. Try changing `eps` or `min_samples` in the DBSCAN clustering algorithm."
        )

    # Find the module absolute path and locate templates
    module_root = os.path.join(os.path.dirname(__file__), "templates")
    env = Environment(loader=FileSystemLoader(module_root))

    # Color function is a vector of colors?
    color_function = init_color_function(graph, color_function)

    if X_names is None:
        X_names = []

    if lens_names is None:
        lens_names = []

    mapper_data = format_mapper_data(
        graph,
        color_function,
        X,
        X_names,
        lens,
        lens_names,
        custom_tooltips,
        env,
        nbins,
    )

    colorscale = colorscale_default

    histogram = graph_data_distribution(graph, color_function, colorscale)

    mapper_summary = format_meta(graph, custom_meta)

    # Find the absolute module path and the static files
    js_path = os.path.join(os.path.dirname(__file__), "static", "kmapper.js")
    with open(js_path, "r") as f:
        js_text = f.read()

    css_path = os.path.join(os.path.dirname(__file__), "static", "style.css")
    with open(css_path, "r") as f:
        css_text = f.read()

    # Render the Jinja template, filling fields as appropriate
    template = env.get_template("base.html").render(
        title=title,
        mapper_summary=mapper_summary,
        histogram=histogram,
        dist_label="Node",
        mapper_data=mapper_data,
        colorscale=colorscale,
        js_text=js_text,
        css_text=css_text,
        # BUG FIX: was hardcoded to True, silently ignoring the
        # `show_tooltips` parameter documented above.
        show_tooltips=show_tooltips,
    )

    if save_file:
        with open(path_html, "wb") as outfile:
            if self.verbose > 0:
                print("Wrote visualization to: %s" % (path_html))
            outfile.write(template.encode("utf-8"))

    return template
def data_from_cluster_id(self, cluster_id, graph, data):
    """Look up the original data rows for the members of one cluster.

    Parameters
    ----------
    cluster_id : String
        ID of the cluster.
    graph : dict
        The resulting dictionary after applying map()
    data : Numpy Array
        Original dataset. Accepts both 1-D and 2-D array.

    Returns
    -------
    entries:
        rows of cluster member data as Numpy array.
    """
    members = graph["nodes"].get(cluster_id)
    if members is None:
        # Unknown cluster id: return an empty array rather than raising.
        return np.array([])
    return data[members]
def _process_projection_tuple(self, projection):
    """Turn a ``(model, X)`` or ``(model, X, y)`` tuple into a lens.

    With ``(model, X)`` the model is assumed to already be fitted and its
    predictions on ``X`` become the projection. With ``(model, X, y)``
    the out-of-fold predictions of a 5-fold cross-validation are used
    instead ("Stacked Generalization", Wolpert 1992), avoiding leakage of
    the training labels into the lens.

    Classifiers contribute ``predict_proba`` output (one column per
    class); regressors contribute ``predict`` output (a single column,
    reshaped to 2-D).

    Raises
    ------
    ValueError
        If the tuple does not have 2 or 3 elements, or if the model does
        not expose a recognized ``_estimator_type``.
    """
    # TODO: multi-label models
    # TODO: infer binary classification and select positive class preds
    # TODO: turn into smaller functions for better tests and complexity
    # TODO: this seems like outside the purview of mapper. Can we add something like Mapper utils that can do this?

    def blend(X_blend, model, pred_fun, folder, X_data, y):
        # Fill X_blend with out-of-fold predictions: fit on each training
        # split, predict the held-out split, and write the predictions
        # back at the held-out indices. `model` is passed explicitly
        # instead of being captured from the enclosing scope.
        for train_index, test_index in folder.split(X_data, y):
            model.fit(X_data[train_index], y[train_index])
            X_blend[test_index] = pred_fun(X_data[test_index])
        return X_blend

    if len(projection) == 2:
        # Projection passed without ground truth: assume a fitted model
        # being applied to a test set.
        model, X_data = projection
        estimator_type = getattr(model, "_estimator_type", None)
        if estimator_type == "classifier":
            # classifier probabilities
            X_blend = model.predict_proba(X_data)
        elif estimator_type == "regressor":
            X_blend = model.predict(X_data)
        else:
            warnings.warn("Unknown estimator type for: %s" % (model))
            # BUG FIX: previously fell through with X_blend unbound and
            # crashed below with a NameError; fail loudly instead.
            raise ValueError("Unknown estimator type for: %s" % (model))
    elif len(projection) == 3:
        # Ground truth supplied: 5-fold (stratified for classifiers)
        # cross-validation, keeping only out-of-fold predictions.
        model, X_data, y = projection
        estimator_type = getattr(model, "_estimator_type", None)
        if estimator_type == "classifier":
            X_blend = np.zeros((X_data.shape[0], np.unique(y).shape[0]))
            skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1729)
            blend(X_blend, model, model.predict_proba, skf, X_data, y)
        elif estimator_type == "regressor":
            X_blend = np.zeros(X_data.shape[0])
            kf = KFold(n_splits=5, shuffle=True, random_state=1729)
            blend(X_blend, model, model.predict, kf, X_data, y)
        else:
            warnings.warn("Unknown estimator type for: %s" % (model))
            raise ValueError("Unknown estimator type for: %s" % (model))
    else:
        # Malformed input: warn with guidance, then fail loudly (the
        # original code continued and crashed with a NameError on the
        # unbound X_blend below).
        warnings.warn(
            "Passing a model function should be"
            + "(model, X) or (model, X, y)."
            + "Instead got %s" % (str(projection))
        )
        raise ValueError(
            "Expected a (model, X) or (model, X, y) tuple, got %s"
            % (str(projection))
        )

    # Regressor outputs are 1-D; reshape to a single-column 2-D lens.
    if X_blend.ndim == 1:
        X_blend = X_blend.reshape((X_blend.shape[0], 1))
    return X_blend
|
scikit-tda/kepler-mapper | kmapper/kmapper.py | KeplerMapper.map | python | def map(
    self,
    lens,
    X=None,
    clusterer=cluster.DBSCAN(eps=0.5, min_samples=3),
    cover=Cover(n_cubes=10, perc_overlap=0.1),
    nerve=GraphNerve(),
    precomputed=False,
    remove_duplicate_nodes=False,
    # These arguments are all deprecated
    overlap_perc=None,
    nr_cubes=None
):
    # Build the Mapper simplicial complex: cover the lens, cluster the
    # inverse image inside each cube, and connect overlapping clusters.
    start = datetime.now()  # wall-clock timer for the verbose summary
    nodes = defaultdict(list)  # cluster-id -> list of member row indices
    meta = defaultdict(list)  # never filled here; exposed as graph["meta_nodes"]
    graph = {}
    # If inverse image is not provided, we use the projection as the inverse image (suffer projection loss)
    if X is None:
        X = lens
    # Deprecation warnings
    if nr_cubes is not None or overlap_perc is not None:
        warnings.warn(
            "Deprecation Warning: Please supply km.Cover object. Explicitly passing in n_cubes/nr_cubes and overlap_perc will be deprecated in future releases. ",
            DeprecationWarning,
        )
    # If user supplied nr_cubes, overlap_perc, or coverer, opt for those
    # TODO: remove this conditional after release in 1.2
    if nr_cubes is not None or overlap_perc is not None:
        n_cubes = nr_cubes if nr_cubes else 10
        overlap_perc = overlap_perc if overlap_perc else 0.1
        self.cover = Cover(n_cubes=n_cubes, perc_overlap=overlap_perc)
    else:
        self.cover = cover
    if self.verbose > 0:
        print(
            "Mapping on data shaped %s using lens shaped %s\n"
            % (str(X.shape), str(lens.shape))
        )
    # Prefix'ing the data with an ID column so members can be traced back
    # to original rows after covering/clustering.
    ids = np.array([x for x in range(lens.shape[0])])
    lens = np.c_[ids, lens]
    if issparse(X):
        X = hstack([ids[np.newaxis].T, X], format='csr')
    else:
        X = np.c_[ids, X]
    # Cover scheme defines a list of elements
    bins = self.cover.fit(lens)
    # Algo's like K-Means, have a set number of clusters. We need this number
    # to adjust for the minimal number of samples inside an interval before
    # we consider clustering or skipping it.
    cluster_params = clusterer.get_params()
    min_cluster_samples = cluster_params.get(
        "n_clusters",
        cluster_params.get(
            "min_cluster_size", cluster_params.get("min_samples", 1)
        ),
    )
    if self.verbose > 1:
        print(
            "Minimal points in hypercube before clustering: %d"
            % (min_cluster_samples)
        )
    # Subdivide the projected data X in intervals/hypercubes with overlap
    if self.verbose > 0:
        bins = list(bins)  # extract list from generator (only to count it)
        total_bins = len(bins)
        print("Creating %s hypercubes." % total_bins)
    for i, hypercube in enumerate(self.cover.transform(lens)):
        # If at least min_cluster_samples samples inside the hypercube
        if hypercube.shape[0] >= min_cluster_samples:
            # Cluster the data point(s) in the cube, skipping the id-column
            # Note that we apply clustering on the inverse image (original data samples) that fall inside the cube.
            # NOTE: `ids` is rebound here, shadowing the full index array above.
            ids = [int(nn) for nn in hypercube[:, 0]]
            X_cube = X[ids]
            fit_data = X_cube[:, 1:]
            if precomputed:
                # For a precomputed distance matrix, also slice columns so
                # the clusterer sees a square sub-matrix for this cube.
                fit_data = fit_data[:, ids]
            cluster_predictions = clusterer.fit_predict(fit_data)
            if self.verbose > 1:
                print(
                    " > Found %s clusters in hypercube %s."
                    % (
                        np.unique(
                            cluster_predictions[cluster_predictions > -1]
                        ).shape[0], i
                    )
                )
            for pred in np.unique(cluster_predictions):
                # if not predicted as noise (label -1)
                if pred != -1 and not np.isnan(pred):
                    cluster_id = "cube{}_cluster{}".format(i, int(pred))
                    nodes[cluster_id] = hypercube[:, 0][cluster_predictions == pred].astype(int).tolist()
        elif self.verbose > 1:
            print("Cube_%s is empty.\n" % (i))
    if remove_duplicate_nodes:
        nodes = self._remove_duplicate_nodes(nodes)
    # Build edges/simplices from overlapping node memberships.
    links, simplices = nerve.compute(nodes)
    graph["nodes"] = nodes
    graph["links"] = links
    graph["simplices"] = simplices
    graph["meta_data"] = {
        "projection": self.projection if self.projection else "custom",
        "n_cubes": self.cover.n_cubes,
        "perc_overlap": self.cover.perc_overlap,
        "clusterer": str(clusterer),
        "scaler": str(self.scaler),
    }
    graph["meta_nodes"] = meta
    if self.verbose > 0:
        self._summary(graph, str(datetime.now() - start))
    return graph
Parameters
----------
lens: Numpy Array
Lower dimensional representation of data. In general will be output of `fit_transform`.
X: Numpy Array
Original data or data to run clustering on. If `None`, then use `lens` as default. X can be a SciPy sparse matrix.
clusterer: Default: DBSCAN
Scikit-learn API compatible clustering algorithm. Must provide `fit` and `predict`.
cover: kmapper.Cover
Cover scheme for lens. Instance of kmapper.cover providing methods `fit` and `transform`.
nerve: kmapper.Nerve
Nerve builder implementing `__call__(nodes)` API
precomputed : Boolean
Tell Mapper whether the data that you are clustering on is a precomputed distance matrix. If set to
`True`, the assumption is that you are also telling your `clusterer` that `metric='precomputed'` (which
is an argument for DBSCAN among others), which
will then cause the clusterer to expect a square distance matrix for each hypercube. `precomputed=True` will give a square matrix
to the clusterer to fit on for each hypercube.
remove_duplicate_nodes: Boolean
Removes duplicate nodes before edges are determined. A node is considered to be duplicate
if it has exactly the same set of points as another node.
nr_cubes: Int
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The number of intervals/hypercubes to create. Default = 10.
overlap_perc: Float
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The percentage of overlap "between" the intervals/hypercubes. Default = 0.1.
Returns
=======
simplicial_complex : dict
A dictionary with "nodes", "links" and "meta" information.
Examples
========
>>> # Default mapping.
>>> graph = mapper.map(X_projected, X_inverse)
>>> # Apply clustering on the projection instead of on inverse X
>>> graph = mapper.map(X_projected)
>>> # Use 20 cubes/intervals per projection dimension, with a 50% overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=kmapper.Cover(n_cubes=20, perc_overlap=0.5))
>>> # Use multiple different cubes/intervals per projection dimension,
>>> # And vary the overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=km.Cover(n_cubes=[10,20,5],
>>> perc_overlap=[0.1,0.2,0.5]))
>>> # Use KMeans with 2 clusters
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.KMeans(2))
>>> # Use DBSCAN with "cosine"-distance
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.DBSCAN(metric="cosine"))
>>> # Use HDBSCAN as the clusterer
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=hdbscan.HDBSCAN())
>>> # Parametrize the nerve of the covering
>>> graph = mapper.map(X_projected, X_inverse,
>>> nerve=km.GraphNerve(min_intersection=3)) | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/kmapper.py#L362-L587 | [
"def fit(self, data):\n \"\"\" Fit a cover on the data. This method constructs centers and radii in each dimension given the `perc_overlap` and `n_cube`.\n\n Parameters\n ============\n\n data: array-like\n Data to apply the cover to. Warning: First column must be an index column.\n\n Returns\... | class KeplerMapper(object):
"""With this class you can build topological networks from (high-dimensional) data.
1) Fit a projection/lens/function to a dataset and transform it.
For instance "mean_of_row(x) for x in X"
2) Map this projection with overlapping intervals/hypercubes.
Cluster the points inside the interval
(Note: we cluster on the inverse image/original data to lessen projection loss).
If two clusters/nodes have the same members (due to the overlap), then:
connect these with an edge.
3) Visualize the network using HTML and D3.js.
KM has a number of nice features, some which get forgotten.
- ``project``: Some projections it makes sense to use a distance matrix, such as knn_distance_#. Using ``distance_matrix = <metric>`` for a custom metric.
- ``fit_transform``: Applies a sequence of projections. Currently, this API is a little confusing and might be changed in the future.
"""
def __init__(self, verbose=0):
    """Create a new KeplerMapper.

    Parameters
    ----------
    verbose : int, default 0
        Logging level. Three levels are supported: 0 is silent, 1 prints
        progress, 2 prints full details.
    """
    # TODO: move as many of the arguments from fit_transform and map into here.
    self.verbose = verbose

    # Filled in later by project()/map(); kept on the instance so later
    # stages (and the visualization metadata) can read back what was used.
    self.projection = None
    self.scaler = None
    self.cover = None

    if verbose > 0:
        print(self)
def __repr__(self):
    # Show the only constructor argument so the repr can recreate the object.
    template = "KeplerMapper(verbose={})"
    return template.format(self.verbose)
def project(
    self,
    X,
    projection="sum",
    scaler=preprocessing.MinMaxScaler(),
    distance_matrix=None,
):
    """Creates the projection/lens from a dataset. Input the data set. Specify a projection/lens type. Output the projected data/lens.

    Parameters
    ----------
    X : Numpy Array
        The data to fit a projection/lens to.
    projection :
        Projection parameter is either a string, a Scikit-learn class with fit_transform, like manifold.TSNE(), or a list of dimension indices. A string from ["sum", "mean", "median", "max", "min", "std", "dist_mean", "l2norm", "knn_distance_n"]. If using knn_distance_n write the number of desired neighbors in place of n: knn_distance_5 for summed distances to 5 nearest neighbors. Default = "sum".
    scaler : Scikit-Learn API compatible scaler.
        Scaler of the data applied after mapping. Use None for no scaling. Default = preprocessing.MinMaxScaler() if None, do no scaling, else apply scaling to the projection. Default: Min-Max scaling
    distance_matrix : Either str or None
        If not None, then any of ["braycurtis", "canberra", "chebyshev", "cityblock", "correlation", "cosine", "dice", "euclidean", "hamming", "jaccard", "kulsinski", "mahalanobis", "matching", "minkowski", "rogerstanimoto", "russellrao", "seuclidean", "sokalmichener", "sokalsneath", "sqeuclidean", "yule"].
        If False do nothing, else create a squared distance matrix with the chosen metric, before applying the projection.

    Returns
    -------
    lens : Numpy Array
        projected data.

    Examples
    --------
    >>> # Project by taking the first dimension and third dimension
    >>> X_projected = mapper.project(
    >>>     X_inverse,
    >>>     projection=[0,2]
    >>> )

    >>> # Project by taking the sum of row values
    >>> X_projected = mapper.project(
    >>>     X_inverse,
    >>>     projection="sum"
    >>> )

    >>> # Do not scale the projection (default is minmax-scaling)
    >>> X_projected = mapper.project(
    >>>     X_inverse,
    >>>     scaler=None
    >>> )

    >>> # Project by standard-scaled summed distance to 5 nearest neighbors
    >>> X_projected = mapper.project(
    >>>     X_inverse,
    >>>     projection="knn_distance_5",
    >>>     scaler=sklearn.preprocessing.StandardScaler()
    >>> )

    >>> # Project by first two PCA components
    >>> X_projected = mapper.project(
    >>>     X_inverse,
    >>>     projection=sklearn.decomposition.PCA()
    >>> )

    >>> # Project by first three UMAP components
    >>> X_projected = mapper.project(
    >>>     X_inverse,
    >>>     projection=umap.UMAP(n_components=3)
    >>> )

    >>> # Project by L2-norm on squared Pearson distance matrix
    >>> X_projected = mapper.project(
    >>>     X_inverse,
    >>>     projection="l2norm",
    >>>     distance_matrix="pearson"
    >>> )

    >>> # Mix and match different projections
    >>> X_projected = np.c_[
    >>>     mapper.project(X_inverse, projection=sklearn.decomposition.PCA()),
    >>>     mapper.project(X_inverse, projection="knn_distance_5")
    >>> ]

    >>> # Stack / chain projections. You could do this manually,
    >>> # or pipeline with `.fit_transform()`. Works the same as `.project()`,
    >>> # but accepts lists. f(raw text) -> f(tfidf) -> f(isomap 100d) -> f(umap 2d)
    >>> projected_X = mapper.fit_transform(
    >>>     X,
    >>>     projections=[TfidfVectorizer(analyzer="char",
    >>>                                  ngram_range=(1,6),
    >>>                                  max_df=0.93,
    >>>                                  min_df=0.03),
    >>>                  manifold.Isomap(n_components=100,
    >>>                                  n_jobs=-1),
    >>>                  umap.UMAP(n_components=2,
    >>>                            random_state=1)],
    >>>     scalers=[None,
    >>>              None,
    >>>              preprocessing.MinMaxScaler()],
    >>>     distance_matrices=[False,
    >>>                        False,
    >>>                        False])

    """
    # Save original values off so they can be referenced by later functions in the pipeline
    self.inverse = X
    self.scaler = scaler
    self.projection = str(projection)
    self.distance_matrix = distance_matrix

    if self.verbose > 0:
        print("..Projecting on data shaped %s" % (str(X.shape)))

    # If distance_matrix is a scipy.spatial.pdist string, we create a square distance matrix
    # from the vectors, before applying a projection.
    if self.distance_matrix in [
        "braycurtis",
        "canberra",
        "chebyshev",
        "cityblock",
        "correlation",
        "cosine",
        "dice",
        "euclidean",
        "hamming",
        "jaccard",
        "kulsinski",
        "mahalanobis",
        "matching",
        "minkowski",
        "rogerstanimoto",
        "russellrao",
        "seuclidean",
        "sokalmichener",
        "sokalsneath",
        "sqeuclidean",
        "yule",
    ]:
        X = distance.squareform(distance.pdist(X, metric=distance_matrix))
        if self.verbose > 0:
            print(
                "Created distance matrix, shape: %s, with distance metric `%s`"
                % (X.shape, distance_matrix)
            )

    # Detect if projection is a class (for scikit-learn).
    # NOTE(review): the bare `except: pass` below silently swallows ANY
    # failure in fit_transform, not just the get_params() duck-test —
    # errors from a real reducer are hidden. Consider narrowing it.
    try:
        p = projection.get_params()  # fail quickly if not sklearn-like
        reducer = projection
        if self.verbose > 0:
            try:
                projection.set_params(**{"verbose": self.verbose})
            except:
                pass
            print("\n..Projecting data using: \n\t%s\n" % str(projection))
        X = reducer.fit_transform(X)
    except:
        pass

    # What is this used for?
    if isinstance(projection, tuple):
        X = self._process_projection_tuple(projection)

    # Detect if projection is a string (for standard functions)
    # TODO: test each one of these projections
    if isinstance(projection, str):
        if self.verbose > 0:
            print("\n..Projecting data using: %s" % (projection))

        def dist_mean(X, axis=1):
            # Summed per-coordinate absolute deviation from the column means.
            X_mean = np.mean(X, axis=0)
            X = np.sum(np.sqrt((X - X_mean) ** 2), axis=1)
            return X

        projection_funcs = {
            "sum": np.sum,
            "mean": np.mean,
            "median": np.median,
            "max": np.max,
            "min": np.min,
            "std": np.std,
            "l2norm": np.linalg.norm,
            "dist_mean": dist_mean,
        }

        if projection in projection_funcs.keys():
            # Row-wise reduction, reshaped to a single-column lens.
            X = projection_funcs[projection](X, axis=1).reshape((X.shape[0], 1))

        if "knn_distance_" in projection:
            n_neighbors = int(projection.split("_")[2])
            if (
                self.distance_matrix
            ):  # We use the distance matrix for finding neighbors
                X = np.sum(np.sort(X, axis=1)[:, :n_neighbors], axis=1).reshape(
                    (X.shape[0], 1)
                )
            else:
                from sklearn import neighbors

                nn = neighbors.NearestNeighbors(n_neighbors=n_neighbors)
                nn.fit(X)
                X = np.sum(
                    nn.kneighbors(X, n_neighbors=n_neighbors, return_distance=True)[
                        0
                    ],
                    axis=1,
                ).reshape((X.shape[0], 1))

    # Detect if projection is a list (with dimension indices)
    if isinstance(projection, list):
        if self.verbose > 0:
            print("\n..Projecting data using: %s" % (str(projection)))
        X = X[:, np.array(projection)]

    # If projection produced sparse output, turn into a dense array
    if issparse(X):
        X = X.toarray()

    if self.verbose > 0:
        print("\n..Created projection shaped %s" % (str(X.shape)))

    # Scaling
    if scaler is not None:
        if self.verbose > 0:
            print("\n..Scaling with: %s\n" % str(scaler))
        X = scaler.fit_transform(X)
    return X
def fit_transform(
    self,
    X,
    projection="sum",
    scaler=preprocessing.MinMaxScaler(),
    distance_matrix=False,
):
    """Same as .project() but accepts lists for arguments so you can chain.
    """
    # Normalize each argument into a per-stage list so that the arguments
    # together describe a pipeline of projection stages.
    if isinstance(projection, list) and not isinstance(projection[0], int):
        stages = projection
    else:
        # A single projection, or a list of ints (dimension indices),
        # is one stage.
        stages = [projection]

    stage_scalers = scaler if isinstance(scaler, list) else [scaler]
    stage_dms = (
        distance_matrix if isinstance(distance_matrix, list) else [distance_matrix]
    )

    # Broadcast a lone scaler / distance-matrix setting across all stages.
    if len(stage_scalers) != len(stages):
        stage_scalers = [stage_scalers[0]] * len(stages)
    if len(stage_dms) != len(stages):
        stage_dms = [stage_dms[0]] * len(stages)

    if self.verbose > 0:
        print("..Composing projection pipeline of length %s:" % (len(stages)))
        print("\tProjections: %s" % ("\n\t\t".join(map(str, stages))))
        print("\tDistance matrices: %s" % ("\n".join(map(str, stage_dms))))
        print("\tScalers: %s" % ("\n".join(map(str, stage_scalers))))

    # Feed each stage's output into the next.
    lens = X
    for stage, stage_scaler, stage_dm in zip(stages, stage_scalers, stage_dms):
        lens = self.project(
            lens,
            projection=stage,
            scaler=stage_scaler,
            distance_matrix=stage_dm,
        )
    return lens
def map(
    self,
    lens,
    X=None,
    clusterer=cluster.DBSCAN(eps=0.5, min_samples=3),
    cover=Cover(n_cubes=10, perc_overlap=0.1),
    nerve=GraphNerve(),
    precomputed=False,
    remove_duplicate_nodes=False,
    # These arguments are all deprecated
    overlap_perc=None,
    nr_cubes=None
):
    """Apply Mapper algorithm on this projection and build a simplicial complex. Returns a dictionary with nodes and links.

    Parameters
    ----------
    lens: Numpy Array
        Lower dimensional representation of data. In general will be output of `fit_transform`.
    X: Numpy Array
        Original data or data to run clustering on. If `None`, then use `lens` as default. X can be a SciPy sparse matrix.
    clusterer: Default: DBSCAN
        Scikit-learn API compatible clustering algorithm. Must provide `fit` and `predict`.
    cover: kmapper.Cover
        Cover scheme for lens. Instance of kmapper.cover providing methods `fit` and `transform`.
    nerve: kmapper.Nerve
        Nerve builder implementing `__call__(nodes)` API
    precomputed : Boolean
        Tell Mapper whether the data that you are clustering on is a precomputed distance matrix. If set to
        `True`, the assumption is that you are also telling your `clusterer` that `metric='precomputed'` (which
        is an argument for DBSCAN among others), which
        will then cause the clusterer to expect a square distance matrix for each hypercube. `precomputed=True` will give a square matrix
        to the clusterer to fit on for each hypercube.
    remove_duplicate_nodes: Boolean
        Removes duplicate nodes before edges are determined. A node is considered to be duplicate
        if it has exactly the same set of points as another node.
    nr_cubes: Int

        .. deprecated:: 1.1.6
            define Cover explicitly in future versions

        The number of intervals/hypercubes to create. Default = 10.
    overlap_perc: Float

        .. deprecated:: 1.1.6
            define Cover explicitly in future versions

        The percentage of overlap "between" the intervals/hypercubes. Default = 0.1.

    Returns
    =======
    simplicial_complex : dict
        A dictionary with "nodes", "links" and "meta" information.

    Examples
    ========

    >>> # Default mapping.
    >>> graph = mapper.map(X_projected, X_inverse)

    >>> # Apply clustering on the projection instead of on inverse X
    >>> graph = mapper.map(X_projected)

    >>> # Use 20 cubes/intervals per projection dimension, with a 50% overlap
    >>> graph = mapper.map(X_projected, X_inverse,
    >>>                    cover=kmapper.Cover(n_cubes=20, perc_overlap=0.5))

    >>> # Use multiple different cubes/intervals per projection dimension,
    >>> # And vary the overlap
    >>> graph = mapper.map(X_projected, X_inverse,
    >>>                    cover=km.Cover(n_cubes=[10,20,5],
    >>>                                   perc_overlap=[0.1,0.2,0.5]))

    >>> # Use KMeans with 2 clusters
    >>> graph = mapper.map(X_projected, X_inverse,
    >>>                    clusterer=sklearn.cluster.KMeans(2))

    >>> # Use DBSCAN with "cosine"-distance
    >>> graph = mapper.map(X_projected, X_inverse,
    >>>                    clusterer=sklearn.cluster.DBSCAN(metric="cosine"))

    >>> # Use HDBSCAN as the clusterer
    >>> graph = mapper.map(X_projected, X_inverse,
    >>>                    clusterer=hdbscan.HDBSCAN())

    >>> # Parametrize the nerve of the covering
    >>> graph = mapper.map(X_projected, X_inverse,
    >>>                    nerve=km.GraphNerve(min_intersection=3))

    """
    start = datetime.now()  # wall-clock timer for the verbose summary
    nodes = defaultdict(list)  # cluster-id -> list of member row indices
    meta = defaultdict(list)  # never filled here; exposed as graph["meta_nodes"]
    graph = {}
    # If inverse image is not provided, we use the projection as the inverse image (suffer projection loss)
    if X is None:
        X = lens
    # Deprecation warnings
    if nr_cubes is not None or overlap_perc is not None:
        warnings.warn(
            "Deprecation Warning: Please supply km.Cover object. Explicitly passing in n_cubes/nr_cubes and overlap_perc will be deprecated in future releases. ",
            DeprecationWarning,
        )
    # If user supplied nr_cubes, overlap_perc, or coverer, opt for those
    # TODO: remove this conditional after release in 1.2
    if nr_cubes is not None or overlap_perc is not None:
        n_cubes = nr_cubes if nr_cubes else 10
        overlap_perc = overlap_perc if overlap_perc else 0.1
        self.cover = Cover(n_cubes=n_cubes, perc_overlap=overlap_perc)
    else:
        self.cover = cover
    if self.verbose > 0:
        print(
            "Mapping on data shaped %s using lens shaped %s\n"
            % (str(X.shape), str(lens.shape))
        )
    # Prefix'ing the data with an ID column so members can be traced back
    # to original rows after covering/clustering.
    ids = np.array([x for x in range(lens.shape[0])])
    lens = np.c_[ids, lens]
    if issparse(X):
        X = hstack([ids[np.newaxis].T, X], format='csr')
    else:
        X = np.c_[ids, X]
    # Cover scheme defines a list of elements
    bins = self.cover.fit(lens)
    # Algo's like K-Means, have a set number of clusters. We need this number
    # to adjust for the minimal number of samples inside an interval before
    # we consider clustering or skipping it.
    cluster_params = clusterer.get_params()
    min_cluster_samples = cluster_params.get(
        "n_clusters",
        cluster_params.get(
            "min_cluster_size", cluster_params.get("min_samples", 1)
        ),
    )
    if self.verbose > 1:
        print(
            "Minimal points in hypercube before clustering: %d"
            % (min_cluster_samples)
        )
    # Subdivide the projected data X in intervals/hypercubes with overlap
    if self.verbose > 0:
        bins = list(bins)  # extract list from generator (only to count it)
        total_bins = len(bins)
        print("Creating %s hypercubes." % total_bins)
    for i, hypercube in enumerate(self.cover.transform(lens)):
        # If at least min_cluster_samples samples inside the hypercube
        if hypercube.shape[0] >= min_cluster_samples:
            # Cluster the data point(s) in the cube, skipping the id-column
            # Note that we apply clustering on the inverse image (original data samples) that fall inside the cube.
            # NOTE: `ids` is rebound here, shadowing the full index array above.
            ids = [int(nn) for nn in hypercube[:, 0]]
            X_cube = X[ids]
            fit_data = X_cube[:, 1:]
            if precomputed:
                # For a precomputed distance matrix, also slice columns so
                # the clusterer sees a square sub-matrix for this cube.
                fit_data = fit_data[:, ids]
            cluster_predictions = clusterer.fit_predict(fit_data)
            if self.verbose > 1:
                print(
                    " > Found %s clusters in hypercube %s."
                    % (
                        np.unique(
                            cluster_predictions[cluster_predictions > -1]
                        ).shape[0], i
                    )
                )
            for pred in np.unique(cluster_predictions):
                # if not predicted as noise (label -1)
                if pred != -1 and not np.isnan(pred):
                    cluster_id = "cube{}_cluster{}".format(i, int(pred))
                    nodes[cluster_id] = hypercube[:, 0][cluster_predictions == pred].astype(int).tolist()
        elif self.verbose > 1:
            print("Cube_%s is empty.\n" % (i))
    if remove_duplicate_nodes:
        nodes = self._remove_duplicate_nodes(nodes)
    # Build edges/simplices from overlapping node memberships.
    links, simplices = nerve.compute(nodes)
    graph["nodes"] = nodes
    graph["links"] = links
    graph["simplices"] = simplices
    graph["meta_data"] = {
        "projection": self.projection if self.projection else "custom",
        "n_cubes": self.cover.n_cubes,
        "perc_overlap": self.cover.perc_overlap,
        "clusterer": str(clusterer),
        "scaler": str(self.scaler),
    }
    graph["meta_nodes"] = meta
    if self.verbose > 0:
        self._summary(graph, str(datetime.now() - start))
    return graph
def _remove_duplicate_nodes(self, nodes):
# invert node list and merge duplicate nodes
deduped_items = defaultdict(list)
for node_id, items in nodes.items():
deduped_items[frozenset(items)].append(node_id)
deduped_nodes = {
"|".join(node_id_list): list(frozen_items)
for frozen_items, node_id_list in deduped_items.items()
}
if self.verbose > 0:
total_merged = len(nodes) - len(deduped_items)
if total_merged:
print("Merged {} duplicate nodes.\n".format(total_merged))
print(
"Number of nodes before merger: {}; after merger: {}\n".format(
len(nodes), len(deduped_nodes)
)
)
else:
print("No duplicate nodes found to remove.\n")
return deduped_nodes
def _summary(self, graph, time):
# TODO: this summary is dependant on the type of Nerve being built.
links = graph["links"]
nodes = graph["nodes"]
nr_links = sum(len(v) for k, v in links.items())
print("\nCreated %s edges and %s nodes in %s." % (nr_links, len(nodes), time))
def visualize(
self,
graph,
color_function=None,
custom_tooltips=None,
custom_meta=None,
path_html="mapper_visualization_output.html",
title="Kepler Mapper",
save_file=True,
X=None,
X_names=None,
lens=None,
lens_names=None,
show_tooltips=True,
nbins=10,
):
"""Generate a visualization of the simplicial complex mapper output. Turns the complex dictionary into a HTML/D3.js visualization
Parameters
----------
graph : dict
Simplicial complex output from the `map` method.
color_function : list or 1d array
A 1d vector with length equal to number of data points used to build Mapper. Each value should correspond to a value for each data point and color of node is computed as the average value for members in a node.
path_html : String
file name for outputing the resulting html.
custom_meta: dict
Render (key, value) in the Mapper Summary pane.
custom_tooltip: list or array like
Value to display for each entry in the node. The cluster data pane will display entry for all values in the node. Default is index of data.
save_file: bool, default is True
Save file to `path_html`.
X: numpy arraylike
If supplied, compute statistics information about the original data source with respect to each node.
X_names: list of strings
Names of each variable in `X` to be displayed. If None, then display names by index.
lens: numpy arraylike
If supplied, compute statistics of each node based on the projection/lens
lens_name: list of strings
Names of each variable in `lens` to be displayed. In None, then display names by index.
show_tooltips: bool, default is True.
If false, completely disable tooltips. This is useful when using output in space-tight pages or will display node data in custom ways.
nbins: int, default is 10
Number of bins shown in histogram of tooltip color distributions.
Returns
--------
html: string
Returns the same html that is normally output to `path_html`. Complete graph and data ready for viewing.
Examples
---------
>>> # Basic creation of a `.html` file at `kepler-mapper-output.html`
>>> html = mapper.visualize(graph, path_html="kepler-mapper-output.html")
>>> # Jupyter Notebook support
>>> from kmapper import jupyter
>>> html = mapper.visualize(graph, path_html="kepler-mapper-output.html")
>>> jupyter.display(path_html="kepler-mapper-output.html")
>>> # Customizing the output text
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_meta={"Description":"A short description.",
>>> "Cluster": "HBSCAN()"}
>>> )
>>> # Custom coloring function based on your 1d lens
>>> html = mapper.visualize(
>>> graph,
>>> color_function=lens
>>> )
>>> # Custom coloring function based on the first variable
>>> cf = mapper.project(X, projection=[0])
>>> html = mapper.visualize(
>>> graph,
>>> color_function=cf
>>> )
>>> # Customizing the tooltips with binary target variables
>>> X, y = split_data(df)
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_tooltips=y
>>> )
>>> # Customizing the tooltips with html-strings: locally stored images of an image dataset
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_tooltips=np.array(
>>> ["<img src='img/%s.jpg'>"%i for i in range(inverse_X.shape[0])]
>>> )
>>> )
"""
# TODO:
# - Make color functions more intuitive. How do they even work?
# - Allow multiple color functions that can be toggled on and off.
if not len(graph["nodes"]) > 0:
raise Exception(
"Visualize requires a mapper with more than 0 nodes. \nIt is possible that the constructed mapper could have been constructed with bad parameters. This can occasionally happens when using the default clustering algorithm. Try changing `eps` or `min_samples` in the DBSCAN clustering algorithm."
)
# Find the module absolute path and locate templates
module_root = os.path.join(os.path.dirname(__file__), "templates")
env = Environment(loader=FileSystemLoader(module_root))
# Color function is a vector of colors?
color_function = init_color_function(graph, color_function)
if X_names is None:
X_names = []
if lens_names is None:
lens_names = []
mapper_data = format_mapper_data(
graph,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
env,
nbins,
)
colorscale = colorscale_default
histogram = graph_data_distribution(graph, color_function, colorscale)
mapper_summary = format_meta(graph, custom_meta)
# Find the absolute module path and the static files
js_path = os.path.join(os.path.dirname(__file__), "static", "kmapper.js")
with open(js_path, "r") as f:
js_text = f.read()
css_path = os.path.join(os.path.dirname(__file__), "static", "style.css")
with open(css_path, "r") as f:
css_text = f.read()
# Render the Jinja template, filling fields as appropriate
template = env.get_template("base.html").render(
title=title,
mapper_summary=mapper_summary,
histogram=histogram,
dist_label="Node",
mapper_data=mapper_data,
colorscale=colorscale,
js_text=js_text,
css_text=css_text,
show_tooltips=True,
)
if save_file:
with open(path_html, "wb") as outfile:
if self.verbose > 0:
print("Wrote visualization to: %s" % (path_html))
outfile.write(template.encode("utf-8"))
return template
def data_from_cluster_id(self, cluster_id, graph, data):
"""Returns the original data of each cluster member for a given cluster ID
Parameters
----------
cluster_id : String
ID of the cluster.
graph : dict
The resulting dictionary after applying map()
data : Numpy Array
Original dataset. Accepts both 1-D and 2-D array.
Returns
-------
entries:
rows of cluster member data as Numpy array.
"""
if cluster_id in graph["nodes"]:
cluster_members = graph["nodes"][cluster_id]
cluster_members_data = data[cluster_members]
return cluster_members_data
else:
return np.array([])
def _process_projection_tuple(self, projection):
# Detect if projection is a tuple (for prediction functions)
# TODO: multi-label models
# TODO: infer binary classification and select positive class preds
# TODO: turn into smaller functions for better tests and complexity
# TODO: this seems like outside the purview of mapper. Can we add something like Mapper utils that can do this?
def blend(X_blend, pred_fun, folder, X_data, y):
for train_index, test_index in folder.split(X_data, y):
fold_X_train = X_data[train_index]
fold_y_train = y[train_index]
fold_X_test = X_data[test_index]
fold_y_test = y[test_index]
model.fit(fold_X_train, fold_y_train)
fold_preds = pred_fun(fold_X_test)
X_blend[test_index] = fold_preds
return X_blend
# If projection was passed without ground truth
# assume we are predicting a fitted model on a test set
if len(projection) == 2:
model, X_data = projection
# Are we dealing with a classifier or a regressor?
estimator_type = getattr(model, "_estimator_type", None)
if estimator_type == "classifier":
# classifier probabilities
X_blend = model.predict_proba(X_data)
elif estimator_type == "regressor":
X_blend = model.predict(X_data)
else:
warnings.warn("Unknown estimator type for: %s" % (model))
# If projection is passed with ground truth do 5-fold stratified
# cross-validation, saving the out-of-fold predictions.
# this is called "Stacked Generalization" (see: Wolpert 1992)
elif len(projection) == 3:
model, X_data, y = projection
estimator_type = getattr(model, "_estimator_type", None)
if estimator_type == "classifier":
X_blend = np.zeros((X_data.shape[0], np.unique(y).shape[0]))
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1729)
blend(X_blend, model.predict_proba, skf, X_data, y)
elif estimator_type == "regressor":
X_blend = np.zeros(X_data.shape[0])
kf = KFold(n_splits=5, shuffle=True, random_state=1729)
blend(X_blend, model.predict, kf, X_data, y)
else:
warnings.warn("Unknown estimator type for: %s" % (model))
else:
# Warn for malformed input and provide help to avoid it.
warnings.warn(
"Passing a model function should be"
+ "(model, X) or (model, X, y)."
+ "Instead got %s" % (str(projection))
)
# Reshape 1-D arrays (regressor outputs) to 2-D arrays
if X_blend.ndim == 1:
X_blend = X_blend.reshape((X_blend.shape[0], 1))
X = X_blend
return X
|
scikit-tda/kepler-mapper | kmapper/kmapper.py | KeplerMapper.visualize | python | def visualize(
self,
graph,
color_function=None,
custom_tooltips=None,
custom_meta=None,
path_html="mapper_visualization_output.html",
title="Kepler Mapper",
save_file=True,
X=None,
X_names=None,
lens=None,
lens_names=None,
show_tooltips=True,
nbins=10,
):
# TODO:
# - Make color functions more intuitive. How do they even work?
# - Allow multiple color functions that can be toggled on and off.
if not len(graph["nodes"]) > 0:
raise Exception(
"Visualize requires a mapper with more than 0 nodes. \nIt is possible that the constructed mapper could have been constructed with bad parameters. This can occasionally happens when using the default clustering algorithm. Try changing `eps` or `min_samples` in the DBSCAN clustering algorithm."
)
# Find the module absolute path and locate templates
module_root = os.path.join(os.path.dirname(__file__), "templates")
env = Environment(loader=FileSystemLoader(module_root))
# Color function is a vector of colors?
color_function = init_color_function(graph, color_function)
if X_names is None:
X_names = []
if lens_names is None:
lens_names = []
mapper_data = format_mapper_data(
graph,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
env,
nbins,
)
colorscale = colorscale_default
histogram = graph_data_distribution(graph, color_function, colorscale)
mapper_summary = format_meta(graph, custom_meta)
# Find the absolute module path and the static files
js_path = os.path.join(os.path.dirname(__file__), "static", "kmapper.js")
with open(js_path, "r") as f:
js_text = f.read()
css_path = os.path.join(os.path.dirname(__file__), "static", "style.css")
with open(css_path, "r") as f:
css_text = f.read()
# Render the Jinja template, filling fields as appropriate
template = env.get_template("base.html").render(
title=title,
mapper_summary=mapper_summary,
histogram=histogram,
dist_label="Node",
mapper_data=mapper_data,
colorscale=colorscale,
js_text=js_text,
css_text=css_text,
show_tooltips=True,
)
if save_file:
with open(path_html, "wb") as outfile:
if self.verbose > 0:
print("Wrote visualization to: %s" % (path_html))
outfile.write(template.encode("utf-8"))
return template | Generate a visualization of the simplicial complex mapper output. Turns the complex dictionary into a HTML/D3.js visualization
Parameters
----------
graph : dict
Simplicial complex output from the `map` method.
color_function : list or 1d array
A 1d vector with length equal to number of data points used to build Mapper. Each value should correspond to a value for each data point and color of node is computed as the average value for members in a node.
path_html : String
file name for outputing the resulting html.
custom_meta: dict
Render (key, value) in the Mapper Summary pane.
custom_tooltip: list or array like
Value to display for each entry in the node. The cluster data pane will display entry for all values in the node. Default is index of data.
save_file: bool, default is True
Save file to `path_html`.
X: numpy arraylike
If supplied, compute statistics information about the original data source with respect to each node.
X_names: list of strings
Names of each variable in `X` to be displayed. If None, then display names by index.
lens: numpy arraylike
If supplied, compute statistics of each node based on the projection/lens
lens_name: list of strings
Names of each variable in `lens` to be displayed. In None, then display names by index.
show_tooltips: bool, default is True.
If false, completely disable tooltips. This is useful when using output in space-tight pages or will display node data in custom ways.
nbins: int, default is 10
Number of bins shown in histogram of tooltip color distributions.
Returns
--------
html: string
Returns the same html that is normally output to `path_html`. Complete graph and data ready for viewing.
Examples
---------
>>> # Basic creation of a `.html` file at `kepler-mapper-output.html`
>>> html = mapper.visualize(graph, path_html="kepler-mapper-output.html")
>>> # Jupyter Notebook support
>>> from kmapper import jupyter
>>> html = mapper.visualize(graph, path_html="kepler-mapper-output.html")
>>> jupyter.display(path_html="kepler-mapper-output.html")
>>> # Customizing the output text
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_meta={"Description":"A short description.",
>>> "Cluster": "HBSCAN()"}
>>> )
>>> # Custom coloring function based on your 1d lens
>>> html = mapper.visualize(
>>> graph,
>>> color_function=lens
>>> )
>>> # Custom coloring function based on the first variable
>>> cf = mapper.project(X, projection=[0])
>>> html = mapper.visualize(
>>> graph,
>>> color_function=cf
>>> )
>>> # Customizing the tooltips with binary target variables
>>> X, y = split_data(df)
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_tooltips=y
>>> )
>>> # Customizing the tooltips with html-strings: locally stored images of an image dataset
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_tooltips=np.array(
>>> ["<img src='img/%s.jpg'>"%i for i in range(inverse_X.shape[0])]
>>> )
>>> ) | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/kmapper.py#L623-L805 | [
"def init_color_function(graph, color_function=None):\n # If no color_function provided we color by row order in data set\n # Reshaping to 2-D array is required for sklearn 0.19\n n_samples = np.max([i for s in graph[\"nodes\"].values() for i in s]) + 1\n if color_function is None:\n color_functi... | class KeplerMapper(object):
"""With this class you can build topological networks from (high-dimensional) data.
1) Fit a projection/lens/function to a dataset and transform it.
For instance "mean_of_row(x) for x in X"
2) Map this projection with overlapping intervals/hypercubes.
Cluster the points inside the interval
(Note: we cluster on the inverse image/original data to lessen projection loss).
If two clusters/nodes have the same members (due to the overlap), then:
connect these with an edge.
3) Visualize the network using HTML and D3.js.
KM has a number of nice features, some which get forgotten.
- ``project``: Some projections it makes sense to use a distance matrix, such as knn_distance_#. Using ``distance_matrix = <metric>`` for a custom metric.
- ``fit_transform``: Applies a sequence of projections. Currently, this API is a little confusing and might be changed in the future.
"""
def __init__(self, verbose=0):
"""Constructor for KeplerMapper class.
Parameters
===========
verbose: int, default is 0
Logging level. Currently 3 levels (0,1,2) are supported. For no logging, set `verbose=0`. For some logging, set `verbose=1`. For complete logging, set `verbose=2`.
"""
# TODO: move as many of the arguments from fit_transform and map into here.
self.verbose = verbose
self.projection = None
self.scaler = None
self.cover = None
if verbose > 0:
print(self)
def __repr__(self):
return "KeplerMapper(verbose={})".format(self.verbose)
def project(
self,
X,
projection="sum",
scaler=preprocessing.MinMaxScaler(),
distance_matrix=None,
):
"""Creates the projection/lens from a dataset. Input the data set. Specify a projection/lens type. Output the projected data/lens.
Parameters
----------
X : Numpy Array
The data to fit a projection/lens to.
projection :
Projection parameter is either a string, a Scikit-learn class with fit_transform, like manifold.TSNE(), or a list of dimension indices. A string from ["sum", "mean", "median", "max", "min", "std", "dist_mean", "l2norm", "knn_distance_n"]. If using knn_distance_n write the number of desired neighbors in place of n: knn_distance_5 for summed distances to 5 nearest neighbors. Default = "sum".
scaler : Scikit-Learn API compatible scaler.
Scaler of the data applied after mapping. Use None for no scaling. Default = preprocessing.MinMaxScaler() if None, do no scaling, else apply scaling to the projection. Default: Min-Max scaling
distance_matrix : Either str or None
If not None, then any of ["braycurtis", "canberra", "chebyshev", "cityblock", "correlation", "cosine", "dice", "euclidean", "hamming", "jaccard", "kulsinski", "mahalanobis", "matching", "minkowski", "rogerstanimoto", "russellrao", "seuclidean", "sokalmichener", "sokalsneath", "sqeuclidean", "yule"].
If False do nothing, else create a squared distance matrix with the chosen metric, before applying the projection.
Returns
-------
lens : Numpy Array
projected data.
Examples
--------
>>> # Project by taking the first dimension and third dimension
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection=[0,2]
>>> )
>>> # Project by taking the sum of row values
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection="sum"
>>> )
>>> # Do not scale the projection (default is minmax-scaling)
>>> X_projected = mapper.project(
>>> X_inverse,
>>> scaler=None
>>> )
>>> # Project by standard-scaled summed distance to 5 nearest neighbors
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection="knn_distance_5",
>>> scaler=sklearn.preprocessing.StandardScaler()
>>> )
>>> # Project by first two PCA components
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection=sklearn.decomposition.PCA()
>>> )
>>> # Project by first three UMAP components
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection=umap.UMAP(n_components=3)
>>> )
>>> # Project by L2-norm on squared Pearson distance matrix
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection="l2norm",
>>> distance_matrix="pearson"
>>> )
>>> # Mix and match different projections
>>> X_projected = np.c_[
>>> mapper.project(X_inverse, projection=sklearn.decomposition.PCA()),
>>> mapper.project(X_inverse, projection="knn_distance_5")
>>> ]
>>> # Stack / chain projections. You could do this manually,
>>> # or pipeline with `.fit_transform()`. Works the same as `.project()`,
>>> # but accepts lists. f(raw text) -> f(tfidf) -> f(isomap 100d) -> f(umap 2d)
>>> projected_X = mapper.fit_transform(
>>> X,
>>> projections=[TfidfVectorizer(analyzer="char",
>>> ngram_range=(1,6),
>>> max_df=0.93,
>>> min_df=0.03),
>>> manifold.Isomap(n_components=100,
>>> n_jobs=-1),
>>> umap.UMAP(n_components=2,
>>> random_state=1)],
>>> scalers=[None,
>>> None,
>>> preprocessing.MinMaxScaler()],
>>> distance_matrices=[False,
>>> False,
>>> False])
"""
# Sae original values off so they can be referenced by later functions in the pipeline
self.inverse = X
self.scaler = scaler
self.projection = str(projection)
self.distance_matrix = distance_matrix
if self.verbose > 0:
print("..Projecting on data shaped %s" % (str(X.shape)))
# If distance_matrix is a scipy.spatial.pdist string, we create a square distance matrix
# from the vectors, before applying a projection.
if self.distance_matrix in [
"braycurtis",
"canberra",
"chebyshev",
"cityblock",
"correlation",
"cosine",
"dice",
"euclidean",
"hamming",
"jaccard",
"kulsinski",
"mahalanobis",
"matching",
"minkowski",
"rogerstanimoto",
"russellrao",
"seuclidean",
"sokalmichener",
"sokalsneath",
"sqeuclidean",
"yule",
]:
X = distance.squareform(distance.pdist(X, metric=distance_matrix))
if self.verbose > 0:
print(
"Created distance matrix, shape: %s, with distance metric `%s`"
% (X.shape, distance_matrix)
)
# Detect if projection is a class (for scikit-learn)
try:
p = projection.get_params() # fail quickly
reducer = projection
if self.verbose > 0:
try:
projection.set_params(**{"verbose": self.verbose})
except:
pass
print("\n..Projecting data using: \n\t%s\n" % str(projection))
X = reducer.fit_transform(X)
except:
pass
# What is this used for?
if isinstance(projection, tuple):
X = self._process_projection_tuple(projection)
# Detect if projection is a string (for standard functions)
# TODO: test each one of these projections
if isinstance(projection, str):
if self.verbose > 0:
print("\n..Projecting data using: %s" % (projection))
def dist_mean(X, axis=1):
X_mean = np.mean(X, axis=0)
X = np.sum(np.sqrt((X - X_mean) ** 2), axis=1)
return X
projection_funcs = {
"sum": np.sum,
"mean": np.mean,
"median": np.median,
"max": np.max,
"min": np.min,
"std": np.std,
"l2norm": np.linalg.norm,
"dist_mean": dist_mean,
}
if projection in projection_funcs.keys():
X = projection_funcs[projection](X, axis=1).reshape((X.shape[0], 1))
if "knn_distance_" in projection:
n_neighbors = int(projection.split("_")[2])
if (
self.distance_matrix
): # We use the distance matrix for finding neighbors
X = np.sum(np.sort(X, axis=1)[:, :n_neighbors], axis=1).reshape(
(X.shape[0], 1)
)
else:
from sklearn import neighbors
nn = neighbors.NearestNeighbors(n_neighbors=n_neighbors)
nn.fit(X)
X = np.sum(
nn.kneighbors(X, n_neighbors=n_neighbors, return_distance=True)[
0
],
axis=1,
).reshape((X.shape[0], 1))
# Detect if projection is a list (with dimension indices)
if isinstance(projection, list):
if self.verbose > 0:
print("\n..Projecting data using: %s" % (str(projection)))
X = X[:, np.array(projection)]
# If projection produced sparse output, turn into a dense array
if issparse(X):
X = X.toarray()
if self.verbose > 0:
print("\n..Created projection shaped %s" % (str(X.shape)))
# Scaling
if scaler is not None:
if self.verbose > 0:
print("\n..Scaling with: %s\n" % str(scaler))
X = scaler.fit_transform(X)
return X
def fit_transform(
self,
X,
projection="sum",
scaler=preprocessing.MinMaxScaler(),
distance_matrix=False,
):
"""Same as .project() but accepts lists for arguments so you can chain.
"""
projections = projection
scalers = scaler
distance_matrices = distance_matrix
# Turn single projection arguments into a pipeline
if isinstance(projection, list) and isinstance(projection[0], int):
projections = [projection]
if not isinstance(projection, list):
projections = [projection]
# Turn single scaler arguments into a pipeline
if not isinstance(scaler, list):
scalers = [scaler]
# Turn single distance matrix arguments into a pipeline
if not isinstance(distance_matrix, list):
distance_matrices = [distance_matrix]
# set defaults to first list item, if not (correctly) set by the user
if len(scalers) != len(projections):
scalers = [scalers[0]] * len(projections)
if len(distance_matrices) != len(projections):
distance_matrices = [distance_matrices[0]] * len(projections)
if self.verbose > 0:
print("..Composing projection pipeline of length %s:" % (len(projections)))
print("\tProjections: %s" % ("\n\t\t".join(map(str, projections))))
print("\tDistance matrices: %s" % ("\n".join(map(str, distance_matrices))))
print("\tScalers: %s" % ("\n".join(map(str, scalers))))
# Pipeline Stack the projection functions
lens = X
for projection, scaler, distance_matrix in zip(
projections, scalers, distance_matrices
):
lens = self.project(
lens,
projection=projection,
scaler=scaler,
distance_matrix=distance_matrix,
)
return lens
def map(
self,
lens,
X=None,
clusterer=cluster.DBSCAN(eps=0.5, min_samples=3),
cover=Cover(n_cubes=10, perc_overlap=0.1),
nerve=GraphNerve(),
precomputed=False,
remove_duplicate_nodes=False,
# These arguments are all deprecated
overlap_perc=None,
nr_cubes=None
):
"""Apply Mapper algorithm on this projection and build a simplicial complex. Returns a dictionary with nodes and links.
Parameters
----------
lens: Numpy Array
Lower dimensional representation of data. In general will be output of `fit_transform`.
X: Numpy Array
Original data or data to run clustering on. If `None`, then use `lens` as default. X can be a SciPy sparse matrix.
clusterer: Default: DBSCAN
Scikit-learn API compatible clustering algorithm. Must provide `fit` and `predict`.
cover: kmapper.Cover
Cover scheme for lens. Instance of kmapper.cover providing methods `fit` and `transform`.
nerve: kmapper.Nerve
Nerve builder implementing `__call__(nodes)` API
precomputed : Boolean
Tell Mapper whether the data that you are clustering on is a precomputed distance matrix. If set to
`True`, the assumption is that you are also telling your `clusterer` that `metric='precomputed'` (which
is an argument for DBSCAN among others), which
will then cause the clusterer to expect a square distance matrix for each hypercube. `precomputed=True` will give a square matrix
to the clusterer to fit on for each hypercube.
remove_duplicate_nodes: Boolean
Removes duplicate nodes before edges are determined. A node is considered to be duplicate
if it has exactly the same set of points as another node.
nr_cubes: Int
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The number of intervals/hypercubes to create. Default = 10.
overlap_perc: Float
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The percentage of overlap "between" the intervals/hypercubes. Default = 0.1.
Returns
=======
simplicial_complex : dict
A dictionary with "nodes", "links" and "meta" information.
Examples
========
>>> # Default mapping.
>>> graph = mapper.map(X_projected, X_inverse)
>>> # Apply clustering on the projection instead of on inverse X
>>> graph = mapper.map(X_projected)
>>> # Use 20 cubes/intervals per projection dimension, with a 50% overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=kmapper.Cover(n_cubes=20, perc_overlap=0.5))
>>> # Use multiple different cubes/intervals per projection dimension,
>>> # And vary the overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=km.Cover(n_cubes=[10,20,5],
>>> perc_overlap=[0.1,0.2,0.5]))
>>> # Use KMeans with 2 clusters
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.KMeans(2))
>>> # Use DBSCAN with "cosine"-distance
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.DBSCAN(metric="cosine"))
>>> # Use HDBSCAN as the clusterer
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=hdbscan.HDBSCAN())
>>> # Parametrize the nerve of the covering
>>> graph = mapper.map(X_projected, X_inverse,
>>> nerve=km.GraphNerve(min_intersection=3))
"""
start = datetime.now()
nodes = defaultdict(list)
meta = defaultdict(list)
graph = {}
# If inverse image is not provided, we use the projection as the inverse image (suffer projection loss)
if X is None:
X = lens
# Deprecation warnings
if nr_cubes is not None or overlap_perc is not None:
warnings.warn(
"Deprecation Warning: Please supply km.Cover object. Explicitly passing in n_cubes/nr_cubes and overlap_perc will be deprecated in future releases. ",
DeprecationWarning,
)
# If user supplied nr_cubes, overlap_perc, or coverer, opt for those
# TODO: remove this conditional after release in 1.2
if nr_cubes is not None or overlap_perc is not None:
n_cubes = nr_cubes if nr_cubes else 10
overlap_perc = overlap_perc if overlap_perc else 0.1
self.cover = Cover(n_cubes=n_cubes, perc_overlap=overlap_perc)
else:
self.cover = cover
if self.verbose > 0:
print(
"Mapping on data shaped %s using lens shaped %s\n"
% (str(X.shape), str(lens.shape))
)
# Prefix'ing the data with an ID column
ids = np.array([x for x in range(lens.shape[0])])
lens = np.c_[ids, lens]
if issparse(X):
X = hstack([ids[np.newaxis].T, X], format='csr')
else:
X = np.c_[ids, X]
# Cover scheme defines a list of elements
bins = self.cover.fit(lens)
# Algo's like K-Means, have a set number of clusters. We need this number
# to adjust for the minimal number of samples inside an interval before
# we consider clustering or skipping it.
cluster_params = clusterer.get_params()
min_cluster_samples = cluster_params.get(
"n_clusters",
cluster_params.get(
"min_cluster_size", cluster_params.get("min_samples", 1)
),
)
if self.verbose > 1:
print(
"Minimal points in hypercube before clustering: %d"
% (min_cluster_samples)
)
# Subdivide the projected data X in intervals/hypercubes with overlap
if self.verbose > 0:
bins = list(bins) # extract list from generator
total_bins = len(bins)
print("Creating %s hypercubes." % total_bins)
for i, hypercube in enumerate(self.cover.transform(lens)):
# If at least min_cluster_samples samples inside the hypercube
if hypercube.shape[0] >= min_cluster_samples:
# Cluster the data point(s) in the cube, skipping the id-column
# Note that we apply clustering on the inverse image (original data samples) that fall inside the cube.
ids = [int(nn) for nn in hypercube[:, 0]]
X_cube = X[ids]
fit_data = X_cube[:, 1:]
if precomputed:
fit_data = fit_data[:, ids]
cluster_predictions = clusterer.fit_predict(fit_data)
if self.verbose > 1:
print(
" > Found %s clusters in hypercube %s."
% (
np.unique(
cluster_predictions[cluster_predictions > -1]
).shape[0], i
)
)
for pred in np.unique(cluster_predictions):
# if not predicted as noise
if pred != -1 and not np.isnan(pred):
cluster_id = "cube{}_cluster{}".format(i, int(pred))
nodes[cluster_id] = hypercube[:, 0][cluster_predictions == pred].astype(int).tolist()
elif self.verbose > 1:
print("Cube_%s is empty.\n" % (i))
if remove_duplicate_nodes:
nodes = self._remove_duplicate_nodes(nodes)
links, simplices = nerve.compute(nodes)
graph["nodes"] = nodes
graph["links"] = links
graph["simplices"] = simplices
graph["meta_data"] = {
"projection": self.projection if self.projection else "custom",
"n_cubes": self.cover.n_cubes,
"perc_overlap": self.cover.perc_overlap,
"clusterer": str(clusterer),
"scaler": str(self.scaler),
}
graph["meta_nodes"] = meta
if self.verbose > 0:
self._summary(graph, str(datetime.now() - start))
return graph
def _remove_duplicate_nodes(self, nodes):
# invert node list and merge duplicate nodes
deduped_items = defaultdict(list)
for node_id, items in nodes.items():
deduped_items[frozenset(items)].append(node_id)
deduped_nodes = {
"|".join(node_id_list): list(frozen_items)
for frozen_items, node_id_list in deduped_items.items()
}
if self.verbose > 0:
total_merged = len(nodes) - len(deduped_items)
if total_merged:
print("Merged {} duplicate nodes.\n".format(total_merged))
print(
"Number of nodes before merger: {}; after merger: {}\n".format(
len(nodes), len(deduped_nodes)
)
)
else:
print("No duplicate nodes found to remove.\n")
return deduped_nodes
def _summary(self, graph, time):
# TODO: this summary is dependant on the type of Nerve being built.
links = graph["links"]
nodes = graph["nodes"]
nr_links = sum(len(v) for k, v in links.items())
print("\nCreated %s edges and %s nodes in %s." % (nr_links, len(nodes), time))
def data_from_cluster_id(self, cluster_id, graph, data):
"""Returns the original data of each cluster member for a given cluster ID
Parameters
----------
cluster_id : String
ID of the cluster.
graph : dict
The resulting dictionary after applying map()
data : Numpy Array
Original dataset. Accepts both 1-D and 2-D array.
Returns
-------
entries:
rows of cluster member data as Numpy array.
"""
if cluster_id in graph["nodes"]:
cluster_members = graph["nodes"][cluster_id]
cluster_members_data = data[cluster_members]
return cluster_members_data
else:
return np.array([])
def _process_projection_tuple(self, projection):
# Detect if projection is a tuple (for prediction functions)
# TODO: multi-label models
# TODO: infer binary classification and select positive class preds
# TODO: turn into smaller functions for better tests and complexity
# TODO: this seems like outside the purview of mapper. Can we add something like Mapper utils that can do this?
def blend(X_blend, pred_fun, folder, X_data, y):
for train_index, test_index in folder.split(X_data, y):
fold_X_train = X_data[train_index]
fold_y_train = y[train_index]
fold_X_test = X_data[test_index]
fold_y_test = y[test_index]
model.fit(fold_X_train, fold_y_train)
fold_preds = pred_fun(fold_X_test)
X_blend[test_index] = fold_preds
return X_blend
# If projection was passed without ground truth
# assume we are predicting a fitted model on a test set
if len(projection) == 2:
model, X_data = projection
# Are we dealing with a classifier or a regressor?
estimator_type = getattr(model, "_estimator_type", None)
if estimator_type == "classifier":
# classifier probabilities
X_blend = model.predict_proba(X_data)
elif estimator_type == "regressor":
X_blend = model.predict(X_data)
else:
warnings.warn("Unknown estimator type for: %s" % (model))
# If projection is passed with ground truth do 5-fold stratified
# cross-validation, saving the out-of-fold predictions.
# this is called "Stacked Generalization" (see: Wolpert 1992)
elif len(projection) == 3:
model, X_data, y = projection
estimator_type = getattr(model, "_estimator_type", None)
if estimator_type == "classifier":
X_blend = np.zeros((X_data.shape[0], np.unique(y).shape[0]))
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1729)
blend(X_blend, model.predict_proba, skf, X_data, y)
elif estimator_type == "regressor":
X_blend = np.zeros(X_data.shape[0])
kf = KFold(n_splits=5, shuffle=True, random_state=1729)
blend(X_blend, model.predict, kf, X_data, y)
else:
warnings.warn("Unknown estimator type for: %s" % (model))
else:
# Warn for malformed input and provide help to avoid it.
warnings.warn(
"Passing a model function should be"
+ "(model, X) or (model, X, y)."
+ "Instead got %s" % (str(projection))
)
# Reshape 1-D arrays (regressor outputs) to 2-D arrays
if X_blend.ndim == 1:
X_blend = X_blend.reshape((X_blend.shape[0], 1))
X = X_blend
return X
|
scikit-tda/kepler-mapper | kmapper/kmapper.py | KeplerMapper.data_from_cluster_id | python | def data_from_cluster_id(self, cluster_id, graph, data):
if cluster_id in graph["nodes"]:
cluster_members = graph["nodes"][cluster_id]
cluster_members_data = data[cluster_members]
return cluster_members_data
else:
return np.array([]) | Returns the original data of each cluster member for a given cluster ID
Parameters
----------
cluster_id : String
ID of the cluster.
graph : dict
The resulting dictionary after applying map()
data : Numpy Array
Original dataset. Accepts both 1-D and 2-D array.
Returns
-------
entries:
rows of cluster member data as Numpy array. | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/kmapper.py#L807-L830 | null | class KeplerMapper(object):
"""With this class you can build topological networks from (high-dimensional) data.
1) Fit a projection/lens/function to a dataset and transform it.
For instance "mean_of_row(x) for x in X"
2) Map this projection with overlapping intervals/hypercubes.
Cluster the points inside the interval
(Note: we cluster on the inverse image/original data to lessen projection loss).
If two clusters/nodes have the same members (due to the overlap), then:
connect these with an edge.
3) Visualize the network using HTML and D3.js.
KM has a number of nice features, some which get forgotten.
- ``project``: Some projections it makes sense to use a distance matrix, such as knn_distance_#. Using ``distance_matrix = <metric>`` for a custom metric.
- ``fit_transform``: Applies a sequence of projections. Currently, this API is a little confusing and might be changed in the future.
"""
def __init__(self, verbose=0):
"""Constructor for KeplerMapper class.
Parameters
===========
verbose: int, default is 0
Logging level. Currently 3 levels (0,1,2) are supported. For no logging, set `verbose=0`. For some logging, set `verbose=1`. For complete logging, set `verbose=2`.
"""
# TODO: move as many of the arguments from fit_transform and map into here.
self.verbose = verbose
self.projection = None
self.scaler = None
self.cover = None
if verbose > 0:
print(self)
def __repr__(self):
return "KeplerMapper(verbose={})".format(self.verbose)
def project(
self,
X,
projection="sum",
scaler=preprocessing.MinMaxScaler(),
distance_matrix=None,
):
"""Creates the projection/lens from a dataset. Input the data set. Specify a projection/lens type. Output the projected data/lens.
Parameters
----------
X : Numpy Array
The data to fit a projection/lens to.
projection :
Projection parameter is either a string, a Scikit-learn class with fit_transform, like manifold.TSNE(), or a list of dimension indices. A string from ["sum", "mean", "median", "max", "min", "std", "dist_mean", "l2norm", "knn_distance_n"]. If using knn_distance_n write the number of desired neighbors in place of n: knn_distance_5 for summed distances to 5 nearest neighbors. Default = "sum".
scaler : Scikit-Learn API compatible scaler.
Scaler of the data applied after mapping. Use None for no scaling. Default = preprocessing.MinMaxScaler() if None, do no scaling, else apply scaling to the projection. Default: Min-Max scaling
distance_matrix : Either str or None
If not None, then any of ["braycurtis", "canberra", "chebyshev", "cityblock", "correlation", "cosine", "dice", "euclidean", "hamming", "jaccard", "kulsinski", "mahalanobis", "matching", "minkowski", "rogerstanimoto", "russellrao", "seuclidean", "sokalmichener", "sokalsneath", "sqeuclidean", "yule"].
If False do nothing, else create a squared distance matrix with the chosen metric, before applying the projection.
Returns
-------
lens : Numpy Array
projected data.
Examples
--------
>>> # Project by taking the first dimension and third dimension
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection=[0,2]
>>> )
>>> # Project by taking the sum of row values
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection="sum"
>>> )
>>> # Do not scale the projection (default is minmax-scaling)
>>> X_projected = mapper.project(
>>> X_inverse,
>>> scaler=None
>>> )
>>> # Project by standard-scaled summed distance to 5 nearest neighbors
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection="knn_distance_5",
>>> scaler=sklearn.preprocessing.StandardScaler()
>>> )
>>> # Project by first two PCA components
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection=sklearn.decomposition.PCA()
>>> )
>>> # Project by first three UMAP components
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection=umap.UMAP(n_components=3)
>>> )
>>> # Project by L2-norm on squared Pearson distance matrix
>>> X_projected = mapper.project(
>>> X_inverse,
>>> projection="l2norm",
>>> distance_matrix="pearson"
>>> )
>>> # Mix and match different projections
>>> X_projected = np.c_[
>>> mapper.project(X_inverse, projection=sklearn.decomposition.PCA()),
>>> mapper.project(X_inverse, projection="knn_distance_5")
>>> ]
>>> # Stack / chain projections. You could do this manually,
>>> # or pipeline with `.fit_transform()`. Works the same as `.project()`,
>>> # but accepts lists. f(raw text) -> f(tfidf) -> f(isomap 100d) -> f(umap 2d)
>>> projected_X = mapper.fit_transform(
>>> X,
>>> projections=[TfidfVectorizer(analyzer="char",
>>> ngram_range=(1,6),
>>> max_df=0.93,
>>> min_df=0.03),
>>> manifold.Isomap(n_components=100,
>>> n_jobs=-1),
>>> umap.UMAP(n_components=2,
>>> random_state=1)],
>>> scalers=[None,
>>> None,
>>> preprocessing.MinMaxScaler()],
>>> distance_matrices=[False,
>>> False,
>>> False])
"""
# Sae original values off so they can be referenced by later functions in the pipeline
self.inverse = X
self.scaler = scaler
self.projection = str(projection)
self.distance_matrix = distance_matrix
if self.verbose > 0:
print("..Projecting on data shaped %s" % (str(X.shape)))
# If distance_matrix is a scipy.spatial.pdist string, we create a square distance matrix
# from the vectors, before applying a projection.
if self.distance_matrix in [
"braycurtis",
"canberra",
"chebyshev",
"cityblock",
"correlation",
"cosine",
"dice",
"euclidean",
"hamming",
"jaccard",
"kulsinski",
"mahalanobis",
"matching",
"minkowski",
"rogerstanimoto",
"russellrao",
"seuclidean",
"sokalmichener",
"sokalsneath",
"sqeuclidean",
"yule",
]:
X = distance.squareform(distance.pdist(X, metric=distance_matrix))
if self.verbose > 0:
print(
"Created distance matrix, shape: %s, with distance metric `%s`"
% (X.shape, distance_matrix)
)
# Detect if projection is a class (for scikit-learn)
try:
p = projection.get_params() # fail quickly
reducer = projection
if self.verbose > 0:
try:
projection.set_params(**{"verbose": self.verbose})
except:
pass
print("\n..Projecting data using: \n\t%s\n" % str(projection))
X = reducer.fit_transform(X)
except:
pass
# What is this used for?
if isinstance(projection, tuple):
X = self._process_projection_tuple(projection)
# Detect if projection is a string (for standard functions)
# TODO: test each one of these projections
if isinstance(projection, str):
if self.verbose > 0:
print("\n..Projecting data using: %s" % (projection))
def dist_mean(X, axis=1):
X_mean = np.mean(X, axis=0)
X = np.sum(np.sqrt((X - X_mean) ** 2), axis=1)
return X
projection_funcs = {
"sum": np.sum,
"mean": np.mean,
"median": np.median,
"max": np.max,
"min": np.min,
"std": np.std,
"l2norm": np.linalg.norm,
"dist_mean": dist_mean,
}
if projection in projection_funcs.keys():
X = projection_funcs[projection](X, axis=1).reshape((X.shape[0], 1))
if "knn_distance_" in projection:
n_neighbors = int(projection.split("_")[2])
if (
self.distance_matrix
): # We use the distance matrix for finding neighbors
X = np.sum(np.sort(X, axis=1)[:, :n_neighbors], axis=1).reshape(
(X.shape[0], 1)
)
else:
from sklearn import neighbors
nn = neighbors.NearestNeighbors(n_neighbors=n_neighbors)
nn.fit(X)
X = np.sum(
nn.kneighbors(X, n_neighbors=n_neighbors, return_distance=True)[
0
],
axis=1,
).reshape((X.shape[0], 1))
# Detect if projection is a list (with dimension indices)
if isinstance(projection, list):
if self.verbose > 0:
print("\n..Projecting data using: %s" % (str(projection)))
X = X[:, np.array(projection)]
# If projection produced sparse output, turn into a dense array
if issparse(X):
X = X.toarray()
if self.verbose > 0:
print("\n..Created projection shaped %s" % (str(X.shape)))
# Scaling
if scaler is not None:
if self.verbose > 0:
print("\n..Scaling with: %s\n" % str(scaler))
X = scaler.fit_transform(X)
return X
def fit_transform(
self,
X,
projection="sum",
scaler=preprocessing.MinMaxScaler(),
distance_matrix=False,
):
"""Same as .project() but accepts lists for arguments so you can chain.
"""
projections = projection
scalers = scaler
distance_matrices = distance_matrix
# Turn single projection arguments into a pipeline
if isinstance(projection, list) and isinstance(projection[0], int):
projections = [projection]
if not isinstance(projection, list):
projections = [projection]
# Turn single scaler arguments into a pipeline
if not isinstance(scaler, list):
scalers = [scaler]
# Turn single distance matrix arguments into a pipeline
if not isinstance(distance_matrix, list):
distance_matrices = [distance_matrix]
# set defaults to first list item, if not (correctly) set by the user
if len(scalers) != len(projections):
scalers = [scalers[0]] * len(projections)
if len(distance_matrices) != len(projections):
distance_matrices = [distance_matrices[0]] * len(projections)
if self.verbose > 0:
print("..Composing projection pipeline of length %s:" % (len(projections)))
print("\tProjections: %s" % ("\n\t\t".join(map(str, projections))))
print("\tDistance matrices: %s" % ("\n".join(map(str, distance_matrices))))
print("\tScalers: %s" % ("\n".join(map(str, scalers))))
# Pipeline Stack the projection functions
lens = X
for projection, scaler, distance_matrix in zip(
projections, scalers, distance_matrices
):
lens = self.project(
lens,
projection=projection,
scaler=scaler,
distance_matrix=distance_matrix,
)
return lens
def map(
self,
lens,
X=None,
clusterer=cluster.DBSCAN(eps=0.5, min_samples=3),
cover=Cover(n_cubes=10, perc_overlap=0.1),
nerve=GraphNerve(),
precomputed=False,
remove_duplicate_nodes=False,
# These arguments are all deprecated
overlap_perc=None,
nr_cubes=None
):
"""Apply Mapper algorithm on this projection and build a simplicial complex. Returns a dictionary with nodes and links.
Parameters
----------
lens: Numpy Array
Lower dimensional representation of data. In general will be output of `fit_transform`.
X: Numpy Array
Original data or data to run clustering on. If `None`, then use `lens` as default. X can be a SciPy sparse matrix.
clusterer: Default: DBSCAN
Scikit-learn API compatible clustering algorithm. Must provide `fit` and `predict`.
cover: kmapper.Cover
Cover scheme for lens. Instance of kmapper.cover providing methods `fit` and `transform`.
nerve: kmapper.Nerve
Nerve builder implementing `__call__(nodes)` API
precomputed : Boolean
Tell Mapper whether the data that you are clustering on is a precomputed distance matrix. If set to
`True`, the assumption is that you are also telling your `clusterer` that `metric='precomputed'` (which
is an argument for DBSCAN among others), which
will then cause the clusterer to expect a square distance matrix for each hypercube. `precomputed=True` will give a square matrix
to the clusterer to fit on for each hypercube.
remove_duplicate_nodes: Boolean
Removes duplicate nodes before edges are determined. A node is considered to be duplicate
if it has exactly the same set of points as another node.
nr_cubes: Int
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The number of intervals/hypercubes to create. Default = 10.
overlap_perc: Float
.. deprecated:: 1.1.6
define Cover explicitly in future versions
The percentage of overlap "between" the intervals/hypercubes. Default = 0.1.
Returns
=======
simplicial_complex : dict
A dictionary with "nodes", "links" and "meta" information.
Examples
========
>>> # Default mapping.
>>> graph = mapper.map(X_projected, X_inverse)
>>> # Apply clustering on the projection instead of on inverse X
>>> graph = mapper.map(X_projected)
>>> # Use 20 cubes/intervals per projection dimension, with a 50% overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=kmapper.Cover(n_cubes=20, perc_overlap=0.5))
>>> # Use multiple different cubes/intervals per projection dimension,
>>> # And vary the overlap
>>> graph = mapper.map(X_projected, X_inverse,
>>> cover=km.Cover(n_cubes=[10,20,5],
>>> perc_overlap=[0.1,0.2,0.5]))
>>> # Use KMeans with 2 clusters
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.KMeans(2))
>>> # Use DBSCAN with "cosine"-distance
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=sklearn.cluster.DBSCAN(metric="cosine"))
>>> # Use HDBSCAN as the clusterer
>>> graph = mapper.map(X_projected, X_inverse,
>>> clusterer=hdbscan.HDBSCAN())
>>> # Parametrize the nerve of the covering
>>> graph = mapper.map(X_projected, X_inverse,
>>> nerve=km.GraphNerve(min_intersection=3))
"""
start = datetime.now()
nodes = defaultdict(list)
meta = defaultdict(list)
graph = {}
# If inverse image is not provided, we use the projection as the inverse image (suffer projection loss)
if X is None:
X = lens
# Deprecation warnings
if nr_cubes is not None or overlap_perc is not None:
warnings.warn(
"Deprecation Warning: Please supply km.Cover object. Explicitly passing in n_cubes/nr_cubes and overlap_perc will be deprecated in future releases. ",
DeprecationWarning,
)
# If user supplied nr_cubes, overlap_perc, or coverer, opt for those
# TODO: remove this conditional after release in 1.2
if nr_cubes is not None or overlap_perc is not None:
n_cubes = nr_cubes if nr_cubes else 10
overlap_perc = overlap_perc if overlap_perc else 0.1
self.cover = Cover(n_cubes=n_cubes, perc_overlap=overlap_perc)
else:
self.cover = cover
if self.verbose > 0:
print(
"Mapping on data shaped %s using lens shaped %s\n"
% (str(X.shape), str(lens.shape))
)
# Prefix'ing the data with an ID column
ids = np.array([x for x in range(lens.shape[0])])
lens = np.c_[ids, lens]
if issparse(X):
X = hstack([ids[np.newaxis].T, X], format='csr')
else:
X = np.c_[ids, X]
# Cover scheme defines a list of elements
bins = self.cover.fit(lens)
# Algo's like K-Means, have a set number of clusters. We need this number
# to adjust for the minimal number of samples inside an interval before
# we consider clustering or skipping it.
cluster_params = clusterer.get_params()
min_cluster_samples = cluster_params.get(
"n_clusters",
cluster_params.get(
"min_cluster_size", cluster_params.get("min_samples", 1)
),
)
if self.verbose > 1:
print(
"Minimal points in hypercube before clustering: %d"
% (min_cluster_samples)
)
# Subdivide the projected data X in intervals/hypercubes with overlap
if self.verbose > 0:
bins = list(bins) # extract list from generator
total_bins = len(bins)
print("Creating %s hypercubes." % total_bins)
for i, hypercube in enumerate(self.cover.transform(lens)):
# If at least min_cluster_samples samples inside the hypercube
if hypercube.shape[0] >= min_cluster_samples:
# Cluster the data point(s) in the cube, skipping the id-column
# Note that we apply clustering on the inverse image (original data samples) that fall inside the cube.
ids = [int(nn) for nn in hypercube[:, 0]]
X_cube = X[ids]
fit_data = X_cube[:, 1:]
if precomputed:
fit_data = fit_data[:, ids]
cluster_predictions = clusterer.fit_predict(fit_data)
if self.verbose > 1:
print(
" > Found %s clusters in hypercube %s."
% (
np.unique(
cluster_predictions[cluster_predictions > -1]
).shape[0], i
)
)
for pred in np.unique(cluster_predictions):
# if not predicted as noise
if pred != -1 and not np.isnan(pred):
cluster_id = "cube{}_cluster{}".format(i, int(pred))
nodes[cluster_id] = hypercube[:, 0][cluster_predictions == pred].astype(int).tolist()
elif self.verbose > 1:
print("Cube_%s is empty.\n" % (i))
if remove_duplicate_nodes:
nodes = self._remove_duplicate_nodes(nodes)
links, simplices = nerve.compute(nodes)
graph["nodes"] = nodes
graph["links"] = links
graph["simplices"] = simplices
graph["meta_data"] = {
"projection": self.projection if self.projection else "custom",
"n_cubes": self.cover.n_cubes,
"perc_overlap": self.cover.perc_overlap,
"clusterer": str(clusterer),
"scaler": str(self.scaler),
}
graph["meta_nodes"] = meta
if self.verbose > 0:
self._summary(graph, str(datetime.now() - start))
return graph
def _remove_duplicate_nodes(self, nodes):
# invert node list and merge duplicate nodes
deduped_items = defaultdict(list)
for node_id, items in nodes.items():
deduped_items[frozenset(items)].append(node_id)
deduped_nodes = {
"|".join(node_id_list): list(frozen_items)
for frozen_items, node_id_list in deduped_items.items()
}
if self.verbose > 0:
total_merged = len(nodes) - len(deduped_items)
if total_merged:
print("Merged {} duplicate nodes.\n".format(total_merged))
print(
"Number of nodes before merger: {}; after merger: {}\n".format(
len(nodes), len(deduped_nodes)
)
)
else:
print("No duplicate nodes found to remove.\n")
return deduped_nodes
def _summary(self, graph, time):
# TODO: this summary is dependant on the type of Nerve being built.
links = graph["links"]
nodes = graph["nodes"]
nr_links = sum(len(v) for k, v in links.items())
print("\nCreated %s edges and %s nodes in %s." % (nr_links, len(nodes), time))
def visualize(
self,
graph,
color_function=None,
custom_tooltips=None,
custom_meta=None,
path_html="mapper_visualization_output.html",
title="Kepler Mapper",
save_file=True,
X=None,
X_names=None,
lens=None,
lens_names=None,
show_tooltips=True,
nbins=10,
):
"""Generate a visualization of the simplicial complex mapper output. Turns the complex dictionary into a HTML/D3.js visualization
Parameters
----------
graph : dict
Simplicial complex output from the `map` method.
color_function : list or 1d array
A 1d vector with length equal to number of data points used to build Mapper. Each value should correspond to a value for each data point and color of node is computed as the average value for members in a node.
path_html : String
file name for outputing the resulting html.
custom_meta: dict
Render (key, value) in the Mapper Summary pane.
custom_tooltip: list or array like
Value to display for each entry in the node. The cluster data pane will display entry for all values in the node. Default is index of data.
save_file: bool, default is True
Save file to `path_html`.
X: numpy arraylike
If supplied, compute statistics information about the original data source with respect to each node.
X_names: list of strings
Names of each variable in `X` to be displayed. If None, then display names by index.
lens: numpy arraylike
If supplied, compute statistics of each node based on the projection/lens
lens_name: list of strings
Names of each variable in `lens` to be displayed. In None, then display names by index.
show_tooltips: bool, default is True.
If false, completely disable tooltips. This is useful when using output in space-tight pages or will display node data in custom ways.
nbins: int, default is 10
Number of bins shown in histogram of tooltip color distributions.
Returns
--------
html: string
Returns the same html that is normally output to `path_html`. Complete graph and data ready for viewing.
Examples
---------
>>> # Basic creation of a `.html` file at `kepler-mapper-output.html`
>>> html = mapper.visualize(graph, path_html="kepler-mapper-output.html")
>>> # Jupyter Notebook support
>>> from kmapper import jupyter
>>> html = mapper.visualize(graph, path_html="kepler-mapper-output.html")
>>> jupyter.display(path_html="kepler-mapper-output.html")
>>> # Customizing the output text
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_meta={"Description":"A short description.",
>>> "Cluster": "HBSCAN()"}
>>> )
>>> # Custom coloring function based on your 1d lens
>>> html = mapper.visualize(
>>> graph,
>>> color_function=lens
>>> )
>>> # Custom coloring function based on the first variable
>>> cf = mapper.project(X, projection=[0])
>>> html = mapper.visualize(
>>> graph,
>>> color_function=cf
>>> )
>>> # Customizing the tooltips with binary target variables
>>> X, y = split_data(df)
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_tooltips=y
>>> )
>>> # Customizing the tooltips with html-strings: locally stored images of an image dataset
>>> html = mapper.visualize(
>>> graph,
>>> path_html="kepler-mapper-output.html",
>>> title="Fashion MNIST with UMAP",
>>> custom_tooltips=np.array(
>>> ["<img src='img/%s.jpg'>"%i for i in range(inverse_X.shape[0])]
>>> )
>>> )
"""
# TODO:
# - Make color functions more intuitive. How do they even work?
# - Allow multiple color functions that can be toggled on and off.
if not len(graph["nodes"]) > 0:
raise Exception(
"Visualize requires a mapper with more than 0 nodes. \nIt is possible that the constructed mapper could have been constructed with bad parameters. This can occasionally happens when using the default clustering algorithm. Try changing `eps` or `min_samples` in the DBSCAN clustering algorithm."
)
# Find the module absolute path and locate templates
module_root = os.path.join(os.path.dirname(__file__), "templates")
env = Environment(loader=FileSystemLoader(module_root))
# Color function is a vector of colors?
color_function = init_color_function(graph, color_function)
if X_names is None:
X_names = []
if lens_names is None:
lens_names = []
mapper_data = format_mapper_data(
graph,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
env,
nbins,
)
colorscale = colorscale_default
histogram = graph_data_distribution(graph, color_function, colorscale)
mapper_summary = format_meta(graph, custom_meta)
# Find the absolute module path and the static files
js_path = os.path.join(os.path.dirname(__file__), "static", "kmapper.js")
with open(js_path, "r") as f:
js_text = f.read()
css_path = os.path.join(os.path.dirname(__file__), "static", "style.css")
with open(css_path, "r") as f:
css_text = f.read()
# Render the Jinja template, filling fields as appropriate
template = env.get_template("base.html").render(
title=title,
mapper_summary=mapper_summary,
histogram=histogram,
dist_label="Node",
mapper_data=mapper_data,
colorscale=colorscale,
js_text=js_text,
css_text=css_text,
show_tooltips=True,
)
if save_file:
with open(path_html, "wb") as outfile:
if self.verbose > 0:
print("Wrote visualization to: %s" % (path_html))
outfile.write(template.encode("utf-8"))
return template
def _process_projection_tuple(self, projection):
# Detect if projection is a tuple (for prediction functions)
# TODO: multi-label models
# TODO: infer binary classification and select positive class preds
# TODO: turn into smaller functions for better tests and complexity
# TODO: this seems like outside the purview of mapper. Can we add something like Mapper utils that can do this?
def blend(X_blend, pred_fun, folder, X_data, y):
for train_index, test_index in folder.split(X_data, y):
fold_X_train = X_data[train_index]
fold_y_train = y[train_index]
fold_X_test = X_data[test_index]
fold_y_test = y[test_index]
model.fit(fold_X_train, fold_y_train)
fold_preds = pred_fun(fold_X_test)
X_blend[test_index] = fold_preds
return X_blend
# If projection was passed without ground truth
# assume we are predicting a fitted model on a test set
if len(projection) == 2:
model, X_data = projection
# Are we dealing with a classifier or a regressor?
estimator_type = getattr(model, "_estimator_type", None)
if estimator_type == "classifier":
# classifier probabilities
X_blend = model.predict_proba(X_data)
elif estimator_type == "regressor":
X_blend = model.predict(X_data)
else:
warnings.warn("Unknown estimator type for: %s" % (model))
# If projection is passed with ground truth do 5-fold stratified
# cross-validation, saving the out-of-fold predictions.
# this is called "Stacked Generalization" (see: Wolpert 1992)
elif len(projection) == 3:
model, X_data, y = projection
estimator_type = getattr(model, "_estimator_type", None)
if estimator_type == "classifier":
X_blend = np.zeros((X_data.shape[0], np.unique(y).shape[0]))
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1729)
blend(X_blend, model.predict_proba, skf, X_data, y)
elif estimator_type == "regressor":
X_blend = np.zeros(X_data.shape[0])
kf = KFold(n_splits=5, shuffle=True, random_state=1729)
blend(X_blend, model.predict, kf, X_data, y)
else:
warnings.warn("Unknown estimator type for: %s" % (model))
else:
# Warn for malformed input and provide help to avoid it.
warnings.warn(
"Passing a model function should be"
+ "(model, X) or (model, X, y)."
+ "Instead got %s" % (str(projection))
)
# Reshape 1-D arrays (regressor outputs) to 2-D arrays
if X_blend.ndim == 1:
X_blend = X_blend.reshape((X_blend.shape[0], 1))
X = X_blend
return X
|
scikit-tda/kepler-mapper | kmapper/jupyter.py | display | python | def display(path_html="mapper_visualization_output.html"):
iframe = (
"<iframe src=" + path_html + ' width=100%% height=800 frameBorder="0"></iframe>'
)
IPython.core.display.display(IPython.core.display.HTML(iframe)) | Displays a html file inside a Jupyter Notebook output cell.
.. note::
Must run ``KeplerMapper.visualize`` first to generate html. This function will then render that output from a file saved to disk.
.. note::
Thanks to `smartinsightsfromdata <https://github.com/smartinsightsfromdata>`_ for the `github issue 10 <https://github.com/MLWave/kepler-mapper/issues/10>`_ that suggested this method.
Parameters
============
path_html : str
Path to html. Use file name for file inside current working
directory. Use ``file://`` browser url-format for path to local file.
Use ``https://`` urls for externally hosted resources.
Examples
=========
::
import numpy as np
import kmapper as km
from kmapper.jupyter import display
data = np.random.random((2000, 2))
mapper = km.KeplerMapper()
lens = km.project(data)
graph = km.map(lens, data)
_ = km.visualize(graph, path_html="filename.html")
display("filename.html")
The default filename is the same default as the ``.visualize`` method, so using both without arguments will show the last constructed graph:
>>> _ = km.visualize(graph)
>>> display() | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/jupyter.py#L11-L61 | null | import IPython
# Here we set the custom CSS to override Jupyter's default
CUSTOM_CSS = """<style>
.container { width:100% !important; }
.output_scroll {height: 800px !important;}
</style>"""
IPython.core.display.display(IPython.core.display.HTML(CUSTOM_CSS))
def display(path_html="mapper_visualization_output.html"):
""" Displays a html file inside a Jupyter Notebook output cell.
.. note::
Must run ``KeplerMapper.visualize`` first to generate html. This function will then render that output from a file saved to disk.
.. note::
Thanks to `smartinsightsfromdata <https://github.com/smartinsightsfromdata>`_ for the `github issue 10 <https://github.com/MLWave/kepler-mapper/issues/10>`_ that suggested this method.
Parameters
============
path_html : str
Path to html. Use file name for file inside current working
directory. Use ``file://`` browser url-format for path to local file.
Use ``https://`` urls for externally hosted resources.
Examples
=========
::
import numpy as np
import kmapper as km
from kmapper.jupyter import display
data = np.random.random((2000, 2))
mapper = km.KeplerMapper()
lens = km.project(data)
graph = km.map(lens, data)
_ = km.visualize(graph, path_html="filename.html")
display("filename.html")
The default filename is the same default as the ``.visualize`` method, so using both without arguments will show the last constructed graph:
>>> _ = km.visualize(graph)
>>> display()
"""
iframe = (
"<iframe src=" + path_html + ' width=100%% height=800 frameBorder="0"></iframe>'
)
IPython.core.display.display(IPython.core.display.HTML(iframe))
|
scikit-tda/kepler-mapper | kmapper/visuals.py | _colors_to_rgb | python | def _colors_to_rgb(colorscale):
if colorscale[0][1][0] == "#":
plotly_colors = np.array(colorscale)[:, 1].tolist()
for k, hexcode in enumerate(plotly_colors):
hexcode = hexcode.lstrip("#")
hex_len = len(hexcode)
step = hex_len // 3
colorscale[k][1] = "rgb" + str(
tuple(int(hexcode[j : j + step], 16) for j in range(0, hex_len, step))
)
return colorscale | Ensure that the color scale is formatted in rgb strings.
If the colorscale is a hex string, then convert to rgb. | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/visuals.py#L60-L74 | null | # A small helper class to house functions needed by KeplerMapper.visualize
import numpy as np
from sklearn import preprocessing
import json
from collections import defaultdict
from ast import literal_eval
colorscale_default = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
palette = [
"#0500ff",
"#0300ff",
"#0100ff",
"#0002ff",
"#0022ff",
"#0044ff",
"#0064ff",
"#0084ff",
"#00a4ff",
"#00a4ff",
"#00c4ff",
"#00e4ff",
"#00ffd0",
"#00ff83",
"#00ff36",
"#17ff00",
"#65ff00",
"#b0ff00",
"#fdff00",
"#FFf000",
"#FFdc00",
"#FFc800",
"#FFb400",
"#FFa000",
"#FF8c00",
"#FF7800",
"#FF6400",
"#FF5000",
"#FF3c00",
"#FF2800",
"#FF1400",
"#FF0000",
]
def _to_html_format(st):
return st.replace("\n", "<br>")
def _map_val2color(val, vmin, vmax, colorscale=None):
""" Maps a value val in [vmin, vmax] to the corresponding color in
the colorscale
returns the rgb color code of that color
"""
colorscale = colorscale or colorscale_default
if vmin >= vmax:
raise ValueError("vmin should be < vmax")
scale = list(map(float, np.array(colorscale)[:, 0]))
colors = np.array(colorscale)[:, 1]
colors_01 = (
np.array(list(map(literal_eval, [color[3:] for color in colors]))) / 255.0
)
v = (val - vmin) / float((vmax - vmin)) # val is mapped to v in[0,1]
idx = 0
# sequential search for the two consecutive indices idx, idx+1 such that
# v belongs to the interval [scale[idx], scale[idx+1]
while v > scale[idx + 1]:
idx += 1
left_scale_val = scale[idx]
right_scale_val = scale[idx + 1]
vv = (v - left_scale_val) / (right_scale_val - left_scale_val)
# get the triplet of three values in [0,1] that represent the rgb color
# corresponding to val
val_color01 = colors_01[idx] + vv * (colors_01[idx + 1] - colors_01[idx])
val_color_0255 = list(map(np.uint8, 255 * val_color01))
return "rgb" + str(tuple(val_color_0255))
def init_color_function(graph, color_function=None):
# If no color_function provided we color by row order in data set
# Reshaping to 2-D array is required for sklearn 0.19
n_samples = np.max([i for s in graph["nodes"].values() for i in s]) + 1
if color_function is None:
color_function = np.arange(n_samples).reshape(-1, 1)
else:
color_function = color_function.reshape(-1, 1)
color_function = color_function.astype(np.float64)
# MinMax Scaling to be friendly to non-scaled input.
scaler = preprocessing.MinMaxScaler()
color_function = scaler.fit_transform(color_function).ravel()
# "Scaler might have floating point issues, 1.0000...0002". Force max and min
color_function[color_function > 1] = 1
color_function[color_function < 0] = 0
return color_function
def format_meta(graph, custom_meta=None, color_function_name=None):
n = [l for l in graph["nodes"].values()]
n_unique = len(set([i for s in n for i in s]))
if custom_meta is None:
custom_meta = graph["meta_data"]
if "clusterer" in custom_meta.keys():
clusterer = custom_meta["clusterer"]
custom_meta["clusterer"] = _to_html_format(clusterer)
if "projection" in custom_meta.keys():
projection = custom_meta["projection"]
custom_meta["projection"] = _to_html_format(projection)
if color_function_name is not None:
custom_meta["color_function"] = color_function_name
mapper_summary = {
"custom_meta": custom_meta,
"n_nodes": len(graph["nodes"]),
"n_edges": sum([len(l) for l in graph["links"].values()]),
"n_total": sum([len(l) for l in graph["nodes"].values()]),
"n_unique": n_unique,
}
return mapper_summary
def format_mapper_data(
graph, color_function, X, X_names, lens, lens_names, custom_tooltips, env, nbins=10
):
# import pdb; pdb.set_trace()
json_dict = {"nodes": [], "links": []}
node_id_to_num = {}
for i, (node_id, member_ids) in enumerate(graph["nodes"].items()):
node_id_to_num[node_id] = i
c = _color_function(member_ids, color_function)
t = _type_node()
s = _size_node(member_ids)
tt = _format_tooltip(
env,
member_ids,
custom_tooltips,
X,
X_names,
lens,
lens_names,
color_function,
node_id,
nbins,
)
n = {
"id": "",
"name": node_id,
"color": c,
"type": _type_node(),
"size": s,
"tooltip": tt,
}
json_dict["nodes"].append(n)
for i, (node_id, linked_node_ids) in enumerate(graph["links"].items()):
for linked_node_id in linked_node_ids:
json_dict["links"].append(
{
"source": node_id_to_num[node_id],
"target": node_id_to_num[linked_node_id],
"width": _size_link_width(graph, node_id, linked_node_id),
}
)
return json_dict
def build_histogram(data, colorscale=None, nbins=10):
""" Build histogram of data based on values of color_function
"""
if colorscale is None:
colorscale = colorscale_default
# TODO: we should weave this method of handling colors into the normal build_histogram and combine both functions
colorscale = _colors_to_rgb(colorscale)
h_min, h_max = 0, 1
hist, bin_edges = np.histogram(data, range=(h_min, h_max), bins=nbins)
bin_mids = np.mean(np.array(list(zip(bin_edges, bin_edges[1:]))), axis=1)
histogram = []
max_bucket_value = max(hist)
sum_bucket_value = sum(hist)
for bar, mid in zip(hist, bin_mids):
height = np.floor(((bar / max_bucket_value) * 100) + 0.5)
perc = round((bar / sum_bucket_value) * 100.0, 1)
color = _map_val2color(mid, 0.0, 1.0, colorscale)
histogram.append({"height": height, "perc": perc, "color": color})
return histogram
def graph_data_distribution(graph, color_function, colorscale, nbins=10):
node_averages = []
for node_id, member_ids in graph["nodes"].items():
member_colors = color_function[member_ids]
node_averages.append(np.mean(member_colors))
histogram = build_histogram(node_averages, colorscale=colorscale, nbins=nbins)
return histogram
def _format_cluster_statistics(member_ids, X, X_names):
# TODO: Cache X_mean and X_std for all clusters.
# TODO: replace long tuples with named tuples.
# TODO: Name all the single letter variables.
# TODO: remove duplication between above_stats and below_stats
# TODO: Should we only show variables that are much above or below the mean?
cluster_data = {"above": [], "below": [], "size": len(member_ids)}
cluster_stats = ""
if X is not None:
# List vs. numpy handling: cast to numpy array
if isinstance(X_names, list):
X_names = np.array(X_names)
# Defaults when providing no X_names
if X_names.shape[0] == 0:
X_names = np.array(["f_%s" % (i) for i in range(X.shape[1])])
cluster_X_mean = np.mean(X[member_ids], axis=0)
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
above_mean = cluster_X_mean > X_mean
std_m = np.sqrt((cluster_X_mean - X_mean) ** 2) / X_std
stat_zip = list(
zip(
std_m,
X_names,
np.mean(X, axis=0),
cluster_X_mean,
above_mean,
np.std(X, axis=0),
)
)
stats = sorted(stat_zip, reverse=True)
above_stats = [a for a in stats if bool(a[4]) is True]
below_stats = [a for a in stats if bool(a[4]) is False]
if len(above_stats) > 0:
for s, f, i, c, a, v in above_stats[:5]:
cluster_data["above"].append(
{"feature": f, "mean": round(c, 3), "std": round(s, 1)}
)
if len(below_stats) > 0:
for s, f, i, c, a, v in below_stats[:5]:
cluster_data["below"].append(
{"feature": f, "mean": round(c, 3), "std": round(s, 1)}
)
return cluster_data
def _format_projection_statistics(member_ids, lens, lens_names):
projection_data = []
if lens is not None:
if isinstance(lens_names, list):
lens_names = np.array(lens_names)
# Create defaults when providing no lens_names
if lens_names.shape[0] == 0:
lens_names = np.array(["p_%s" % (i) for i in range(lens.shape[1])])
means_v = np.mean(lens[member_ids], axis=0)
maxs_v = np.max(lens[member_ids], axis=0)
mins_v = np.min(lens[member_ids], axis=0)
for name, mean_v, max_v, min_v in zip(lens_names, means_v, maxs_v, mins_v):
projection_data.append(
{
"name": name,
"mean": round(mean_v, 3),
"max": round(max_v, 3),
"min": round(min_v, 3),
}
)
return projection_data
def _tooltip_components(
member_ids,
X,
X_names,
lens,
lens_names,
color_function,
node_ID,
colorscale,
nbins=10,
):
projection_stats = _format_projection_statistics(member_ids, lens, lens_names)
cluster_stats = _format_cluster_statistics(member_ids, X, X_names)
member_histogram = build_histogram(
color_function[member_ids], colorscale=colorscale, nbins=nbins
)
return projection_stats, cluster_stats, member_histogram
def _format_tooltip(
env,
member_ids,
custom_tooltips,
X,
X_names,
lens,
lens_names,
color_function,
node_ID,
nbins,
):
# TODO: Allow customization in the form of aggregate per node and per entry in node.
# TODO: Allow users to turn off tooltip completely.
custom_tooltips = (
custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
)
# list will render better than numpy arrays
custom_tooltips = list(custom_tooltips)
colorscale = colorscale_default
projection_stats, cluster_stats, histogram = _tooltip_components(
member_ids,
X,
X_names,
lens,
lens_names,
color_function,
node_ID,
colorscale,
nbins,
)
tooltip = env.get_template("cluster_tooltip.html").render(
projection_stats=projection_stats,
cluster_stats=cluster_stats,
custom_tooltips=custom_tooltips,
histogram=histogram,
dist_label="Member",
node_id=node_ID,
)
return tooltip
def _color_function(member_ids, color_function):
return np.mean(color_function[member_ids])
def _size_node(member_ids):
return int(np.log(len(member_ids) + 1) + 1)
def _type_node():
return "circle"
def _size_link_width(graph, node_id, linked_node_id):
return 1
|
scikit-tda/kepler-mapper | kmapper/visuals.py | _map_val2color | python | def _map_val2color(val, vmin, vmax, colorscale=None):
colorscale = colorscale or colorscale_default
if vmin >= vmax:
raise ValueError("vmin should be < vmax")
scale = list(map(float, np.array(colorscale)[:, 0]))
colors = np.array(colorscale)[:, 1]
colors_01 = (
np.array(list(map(literal_eval, [color[3:] for color in colors]))) / 255.0
)
v = (val - vmin) / float((vmax - vmin)) # val is mapped to v in[0,1]
idx = 0
# sequential search for the two consecutive indices idx, idx+1 such that
# v belongs to the interval [scale[idx], scale[idx+1]
while v > scale[idx + 1]:
idx += 1
left_scale_val = scale[idx]
right_scale_val = scale[idx + 1]
vv = (v - left_scale_val) / (right_scale_val - left_scale_val)
# get the triplet of three values in [0,1] that represent the rgb color
# corresponding to val
val_color01 = colors_01[idx] + vv * (colors_01[idx + 1] - colors_01[idx])
val_color_0255 = list(map(np.uint8, 255 * val_color01))
return "rgb" + str(tuple(val_color_0255)) | Maps a value val in [vmin, vmax] to the corresponding color in
the colorscale
returns the rgb color code of that color | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/visuals.py#L81-L114 | null | # A small helper class to house functions needed by KeplerMapper.visualize
import numpy as np
from sklearn import preprocessing
import json
from collections import defaultdict
from ast import literal_eval
colorscale_default = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
palette = [
"#0500ff",
"#0300ff",
"#0100ff",
"#0002ff",
"#0022ff",
"#0044ff",
"#0064ff",
"#0084ff",
"#00a4ff",
"#00a4ff",
"#00c4ff",
"#00e4ff",
"#00ffd0",
"#00ff83",
"#00ff36",
"#17ff00",
"#65ff00",
"#b0ff00",
"#fdff00",
"#FFf000",
"#FFdc00",
"#FFc800",
"#FFb400",
"#FFa000",
"#FF8c00",
"#FF7800",
"#FF6400",
"#FF5000",
"#FF3c00",
"#FF2800",
"#FF1400",
"#FF0000",
]
def _colors_to_rgb(colorscale):
""" Ensure that the color scale is formatted in rgb strings.
If the colorscale is a hex string, then convert to rgb.
"""
if colorscale[0][1][0] == "#":
plotly_colors = np.array(colorscale)[:, 1].tolist()
for k, hexcode in enumerate(plotly_colors):
hexcode = hexcode.lstrip("#")
hex_len = len(hexcode)
step = hex_len // 3
colorscale[k][1] = "rgb" + str(
tuple(int(hexcode[j : j + step], 16) for j in range(0, hex_len, step))
)
return colorscale
def _to_html_format(st):
return st.replace("\n", "<br>")
def init_color_function(graph, color_function=None):
    """Return a per-sample color vector scaled into [0, 1].

    When no ``color_function`` is given, samples are colored by their row
    order in the data set. sklearn's MinMaxScaler performs the scaling
    (2-D input is required for sklearn >= 0.19) and the result is clamped
    to guard against floating point spill just outside [0, 1].
    """
    member_lists = graph["nodes"].values()
    n_samples = np.max([idx for members in member_lists for idx in members]) + 1
    if color_function is None:
        values = np.arange(n_samples).reshape(-1, 1)
    else:
        values = color_function.reshape(-1, 1)
    values = values.astype(np.float64)
    scaled = preprocessing.MinMaxScaler().fit_transform(values).ravel()
    # Clamp tiny floating point excursions (e.g. 1.0000000002) back into range.
    return np.clip(scaled, 0, 1)
def format_meta(graph, custom_meta=None, color_function_name=None):
    """Assemble the summary statistics shown in the visualization sidebar.

    Counts nodes, edges, total memberships and unique members. When no
    ``custom_meta`` is supplied, the graph's own ``meta_data`` is used,
    with the clusterer / projection reprs reformatted for HTML display.
    """
    memberships = list(graph["nodes"].values())
    unique_members = {member for members in memberships for member in members}
    if custom_meta is None:
        custom_meta = graph["meta_data"]
        if "clusterer" in custom_meta:
            custom_meta["clusterer"] = _to_html_format(custom_meta["clusterer"])
        if "projection" in custom_meta:
            custom_meta["projection"] = _to_html_format(custom_meta["projection"])
    if color_function_name is not None:
        custom_meta["color_function"] = color_function_name
    return {
        "custom_meta": custom_meta,
        "n_nodes": len(graph["nodes"]),
        "n_edges": sum(len(links) for links in graph["links"].values()),
        "n_total": sum(len(members) for members in memberships),
        "n_unique": len(unique_members),
    }
def format_mapper_data(
    graph, color_function, X, X_names, lens, lens_names, custom_tooltips, env, nbins=10
):
    """Convert a Mapper graph into the JSON-ready dict consumed by the D3 template.

    Each node gets a mean color value, a log-scaled size and a rendered HTML
    tooltip; links are re-expressed through numeric node indices.
    """
    mapper_data = {"nodes": [], "links": []}
    # Node names are arbitrary strings; D3 links reference numeric indices.
    index_of = {}
    for index, (node_id, member_ids) in enumerate(graph["nodes"].items()):
        index_of[node_id] = index
        tooltip_html = _format_tooltip(
            env,
            member_ids,
            custom_tooltips,
            X,
            X_names,
            lens,
            lens_names,
            color_function,
            node_id,
            nbins,
        )
        mapper_data["nodes"].append(
            {
                "id": "",
                "name": node_id,
                "color": _color_function(member_ids, color_function),
                "type": _type_node(),
                "size": _size_node(member_ids),
                "tooltip": tooltip_html,
            }
        )
    for node_id, linked_node_ids in graph["links"].items():
        for target_id in linked_node_ids:
            mapper_data["links"].append(
                {
                    "source": index_of[node_id],
                    "target": index_of[target_id],
                    "width": _size_link_width(graph, node_id, target_id),
                }
            )
    return mapper_data
def build_histogram(data, colorscale=None, nbins=10):
    """Bucket ``data`` (values in [0, 1]) into ``nbins`` colored histogram bars.

    Each bar carries its height as a percentage of the tallest bucket, its
    share of all samples, and the colorscale color at the bucket midpoint.
    """
    if colorscale is None:
        colorscale = colorscale_default
    # TODO: we should weave this method of handling colors into the normal build_histogram and combine both functions
    colorscale = _colors_to_rgb(colorscale)
    counts, edges = np.histogram(data, range=(0, 1), bins=nbins)
    midpoints = (edges[:-1] + edges[1:]) / 2.0
    tallest = max(counts)
    total = sum(counts)
    bars = []
    for count, mid in zip(counts, midpoints):
        bars.append(
            {
                "height": np.floor(((count / tallest) * 100) + 0.5),
                "perc": round((count / total) * 100.0, 1),
                "color": _map_val2color(mid, 0.0, 1.0, colorscale),
            }
        )
    return bars
def graph_data_distribution(graph, color_function, colorscale, nbins=10):
    """Histogram of the mean color value per node, across the whole graph."""
    per_node_means = [
        np.mean(color_function[member_ids])
        for member_ids in graph["nodes"].values()
    ]
    return build_histogram(per_node_means, colorscale=colorscale, nbins=nbins)
def _format_cluster_statistics(member_ids, X, X_names):
    """Summarize how this cluster's feature means deviate from the full data set.

    Parameters
    ----------
    member_ids : sequence of int
        Row indices of the cluster's members in ``X``.
    X : ndarray or None
        Inverse-image data; when ``None`` only the cluster size is reported.
    X_names : list or ndarray of str
        Feature names; when empty, ``f_0, f_1, ...`` defaults are generated.

    Returns
    -------
    dict
        ``{"above": [...], "below": [...], "size": n}`` where "above"/"below"
        hold up to five features whose cluster mean lies above/below the
        global mean, ranked by deviation in units of the feature's std.
    """
    # TODO: Cache X_mean and X_std for all clusters.
    # TODO: replace long tuples with named tuples.
    # TODO: Name all the single letter variables.
    # TODO: remove duplication between above_stats and below_stats
    # TODO: Should we only show variables that are much above or below the mean?
    cluster_data = {"above": [], "below": [], "size": len(member_ids)}
    # NOTE(review): cluster_stats is assigned but never used below.
    cluster_stats = ""
    if X is not None:
        # List vs. numpy handling: cast to numpy array
        if isinstance(X_names, list):
            X_names = np.array(X_names)
        # Defaults when providing no X_names
        if X_names.shape[0] == 0:
            X_names = np.array(["f_%s" % (i) for i in range(X.shape[1])])
        cluster_X_mean = np.mean(X[member_ids], axis=0)
        X_mean = np.mean(X, axis=0)
        X_std = np.std(X, axis=0)
        above_mean = cluster_X_mean > X_mean
        # sqrt of a square == absolute deviation of the cluster mean,
        # expressed in units of the feature's global std.
        std_m = np.sqrt((cluster_X_mean - X_mean) ** 2) / X_std
        # Tuple layout: (std_m, name, global_mean, cluster_mean, above?, global_std)
        stat_zip = list(
            zip(
                std_m,
                X_names,
                np.mean(X, axis=0),
                cluster_X_mean,
                above_mean,
                np.std(X, axis=0),
            )
        )
        # Sorts primarily on std_m (descending); ties fall through to the
        # remaining tuple fields, including string comparison on the name.
        stats = sorted(stat_zip, reverse=True)
        above_stats = [a for a in stats if bool(a[4]) is True]
        below_stats = [a for a in stats if bool(a[4]) is False]
        # Report at most the five strongest deviations in each direction.
        if len(above_stats) > 0:
            for s, f, i, c, a, v in above_stats[:5]:
                cluster_data["above"].append(
                    {"feature": f, "mean": round(c, 3), "std": round(s, 1)}
                )
        if len(below_stats) > 0:
            for s, f, i, c, a, v in below_stats[:5]:
                cluster_data["below"].append(
                    {"feature": f, "mean": round(c, 3), "std": round(s, 1)}
                )
    return cluster_data
def _format_projection_statistics(member_ids, lens, lens_names):
projection_data = []
if lens is not None:
if isinstance(lens_names, list):
lens_names = np.array(lens_names)
# Create defaults when providing no lens_names
if lens_names.shape[0] == 0:
lens_names = np.array(["p_%s" % (i) for i in range(lens.shape[1])])
means_v = np.mean(lens[member_ids], axis=0)
maxs_v = np.max(lens[member_ids], axis=0)
mins_v = np.min(lens[member_ids], axis=0)
for name, mean_v, max_v, min_v in zip(lens_names, means_v, maxs_v, mins_v):
projection_data.append(
{
"name": name,
"mean": round(mean_v, 3),
"max": round(max_v, 3),
"min": round(min_v, 3),
}
)
return projection_data
def _tooltip_components(
    member_ids,
    X,
    X_names,
    lens,
    lens_names,
    color_function,
    node_ID,
    colorscale,
    nbins=10,
):
    """Compute the three data pieces a node tooltip is built from:
    projection statistics, cluster statistics and the member color histogram.
    """
    return (
        _format_projection_statistics(member_ids, lens, lens_names),
        _format_cluster_statistics(member_ids, X, X_names),
        build_histogram(
            color_function[member_ids], colorscale=colorscale, nbins=nbins
        ),
    )
def _format_tooltip(
    env,
    member_ids,
    custom_tooltips,
    X,
    X_names,
    lens,
    lens_names,
    color_function,
    node_ID,
    nbins,
):
    """Render the HTML tooltip for one node via the Jinja template.

    Falls back to raw member ids when no custom tooltips are given.
    """
    # TODO: Allow customization in the form of aggregate per node and per entry in node.
    # TODO: Allow users to turn off tooltip completely.
    if custom_tooltips is None:
        labels = member_ids
    else:
        labels = custom_tooltips[member_ids]
    # list will render better than numpy arrays
    labels = list(labels)
    projection_stats, cluster_stats, histogram = _tooltip_components(
        member_ids,
        X,
        X_names,
        lens,
        lens_names,
        color_function,
        node_ID,
        colorscale_default,
        nbins,
    )
    return env.get_template("cluster_tooltip.html").render(
        projection_stats=projection_stats,
        cluster_stats=cluster_stats,
        custom_tooltips=labels,
        histogram=histogram,
        dist_label="Member",
        node_id=node_ID,
    )
def _color_function(member_ids, color_function):
return np.mean(color_function[member_ids])
def _size_node(member_ids):
return int(np.log(len(member_ids) + 1) + 1)
def _type_node():
    # All nodes are rendered with the same shape in the front-end layout.
    return "circle"
def _size_link_width(graph, node_id, linked_node_id):
    # Constant edge width for now; the parameters are kept so a future
    # implementation can scale width by edge properties without changing callers.
    return 1
|
scikit-tda/kepler-mapper | kmapper/visuals.py | build_histogram | python | def build_histogram(data, colorscale=None, nbins=10):
if colorscale is None:
colorscale = colorscale_default
# TODO: we should weave this method of handling colors into the normal build_histogram and combine both functions
colorscale = _colors_to_rgb(colorscale)
h_min, h_max = 0, 1
hist, bin_edges = np.histogram(data, range=(h_min, h_max), bins=nbins)
bin_mids = np.mean(np.array(list(zip(bin_edges, bin_edges[1:]))), axis=1)
histogram = []
max_bucket_value = max(hist)
sum_bucket_value = sum(hist)
for bar, mid in zip(hist, bin_mids):
height = np.floor(((bar / max_bucket_value) * 100) + 0.5)
perc = round((bar / sum_bucket_value) * 100.0, 1)
color = _map_val2color(mid, 0.0, 1.0, colorscale)
histogram.append({"height": height, "perc": perc, "color": color})
return histogram | Build histogram of data based on values of color_function | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/visuals.py#L212-L236 | [
"def _map_val2color(val, vmin, vmax, colorscale=None):\n \"\"\" Maps a value val in [vmin, vmax] to the corresponding color in\n the colorscale\n returns the rgb color code of that color\n \"\"\"\n colorscale = colorscale or colorscale_default\n\n if vmin >= vmax:\n raise ValueError... | # A small helper class to house functions needed by KeplerMapper.visualize
import numpy as np
from sklearn import preprocessing
import json
from collections import defaultdict
from ast import literal_eval
colorscale_default = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
palette = [
"#0500ff",
"#0300ff",
"#0100ff",
"#0002ff",
"#0022ff",
"#0044ff",
"#0064ff",
"#0084ff",
"#00a4ff",
"#00a4ff",
"#00c4ff",
"#00e4ff",
"#00ffd0",
"#00ff83",
"#00ff36",
"#17ff00",
"#65ff00",
"#b0ff00",
"#fdff00",
"#FFf000",
"#FFdc00",
"#FFc800",
"#FFb400",
"#FFa000",
"#FF8c00",
"#FF7800",
"#FF6400",
"#FF5000",
"#FF3c00",
"#FF2800",
"#FF1400",
"#FF0000",
]
def _colors_to_rgb(colorscale):
""" Ensure that the color scale is formatted in rgb strings.
If the colorscale is a hex string, then convert to rgb.
"""
if colorscale[0][1][0] == "#":
plotly_colors = np.array(colorscale)[:, 1].tolist()
for k, hexcode in enumerate(plotly_colors):
hexcode = hexcode.lstrip("#")
hex_len = len(hexcode)
step = hex_len // 3
colorscale[k][1] = "rgb" + str(
tuple(int(hexcode[j : j + step], 16) for j in range(0, hex_len, step))
)
return colorscale
def _to_html_format(st):
return st.replace("\n", "<br>")
def _map_val2color(val, vmin, vmax, colorscale=None):
""" Maps a value val in [vmin, vmax] to the corresponding color in
the colorscale
returns the rgb color code of that color
"""
colorscale = colorscale or colorscale_default
if vmin >= vmax:
raise ValueError("vmin should be < vmax")
scale = list(map(float, np.array(colorscale)[:, 0]))
colors = np.array(colorscale)[:, 1]
colors_01 = (
np.array(list(map(literal_eval, [color[3:] for color in colors]))) / 255.0
)
v = (val - vmin) / float((vmax - vmin)) # val is mapped to v in[0,1]
idx = 0
# sequential search for the two consecutive indices idx, idx+1 such that
# v belongs to the interval [scale[idx], scale[idx+1]
while v > scale[idx + 1]:
idx += 1
left_scale_val = scale[idx]
right_scale_val = scale[idx + 1]
vv = (v - left_scale_val) / (right_scale_val - left_scale_val)
# get the triplet of three values in [0,1] that represent the rgb color
# corresponding to val
val_color01 = colors_01[idx] + vv * (colors_01[idx + 1] - colors_01[idx])
val_color_0255 = list(map(np.uint8, 255 * val_color01))
return "rgb" + str(tuple(val_color_0255))
def init_color_function(graph, color_function=None):
# If no color_function provided we color by row order in data set
# Reshaping to 2-D array is required for sklearn 0.19
n_samples = np.max([i for s in graph["nodes"].values() for i in s]) + 1
if color_function is None:
color_function = np.arange(n_samples).reshape(-1, 1)
else:
color_function = color_function.reshape(-1, 1)
color_function = color_function.astype(np.float64)
# MinMax Scaling to be friendly to non-scaled input.
scaler = preprocessing.MinMaxScaler()
color_function = scaler.fit_transform(color_function).ravel()
# "Scaler might have floating point issues, 1.0000...0002". Force max and min
color_function[color_function > 1] = 1
color_function[color_function < 0] = 0
return color_function
def format_meta(graph, custom_meta=None, color_function_name=None):
n = [l for l in graph["nodes"].values()]
n_unique = len(set([i for s in n for i in s]))
if custom_meta is None:
custom_meta = graph["meta_data"]
if "clusterer" in custom_meta.keys():
clusterer = custom_meta["clusterer"]
custom_meta["clusterer"] = _to_html_format(clusterer)
if "projection" in custom_meta.keys():
projection = custom_meta["projection"]
custom_meta["projection"] = _to_html_format(projection)
if color_function_name is not None:
custom_meta["color_function"] = color_function_name
mapper_summary = {
"custom_meta": custom_meta,
"n_nodes": len(graph["nodes"]),
"n_edges": sum([len(l) for l in graph["links"].values()]),
"n_total": sum([len(l) for l in graph["nodes"].values()]),
"n_unique": n_unique,
}
return mapper_summary
def format_mapper_data(
graph, color_function, X, X_names, lens, lens_names, custom_tooltips, env, nbins=10
):
# import pdb; pdb.set_trace()
json_dict = {"nodes": [], "links": []}
node_id_to_num = {}
for i, (node_id, member_ids) in enumerate(graph["nodes"].items()):
node_id_to_num[node_id] = i
c = _color_function(member_ids, color_function)
t = _type_node()
s = _size_node(member_ids)
tt = _format_tooltip(
env,
member_ids,
custom_tooltips,
X,
X_names,
lens,
lens_names,
color_function,
node_id,
nbins,
)
n = {
"id": "",
"name": node_id,
"color": c,
"type": _type_node(),
"size": s,
"tooltip": tt,
}
json_dict["nodes"].append(n)
for i, (node_id, linked_node_ids) in enumerate(graph["links"].items()):
for linked_node_id in linked_node_ids:
json_dict["links"].append(
{
"source": node_id_to_num[node_id],
"target": node_id_to_num[linked_node_id],
"width": _size_link_width(graph, node_id, linked_node_id),
}
)
return json_dict
def graph_data_distribution(graph, color_function, colorscale, nbins=10):
node_averages = []
for node_id, member_ids in graph["nodes"].items():
member_colors = color_function[member_ids]
node_averages.append(np.mean(member_colors))
histogram = build_histogram(node_averages, colorscale=colorscale, nbins=nbins)
return histogram
def _format_cluster_statistics(member_ids, X, X_names):
# TODO: Cache X_mean and X_std for all clusters.
# TODO: replace long tuples with named tuples.
# TODO: Name all the single letter variables.
# TODO: remove duplication between above_stats and below_stats
# TODO: Should we only show variables that are much above or below the mean?
cluster_data = {"above": [], "below": [], "size": len(member_ids)}
cluster_stats = ""
if X is not None:
# List vs. numpy handling: cast to numpy array
if isinstance(X_names, list):
X_names = np.array(X_names)
# Defaults when providing no X_names
if X_names.shape[0] == 0:
X_names = np.array(["f_%s" % (i) for i in range(X.shape[1])])
cluster_X_mean = np.mean(X[member_ids], axis=0)
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
above_mean = cluster_X_mean > X_mean
std_m = np.sqrt((cluster_X_mean - X_mean) ** 2) / X_std
stat_zip = list(
zip(
std_m,
X_names,
np.mean(X, axis=0),
cluster_X_mean,
above_mean,
np.std(X, axis=0),
)
)
stats = sorted(stat_zip, reverse=True)
above_stats = [a for a in stats if bool(a[4]) is True]
below_stats = [a for a in stats if bool(a[4]) is False]
if len(above_stats) > 0:
for s, f, i, c, a, v in above_stats[:5]:
cluster_data["above"].append(
{"feature": f, "mean": round(c, 3), "std": round(s, 1)}
)
if len(below_stats) > 0:
for s, f, i, c, a, v in below_stats[:5]:
cluster_data["below"].append(
{"feature": f, "mean": round(c, 3), "std": round(s, 1)}
)
return cluster_data
def _format_projection_statistics(member_ids, lens, lens_names):
projection_data = []
if lens is not None:
if isinstance(lens_names, list):
lens_names = np.array(lens_names)
# Create defaults when providing no lens_names
if lens_names.shape[0] == 0:
lens_names = np.array(["p_%s" % (i) for i in range(lens.shape[1])])
means_v = np.mean(lens[member_ids], axis=0)
maxs_v = np.max(lens[member_ids], axis=0)
mins_v = np.min(lens[member_ids], axis=0)
for name, mean_v, max_v, min_v in zip(lens_names, means_v, maxs_v, mins_v):
projection_data.append(
{
"name": name,
"mean": round(mean_v, 3),
"max": round(max_v, 3),
"min": round(min_v, 3),
}
)
return projection_data
def _tooltip_components(
member_ids,
X,
X_names,
lens,
lens_names,
color_function,
node_ID,
colorscale,
nbins=10,
):
projection_stats = _format_projection_statistics(member_ids, lens, lens_names)
cluster_stats = _format_cluster_statistics(member_ids, X, X_names)
member_histogram = build_histogram(
color_function[member_ids], colorscale=colorscale, nbins=nbins
)
return projection_stats, cluster_stats, member_histogram
def _format_tooltip(
env,
member_ids,
custom_tooltips,
X,
X_names,
lens,
lens_names,
color_function,
node_ID,
nbins,
):
# TODO: Allow customization in the form of aggregate per node and per entry in node.
# TODO: Allow users to turn off tooltip completely.
custom_tooltips = (
custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
)
# list will render better than numpy arrays
custom_tooltips = list(custom_tooltips)
colorscale = colorscale_default
projection_stats, cluster_stats, histogram = _tooltip_components(
member_ids,
X,
X_names,
lens,
lens_names,
color_function,
node_ID,
colorscale,
nbins,
)
tooltip = env.get_template("cluster_tooltip.html").render(
projection_stats=projection_stats,
cluster_stats=cluster_stats,
custom_tooltips=custom_tooltips,
histogram=histogram,
dist_label="Member",
node_id=node_ID,
)
return tooltip
def _color_function(member_ids, color_function):
return np.mean(color_function[member_ids])
def _size_node(member_ids):
return int(np.log(len(member_ids) + 1) + 1)
def _type_node():
return "circle"
def _size_link_width(graph, node_id, linked_node_id):
return 1
|
scikit-tda/kepler-mapper | kmapper/drawing.py | draw_matplotlib | python | def draw_matplotlib(g, ax=None, fig=None):
import networkx as nx
import matplotlib.pyplot as plt
fig = fig if fig else plt.figure()
ax = ax if ax else plt.gca()
if not isinstance(g, nx.Graph):
from .adapter import to_networkx
g = to_networkx(g)
# Determine a fine size for nodes
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
area = width * height * fig.dpi
n_nodes = len(g.nodes)
# size of node should be related to area and number of nodes -- heuristic
node_size = np.pi * area / n_nodes
node_r = np.sqrt(node_size / np.pi)
node_edge = node_r / 3
pos = nx.spring_layout(g)
nodes = nx.draw_networkx_nodes(g, node_size=node_size, pos=pos)
edges = nx.draw_networkx_edges(g, pos=pos)
nodes.set_edgecolor("w")
nodes.set_linewidth(node_edge)
plt.axis("square")
plt.axis("off")
return nodes | Draw the graph using NetworkX drawing functionality.
Parameters
------------
g: graph object returned by ``map``
The Mapper graph as constructed by ``KeplerMapper.map``
ax: matplotlib Axes object
A matplotlib axes object to plot graph on. If none, then use ``plt.gca()``
fig: matplotlib Figure object
A matplotlib Figure object to plot graph on. If none, then use ``plt.figure()``
Returns
--------
nodes: nx node set object list
List of nodes constructed with Networkx ``draw_networkx_nodes``. This can be used to further customize node attributes. | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/drawing.py#L11-L65 | [
"def to_networkx(graph):\n \"\"\" Convert a Mapper 1-complex to a networkx graph.\n\n Parameters\n -----------\n\n graph: dictionary, graph object returned from `kmapper.map`\n\n Returns\n --------\n\n g: graph as networkx.Graph() object\n\n \"\"\"\n\n # import here so networkx is not alw... | """
Methods for drawing graphs
"""
import numpy as np
__all__ = ["draw_matplotlib"]
def draw_matplotlib(g, ax=None, fig=None):
    """Draw the graph using NetworkX drawing functionality.
    Parameters
    ------------
    g: graph object returned by ``map``
        The Mapper graph as constructed by ``KeplerMapper.map``
    ax: matplotlib Axes object
        A matplotlib axes object to plot graph on. If none, then use ``plt.gca()``
    fig: matplotlib Figure object
        A matplotlib Figure object to plot graph on. If none, then use ``plt.figure()``
    Returns
    --------
    nodes: nx node set object list
        List of nodes constructed with Networkx ``draw_networkx_nodes``. This can be used to further customize node attributes.
    """
    # Imported lazily so importing this module does not require
    # matplotlib/networkx to be installed.
    import networkx as nx
    import matplotlib.pyplot as plt
    # Fall back to the current pyplot figure/axes when none are supplied.
    fig = fig if fig else plt.figure()
    ax = ax if ax else plt.gca()
    # Accept either a networkx graph or a raw kmapper graph dict.
    if not isinstance(g, nx.Graph):
        from .adapter import to_networkx
        g = to_networkx(g)
    # Determine a fine size for nodes
    # Axes extent converted from display coordinates into inches.
    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    width, height = bbox.width, bbox.height
    area = width * height * fig.dpi
    n_nodes = len(g.nodes)
    # size of node should be related to area and number of nodes -- heuristic
    node_size = np.pi * area / n_nodes
    node_r = np.sqrt(node_size / np.pi)
    node_edge = node_r / 3
    pos = nx.spring_layout(g)
    nodes = nx.draw_networkx_nodes(g, node_size=node_size, pos=pos)
    edges = nx.draw_networkx_edges(g, pos=pos)
    # White outline whose width scales with the node radius.
    nodes.set_edgecolor("w")
    nodes.set_linewidth(node_edge)
    plt.axis("square")
    plt.axis("off")
    return nodes
|
scikit-tda/kepler-mapper | kmapper/plotlyviz.py | plotlyviz | python | def plotlyviz(
scomplex,
colorscale=None,
title="Kepler Mapper",
graph_layout="kk",
color_function=None,
color_function_name=None,
dashboard=False,
graph_data=False,
factor_size=3,
edge_linewidth=1.5,
node_linecolor="rgb(200,200,200)",
width=600,
height=500,
bgcolor="rgba(240, 240, 240, 0.95)",
left=10,
bottom=35,
summary_height=300,
summary_width=600,
summary_left=20,
summary_right=20,
hist_left=25,
hist_right=25,
member_textbox_width=800,
filename=None,
):
"""
Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
to the colorbar (the name of the color function, if any),
and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
To display more info on the generated kmapper-graph, define two more FigureWidget(s):
the global node distribution figure, and a dummy figure
that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.
Parameters
-----------
scomplex: dict
Simplicial complex is the output from the KeplerMapper `map` method.
title: str
Title of output graphic
graph_layout: igraph layout;
recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
colorscale:
Plotly colorscale(colormap) to color graph nodes
dashboard: bool, default is False
If true, display complete dashboard of node information
graph_data: bool, default is False
If true, display graph metadata
factor_size: double, default is 3
a factor for the node size
edge_linewidth : double, default is 1.5
node_linecolor: color str, default is "rgb(200,200,200)"
width: int, default is 600,
height: int, default is 500,
bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
left: int, default is 10,
bottom: int, default is 35,
summary_height: int, default is 300,
summary_width: int, default is 600,
summary_left: int, default is 20,
summary_right: int, default is 20,
hist_left: int, default is 25,
hist_right: int, default is 25,
member_textbox_width: int, default is 800,
filename: str, default is None
if filename is given, the graphic will be saved to that file.
Returns
---------
result: plotly.FigureWidget
A FigureWidget that can be shown or editted. See the Plotly Demo notebook for examples of use.
"""
if not colorscale:
colorscale = default_colorscale
kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
scomplex,
colorscale=colorscale,
color_function=color_function,
color_function_name=color_function_name,
)
annotation = get_kmgraph_meta(mapper_summary)
plgraph_data = plotly_graph(
kmgraph,
graph_layout=graph_layout,
colorscale=colorscale,
factor_size=factor_size,
edge_linewidth=edge_linewidth,
node_linecolor=node_linecolor,
)
layout = plot_layout(
title=title,
width=width,
height=height,
annotation_text=annotation,
bgcolor=bgcolor,
left=left,
bottom=bottom,
)
result = go.FigureWidget(data=plgraph_data, layout=layout)
if color_function_name:
with result.batch_update():
result.data[1].marker.colorbar.title = color_function_name
result.data[1].marker.colorbar.titlefont.size = 10
if dashboard or graph_data:
fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
fw_summary = summary_fig(
mapper_summary,
width=summary_width,
height=summary_height,
left=summary_left,
right=summary_right,
)
fw_graph = result
result = hovering_widgets(
kmgraph, fw_graph, member_textbox_width=member_textbox_width
)
if graph_data:
result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
if filename:
pio.write_image(result, filename)
return result | Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
to the colorbar (the name of the color function, if any),
and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
To display more info on the generated kmapper-graph, define two more FigureWidget(s):
the global node distribution figure, and a dummy figure
that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.
Parameters
-----------
scomplex: dict
Simplicial complex is the output from the KeplerMapper `map` method.
title: str
Title of output graphic
graph_layout: igraph layout;
recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
colorscale:
Plotly colorscale(colormap) to color graph nodes
dashboard: bool, default is False
If true, display complete dashboard of node information
graph_data: bool, default is False
If true, display graph metadata
factor_size: double, default is 3
a factor for the node size
edge_linewidth : double, default is 1.5
node_linecolor: color str, default is "rgb(200,200,200)"
width: int, default is 600,
height: int, default is 500,
bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
left: int, default is 10,
bottom: int, default is 35,
summary_height: int, default is 300,
summary_width: int, default is 600,
summary_left: int, default is 20,
summary_right: int, default is 20,
hist_left: int, default is 25,
hist_right: int, default is 25,
member_textbox_width: int, default is 800,
filename: str, default is None
if filename is given, the graphic will be saved to that file.
Returns
---------
result: plotly.FigureWidget
A FigureWidget that can be shown or editted. See the Plotly Demo notebook for examples of use. | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/plotlyviz.py#L60-L210 | [
"def get_mapper_graph(\n simplicial_complex,\n color_function=None,\n color_function_name=None,\n colorscale=None,\n custom_tooltips=None,\n custom_meta=None,\n X=None,\n X_names=None,\n lens=None,\n lens_names=None,\n):\n \"\"\"Generate data for mapper graph visualization and annot... | from __future__ import division
import numpy as np
from .visuals import (
init_color_function,
_size_node,
_format_projection_statistics,
_format_cluster_statistics,
_color_function,
format_meta,
_to_html_format,
_map_val2color,
graph_data_distribution,
build_histogram,
_tooltip_components,
)
try:
import igraph as ig
import plotly.graph_objs as go
import ipywidgets as ipw
import plotly.io as pio
except ImportError:
print(
"""To use the plotly visualization tools, you must have the packages python-igraph, plotly, and ipywidgets installed in your environment."""
""" It looks like at least one of these is missing. Please install again with"""
"""\n\n\t`pip install python-igraph plotly ipywidgets`\n\nand try again"""
)
raise
default_colorscale = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
def mpl_to_plotly(cmap, n_entries):
    """Convert a matplotlib colormap into a Plotly colorscale.

    Samples ``cmap`` at ``n_entries`` evenly spaced points in [0, 1] and
    returns a list of ``[position, "rgb(r, g, b)"]`` pairs as expected by
    Plotly's ``colorscale`` attribute.
    """
    step = 1.0 / (n_entries - 1)
    scale = []
    for idx in range(n_entries):
        # Drop the alpha channel and scale the RGB components to 0-255.
        channels = np.array(cmap(idx * step)[:3]) * 255
        r, g, b = (np.uint8(c) for c in channels)
        scale.append([round(idx * step, 2), "rgb" + str((r, g, b))])
    return scale
def plotlyviz(
    scomplex,
    colorscale=None,
    title="Kepler Mapper",
    graph_layout="kk",
    color_function=None,
    color_function_name=None,
    dashboard=False,
    graph_data=False,
    factor_size=3,
    edge_linewidth=1.5,
    node_linecolor="rgb(200,200,200)",
    width=600,
    height=500,
    bgcolor="rgba(240, 240, 240, 0.95)",
    left=10,
    bottom=35,
    summary_height=300,
    summary_width=600,
    summary_left=20,
    summary_right=20,
    hist_left=25,
    hist_right=25,
    member_textbox_width=800,
    filename=None,
):
    """Build a Plotly visualization (optionally a full dashboard) of a kmapper graph.

    Suitable for Jupyter notebooks: the returned FigureWidget responds to
    restyle/relayout updates and to hover events when the dashboard widgets
    are enabled.

    Parameters
    -----------

    scomplex: dict
        Simplicial complex output from the KeplerMapper `map` method.
    title: str
        Title of output graphic.
    graph_layout: igraph layout;
        recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold).
    colorscale:
        Plotly colorscale(colormap) to color graph nodes.
    dashboard: bool, default is False
        If true, display complete dashboard of node information.
    graph_data: bool, default is False
        If true, display graph metadata.
    factor_size: double, default is 3
        a factor for the node size.
    edge_linewidth : double, default is 1.5
    node_linecolor: color str, default is "rgb(200,200,200)"
    width, height, bgcolor, left, bottom:
        layout of the main graph figure.
    summary_width, summary_height, summary_left, summary_right:
        layout of the summary figure.
    hist_left, hist_right:
        margins of the node-distribution histogram.
    member_textbox_width: int, default is 800
    filename: str, default is None
        if filename is given, the graphic will be saved to that file.

    Returns
    ---------

    result: plotly.FigureWidget
        A FigureWidget that can be shown or editted. See the Plotly Demo
        notebook for examples of use.
    """
    colorscale = colorscale or default_colorscale

    graph_json, summary, color_distribution = get_mapper_graph(
        scomplex,
        colorscale=colorscale,
        color_function=color_function,
        color_function_name=color_function_name,
    )

    traces = plotly_graph(
        graph_json,
        graph_layout=graph_layout,
        colorscale=colorscale,
        factor_size=factor_size,
        edge_linewidth=edge_linewidth,
        node_linecolor=node_linecolor,
    )
    figure_layout = plot_layout(
        title=title,
        width=width,
        height=height,
        annotation_text=get_kmgraph_meta(summary),
        bgcolor=bgcolor,
        left=left,
        bottom=bottom,
    )
    result = go.FigureWidget(data=traces, layout=figure_layout)

    if color_function_name:
        # Batch the colorbar updates so the widget redraws only once.
        with result.batch_update():
            result.data[1].marker.colorbar.title = color_function_name
            result.data[1].marker.colorbar.titlefont.size = 10

    if dashboard or graph_data:
        fw_hist = node_hist_fig(color_distribution, left=hist_left, right=hist_right)
        fw_summary = summary_fig(
            summary,
            width=summary_width,
            height=summary_height,
            left=summary_left,
            right=summary_right,
        )

        fw_graph = result
        result = hovering_widgets(
            graph_json, fw_graph, member_textbox_width=member_textbox_width
        )

        if graph_data:
            result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])

    if filename:
        pio.write_image(result, filename)

    return result
def scomplex_to_graph(
    simplicial_complex,
    color_function,
    X,
    X_names,
    lens,
    lens_names,
    custom_tooltips,
    colorscale,
):
    """Translate a KeplerMapper simplicial complex into a JSON-style dict.

    Returns a dict with "nodes" (one entry per cluster, annotated with id,
    color, size, tooltip statistics and the member histogram) and "links"
    (source/target pairs indexing into the node list).
    """
    nodes = []
    links = []
    # Map original node names to their positional index in ``nodes``.
    name_to_index = {}

    for index, (node_id, member_ids) in enumerate(simplicial_complex["nodes"].items()):
        name_to_index[node_id] = index
        projection_stats, cluster_stats, member_histogram = _tooltip_components(
            member_ids, X, X_names, lens, lens_names, color_function, index, colorscale
        )
        nodes.append(
            {
                "id": index,
                "name": node_id,
                "member_ids": member_ids,
                "color": _color_function(member_ids, color_function),
                "size": _size_node(member_ids),
                "cluster": cluster_stats,
                "distribution": member_histogram,
                "projection": projection_stats,
                "custom_tooltips": custom_tooltips,
            }
        )

    for node_id, linked_node_ids in simplicial_complex["links"].items():
        links.extend(
            {"source": name_to_index[node_id], "target": name_to_index[target]}
            for target in linked_node_ids
        )

    return {"nodes": nodes, "links": links}
def get_mapper_graph(
    simplicial_complex,
    color_function=None,
    color_function_name=None,
    colorscale=None,
    custom_tooltips=None,
    custom_meta=None,
    X=None,
    X_names=None,
    lens=None,
    lens_names=None,
):
    """Generate data for mapper graph visualization and annotation.

    Parameters
    ----------
    simplicial_complex : dict
        Simplicial complex is the output from the KeplerMapper `map` method.

    Returns
    -------
    the graph dictionary in a json representation, the mapper summary
    and the node_distribution

    Example
    -------

    >>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)

    """
    colorscale = colorscale if colorscale else default_colorscale

    if len(simplicial_complex["nodes"]) == 0:
        raise Exception(
            "A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
        )

    color_function = init_color_function(simplicial_complex, color_function)

    X_names = [] if X_names is None else X_names
    lens_names = [] if lens_names is None else lens_names

    json_graph = scomplex_to_graph(
        simplicial_complex,
        color_function,
        X,
        X_names,
        lens,
        lens_names,
        custom_tooltips,
        colorscale=colorscale,
    )
    colorf_distribution = graph_data_distribution(
        simplicial_complex, color_function, colorscale
    )
    mapper_summary = format_meta(
        simplicial_complex,
        color_function_name=color_function_name,
        custom_meta=custom_meta,
    )

    return json_graph, mapper_summary, colorf_distribution
def plotly_graph(
    kmgraph,
    graph_layout="kk",
    colorscale=None,
    showscale=True,
    factor_size=3,
    edge_linecolor="rgb(180,180,180)",
    edge_linewidth=1.5,
    node_linecolor="rgb(255,255,255)",
    node_linewidth=1.0,
):
    """Generate Plotly data structures that represent the mapper graph

    Parameters
    ----------
    kmgraph: dict representing the mapper graph,
        returned by the function get_mapper_graph()
    graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
        or 'fr' (fruchterman-reingold)
    colorscale: a Plotly colorscale(colormap) to color graph nodes
    showscale: boolean to display or not the colorbar
    factor_size: a factor for the node size

    Returns
    -------
    The plotly traces (dicts) representing the graph edges and nodes
    """
    if not colorscale:
        colorscale = default_colorscale

    # define an igraph.Graph instance of n_nodes
    n_nodes = len(kmgraph["nodes"])
    if n_nodes == 0:
        raise ValueError("Your graph has 0 nodes")

    G = ig.Graph(n=n_nodes)
    links = [(e["source"], e["target"]) for e in kmgraph["links"]]
    G.add_edges(links)
    layt = G.layout(graph_layout)

    hover_text = [node["name"] for node in kmgraph["nodes"]]
    color_vals = [node["color"] for node in kmgraph["nodes"]]
    # BUG FIX: ``dtype=np.int`` raises AttributeError on NumPy >= 1.24 where
    # the deprecated alias was removed; the builtin ``int`` is the equivalent.
    node_size = np.array(
        [factor_size * node["size"] for node in kmgraph["nodes"]], dtype=int
    )
    Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)

    edge_trace = dict(
        type="scatter",
        x=Xe,
        y=Ye,
        mode="lines",
        line=dict(color=edge_linecolor, width=edge_linewidth),
        hoverinfo="none",
    )

    node_trace = dict(
        type="scatter",
        x=Xn,
        y=Yn,
        mode="markers",
        marker=dict(
            size=node_size.tolist(),
            color=color_vals,
            opacity=1.0,
            colorscale=colorscale,
            showscale=showscale,
            line=dict(color=node_linecolor, width=node_linewidth),
            colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
        ),
        text=hover_text,
        hoverinfo="text",
    )

    return [edge_trace, node_trace]
def get_kmgraph_meta(mapper_summary):
    """Build the HTML annotation text shown below the graph plot."""
    custom = mapper_summary["custom_meta"]
    template = (
        "<b>N_cubes:</b> {} <b>Perc_overlap:</b> {}"
        "<br><b>Nodes:</b> {} <b>Edges:</b> {}"
        " <b>Total samples:</b> {} <b>Unique_samples:</b> {}"
    )
    return template.format(
        custom["n_cubes"],
        custom["perc_overlap"],
        mapper_summary["n_nodes"],
        mapper_summary["n_edges"],
        mapper_summary["n_total"],
        mapper_summary["n_unique"],
    )
def plot_layout(
    title="TDA KMapper",
    width=600,
    height=600,
    bgcolor="rgba(255, 255, 255, 1)",
    annotation_text=None,
    annotation_x=0,
    annotation_y=-0.01,
    top=100,
    left=60,
    right=60,
    bottom=60,
):
    """Build the Plotly layout dict for the mapper graph figure.

    Parameters
    ----------
    width, height: integers
        setting width and height of plot window
    bgcolor: string,
        rgba or hex color code for the background color
    annotation_text: string
        meta data to be displayed; when None, no annotation is attached
    annotation_x & annotation_y:
        The coordinates of the point where we insert the annotation; a
        negative y places the annotation below the plot
    """
    layout = dict(
        title=title,
        font=dict(size=12),
        showlegend=False,
        autosize=False,
        width=width,
        height=height,
        xaxis=dict(visible=False),
        yaxis=dict(visible=False),
        hovermode="closest",
        plot_bgcolor=bgcolor,
        margin=dict(t=top, b=bottom, l=left, r=right),
    )

    if annotation_text is not None:
        layout["annotations"] = [
            dict(
                showarrow=False,
                text=annotation_text,
                xref="paper",
                yref="paper",
                x=annotation_x,
                y=annotation_y,
                align="left",
                xanchor="left",
                yanchor="top",
                font=dict(size=12),
            )
        ]

    return layout
def node_hist_fig(
    node_color_distribution,
    title="Graph Node Distribution",
    width=400,
    height=300,
    top=60,
    left=25,
    bottom=60,
    right=25,
    bgcolor="rgb(240,240,240)",
    y_gridcolor="white",
):
    """Build the FigureWidget holding the node-color histogram.

    Parameters
    ----------
    node_color_distribution: list of dicts describing the build_histogram
    width, height: integers - width and height of the histogram FigureWidget
    left, top, right, bottom: ints; number of pixels around the FigureWidget
    bgcolor: rgb of hex color code for the figure background color
    y_gridcolor: rgb of hex color code for the yaxis y_gridcolor

    Returns
    -------
    FigureWidget object representing the histogram of the graph nodes
    """
    heights = [entry["height"] for entry in node_color_distribution]
    colors = [entry["color"] for entry in node_color_distribution]
    labels = ["{}%".format(entry["perc"]) for entry in node_color_distribution]

    bars = go.Bar(
        y=heights,
        marker=dict(color=colors),
        text=labels,
        hoverinfo="y+text",
    )

    hist_layout = dict(
        title=title,
        width=width,
        height=height,
        font=dict(size=12),
        xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
        yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
        bargap=0.01,
        margin=dict(l=left, r=right, b=bottom, t=top),
        hovermode="x",
        plot_bgcolor=bgcolor,
    )

    return go.FigureWidget(data=[bars], layout=hist_layout)
def summary_fig(
    mapper_summary,
    width=600,
    height=500,
    top=60,
    left=20,
    bottom=60,
    right=20,
    bgcolor="rgb(240,240,240)",
):
    """Build a text-only FigureWidget describing the algorithms and
    sklearn class instances or methods used to produce the mapper graph.

    Returns a FigureWidget object representing the figure.
    """
    summary_text = _text_mapper_summary(mapper_summary)

    # A single invisible scatter trace is used purely as a text carrier.
    text_trace = dict(
        type="scatter",
        x=[0, width],
        y=[height, 0],
        mode="text",
        text=[summary_text, ""],
        textposition="bottom right",
        hoverinfo="none",
    )

    fig_layout = dict(
        title="Algorithms and scikit-learn objects/methods",
        width=width,
        height=height,
        font=dict(size=12),
        xaxis=dict(visible=False),
        yaxis=dict(visible=False, range=[0, height + 5]),
        margin=dict(t=top, b=bottom, l=left, r=right),
        plot_bgcolor=bgcolor,
    )

    return go.FigureWidget(data=[text_trace], layout=fig_layout)
def hovering_widgets(
    kmgraph,
    graph_fw,
    ctooltips=False,
    width=400,
    height=300,
    top=100,
    left=50,
    bgcolor="rgb(240,240,240)",
    y_gridcolor="white",
    member_textbox_width=200,
):
    """Define the widgets that display the distribution of each node on hover
    and the members of each node.

    Parameters
    ----------
    kmgraph: the kepler-mapper graph dict returned by ``get_mapper_graph()``
    graph_fw: the FigureWidget representing the graph
    ctooltips: boolean; if True/False the node["custom_tooltips"]/"member_ids"
        are passed to member_textbox
    width, height, top: figure size and position of the hovered node
        distribution

    Returns
    -------
    a box containing the graph figure, the figure of the hovered node
    distribution, and the textboxes displaying the cluster size and member_ids
    or custom tooltips for hovered node members
    """

    def _members_text(node):
        # Render either the custom tooltips or the raw member ids.
        key = "custom_tooltips" if ctooltips else "member_ids"
        return ", ".join(str(member) for member in node[key])

    first_node = kmgraph["nodes"][0]

    hist_fw = node_hist_fig(
        first_node["distribution"],
        title="Cluster Member Distribution",
        width=width,
        height=height,
        top=top,
        left=left,
        bgcolor=bgcolor,
        y_gridcolor=y_gridcolor,
    )

    clust_textbox = ipw.Text(
        value="{:d}".format(first_node["cluster"]["size"]),
        description="Cluster size:",
        disabled=False,
        continuous_update=True,
    )
    clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")

    member_textbox = ipw.Textarea(
        value=_members_text(first_node),
        description="Members:",
        disabled=False,
        continuous_update=True,
    )
    member_textbox.layout = dict(
        margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
    )

    def do_on_hover(trace, points, state):
        if not points.point_inds:
            return
        # Index of the hovered node in the graph's node trace.
        hovered = kmgraph["nodes"][points.point_inds[0]]

        # Update the histogram data in a single redraw.
        with hist_fw.batch_update():
            hist_fw.data[0].text = [
                "{:.1f}%".format(d["perc"]) for d in hovered["distribution"]
            ]
            hist_fw.data[0].y = [d["height"] for d in hovered["distribution"]]
            hist_fw.data[0].marker.color = [d["color"] for d in hovered["distribution"]]

        clust_textbox.value = "{:d}".format(hovered["cluster"]["size"])
        member_textbox.value = _members_text(hovered)

    graph_fw.data[1].on_hover(do_on_hover)
    return ipw.VBox([ipw.HBox([graph_fw, hist_fw]), clust_textbox, member_textbox])
def _get_plotly_data(E, coords):
# E : the list of tuples representing the graph edges
# coords: list of node coordinates assigned by igraph.Layout
N = len(coords)
Xnodes = [coords[k][0] for k in range(N)] # x-coordinates of nodes
Ynodes = [coords[k][1] for k in range(N)] # y-coordnates of nodes
Xedges = []
Yedges = []
for e in E:
Xedges.extend([coords[e[0]][0], coords[e[1]][0], None])
Yedges.extend([coords[e[0]][1], coords[e[1]][1], None])
return Xnodes, Ynodes, Xedges, Yedges
def _text_mapper_summary(mapper_summary):
d = mapper_summary["custom_meta"]
text = "<br><b>Projection: </b>" + d["projection"]
text += (
"<br><b>Clusterer: </b>" + d["clusterer"] + "<br><b>Scaler: </b>" + d["scaler"]
)
if "color_function" in d.keys():
text += "<br><b>Color function: </b>" + d["color_function"]
return text
def _hover_format(member_ids, custom_tooltips, X, X_names, lens, lens_names):
    """Return the hover tooltip text for a node: its cluster size as a string.

    NOTE(review): ``lens``/``lens_names`` are unused, and the
    ``custom_tooltips`` lookup result is discarded; both are preserved here
    only to keep the original call contract and failure behavior (the lookup
    can raise for invalid indices) — confirm before simplifying.
    """
    cluster_data = _format_cluster_statistics(member_ids, X, X_names)
    custom_tooltips = (
        custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
    )
    return "{}".format(cluster_data["size"])
|
scikit-tda/kepler-mapper | kmapper/plotlyviz.py | get_mapper_graph | python | def get_mapper_graph(
simplicial_complex,
color_function=None,
color_function_name=None,
colorscale=None,
custom_tooltips=None,
custom_meta=None,
X=None,
X_names=None,
lens=None,
lens_names=None,
):
"""Generate data for mapper graph visualization and annotation.
Parameters
----------
simplicial_complex : dict
Simplicial complex is the output from the KeplerMapper `map` method.
Returns
-------
the graph dictionary in a json representation, the mapper summary
and the node_distribution
Example
-------
>>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)
"""
if not colorscale:
colorscale = default_colorscale
if not len(simplicial_complex["nodes"]) > 0:
raise Exception(
"A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
)
color_function = init_color_function(simplicial_complex, color_function)
if X_names is None:
X_names = []
if lens_names is None:
lens_names = []
json_graph = scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale=colorscale,
)
colorf_distribution = graph_data_distribution(
simplicial_complex, color_function, colorscale
)
mapper_summary = format_meta(
simplicial_complex,
color_function_name=color_function_name,
custom_meta=custom_meta,
)
return json_graph, mapper_summary, colorf_distribution | Generate data for mapper graph visualization and annotation.
Parameters
----------
simplicial_complex : dict
Simplicial complex is the output from the KeplerMapper `map` method.
Returns
-------
the graph dictionary in a json representation, the mapper summary
and the node_distribution
Example
-------
>>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex) | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/plotlyviz.py#L256-L322 | [
"def init_color_function(graph, color_function=None):\n # If no color_function provided we color by row order in data set\n # Reshaping to 2-D array is required for sklearn 0.19\n n_samples = np.max([i for s in graph[\"nodes\"].values() for i in s]) + 1\n if color_function is None:\n color_functi... | from __future__ import division
import numpy as np
from .visuals import (
init_color_function,
_size_node,
_format_projection_statistics,
_format_cluster_statistics,
_color_function,
format_meta,
_to_html_format,
_map_val2color,
graph_data_distribution,
build_histogram,
_tooltip_components,
)
try:
import igraph as ig
import plotly.graph_objs as go
import ipywidgets as ipw
import plotly.io as pio
except ImportError:
print(
"""To use the plotly visualization tools, you must have the packages python-igraph, plotly, and ipywidgets installed in your environment."""
""" It looks like at least one of these is missing. Please install again with"""
"""\n\n\t`pip install python-igraph plotly ipywidgets`\n\nand try again"""
)
raise
default_colorscale = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
def mpl_to_plotly(cmap, n_entries):
h = 1.0 / (n_entries - 1)
pl_colorscale = []
for k in range(n_entries):
C = list(map(np.uint8, np.array(cmap(k * h)[:3]) * 255))
pl_colorscale.append(
[round(k * h, 2), "rgb" + str((C[0], C[1], C[2]))]
) # Python 2.7+
# pl_colorscale.append([round(k*h, 2), f'rgb({C[0]}, {C[1]}, {C[2]})']) # Python 3.6+
return pl_colorscale
def plotlyviz(
scomplex,
colorscale=None,
title="Kepler Mapper",
graph_layout="kk",
color_function=None,
color_function_name=None,
dashboard=False,
graph_data=False,
factor_size=3,
edge_linewidth=1.5,
node_linecolor="rgb(200,200,200)",
width=600,
height=500,
bgcolor="rgba(240, 240, 240, 0.95)",
left=10,
bottom=35,
summary_height=300,
summary_width=600,
summary_left=20,
summary_right=20,
hist_left=25,
hist_right=25,
member_textbox_width=800,
filename=None,
):
"""
Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
to the colorbar (the name of the color function, if any),
and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
To display more info on the generated kmapper-graph, define two more FigureWidget(s):
the global node distribution figure, and a dummy figure
that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.
Parameters
-----------
scomplex: dict
Simplicial complex is the output from the KeplerMapper `map` method.
title: str
Title of output graphic
graph_layout: igraph layout;
recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
colorscale:
Plotly colorscale(colormap) to color graph nodes
dashboard: bool, default is False
If true, display complete dashboard of node information
graph_data: bool, default is False
If true, display graph metadata
factor_size: double, default is 3
a factor for the node size
edge_linewidth : double, default is 1.5
node_linecolor: color str, default is "rgb(200,200,200)"
width: int, default is 600,
height: int, default is 500,
bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
left: int, default is 10,
bottom: int, default is 35,
summary_height: int, default is 300,
summary_width: int, default is 600,
summary_left: int, default is 20,
summary_right: int, default is 20,
hist_left: int, default is 25,
hist_right: int, default is 25,
member_textbox_width: int, default is 800,
filename: str, default is None
if filename is given, the graphic will be saved to that file.
Returns
---------
result: plotly.FigureWidget
A FigureWidget that can be shown or editted. See the Plotly Demo notebook for examples of use.
"""
if not colorscale:
colorscale = default_colorscale
kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
scomplex,
colorscale=colorscale,
color_function=color_function,
color_function_name=color_function_name,
)
annotation = get_kmgraph_meta(mapper_summary)
plgraph_data = plotly_graph(
kmgraph,
graph_layout=graph_layout,
colorscale=colorscale,
factor_size=factor_size,
edge_linewidth=edge_linewidth,
node_linecolor=node_linecolor,
)
layout = plot_layout(
title=title,
width=width,
height=height,
annotation_text=annotation,
bgcolor=bgcolor,
left=left,
bottom=bottom,
)
result = go.FigureWidget(data=plgraph_data, layout=layout)
if color_function_name:
with result.batch_update():
result.data[1].marker.colorbar.title = color_function_name
result.data[1].marker.colorbar.titlefont.size = 10
if dashboard or graph_data:
fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
fw_summary = summary_fig(
mapper_summary,
width=summary_width,
height=summary_height,
left=summary_left,
right=summary_right,
)
fw_graph = result
result = hovering_widgets(
kmgraph, fw_graph, member_textbox_width=member_textbox_width
)
if graph_data:
result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
if filename:
pio.write_image(result, filename)
return result
def scomplex_to_graph(
    simplicial_complex,
    color_function,
    X,
    X_names,
    lens,
    lens_names,
    custom_tooltips,
    colorscale,
):
    """Convert a KeplerMapper simplicial complex into a JSON-style graph dict.

    Parameters
    ----------
    simplicial_complex : dict
        Output of ``KeplerMapper.map`` with ``"nodes"`` and ``"links"`` keys.
    color_function : array-like
        Per-sample color values; each node's color is derived from its members.
    X, X_names :
        Data matrix and column names used for the per-node cluster statistics.
    lens, lens_names :
        Projection (lens) values and their names for the projection statistics.
    custom_tooltips : array-like or None
        Optional per-sample tooltip labels; stored verbatim on every node.
    colorscale : list
        Plotly colorscale used when building the node member histograms.

    Returns
    -------
    dict
        ``{"nodes": [...], "links": [...]}`` where every node carries its
        statistics/histogram and every link references nodes by the integer
        index assigned during node enumeration.
    """
    json_dict = {"nodes": [], "links": []}
    # Map each string node id to a consecutive integer; links reference these.
    node_id_to_num = {}
    for i, (node_id, member_ids) in enumerate(simplicial_complex["nodes"].items()):
        node_id_to_num[node_id] = i
        projection_stats, cluster_stats, member_histogram = _tooltip_components(
            member_ids, X, X_names, lens, lens_names, color_function, i, colorscale
        )
        json_dict["nodes"].append(
            {
                "id": i,
                "name": node_id,
                "member_ids": member_ids,
                "color": _color_function(member_ids, color_function),
                "size": _size_node(member_ids),
                "cluster": cluster_stats,
                "distribution": member_histogram,
                "projection": projection_stats,
                "custom_tooltips": custom_tooltips,
            }
        )
    # The enumerate() index was never used in this loop; iterate items directly.
    for node_id, linked_node_ids in simplicial_complex["links"].items():
        for linked_node_id in linked_node_ids:
            json_dict["links"].append(
                {
                    "source": node_id_to_num[node_id],
                    "target": node_id_to_num[linked_node_id],
                }
            )
    return json_dict
def get_mapper_graph(
    simplicial_complex,
    color_function=None,
    color_function_name=None,
    colorscale=None,
    custom_tooltips=None,
    custom_meta=None,
    X=None,
    X_names=None,
    lens=None,
    lens_names=None,
):
    """Generate data for mapper graph visualization and annotation.

    Parameters
    ----------
    simplicial_complex : dict
        Simplicial complex is the output from the KeplerMapper `map` method.
    color_function : array-like, optional
        Per-sample color values; initialized via ``init_color_function`` when
        omitted.
    colorscale : list, optional
        Plotly colorscale; falls back to the module's ``default_colorscale``.
    custom_tooltips, custom_meta, X, X_names, lens, lens_names :
        Forwarded to ``scomplex_to_graph`` / ``format_meta``.

    Returns
    -------
    tuple
        The graph dictionary in a json representation, the mapper summary,
        and the node color distribution.

    Raises
    ------
    Exception
        If the complex has no nodes (typically an over-sensitive clusterer
        labelled every point as noise).

    Example
    -------
    >>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)
    """
    if not colorscale:
        colorscale = default_colorscale
    # Idiomatic emptiness test (was: `not len(...) > 0`).
    if not simplicial_complex["nodes"]:
        raise Exception(
            "A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
        )
    color_function = init_color_function(simplicial_complex, color_function)
    if X_names is None:
        X_names = []
    if lens_names is None:
        lens_names = []
    json_graph = scomplex_to_graph(
        simplicial_complex,
        color_function,
        X,
        X_names,
        lens,
        lens_names,
        custom_tooltips,
        colorscale=colorscale,
    )
    colorf_distribution = graph_data_distribution(
        simplicial_complex, color_function, colorscale
    )
    mapper_summary = format_meta(
        simplicial_complex,
        color_function_name=color_function_name,
        custom_meta=custom_meta,
    )
    return json_graph, mapper_summary, colorf_distribution
def plotly_graph(
    kmgraph,
    graph_layout="kk",
    colorscale=None,
    showscale=True,
    factor_size=3,
    edge_linecolor="rgb(180,180,180)",
    edge_linewidth=1.5,
    node_linecolor="rgb(255,255,255)",
    node_linewidth=1.0,
):
    """Generate Plotly data structures that represent the mapper graph

    Parameters
    ----------
    kmgraph: dict representing the mapper graph,
        returned by the function get_mapper_graph()
    graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
        or 'fr' (fruchterman-reingold)
    colorscale: a Plotly colorscale(colormap) to color graph nodes
    showscale: boolean to display or not the colorbar
    factor_size: a factor for the node size
    edge_linecolor, edge_linewidth: styling for the edge trace
    node_linecolor, node_linewidth: styling for the node marker outline

    Returns
    -------
    The plotly traces (dicts) representing the graph edges and nodes

    Raises
    ------
    ValueError if the graph has no nodes.
    """
    if not colorscale:
        colorscale = default_colorscale
    # define an igraph.Graph instance of n_nodes
    n_nodes = len(kmgraph["nodes"])
    if n_nodes == 0:
        raise ValueError("Your graph has 0 nodes")
    G = ig.Graph(n=n_nodes)
    links = [(e["source"], e["target"]) for e in kmgraph["links"]]
    G.add_edges(links)
    layt = G.layout(graph_layout)
    hover_text = [node["name"] for node in kmgraph["nodes"]]
    color_vals = [node["color"] for node in kmgraph["nodes"]]
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `int` is the documented replacement and yields the same default dtype.
    node_size = np.array(
        [factor_size * node["size"] for node in kmgraph["nodes"]], dtype=int
    )
    Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)
    edge_trace = dict(
        type="scatter",
        x=Xe,
        y=Ye,
        mode="lines",
        line=dict(color=edge_linecolor, width=edge_linewidth),
        hoverinfo="none",
    )
    node_trace = dict(
        type="scatter",
        x=Xn,
        y=Yn,
        mode="markers",
        marker=dict(
            size=node_size.tolist(),
            color=color_vals,
            opacity=1.0,
            colorscale=colorscale,
            showscale=showscale,
            line=dict(color=node_linecolor, width=node_linewidth),
            colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
        ),
        text=hover_text,
        hoverinfo="text",
    )
    return [edge_trace, node_trace]
def get_kmgraph_meta(mapper_summary):
    """Build the HTML annotation displayed below the graph plot.

    Combines the mapper parameters stored under ``custom_meta`` with the
    node/edge/sample counts from *mapper_summary* into one HTML string.
    """
    custom = mapper_summary["custom_meta"]
    return (
        "<b>N_cubes:</b> {} <b>Perc_overlap:</b> {}"
        "<br><b>Nodes:</b> {} <b>Edges:</b> {}"
        " <b>Total samples:</b> {} <b>Unique_samples:</b> {}"
    ).format(
        custom["n_cubes"],
        custom["perc_overlap"],
        mapper_summary["n_nodes"],
        mapper_summary["n_edges"],
        mapper_summary["n_total"],
        mapper_summary["n_unique"],
    )
def plot_layout(
    title="TDA KMapper",
    width=600,
    height=600,
    bgcolor="rgba(255, 255, 255, 1)",
    annotation_text=None,
    annotation_x=0,
    annotation_y=-0.01,
    top=100,
    left=60,
    right=60,
    bottom=60,
):
    """Assemble the Plotly layout dict for the mapper graph figure.

    Parameters
    ----------
    width, height : int
        Size of the plot window, in pixels.
    bgcolor : str
        rgba or hex color code for the plot background.
    annotation_text : str or None
        Meta data to display; when None, no annotation is attached.
    annotation_x, annotation_y :
        Paper coordinates of the annotation anchor; a negative y places the
        annotation below the plotting area.
    top, left, right, bottom : int
        Margins around the plotting area, in pixels.
    """
    layout = {
        "title": title,
        "font": {"size": 12},
        "showlegend": False,
        "autosize": False,
        "width": width,
        "height": height,
        "xaxis": {"visible": False},
        "yaxis": {"visible": False},
        "hovermode": "closest",
        "plot_bgcolor": bgcolor,
        "margin": {"t": top, "b": bottom, "l": left, "r": right},
    }
    # Guard clause: annotations are only attached when text was supplied.
    if annotation_text is not None:
        layout["annotations"] = [
            {
                "showarrow": False,
                "text": annotation_text,
                "xref": "paper",
                "yref": "paper",
                "x": annotation_x,
                "y": annotation_y,
                "align": "left",
                "xanchor": "left",
                "yanchor": "top",
                "font": {"size": 12},
            }
        ]
    return layout
def node_hist_fig(
    node_color_distribution,
    title="Graph Node Distribution",
    width=400,
    height=300,
    top=60,
    left=25,
    bottom=60,
    right=25,
    bgcolor="rgb(240,240,240)",
    y_gridcolor="white",
):
    """Define the plotly plot representing the node histogram

    Parameters
    ----------
    node_color_distribution: list of dicts describing the build_histogram
    width, height: integers - width and height of the histogram FigureWidget
    left, top, right, bottom: ints; number of pixels around the FigureWidget
    bgcolor: rgb of hex color code for the figure background color
    y_gridcolor: rgb of hex color code for the yaxis y_gridcolor

    Returns
    -------
    FigureWidget object representing the histogram of the graph nodes
    """
    # Percentage labels shown on hover. The previous
    # `"{perc}%".format(**locals())` relied on the comprehension's locals()
    # scope; formatting each value directly is equivalent and less fragile.
    text = ["{}%".format(d["perc"]) for d in node_color_distribution]
    pl_hist = go.Bar(
        y=[d["height"] for d in node_color_distribution],
        marker=dict(color=[d["color"] for d in node_color_distribution]),
        text=text,
        hoverinfo="y+text",
    )
    hist_layout = dict(
        title=title,
        width=width,
        height=height,
        font=dict(size=12),
        xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
        yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
        bargap=0.01,
        margin=dict(l=left, r=right, b=bottom, t=top),
        hovermode="x",
        plot_bgcolor=bgcolor,
    )
    return go.FigureWidget(data=[pl_hist], layout=hist_layout)
def summary_fig(
    mapper_summary,
    width=600,
    height=500,
    top=60,
    left=20,
    bottom=60,
    right=20,
    bgcolor="rgb(240,240,240)",
):
    """Build a dummy figure whose sole purpose is displaying text about the
    algorithms and scikit-learn class instances or methods used.

    Returns a FigureWidget; two invisible scatter points pin the summary
    text to the top-left corner of the plotting area.
    """
    summary_text = _text_mapper_summary(mapper_summary)
    trace = {
        "type": "scatter",
        "x": [0, width],
        "y": [height, 0],
        "mode": "text",
        "text": [summary_text, ""],
        "textposition": "bottom right",
        "hoverinfo": "none",
    }
    fig_layout = {
        "title": "Algorithms and scikit-learn objects/methods",
        "width": width,
        "height": height,
        "font": {"size": 12},
        "xaxis": {"visible": False},
        "yaxis": {"visible": False, "range": [0, height + 5]},
        "margin": {"t": top, "b": bottom, "l": left, "r": right},
        "plot_bgcolor": bgcolor,
    }
    return go.FigureWidget(data=[trace], layout=fig_layout)
def hovering_widgets(
    kmgraph,
    graph_fw,
    ctooltips=False,
    width=400,
    height=300,
    top=100,
    left=50,
    bgcolor="rgb(240,240,240)",
    y_gridcolor="white",
    member_textbox_width=200,
):
    """Wire up widgets that update when a graph node is hovered.

    Parameters
    ----------
    kmgraph : dict
        The kepler-mapper graph dict returned by ``get_mapper_graph()``.
    graph_fw : plotly FigureWidget
        The FigureWidget representing the graph; the hover callback is
        attached to its node trace (``data[1]``).
    ctooltips : bool
        If True the node ``"custom_tooltips"`` are shown in the members
        textbox, otherwise the raw ``"member_ids"``.
    width, height, top, left, bgcolor, y_gridcolor
        Size, position and colors of the hovered-node distribution histogram.
    member_textbox_width : int
        Pixel width of the members textarea.

    Returns
    -------
    ipywidgets.VBox
        Graph and histogram side by side, with a cluster-size textbox and a
        members textbox below; all three update on node hover.
    """
    # Seed the widgets from the first node so they are populated before the
    # first hover event ever fires.
    fnode = kmgraph["nodes"][0]
    fwc = node_hist_fig(
        fnode["distribution"],
        title="Cluster Member Distribution",
        width=width,
        height=height,
        top=top,
        left=left,
        bgcolor=bgcolor,
        y_gridcolor=y_gridcolor,
    )
    clust_textbox = ipw.Text(
        value="{:d}".format(fnode["cluster"]["size"]),
        description="Cluster size:",
        disabled=False,
        continuous_update=True,
    )
    clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")
    member_textbox = ipw.Textarea(
        value=", ".join(str(x) for x in fnode["member_ids"])
        if not ctooltips
        else ", ".join(str(x) for x in fnode["custom_tooltips"]),
        description="Members:",
        disabled=False,
        continuous_update=True,
    )
    member_textbox.layout = dict(
        margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
    )

    def do_on_hover(trace, points, state):
        # Plotly passes the hovered point indices; an empty list means the
        # pointer is not over a node.
        if not points.point_inds:
            return
        ind = points.point_inds[0]  # get the index of the hovered node
        node = kmgraph["nodes"][ind]
        # on hover do:
        with fwc.batch_update():  # update data in the cluster member histogr
            fwc.data[0].text = [
                "{:.1f}%".format(d["perc"]) for d in node["distribution"]
            ]
            fwc.data[0].y = [d["height"] for d in node["distribution"]]
            fwc.data[0].marker.color = [d["color"] for d in node["distribution"]]
        clust_textbox.value = "{:d}".format(node["cluster"]["size"])
        member_textbox.value = (
            ", ".join(str(x) for x in node["member_ids"])
            if not ctooltips
            else ", ".join(str(x) for x in node["custom_tooltips"])
        )

    # data[1] is the node (marker) trace built by plotly_graph(); data[0] is
    # the edge trace, which must not receive the callback.
    trace = graph_fw.data[1]
    trace.on_hover(do_on_hover)
    return ipw.VBox([ipw.HBox([graph_fw, fwc]), clust_textbox, member_textbox])
def _get_plotly_data(E, coords):
# E : the list of tuples representing the graph edges
# coords: list of node coordinates assigned by igraph.Layout
N = len(coords)
Xnodes = [coords[k][0] for k in range(N)] # x-coordinates of nodes
Ynodes = [coords[k][1] for k in range(N)] # y-coordnates of nodes
Xedges = []
Yedges = []
for e in E:
Xedges.extend([coords[e[0]][0], coords[e[1]][0], None])
Yedges.extend([coords[e[0]][1], coords[e[1]][1], None])
return Xnodes, Ynodes, Xedges, Yedges
def _text_mapper_summary(mapper_summary):
d = mapper_summary["custom_meta"]
text = "<br><b>Projection: </b>" + d["projection"]
text += (
"<br><b>Clusterer: </b>" + d["clusterer"] + "<br><b>Scaler: </b>" + d["scaler"]
)
if "color_function" in d.keys():
text += "<br><b>Color function: </b>" + d["color_function"]
return text
def _hover_format(member_ids, custom_tooltips, X, X_names, lens, lens_names):
    # Build the hover tooltip for a node; currently it only shows the cluster
    # size. `lens` and `lens_names` are accepted but unused here.
    cluster_data = _format_cluster_statistics(member_ids, X, X_names)
    tooltip = ""
    # NOTE(review): this reindexed value is never used below — presumably a
    # leftover from a richer tooltip. Confirm before removing: the indexing
    # would still raise on an incompatible `custom_tooltips` argument.
    custom_tooltips = (
        custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
    )
    val_size = cluster_data["size"]
    tooltip += "{val_size}".format(**locals())
    return tooltip
|
scikit-tda/kepler-mapper | kmapper/plotlyviz.py | plotly_graph | python | def plotly_graph(
kmgraph,
graph_layout="kk",
colorscale=None,
showscale=True,
factor_size=3,
edge_linecolor="rgb(180,180,180)",
edge_linewidth=1.5,
node_linecolor="rgb(255,255,255)",
node_linewidth=1.0,
):
"""Generate Plotly data structures that represent the mapper graph
Parameters
----------
kmgraph: dict representing the mapper graph,
returned by the function get_mapper_graph()
graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
or 'fr' (fruchterman-reingold)
colorscale: a Plotly colorscale(colormap) to color graph nodes
showscale: boolean to display or not the colorbar
factor_size: a factor for the node size
Returns
-------
The plotly traces (dicts) representing the graph edges and nodes
"""
if not colorscale:
colorscale = default_colorscale
# define an igraph.Graph instance of n_nodes
n_nodes = len(kmgraph["nodes"])
if n_nodes == 0:
raise ValueError("Your graph has 0 nodes")
G = ig.Graph(n=n_nodes)
links = [(e["source"], e["target"]) for e in kmgraph["links"]]
G.add_edges(links)
layt = G.layout(graph_layout)
hover_text = [node["name"] for node in kmgraph["nodes"]]
color_vals = [node["color"] for node in kmgraph["nodes"]]
node_size = np.array(
[factor_size * node["size"] for node in kmgraph["nodes"]], dtype=np.int
)
Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)
edge_trace = dict(
type="scatter",
x=Xe,
y=Ye,
mode="lines",
line=dict(color=edge_linecolor, width=edge_linewidth),
hoverinfo="none",
)
node_trace = dict(
type="scatter",
x=Xn,
y=Yn,
mode="markers",
marker=dict(
size=node_size.tolist(),
color=color_vals,
opacity=1.0,
colorscale=colorscale,
showscale=showscale,
line=dict(color=node_linecolor, width=node_linewidth),
colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
),
text=hover_text,
hoverinfo="text",
)
return [edge_trace, node_trace] | Generate Plotly data structures that represent the mapper graph
Parameters
----------
kmgraph: dict representing the mapper graph,
returned by the function get_mapper_graph()
graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
or 'fr' (fruchterman-reingold)
colorscale: a Plotly colorscale(colormap) to color graph nodes
showscale: boolean to display or not the colorbar
factor_size: a factor for the node size
Returns
-------
The plotly traces (dicts) representing the graph edges and nodes | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/plotlyviz.py#L325-L400 | [
"def _get_plotly_data(E, coords):\n # E : the list of tuples representing the graph edges\n # coords: list of node coordinates assigned by igraph.Layout\n N = len(coords)\n Xnodes = [coords[k][0] for k in range(N)] # x-coordinates of nodes\n Ynodes = [coords[k][1] for k in range(N)] # y-coordnates ... | from __future__ import division
import numpy as np
from .visuals import (
init_color_function,
_size_node,
_format_projection_statistics,
_format_cluster_statistics,
_color_function,
format_meta,
_to_html_format,
_map_val2color,
graph_data_distribution,
build_histogram,
_tooltip_components,
)
try:
import igraph as ig
import plotly.graph_objs as go
import ipywidgets as ipw
import plotly.io as pio
except ImportError:
print(
"""To use the plotly visualization tools, you must have the packages python-igraph, plotly, and ipywidgets installed in your environment."""
""" It looks like at least one of these is missing. Please install again with"""
"""\n\n\t`pip install python-igraph plotly ipywidgets`\n\nand try again"""
)
raise
default_colorscale = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
def mpl_to_plotly(cmap, n_entries):
h = 1.0 / (n_entries - 1)
pl_colorscale = []
for k in range(n_entries):
C = list(map(np.uint8, np.array(cmap(k * h)[:3]) * 255))
pl_colorscale.append(
[round(k * h, 2), "rgb" + str((C[0], C[1], C[2]))]
) # Python 2.7+
# pl_colorscale.append([round(k*h, 2), f'rgb({C[0]}, {C[1]}, {C[2]})']) # Python 3.6+
return pl_colorscale
def plotlyviz(
scomplex,
colorscale=None,
title="Kepler Mapper",
graph_layout="kk",
color_function=None,
color_function_name=None,
dashboard=False,
graph_data=False,
factor_size=3,
edge_linewidth=1.5,
node_linecolor="rgb(200,200,200)",
width=600,
height=500,
bgcolor="rgba(240, 240, 240, 0.95)",
left=10,
bottom=35,
summary_height=300,
summary_width=600,
summary_left=20,
summary_right=20,
hist_left=25,
hist_right=25,
member_textbox_width=800,
filename=None,
):
"""
Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
to the colorbar (the name of the color function, if any),
and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
To display more info on the generated kmapper-graph, define two more FigureWidget(s):
the global node distribution figure, and a dummy figure
that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.
Parameters
-----------
scomplex: dict
Simplicial complex is the output from the KeplerMapper `map` method.
title: str
Title of output graphic
graph_layout: igraph layout;
recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
colorscale:
Plotly colorscale(colormap) to color graph nodes
dashboard: bool, default is False
If true, display complete dashboard of node information
graph_data: bool, default is False
If true, display graph metadata
factor_size: double, default is 3
a factor for the node size
edge_linewidth : double, default is 1.5
node_linecolor: color str, default is "rgb(200,200,200)"
width: int, default is 600,
height: int, default is 500,
bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
left: int, default is 10,
bottom: int, default is 35,
summary_height: int, default is 300,
summary_width: int, default is 600,
summary_left: int, default is 20,
summary_right: int, default is 20,
hist_left: int, default is 25,
hist_right: int, default is 25,
member_textbox_width: int, default is 800,
filename: str, default is None
if filename is given, the graphic will be saved to that file.
Returns
---------
result: plotly.FigureWidget
A FigureWidget that can be shown or editted. See the Plotly Demo notebook for examples of use.
"""
if not colorscale:
colorscale = default_colorscale
kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
scomplex,
colorscale=colorscale,
color_function=color_function,
color_function_name=color_function_name,
)
annotation = get_kmgraph_meta(mapper_summary)
plgraph_data = plotly_graph(
kmgraph,
graph_layout=graph_layout,
colorscale=colorscale,
factor_size=factor_size,
edge_linewidth=edge_linewidth,
node_linecolor=node_linecolor,
)
layout = plot_layout(
title=title,
width=width,
height=height,
annotation_text=annotation,
bgcolor=bgcolor,
left=left,
bottom=bottom,
)
result = go.FigureWidget(data=plgraph_data, layout=layout)
if color_function_name:
with result.batch_update():
result.data[1].marker.colorbar.title = color_function_name
result.data[1].marker.colorbar.titlefont.size = 10
if dashboard or graph_data:
fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
fw_summary = summary_fig(
mapper_summary,
width=summary_width,
height=summary_height,
left=summary_left,
right=summary_right,
)
fw_graph = result
result = hovering_widgets(
kmgraph, fw_graph, member_textbox_width=member_textbox_width
)
if graph_data:
result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
if filename:
pio.write_image(result, filename)
return result
def scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale,
):
json_dict = {"nodes": [], "links": []}
node_id_to_num = {}
for i, (node_id, member_ids) in enumerate(simplicial_complex["nodes"].items()):
node_id_to_num[node_id] = i
projection_stats, cluster_stats, member_histogram = _tooltip_components(
member_ids, X, X_names, lens, lens_names, color_function, i, colorscale
)
n = {
"id": i,
"name": node_id,
"member_ids": member_ids,
"color": _color_function(member_ids, color_function),
"size": _size_node(member_ids),
"cluster": cluster_stats,
"distribution": member_histogram,
"projection": projection_stats,
"custom_tooltips": custom_tooltips,
}
json_dict["nodes"].append(n)
for i, (node_id, linked_node_ids) in enumerate(simplicial_complex["links"].items()):
for linked_node_id in linked_node_ids:
lnk = {
"source": node_id_to_num[node_id],
"target": node_id_to_num[linked_node_id],
}
json_dict["links"].append(lnk)
return json_dict
def get_mapper_graph(
simplicial_complex,
color_function=None,
color_function_name=None,
colorscale=None,
custom_tooltips=None,
custom_meta=None,
X=None,
X_names=None,
lens=None,
lens_names=None,
):
"""Generate data for mapper graph visualization and annotation.
Parameters
----------
simplicial_complex : dict
Simplicial complex is the output from the KeplerMapper `map` method.
Returns
-------
the graph dictionary in a json representation, the mapper summary
and the node_distribution
Example
-------
>>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)
"""
if not colorscale:
colorscale = default_colorscale
if not len(simplicial_complex["nodes"]) > 0:
raise Exception(
"A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
)
color_function = init_color_function(simplicial_complex, color_function)
if X_names is None:
X_names = []
if lens_names is None:
lens_names = []
json_graph = scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale=colorscale,
)
colorf_distribution = graph_data_distribution(
simplicial_complex, color_function, colorscale
)
mapper_summary = format_meta(
simplicial_complex,
color_function_name=color_function_name,
custom_meta=custom_meta,
)
return json_graph, mapper_summary, colorf_distribution
def plotly_graph(
kmgraph,
graph_layout="kk",
colorscale=None,
showscale=True,
factor_size=3,
edge_linecolor="rgb(180,180,180)",
edge_linewidth=1.5,
node_linecolor="rgb(255,255,255)",
node_linewidth=1.0,
):
"""Generate Plotly data structures that represent the mapper graph
Parameters
----------
kmgraph: dict representing the mapper graph,
returned by the function get_mapper_graph()
graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
or 'fr' (fruchterman-reingold)
colorscale: a Plotly colorscale(colormap) to color graph nodes
showscale: boolean to display or not the colorbar
factor_size: a factor for the node size
Returns
-------
The plotly traces (dicts) representing the graph edges and nodes
"""
if not colorscale:
colorscale = default_colorscale
# define an igraph.Graph instance of n_nodes
n_nodes = len(kmgraph["nodes"])
if n_nodes == 0:
raise ValueError("Your graph has 0 nodes")
G = ig.Graph(n=n_nodes)
links = [(e["source"], e["target"]) for e in kmgraph["links"]]
G.add_edges(links)
layt = G.layout(graph_layout)
hover_text = [node["name"] for node in kmgraph["nodes"]]
color_vals = [node["color"] for node in kmgraph["nodes"]]
node_size = np.array(
[factor_size * node["size"] for node in kmgraph["nodes"]], dtype=np.int
)
Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)
edge_trace = dict(
type="scatter",
x=Xe,
y=Ye,
mode="lines",
line=dict(color=edge_linecolor, width=edge_linewidth),
hoverinfo="none",
)
node_trace = dict(
type="scatter",
x=Xn,
y=Yn,
mode="markers",
marker=dict(
size=node_size.tolist(),
color=color_vals,
opacity=1.0,
colorscale=colorscale,
showscale=showscale,
line=dict(color=node_linecolor, width=node_linewidth),
colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
),
text=hover_text,
hoverinfo="text",
)
return [edge_trace, node_trace]
def get_kmgraph_meta(mapper_summary):
""" Extract info from mapper summary to be displayed below the graph plot
"""
d = mapper_summary["custom_meta"]
meta = (
"<b>N_cubes:</b> "
+ str(d["n_cubes"])
+ " <b>Perc_overlap:</b> "
+ str(d["perc_overlap"])
)
meta += (
"<br><b>Nodes:</b> "
+ str(mapper_summary["n_nodes"])
+ " <b>Edges:</b> "
+ str(mapper_summary["n_edges"])
+ " <b>Total samples:</b> "
+ str(mapper_summary["n_total"])
+ " <b>Unique_samples:</b> "
+ str(mapper_summary["n_unique"])
)
return meta
def plot_layout(
title="TDA KMapper",
width=600,
height=600,
bgcolor="rgba(255, 255, 255, 1)",
annotation_text=None,
annotation_x=0,
annotation_y=-0.01,
top=100,
left=60,
right=60,
bottom=60,
):
"""Set the plotly layout
Parameters
----------
width, height: integers
setting width and height of plot window
bgcolor: string,
rgba or hex color code for the background color
annotation_text: string
meta data to be displayed
annotation_x & annotation_y:
The coordinates of the point where we insert the annotation; the negative sign for y coord points output that annotation is inserted below the plot
"""
pl_layout = dict(
title=title,
font=dict(size=12),
showlegend=False,
autosize=False,
width=width,
height=height,
xaxis=dict(visible=False),
yaxis=dict(visible=False),
hovermode="closest",
plot_bgcolor=bgcolor,
margin=dict(t=top, b=bottom, l=left, r=right),
)
if annotation_text is None:
return pl_layout
else:
annotations = [
dict(
showarrow=False,
text=annotation_text,
xref="paper",
yref="paper",
x=annotation_x,
y=annotation_y,
align="left",
xanchor="left",
yanchor="top",
font=dict(size=12),
)
]
pl_layout.update(annotations=annotations)
return pl_layout
def node_hist_fig(
node_color_distribution,
title="Graph Node Distribution",
width=400,
height=300,
top=60,
left=25,
bottom=60,
right=25,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
):
"""Define the plotly plot representing the node histogram
Parameters
----------
node_color_distribution: list of dicts describing the build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes
"""
text = [
"{perc}%".format(**locals())
for perc in [d["perc"] for d in node_color_distribution]
]
pl_hist = go.Bar(
y=[d["height"] for d in node_color_distribution],
marker=dict(color=[d["color"] for d in node_color_distribution]),
text=text,
hoverinfo="y+text",
)
hist_layout = dict(
title=title,
width=width,
height=height,
font=dict(size=12),
xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
bargap=0.01,
margin=dict(l=left, r=right, b=bottom, t=top),
hovermode="x",
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=[pl_hist], layout=hist_layout)
def summary_fig(
mapper_summary,
width=600,
height=500,
top=60,
left=20,
bottom=60,
right=20,
bgcolor="rgb(240,240,240)",
):
"""Define a dummy figure that displays info on the algorithms and
sklearn class instances or methods used
Returns a FigureWidget object representing the figure
"""
text = _text_mapper_summary(mapper_summary)
data = [
dict(
type="scatter",
x=[0, width],
y=[height, 0],
mode="text",
text=[text, ""],
textposition="bottom right",
hoverinfo="none",
)
]
layout = dict(
title="Algorithms and scikit-learn objects/methods",
width=width,
height=height,
font=dict(size=12),
xaxis=dict(visible=False),
yaxis=dict(visible=False, range=[0, height + 5]),
margin=dict(t=top, b=bottom, l=left, r=right),
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=data, layout=layout)
def hovering_widgets(
kmgraph,
graph_fw,
ctooltips=False,
width=400,
height=300,
top=100,
left=50,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
member_textbox_width=200,
):
"""Defines the widgets that display the distribution of each node on hover
and the members of each nodes
Parameters
----------
kmgraph: the kepler-mapper graph dict returned by `get_mapper_graph()``
graph_fw: the FigureWidget representing the graph
ctooltips: boolean; if True/False the node["custom_tooltips"]/"member_ids"
are passed to member_textbox
width, height, top refer to the figure
size and position of the hovered node distribution
Returns
-------
a box containing the graph figure, the figure of the hovered node
distribution, and the textboxes displaying the cluster size and member_ids
or custom tooltips for hovered node members
"""
fnode = kmgraph["nodes"][0]
fwc = node_hist_fig(
fnode["distribution"],
title="Cluster Member Distribution",
width=width,
height=height,
top=top,
left=left,
bgcolor=bgcolor,
y_gridcolor=y_gridcolor,
)
clust_textbox = ipw.Text(
value="{:d}".format(fnode["cluster"]["size"]),
description="Cluster size:",
disabled=False,
continuous_update=True,
)
clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")
member_textbox = ipw.Textarea(
value=", ".join(str(x) for x in fnode["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in fnode["custom_tooltips"]),
description="Members:",
disabled=False,
continuous_update=True,
)
member_textbox.layout = dict(
margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
)
def do_on_hover(trace, points, state):
if not points.point_inds:
return
ind = points.point_inds[0] # get the index of the hovered node
node = kmgraph["nodes"][ind]
# on hover do:
with fwc.batch_update(): # update data in the cluster member histogr
fwc.data[0].text = [
"{:.1f}%".format(d["perc"]) for d in node["distribution"]
]
fwc.data[0].y = [d["height"] for d in node["distribution"]]
fwc.data[0].marker.color = [d["color"] for d in node["distribution"]]
clust_textbox.value = "{:d}".format(node["cluster"]["size"])
member_textbox.value = (
", ".join(str(x) for x in node["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in node["custom_tooltips"])
)
trace = graph_fw.data[1]
trace.on_hover(do_on_hover)
return ipw.VBox([ipw.HBox([graph_fw, fwc]), clust_textbox, member_textbox])
def _get_plotly_data(E, coords):
# E : the list of tuples representing the graph edges
# coords: list of node coordinates assigned by igraph.Layout
N = len(coords)
Xnodes = [coords[k][0] for k in range(N)] # x-coordinates of nodes
Ynodes = [coords[k][1] for k in range(N)] # y-coordnates of nodes
Xedges = []
Yedges = []
for e in E:
Xedges.extend([coords[e[0]][0], coords[e[1]][0], None])
Yedges.extend([coords[e[0]][1], coords[e[1]][1], None])
return Xnodes, Ynodes, Xedges, Yedges
def _text_mapper_summary(mapper_summary):
d = mapper_summary["custom_meta"]
text = "<br><b>Projection: </b>" + d["projection"]
text += (
"<br><b>Clusterer: </b>" + d["clusterer"] + "<br><b>Scaler: </b>" + d["scaler"]
)
if "color_function" in d.keys():
text += "<br><b>Color function: </b>" + d["color_function"]
return text
def _hover_format(member_ids, custom_tooltips, X, X_names, lens, lens_names):
cluster_data = _format_cluster_statistics(member_ids, X, X_names)
tooltip = ""
custom_tooltips = (
custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
)
val_size = cluster_data["size"]
tooltip += "{val_size}".format(**locals())
return tooltip
|
scikit-tda/kepler-mapper | kmapper/plotlyviz.py | get_kmgraph_meta | python | def get_kmgraph_meta(mapper_summary):
""" Extract info from mapper summary to be displayed below the graph plot
"""
d = mapper_summary["custom_meta"]
meta = (
"<b>N_cubes:</b> "
+ str(d["n_cubes"])
+ " <b>Perc_overlap:</b> "
+ str(d["perc_overlap"])
)
meta += (
"<br><b>Nodes:</b> "
+ str(mapper_summary["n_nodes"])
+ " <b>Edges:</b> "
+ str(mapper_summary["n_edges"])
+ " <b>Total samples:</b> "
+ str(mapper_summary["n_total"])
+ " <b>Unique_samples:</b> "
+ str(mapper_summary["n_unique"])
)
return meta | Extract info from mapper summary to be displayed below the graph plot | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/plotlyviz.py#L403-L424 | null | from __future__ import division
import numpy as np
from .visuals import (
init_color_function,
_size_node,
_format_projection_statistics,
_format_cluster_statistics,
_color_function,
format_meta,
_to_html_format,
_map_val2color,
graph_data_distribution,
build_histogram,
_tooltip_components,
)
try:
import igraph as ig
import plotly.graph_objs as go
import ipywidgets as ipw
import plotly.io as pio
except ImportError:
print(
"""To use the plotly visualization tools, you must have the packages python-igraph, plotly, and ipywidgets installed in your environment."""
""" It looks like at least one of these is missing. Please install again with"""
"""\n\n\t`pip install python-igraph plotly ipywidgets`\n\nand try again"""
)
raise
# Default node colormap: 11 evenly spaced stops approximating matplotlib's
# Viridis, in the Plotly [fraction, "rgb(r, g, b)"] colorscale format.
default_colorscale = [
    [0.0, "rgb(68, 1, 84)"],  # Viridis
    [0.1, "rgb(72, 35, 116)"],
    [0.2, "rgb(64, 67, 135)"],
    [0.3, "rgb(52, 94, 141)"],
    [0.4, "rgb(41, 120, 142)"],
    [0.5, "rgb(32, 144, 140)"],
    [0.6, "rgb(34, 167, 132)"],
    [0.7, "rgb(68, 190, 112)"],
    [0.8, "rgb(121, 209, 81)"],
    [0.9, "rgb(189, 222, 38)"],
    [1.0, "rgb(253, 231, 36)"],
]
def mpl_to_plotly(cmap, n_entries):
    """Convert a matplotlib colormap into a Plotly colorscale.

    Parameters
    ----------
    cmap: a matplotlib colormap (any callable mapping t in [0, 1] to an
        RGBA tuple of floats in [0, 1])
    n_entries: int
        Number of evenly spaced colorscale entries to sample.

    Returns
    -------
    list of [t, "rgb(r, g, b)"] pairs usable as a Plotly colorscale.
    """
    h = 1.0 / (n_entries - 1)
    pl_colorscale = []
    for k in range(n_entries):
        # Cast each channel through np.uint8 (matching the original
        # truncation semantics) and then to a plain Python int: with
        # NumPy >= 2.0 the repr of numpy scalars inside a tuple renders as
        # "np.uint8(r)", which would produce an invalid CSS color string.
        r, g, b = (int(np.uint8(c * 255)) for c in cmap(k * h)[:3])
        pl_colorscale.append([round(k * h, 2), "rgb" + str((r, g, b))])
    return pl_colorscale
def plotlyviz(
    scomplex,
    colorscale=None,
    title="Kepler Mapper",
    graph_layout="kk",
    color_function=None,
    color_function_name=None,
    dashboard=False,
    graph_data=False,
    factor_size=3,
    edge_linewidth=1.5,
    node_linecolor="rgb(200,200,200)",
    width=600,
    height=500,
    bgcolor="rgba(240, 240, 240, 0.95)",
    left=10,
    bottom=35,
    summary_height=300,
    summary_width=600,
    summary_left=20,
    summary_right=20,
    hist_left=25,
    hist_right=25,
    member_textbox_width=800,
    filename=None,
):
    """
    Visualizations and dashboards for kmapper graphs using Plotly. This
    method is suitable for use in Jupyter notebooks.

    The generated FigureWidget can be updated (by performing a restyle or
    relayout), e.g. to set the colorbar title; Plotly 3+ provides a context
    manager (``batch_update``) that batches such updates.

    When ``dashboard`` or ``graph_data`` is requested, two more
    FigureWidgets are attached: the global node-distribution histogram and
    a dummy figure showing the algorithms/sklearn objects used; via
    ``hovering_widgets()`` the graph also gets widgets that track the
    hovered node (its distribution, cluster size and member ids/labels).

    Parameters
    -----------
    scomplex: dict
        Simplicial complex, the output from the KeplerMapper `map` method.
    title: str
        Title of output graphic.
    graph_layout: igraph layout;
        recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold).
    colorscale:
        Plotly colorscale (colormap) to color graph nodes.
    color_function: optional array-like used to color the nodes.
    color_function_name: str; shown as the colorbar title when given.
    dashboard: bool, default is False
        If true, display complete dashboard of node information.
    graph_data: bool, default is False
        If true, display graph metadata.
    factor_size: double, default is 3
        a factor for the node size
    edge_linewidth : double, default is 1.5
    node_linecolor: color str, default is "rgb(200,200,200)"
    width: int, default is 600,
    height: int, default is 500,
    bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
    left: int, default is 10,
    bottom: int, default is 35,
    summary_height: int, default is 300,
    summary_width: int, default is 600,
    summary_left: int, default is 20,
    summary_right: int, default is 20,
    hist_left: int, default is 25,
    hist_right: int, default is 25,
    member_textbox_width: int, default is 800,
    filename: str, default is None
        if filename is given, the graphic will be saved to that file.

    Returns
    ---------
    result: plotly.FigureWidget
        A FigureWidget that can be shown or editted; when dashboard or
        graph_data is requested, an ipywidgets box wrapping the figures.
    """
    if not colorscale:
        colorscale = default_colorscale

    # Build the JSON-style graph, the textual summary and the global node
    # color distribution from the simplicial complex.
    kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
        scomplex,
        colorscale=colorscale,
        color_function=color_function,
        color_function_name=color_function_name,
    )
    annotation = get_kmgraph_meta(mapper_summary)

    plgraph_data = plotly_graph(
        kmgraph,
        graph_layout=graph_layout,
        colorscale=colorscale,
        factor_size=factor_size,
        edge_linewidth=edge_linewidth,
        node_linecolor=node_linecolor,
    )
    layout = plot_layout(
        title=title,
        width=width,
        height=height,
        annotation_text=annotation,
        bgcolor=bgcolor,
        left=left,
        bottom=bottom,
    )

    result = go.FigureWidget(data=plgraph_data, layout=layout)

    if color_function_name:
        # Batch both colorbar tweaks into a single widget update.
        with result.batch_update():
            result.data[1].marker.colorbar.title = color_function_name
            result.data[1].marker.colorbar.titlefont.size = 10

    if dashboard or graph_data:
        # Auxiliary figures: global color histogram and algorithm summary.
        fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
        fw_summary = summary_fig(
            mapper_summary,
            width=summary_width,
            height=summary_height,
            left=summary_left,
            right=summary_right,
        )

        fw_graph = result
        # Wrap the graph with hover-driven widgets (per-node histogram and
        # member textboxes).
        result = hovering_widgets(
            kmgraph, fw_graph, member_textbox_width=member_textbox_width
        )

        if graph_data:
            result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])

    if filename:
        # Static export; requires the plotly image export toolchain.
        pio.write_image(result, filename)

    return result
def scomplex_to_graph(
    simplicial_complex,
    color_function,
    X,
    X_names,
    lens,
    lens_names,
    custom_tooltips,
    colorscale,
):
    """Translate a KeplerMapper simplicial complex into a JSON-style dict
    with ``nodes`` and ``links`` lists suitable for the Plotly graph
    builders in this module."""
    graph = {"nodes": [], "links": []}
    index_of = {}
    for num, (name, members) in enumerate(simplicial_complex["nodes"].items()):
        index_of[name] = num
        proj_stats, clus_stats, histogram = _tooltip_components(
            members, X, X_names, lens, lens_names, color_function, num, colorscale
        )
        graph["nodes"].append(
            {
                "id": num,
                "name": name,
                "member_ids": members,
                "color": _color_function(members, color_function),
                "size": _size_node(members),
                "cluster": clus_stats,
                "distribution": histogram,
                "projection": proj_stats,
                "custom_tooltips": custom_tooltips,
            }
        )
    # Links reference nodes by the integer ids assigned above.
    for name, targets in simplicial_complex["links"].items():
        for target in targets:
            graph["links"].append(
                {"source": index_of[name], "target": index_of[target]}
            )
    return graph
def get_mapper_graph(
    simplicial_complex,
    color_function=None,
    color_function_name=None,
    colorscale=None,
    custom_tooltips=None,
    custom_meta=None,
    X=None,
    X_names=None,
    lens=None,
    lens_names=None,
):
    """Generate data for mapper graph visualization and annotation.

    Parameters
    ----------
    simplicial_complex : dict
        Simplicial complex is the output from the KeplerMapper `map` method.

    Returns
    -------
    The graph dictionary in a json representation, the mapper summary
    and the node color distribution.

    Example
    -------

    >>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)
    """
    colorscale = colorscale or default_colorscale

    if len(simplicial_complex["nodes"]) == 0:
        raise Exception(
            "A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
        )

    color_function = init_color_function(simplicial_complex, color_function)

    X_names = [] if X_names is None else X_names
    lens_names = [] if lens_names is None else lens_names

    json_graph = scomplex_to_graph(
        simplicial_complex,
        color_function,
        X,
        X_names,
        lens,
        lens_names,
        custom_tooltips,
        colorscale=colorscale,
    )
    colorf_distribution = graph_data_distribution(
        simplicial_complex, color_function, colorscale
    )
    mapper_summary = format_meta(
        simplicial_complex,
        color_function_name=color_function_name,
        custom_meta=custom_meta,
    )
    return json_graph, mapper_summary, colorf_distribution
def plotly_graph(
    kmgraph,
    graph_layout="kk",
    colorscale=None,
    showscale=True,
    factor_size=3,
    edge_linecolor="rgb(180,180,180)",
    edge_linewidth=1.5,
    node_linecolor="rgb(255,255,255)",
    node_linewidth=1.0,
):
    """Generate Plotly data structures that represent the mapper graph.

    Parameters
    ----------
    kmgraph: dict representing the mapper graph,
        returned by the function get_mapper_graph()
    graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
        or 'fr' (fruchterman-reingold)
    colorscale: a Plotly colorscale(colormap) to color graph nodes
    showscale: boolean to display or not the colorbar
    factor_size: a factor for the node size

    Returns
    -------
    The plotly traces (dicts) representing the graph edges and nodes.

    Raises
    ------
    ValueError
        If the graph has no nodes.
    """
    if not colorscale:
        colorscale = default_colorscale

    # define an igraph.Graph instance of n_nodes
    n_nodes = len(kmgraph["nodes"])
    if n_nodes == 0:
        raise ValueError("Your graph has 0 nodes")

    G = ig.Graph(n=n_nodes)
    links = [(e["source"], e["target"]) for e in kmgraph["links"]]
    G.add_edges(links)
    # igraph computes the 2D node positions for the requested layout.
    layt = G.layout(graph_layout)

    hover_text = [node["name"] for node in kmgraph["nodes"]]
    color_vals = [node["color"] for node in kmgraph["nodes"]]
    # Use the builtin ``int``: the ``np.int`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, so ``dtype=np.int`` raises
    # AttributeError on current NumPy.
    node_size = np.array(
        [factor_size * node["size"] for node in kmgraph["nodes"]], dtype=int
    )
    Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)

    edge_trace = dict(
        type="scatter",
        x=Xe,
        y=Ye,
        mode="lines",
        line=dict(color=edge_linecolor, width=edge_linewidth),
        hoverinfo="none",
    )
    node_trace = dict(
        type="scatter",
        x=Xn,
        y=Yn,
        mode="markers",
        marker=dict(
            size=node_size.tolist(),
            color=color_vals,
            opacity=1.0,
            colorscale=colorscale,
            showscale=showscale,
            line=dict(color=node_linecolor, width=node_linewidth),
            colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
        ),
        text=hover_text,
        hoverinfo="text",
    )
    return [edge_trace, node_trace]
def get_kmgraph_meta(mapper_summary):
    """Build the HTML annotation displayed below the graph plot: the cover
    parameters plus the node/edge/sample counts from the mapper summary."""
    d = mapper_summary["custom_meta"]
    meta = "<b>N_cubes:</b> {} <b>Perc_overlap:</b> {}".format(
        d["n_cubes"], d["perc_overlap"]
    )
    meta += (
        "<br><b>Nodes:</b> {} <b>Edges:</b> {}"
        " <b>Total samples:</b> {} <b>Unique_samples:</b> {}".format(
            mapper_summary["n_nodes"],
            mapper_summary["n_edges"],
            mapper_summary["n_total"],
            mapper_summary["n_unique"],
        )
    )
    return meta
def plot_layout(
    title="TDA KMapper",
    width=600,
    height=600,
    bgcolor="rgba(255, 255, 255, 1)",
    annotation_text=None,
    annotation_x=0,
    annotation_y=-0.01,
    top=100,
    left=60,
    right=60,
    bottom=60,
):
    """Build the Plotly layout dict for the mapper graph figure.

    Parameters
    ----------
    width, height: integers
        setting width and height of plot window
    bgcolor: string,
        rgba or hex color code for the background color
    annotation_text: string
        meta data to be displayed below the plot; when None, no
        annotation entry is added to the layout
    annotation_x & annotation_y:
        coordinates of the annotation anchor; a negative y places the
        annotation below the plot area
    """
    layout = dict(
        title=title,
        font=dict(size=12),
        showlegend=False,
        autosize=False,
        width=width,
        height=height,
        xaxis=dict(visible=False),
        yaxis=dict(visible=False),
        hovermode="closest",
        plot_bgcolor=bgcolor,
        margin=dict(t=top, b=bottom, l=left, r=right),
    )
    if annotation_text is not None:
        layout["annotations"] = [
            dict(
                showarrow=False,
                text=annotation_text,
                xref="paper",
                yref="paper",
                x=annotation_x,
                y=annotation_y,
                align="left",
                xanchor="left",
                yanchor="top",
                font=dict(size=12),
            )
        ]
    return layout
def node_hist_fig(
    node_color_distribution,
    title="Graph Node Distribution",
    width=400,
    height=300,
    top=60,
    left=25,
    bottom=60,
    right=25,
    bgcolor="rgb(240,240,240)",
    y_gridcolor="white",
):
    """Build the FigureWidget showing the node color histogram.

    Parameters
    ----------
    node_color_distribution: list of dicts describing the build_histogram
    width, height: integers - width and height of the histogram FigureWidget
    left, top, right, bottom: ints; number of pixels around the FigureWidget
    bgcolor: rgb of hex color code for the figure background color
    y_gridcolor: rgb of hex color code for the yaxis y_gridcolor

    Returns
    -------
    FigureWidget object representing the histogram of the graph nodes
    """
    heights = [bar["height"] for bar in node_color_distribution]
    colors = [bar["color"] for bar in node_color_distribution]
    labels = ["{perc}%".format(perc=bar["perc"]) for bar in node_color_distribution]

    bars = go.Bar(
        y=heights,
        marker=dict(color=colors),
        text=labels,
        hoverinfo="y+text",
    )

    hist_layout = dict(
        title=title,
        width=width,
        height=height,
        font=dict(size=12),
        xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
        yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
        bargap=0.01,
        margin=dict(l=left, r=right, b=bottom, t=top),
        hovermode="x",
        plot_bgcolor=bgcolor,
    )
    return go.FigureWidget(data=[bars], layout=hist_layout)
def summary_fig(
    mapper_summary,
    width=600,
    height=500,
    top=60,
    left=20,
    bottom=60,
    right=20,
    bgcolor="rgb(240,240,240)",
):
    """Define a dummy figure that displays info on the algorithms and
    sklearn class instances or methods used.

    Returns a FigureWidget object representing the figure.
    """
    summary_text = _text_mapper_summary(mapper_summary)

    # A single invisible-axes scatter trace rendered in "text" mode is used
    # purely as a canvas for the summary string.
    trace = dict(
        type="scatter",
        x=[0, width],
        y=[height, 0],
        mode="text",
        text=[summary_text, ""],
        textposition="bottom right",
        hoverinfo="none",
    )
    fig_layout = dict(
        title="Algorithms and scikit-learn objects/methods",
        width=width,
        height=height,
        font=dict(size=12),
        xaxis=dict(visible=False),
        yaxis=dict(visible=False, range=[0, height + 5]),
        margin=dict(t=top, b=bottom, l=left, r=right),
        plot_bgcolor=bgcolor,
    )
    return go.FigureWidget(data=[trace], layout=fig_layout)
def hovering_widgets(
    kmgraph,
    graph_fw,
    ctooltips=False,
    width=400,
    height=300,
    top=100,
    left=50,
    bgcolor="rgb(240,240,240)",
    y_gridcolor="white",
    member_textbox_width=200,
):
    """Defines the widgets that display the distribution of each node on hover
    and the members of each nodes

    Parameters
    ----------
    kmgraph: the kepler-mapper graph dict returned by `get_mapper_graph()`
    graph_fw: the FigureWidget representing the graph
    ctooltips: boolean; if True/False the node["custom_tooltips"]/"member_ids"
        are passed to member_textbox
    width, height, top refer to the figure
        size and position of the hovered node distribution

    Returns
    -------
    a box containing the graph figure, the figure of the hovered node
    distribution, and the textboxes displaying the cluster size and member_ids
    or custom tooltips for hovered node members
    """
    # Seed every widget with the first node so they render before any hover.
    fnode = kmgraph["nodes"][0]
    fwc = node_hist_fig(
        fnode["distribution"],
        title="Cluster Member Distribution",
        width=width,
        height=height,
        top=top,
        left=left,
        bgcolor=bgcolor,
        y_gridcolor=y_gridcolor,
    )
    clust_textbox = ipw.Text(
        value="{:d}".format(fnode["cluster"]["size"]),
        description="Cluster size:",
        disabled=False,
        continuous_update=True,
    )
    clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")
    member_textbox = ipw.Textarea(
        value=", ".join(str(x) for x in fnode["member_ids"])
        if not ctooltips
        else ", ".join(str(x) for x in fnode["custom_tooltips"]),
        description="Members:",
        disabled=False,
        continuous_update=True,
    )
    member_textbox.layout = dict(
        margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
    )

    def do_on_hover(trace, points, state):
        # Plotly can fire hover callbacks with an empty point list; ignore.
        if not points.point_inds:
            return
        ind = points.point_inds[0]  # get the index of the hovered node
        node = kmgraph["nodes"][ind]
        # on hover do:
        with fwc.batch_update():  # update data in the cluster member histogram
            fwc.data[0].text = [
                "{:.1f}%".format(d["perc"]) for d in node["distribution"]
            ]
            fwc.data[0].y = [d["height"] for d in node["distribution"]]
            fwc.data[0].marker.color = [d["color"] for d in node["distribution"]]
        clust_textbox.value = "{:d}".format(node["cluster"]["size"])
        member_textbox.value = (
            ", ".join(str(x) for x in node["member_ids"])
            if not ctooltips
            else ", ".join(str(x) for x in node["custom_tooltips"])
        )

    # Attach the callback to the node trace (index 1; index 0 is the edges).
    trace = graph_fw.data[1]
    trace.on_hover(do_on_hover)
    return ipw.VBox([ipw.HBox([graph_fw, fwc]), clust_textbox, member_textbox])
def _get_plotly_data(E, coords):
# E : the list of tuples representing the graph edges
# coords: list of node coordinates assigned by igraph.Layout
N = len(coords)
Xnodes = [coords[k][0] for k in range(N)] # x-coordinates of nodes
Ynodes = [coords[k][1] for k in range(N)] # y-coordnates of nodes
Xedges = []
Yedges = []
for e in E:
Xedges.extend([coords[e[0]][0], coords[e[1]][0], None])
Yedges.extend([coords[e[0]][1], coords[e[1]][1], None])
return Xnodes, Ynodes, Xedges, Yedges
def _text_mapper_summary(mapper_summary):
d = mapper_summary["custom_meta"]
text = "<br><b>Projection: </b>" + d["projection"]
text += (
"<br><b>Clusterer: </b>" + d["clusterer"] + "<br><b>Scaler: </b>" + d["scaler"]
)
if "color_function" in d.keys():
text += "<br><b>Color function: </b>" + d["color_function"]
return text
def _hover_format(member_ids, custom_tooltips, X, X_names, lens, lens_names):
    """Return the hover text for a node: its cluster size as a string.

    Only the ``size`` entry from ``_format_cluster_statistics`` reaches the
    output; the remaining parameters are accepted for interface parity with
    the other tooltip helpers in this module.
    """
    cluster_data = _format_cluster_statistics(member_ids, X, X_names)
    # Preserve the original side effect: indexing custom_tooltips may raise
    # for inconsistent inputs, which callers could rely on.
    custom_tooltips = (
        custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
    )
    # Replaces the fragile '"{val_size}".format(**locals())' idiom with a
    # direct str() conversion; the produced string is identical.
    return str(cluster_data["size"])
|
scikit-tda/kepler-mapper | kmapper/plotlyviz.py | plot_layout | python | def plot_layout(
title="TDA KMapper",
width=600,
height=600,
bgcolor="rgba(255, 255, 255, 1)",
annotation_text=None,
annotation_x=0,
annotation_y=-0.01,
top=100,
left=60,
right=60,
bottom=60,
):
"""Set the plotly layout
Parameters
----------
width, height: integers
setting width and height of plot window
bgcolor: string,
rgba or hex color code for the background color
annotation_text: string
meta data to be displayed
annotation_x & annotation_y:
The coordinates of the point where we insert the annotation; the negative sign for y coord points output that annotation is inserted below the plot
"""
pl_layout = dict(
title=title,
font=dict(size=12),
showlegend=False,
autosize=False,
width=width,
height=height,
xaxis=dict(visible=False),
yaxis=dict(visible=False),
hovermode="closest",
plot_bgcolor=bgcolor,
margin=dict(t=top, b=bottom, l=left, r=right),
)
if annotation_text is None:
return pl_layout
else:
annotations = [
dict(
showarrow=False,
text=annotation_text,
xref="paper",
yref="paper",
x=annotation_x,
y=annotation_y,
align="left",
xanchor="left",
yanchor="top",
font=dict(size=12),
)
]
pl_layout.update(annotations=annotations)
return pl_layout | Set the plotly layout
Parameters
----------
width, height: integers
setting width and height of plot window
bgcolor: string,
rgba or hex color code for the background color
annotation_text: string
meta data to be displayed
annotation_x & annotation_y:
The coordinates of the point where we insert the annotation; the negative sign for y coord points output that annotation is inserted below the plot | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/plotlyviz.py#L427-L485 | null | from __future__ import division
import numpy as np
from .visuals import (
init_color_function,
_size_node,
_format_projection_statistics,
_format_cluster_statistics,
_color_function,
format_meta,
_to_html_format,
_map_val2color,
graph_data_distribution,
build_histogram,
_tooltip_components,
)
try:
import igraph as ig
import plotly.graph_objs as go
import ipywidgets as ipw
import plotly.io as pio
except ImportError:
print(
"""To use the plotly visualization tools, you must have the packages python-igraph, plotly, and ipywidgets installed in your environment."""
""" It looks like at least one of these is missing. Please install again with"""
"""\n\n\t`pip install python-igraph plotly ipywidgets`\n\nand try again"""
)
raise
default_colorscale = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
def mpl_to_plotly(cmap, n_entries):
    """Convert a matplotlib colormap into a Plotly colorscale.

    Parameters
    ----------
    cmap: a matplotlib colormap (any callable mapping t in [0, 1] to an
        RGBA tuple of floats in [0, 1])
    n_entries: int
        Number of evenly spaced colorscale entries to sample.

    Returns
    -------
    list of [t, "rgb(r, g, b)"] pairs usable as a Plotly colorscale.
    """
    h = 1.0 / (n_entries - 1)
    pl_colorscale = []
    for k in range(n_entries):
        # Cast each channel through np.uint8 (matching the original
        # truncation semantics) and then to a plain Python int: with
        # NumPy >= 2.0 the repr of numpy scalars inside a tuple renders as
        # "np.uint8(r)", which would produce an invalid CSS color string.
        r, g, b = (int(np.uint8(c * 255)) for c in cmap(k * h)[:3])
        pl_colorscale.append([round(k * h, 2), "rgb" + str((r, g, b))])
    return pl_colorscale
def plotlyviz(
scomplex,
colorscale=None,
title="Kepler Mapper",
graph_layout="kk",
color_function=None,
color_function_name=None,
dashboard=False,
graph_data=False,
factor_size=3,
edge_linewidth=1.5,
node_linecolor="rgb(200,200,200)",
width=600,
height=500,
bgcolor="rgba(240, 240, 240, 0.95)",
left=10,
bottom=35,
summary_height=300,
summary_width=600,
summary_left=20,
summary_right=20,
hist_left=25,
hist_right=25,
member_textbox_width=800,
filename=None,
):
"""
Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
to the colorbar (the name of the color function, if any),
and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
To display more info on the generated kmapper-graph, define two more FigureWidget(s):
the global node distribution figure, and a dummy figure
that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.
Parameters
-----------
scomplex: dict
Simplicial complex is the output from the KeplerMapper `map` method.
title: str
Title of output graphic
graph_layout: igraph layout;
recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
colorscale:
Plotly colorscale(colormap) to color graph nodes
dashboard: bool, default is False
If true, display complete dashboard of node information
graph_data: bool, default is False
If true, display graph metadata
factor_size: double, default is 3
a factor for the node size
edge_linewidth : double, default is 1.5
node_linecolor: color str, default is "rgb(200,200,200)"
width: int, default is 600,
height: int, default is 500,
bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
left: int, default is 10,
bottom: int, default is 35,
summary_height: int, default is 300,
summary_width: int, default is 600,
summary_left: int, default is 20,
summary_right: int, default is 20,
hist_left: int, default is 25,
hist_right: int, default is 25,
member_textbox_width: int, default is 800,
filename: str, default is None
if filename is given, the graphic will be saved to that file.
Returns
---------
result: plotly.FigureWidget
A FigureWidget that can be shown or editted. See the Plotly Demo notebook for examples of use.
"""
if not colorscale:
colorscale = default_colorscale
kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
scomplex,
colorscale=colorscale,
color_function=color_function,
color_function_name=color_function_name,
)
annotation = get_kmgraph_meta(mapper_summary)
plgraph_data = plotly_graph(
kmgraph,
graph_layout=graph_layout,
colorscale=colorscale,
factor_size=factor_size,
edge_linewidth=edge_linewidth,
node_linecolor=node_linecolor,
)
layout = plot_layout(
title=title,
width=width,
height=height,
annotation_text=annotation,
bgcolor=bgcolor,
left=left,
bottom=bottom,
)
result = go.FigureWidget(data=plgraph_data, layout=layout)
if color_function_name:
with result.batch_update():
result.data[1].marker.colorbar.title = color_function_name
result.data[1].marker.colorbar.titlefont.size = 10
if dashboard or graph_data:
fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
fw_summary = summary_fig(
mapper_summary,
width=summary_width,
height=summary_height,
left=summary_left,
right=summary_right,
)
fw_graph = result
result = hovering_widgets(
kmgraph, fw_graph, member_textbox_width=member_textbox_width
)
if graph_data:
result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
if filename:
pio.write_image(result, filename)
return result
def scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale,
):
json_dict = {"nodes": [], "links": []}
node_id_to_num = {}
for i, (node_id, member_ids) in enumerate(simplicial_complex["nodes"].items()):
node_id_to_num[node_id] = i
projection_stats, cluster_stats, member_histogram = _tooltip_components(
member_ids, X, X_names, lens, lens_names, color_function, i, colorscale
)
n = {
"id": i,
"name": node_id,
"member_ids": member_ids,
"color": _color_function(member_ids, color_function),
"size": _size_node(member_ids),
"cluster": cluster_stats,
"distribution": member_histogram,
"projection": projection_stats,
"custom_tooltips": custom_tooltips,
}
json_dict["nodes"].append(n)
for i, (node_id, linked_node_ids) in enumerate(simplicial_complex["links"].items()):
for linked_node_id in linked_node_ids:
lnk = {
"source": node_id_to_num[node_id],
"target": node_id_to_num[linked_node_id],
}
json_dict["links"].append(lnk)
return json_dict
def get_mapper_graph(
simplicial_complex,
color_function=None,
color_function_name=None,
colorscale=None,
custom_tooltips=None,
custom_meta=None,
X=None,
X_names=None,
lens=None,
lens_names=None,
):
"""Generate data for mapper graph visualization and annotation.
Parameters
----------
simplicial_complex : dict
Simplicial complex is the output from the KeplerMapper `map` method.
Returns
-------
the graph dictionary in a json representation, the mapper summary
and the node_distribution
Example
-------
>>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)
"""
if not colorscale:
colorscale = default_colorscale
if not len(simplicial_complex["nodes"]) > 0:
raise Exception(
"A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
)
color_function = init_color_function(simplicial_complex, color_function)
if X_names is None:
X_names = []
if lens_names is None:
lens_names = []
json_graph = scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale=colorscale,
)
colorf_distribution = graph_data_distribution(
simplicial_complex, color_function, colorscale
)
mapper_summary = format_meta(
simplicial_complex,
color_function_name=color_function_name,
custom_meta=custom_meta,
)
return json_graph, mapper_summary, colorf_distribution
def plotly_graph(
    kmgraph,
    graph_layout="kk",
    colorscale=None,
    showscale=True,
    factor_size=3,
    edge_linecolor="rgb(180,180,180)",
    edge_linewidth=1.5,
    node_linecolor="rgb(255,255,255)",
    node_linewidth=1.0,
):
    """Generate Plotly data structures that represent the mapper graph.

    Parameters
    ----------
    kmgraph: dict representing the mapper graph,
        returned by the function get_mapper_graph()
    graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
        or 'fr' (fruchterman-reingold)
    colorscale: a Plotly colorscale(colormap) to color graph nodes
    showscale: boolean to display or not the colorbar
    factor_size: a factor for the node size

    Returns
    -------
    The plotly traces (dicts) representing the graph edges and nodes.

    Raises
    ------
    ValueError
        If the graph has no nodes.
    """
    if not colorscale:
        colorscale = default_colorscale

    # define an igraph.Graph instance of n_nodes
    n_nodes = len(kmgraph["nodes"])
    if n_nodes == 0:
        raise ValueError("Your graph has 0 nodes")

    G = ig.Graph(n=n_nodes)
    links = [(e["source"], e["target"]) for e in kmgraph["links"]]
    G.add_edges(links)
    # igraph computes the 2D node positions for the requested layout.
    layt = G.layout(graph_layout)

    hover_text = [node["name"] for node in kmgraph["nodes"]]
    color_vals = [node["color"] for node in kmgraph["nodes"]]
    # Use the builtin ``int``: the ``np.int`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, so ``dtype=np.int`` raises
    # AttributeError on current NumPy.
    node_size = np.array(
        [factor_size * node["size"] for node in kmgraph["nodes"]], dtype=int
    )
    Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)

    edge_trace = dict(
        type="scatter",
        x=Xe,
        y=Ye,
        mode="lines",
        line=dict(color=edge_linecolor, width=edge_linewidth),
        hoverinfo="none",
    )
    node_trace = dict(
        type="scatter",
        x=Xn,
        y=Yn,
        mode="markers",
        marker=dict(
            size=node_size.tolist(),
            color=color_vals,
            opacity=1.0,
            colorscale=colorscale,
            showscale=showscale,
            line=dict(color=node_linecolor, width=node_linewidth),
            colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
        ),
        text=hover_text,
        hoverinfo="text",
    )
    return [edge_trace, node_trace]
def get_kmgraph_meta(mapper_summary):
    """Build the HTML annotation displayed below the graph plot: the cover
    parameters plus the node/edge/sample counts from the mapper summary."""
    d = mapper_summary["custom_meta"]
    meta = "<b>N_cubes:</b> {} <b>Perc_overlap:</b> {}".format(
        d["n_cubes"], d["perc_overlap"]
    )
    meta += (
        "<br><b>Nodes:</b> {} <b>Edges:</b> {}"
        " <b>Total samples:</b> {} <b>Unique_samples:</b> {}".format(
            mapper_summary["n_nodes"],
            mapper_summary["n_edges"],
            mapper_summary["n_total"],
            mapper_summary["n_unique"],
        )
    )
    return meta
def plot_layout(
    title="TDA KMapper",
    width=600,
    height=600,
    bgcolor="rgba(255, 255, 255, 1)",
    annotation_text=None,
    annotation_x=0,
    annotation_y=-0.01,
    top=100,
    left=60,
    right=60,
    bottom=60,
):
    """Build the Plotly layout dict for the mapper graph figure.

    Parameters
    ----------
    width, height: integers
        setting width and height of plot window
    bgcolor: string,
        rgba or hex color code for the background color
    annotation_text: string
        meta data to be displayed below the plot; when None, no
        annotation entry is added to the layout
    annotation_x & annotation_y:
        coordinates of the annotation anchor; a negative y places the
        annotation below the plot area
    """
    layout = dict(
        title=title,
        font=dict(size=12),
        showlegend=False,
        autosize=False,
        width=width,
        height=height,
        xaxis=dict(visible=False),
        yaxis=dict(visible=False),
        hovermode="closest",
        plot_bgcolor=bgcolor,
        margin=dict(t=top, b=bottom, l=left, r=right),
    )
    if annotation_text is not None:
        layout["annotations"] = [
            dict(
                showarrow=False,
                text=annotation_text,
                xref="paper",
                yref="paper",
                x=annotation_x,
                y=annotation_y,
                align="left",
                xanchor="left",
                yanchor="top",
                font=dict(size=12),
            )
        ]
    return layout
def node_hist_fig(
node_color_distribution,
title="Graph Node Distribution",
width=400,
height=300,
top=60,
left=25,
bottom=60,
right=25,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
):
"""Define the plotly plot representing the node histogram
Parameters
----------
node_color_distribution: list of dicts describing the build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes
"""
text = [
"{perc}%".format(**locals())
for perc in [d["perc"] for d in node_color_distribution]
]
pl_hist = go.Bar(
y=[d["height"] for d in node_color_distribution],
marker=dict(color=[d["color"] for d in node_color_distribution]),
text=text,
hoverinfo="y+text",
)
hist_layout = dict(
title=title,
width=width,
height=height,
font=dict(size=12),
xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
bargap=0.01,
margin=dict(l=left, r=right, b=bottom, t=top),
hovermode="x",
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=[pl_hist], layout=hist_layout)
def summary_fig(
mapper_summary,
width=600,
height=500,
top=60,
left=20,
bottom=60,
right=20,
bgcolor="rgb(240,240,240)",
):
"""Define a dummy figure that displays info on the algorithms and
sklearn class instances or methods used
Returns a FigureWidget object representing the figure
"""
text = _text_mapper_summary(mapper_summary)
data = [
dict(
type="scatter",
x=[0, width],
y=[height, 0],
mode="text",
text=[text, ""],
textposition="bottom right",
hoverinfo="none",
)
]
layout = dict(
title="Algorithms and scikit-learn objects/methods",
width=width,
height=height,
font=dict(size=12),
xaxis=dict(visible=False),
yaxis=dict(visible=False, range=[0, height + 5]),
margin=dict(t=top, b=bottom, l=left, r=right),
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=data, layout=layout)
def hovering_widgets(
    kmgraph,
    graph_fw,
    ctooltips=False,
    width=400,
    height=300,
    top=100,
    left=50,
    bgcolor="rgb(240,240,240)",
    y_gridcolor="white",
    member_textbox_width=200,
):
    """Define the widgets that display the distribution of each node on hover
    and the members of each node.

    Parameters
    ----------
    kmgraph: the kepler-mapper graph dict returned by ``get_mapper_graph()``
    graph_fw: the FigureWidget representing the graph
    ctooltips: boolean; if True the node["custom_tooltips"] are shown in
        member_textbox, otherwise node["member_ids"] are
    width, height, top, left: size and position of the hovered-node
        distribution figure
    bgcolor, y_gridcolor: colors forwarded to ``node_hist_fig``
    member_textbox_width: pixel width of the members textarea

    Returns
    -------
    an ipywidgets VBox containing the graph figure, the figure of the
    hovered node distribution, and the textboxes displaying the cluster
    size and member_ids or custom tooltips for hovered node members
    """
    # Seed the widgets with the first node so they show data before any
    # hover event has fired.
    fnode = kmgraph["nodes"][0]
    fwc = node_hist_fig(
        fnode["distribution"],
        title="Cluster Member Distribution",
        width=width,
        height=height,
        top=top,
        left=left,
        bgcolor=bgcolor,
        y_gridcolor=y_gridcolor,
    )
    clust_textbox = ipw.Text(
        value="{:d}".format(fnode["cluster"]["size"]),
        description="Cluster size:",
        disabled=False,
        continuous_update=True,
    )
    clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")
    member_textbox = ipw.Textarea(
        value=", ".join(str(x) for x in fnode["member_ids"])
        if not ctooltips
        else ", ".join(str(x) for x in fnode["custom_tooltips"]),
        description="Members:",
        disabled=False,
        continuous_update=True,
    )
    member_textbox.layout = dict(
        margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
    )
    def do_on_hover(trace, points, state):
        # Plotly hover callback: refresh the histogram and the two
        # textboxes with the data of the node under the cursor.
        if not points.point_inds:
            return
        ind = points.point_inds[0]  # get the index of the hovered node
        node = kmgraph["nodes"][ind]
        # on hover do:
        with fwc.batch_update():  # update data in the cluster member histogr
            fwc.data[0].text = [
                "{:.1f}%".format(d["perc"]) for d in node["distribution"]
            ]
            fwc.data[0].y = [d["height"] for d in node["distribution"]]
            fwc.data[0].marker.color = [d["color"] for d in node["distribution"]]
        clust_textbox.value = "{:d}".format(node["cluster"]["size"])
        member_textbox.value = (
            ", ".join(str(x) for x in node["member_ids"])
            if not ctooltips
            else ", ".join(str(x) for x in node["custom_tooltips"])
        )
    # data[1] is the node trace (data[0] holds the edges); attach the
    # hover listener to the nodes only.
    trace = graph_fw.data[1]
    trace.on_hover(do_on_hover)
    return ipw.VBox([ipw.HBox([graph_fw, fwc]), clust_textbox, member_textbox])
def _get_plotly_data(E, coords):
# E : the list of tuples representing the graph edges
# coords: list of node coordinates assigned by igraph.Layout
N = len(coords)
Xnodes = [coords[k][0] for k in range(N)] # x-coordinates of nodes
Ynodes = [coords[k][1] for k in range(N)] # y-coordnates of nodes
Xedges = []
Yedges = []
for e in E:
Xedges.extend([coords[e[0]][0], coords[e[1]][0], None])
Yedges.extend([coords[e[0]][1], coords[e[1]][1], None])
return Xnodes, Ynodes, Xedges, Yedges
def _text_mapper_summary(mapper_summary):
d = mapper_summary["custom_meta"]
text = "<br><b>Projection: </b>" + d["projection"]
text += (
"<br><b>Clusterer: </b>" + d["clusterer"] + "<br><b>Scaler: </b>" + d["scaler"]
)
if "color_function" in d.keys():
text += "<br><b>Color function: </b>" + d["color_function"]
return text
def _hover_format(member_ids, custom_tooltips, X, X_names, lens, lens_names):
    """Return the hover tooltip text for a node: the cluster size as a string."""
    stats = _format_cluster_statistics(member_ids, X, X_names)
    # Selected tooltips are computed for parity with the HTML visualizer,
    # but the returned tooltip currently shows only the cluster size.
    custom_tooltips = (
        member_ids if custom_tooltips is None else custom_tooltips[member_ids]
    )
    return "{}".format(stats["size"])
|
scikit-tda/kepler-mapper | kmapper/plotlyviz.py | node_hist_fig | python | def node_hist_fig(
node_color_distribution,
title="Graph Node Distribution",
width=400,
height=300,
top=60,
left=25,
bottom=60,
right=25,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
):
"""Define the plotly plot representing the node histogram
Parameters
----------
node_color_distribution: list of dicts describing the build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes
"""
text = [
"{perc}%".format(**locals())
for perc in [d["perc"] for d in node_color_distribution]
]
pl_hist = go.Bar(
y=[d["height"] for d in node_color_distribution],
marker=dict(color=[d["color"] for d in node_color_distribution]),
text=text,
hoverinfo="y+text",
)
hist_layout = dict(
title=title,
width=width,
height=height,
font=dict(size=12),
xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
bargap=0.01,
margin=dict(l=left, r=right, b=bottom, t=top),
hovermode="x",
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=[pl_hist], layout=hist_layout) | Define the plotly plot representing the node histogram
Parameters
----------
node_color_distribution: list of dicts describing the build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/plotlyviz.py#L488-L540 | null | from __future__ import division
import numpy as np
from .visuals import (
init_color_function,
_size_node,
_format_projection_statistics,
_format_cluster_statistics,
_color_function,
format_meta,
_to_html_format,
_map_val2color,
graph_data_distribution,
build_histogram,
_tooltip_components,
)
try:
import igraph as ig
import plotly.graph_objs as go
import ipywidgets as ipw
import plotly.io as pio
except ImportError:
print(
"""To use the plotly visualization tools, you must have the packages python-igraph, plotly, and ipywidgets installed in your environment."""
""" It looks like at least one of these is missing. Please install again with"""
"""\n\n\t`pip install python-igraph plotly ipywidgets`\n\nand try again"""
)
raise
# Default node colormap: an 11-stop approximation of matplotlib's Viridis,
# expressed as the [fraction, rgb-string] stop pairs Plotly expects.
default_colorscale = [
    [0.0, "rgb(68, 1, 84)"],  # Viridis
    [0.1, "rgb(72, 35, 116)"],
    [0.2, "rgb(64, 67, 135)"],
    [0.3, "rgb(52, 94, 141)"],
    [0.4, "rgb(41, 120, 142)"],
    [0.5, "rgb(32, 144, 140)"],
    [0.6, "rgb(34, 167, 132)"],
    [0.7, "rgb(68, 190, 112)"],
    [0.8, "rgb(121, 209, 81)"],
    [0.9, "rgb(189, 222, 38)"],
    [1.0, "rgb(253, 231, 36)"],
]
def mpl_to_plotly(cmap, n_entries):
    """Convert a matplotlib colormap into a Plotly colorscale.

    Parameters
    ----------
    cmap : callable
        A matplotlib colormap: maps a float in [0, 1] to an RGBA tuple
        of floats in [0, 1].
    n_entries : int
        Number of color stops to sample (must be >= 2).

    Returns
    -------
    list of ``[fraction, "rgb(r, g, b)"]`` pairs with fractions evenly
    spaced over [0, 1] (rounded to two decimals).
    """
    h = 1.0 / (n_entries - 1)
    pl_colorscale = []
    for k in range(n_entries):
        # Scale RGB channels to 0-255 and cast to plain Python ints.
        # FIX: formatting numpy scalars through tuple repr is not stable
        # across numpy versions — numpy 2.x renders them as
        # "np.uint8(..)", corrupting the rgb strings.
        C = [int(c) for c in map(np.uint8, np.array(cmap(k * h)[:3]) * 255)]
        pl_colorscale.append([round(k * h, 2), "rgb({}, {}, {})".format(*C)])
    return pl_colorscale
def plotlyviz(
    scomplex,
    colorscale=None,
    title="Kepler Mapper",
    graph_layout="kk",
    color_function=None,
    color_function_name=None,
    dashboard=False,
    graph_data=False,
    factor_size=3,
    edge_linewidth=1.5,
    node_linecolor="rgb(200,200,200)",
    width=600,
    height=500,
    bgcolor="rgba(240, 240, 240, 0.95)",
    left=10,
    bottom=35,
    summary_height=300,
    summary_width=600,
    summary_left=20,
    summary_right=20,
    hist_left=25,
    hist_right=25,
    member_textbox_width=800,
    filename=None,
):
    """
    Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
    The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
    to the colorbar (the name of the color function, if any),
    and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
    To display more info on the generated kmapper-graph, define two more FigureWidget(s):
    the global node distribution figure, and a dummy figure
    that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
    A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
    we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.
    Parameters
    -----------
    scomplex: dict
        Simplicial complex is the output from the KeplerMapper `map` method.
    title: str
        Title of output graphic
    graph_layout: igraph layout;
        recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
    colorscale:
        Plotly colorscale(colormap) to color graph nodes
    dashboard: bool, default is False
        If true, display complete dashboard of node information
    graph_data: bool, default is False
        If true, display graph metadata
    factor_size: double, default is 3
        a factor for the node size
    edge_linewidth : double, default is 1.5
    node_linecolor: color str, default is "rgb(200,200,200)"
    width: int, default is 600,
    height: int, default is 500,
    bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
    left: int, default is 10,
    bottom: int, default is 35,
    summary_height: int, default is 300,
    summary_width: int, default is 600,
    summary_left: int, default is 20,
    summary_right: int, default is 20,
    hist_left: int, default is 25,
    hist_right: int, default is 25,
    member_textbox_width: int, default is 800,
    filename: str, default is None
        if filename is given, the graphic will be saved to that file.
    Returns
    ---------
    result: plotly.FigureWidget
        A FigureWidget that can be shown or edited. See the Plotly Demo notebook for examples of use.
    """
    if not colorscale:
        colorscale = default_colorscale
    # Build the json-style graph, its text summary and the global
    # node-color distribution from the simplicial complex.
    kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
        scomplex,
        colorscale=colorscale,
        color_function=color_function,
        color_function_name=color_function_name,
    )
    annotation = get_kmgraph_meta(mapper_summary)
    plgraph_data = plotly_graph(
        kmgraph,
        graph_layout=graph_layout,
        colorscale=colorscale,
        factor_size=factor_size,
        edge_linewidth=edge_linewidth,
        node_linecolor=node_linecolor,
    )
    layout = plot_layout(
        title=title,
        width=width,
        height=height,
        annotation_text=annotation,
        bgcolor=bgcolor,
        left=left,
        bottom=bottom,
    )
    result = go.FigureWidget(data=plgraph_data, layout=layout)
    if color_function_name:
        # data[1] is the node trace; label its colorbar with the color
        # function name. batch_update sends both changes in one message.
        with result.batch_update():
            result.data[1].marker.colorbar.title = color_function_name
            result.data[1].marker.colorbar.titlefont.size = 10
    if dashboard or graph_data:
        # Companion FigureWidgets: the global color histogram and a
        # text-only panel summarizing the algorithms/parameters used.
        fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
        fw_summary = summary_fig(
            mapper_summary,
            width=summary_width,
            height=summary_height,
            left=summary_left,
            right=summary_right,
        )
        fw_graph = result
        result = hovering_widgets(
            kmgraph, fw_graph, member_textbox_width=member_textbox_width
        )
        if graph_data:
            result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
    if filename:
        # Static image export via plotly.io.
        pio.write_image(result, filename)
    return result
def scomplex_to_graph(
    simplicial_complex,
    color_function,
    X,
    X_names,
    lens,
    lens_names,
    custom_tooltips,
    colorscale,
):
    """Convert a KeplerMapper simplicial complex into a json-style dict
    with "nodes" and "links" lists for the Plotly visualizations."""
    nodes = []
    links = []
    # Map each string node id to its integer position; link entries
    # reference nodes through these integer indices.
    index_of = {}
    for idx, (node_id, member_ids) in enumerate(simplicial_complex["nodes"].items()):
        index_of[node_id] = idx
        proj_stats, clust_stats, histogram = _tooltip_components(
            member_ids, X, X_names, lens, lens_names, color_function, idx, colorscale
        )
        nodes.append(
            {
                "id": idx,
                "name": node_id,
                "member_ids": member_ids,
                "color": _color_function(member_ids, color_function),
                "size": _size_node(member_ids),
                "cluster": clust_stats,
                "distribution": histogram,
                "projection": proj_stats,
                "custom_tooltips": custom_tooltips,
            }
        )
    # One link entry per (source, target) pair recorded in the complex.
    for node_id, linked_ids in simplicial_complex["links"].items():
        links.extend(
            {"source": index_of[node_id], "target": index_of[t]} for t in linked_ids
        )
    return {"nodes": nodes, "links": links}
def get_mapper_graph(
    simplicial_complex,
    color_function=None,
    color_function_name=None,
    colorscale=None,
    custom_tooltips=None,
    custom_meta=None,
    X=None,
    X_names=None,
    lens=None,
    lens_names=None,
):
    """Generate data for mapper graph visualization and annotation.
    Parameters
    ----------
    simplicial_complex : dict
        Simplicial complex is the output from the KeplerMapper `map` method.
    Returns
    -------
    the graph dictionary in a json representation, the mapper summary
    and the node_distribution
    Example
    -------
    >>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)
    """
    if not colorscale:
        colorscale = default_colorscale
    if not len(simplicial_complex["nodes"]) > 0:
        raise Exception(
            "A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
        )
    # Normalize/derive the per-sample color function before it is used
    # for node colors and histograms.
    color_function = init_color_function(simplicial_complex, color_function)
    if X_names is None:
        X_names = []
    if lens_names is None:
        lens_names = []
    json_graph = scomplex_to_graph(
        simplicial_complex,
        color_function,
        X,
        X_names,
        lens,
        lens_names,
        custom_tooltips,
        colorscale=colorscale,
    )
    # Global distribution of node colors, shown alongside the graph.
    colorf_distribution = graph_data_distribution(
        simplicial_complex, color_function, colorscale
    )
    mapper_summary = format_meta(
        simplicial_complex,
        color_function_name=color_function_name,
        custom_meta=custom_meta,
    )
    return json_graph, mapper_summary, colorf_distribution
def plotly_graph(
    kmgraph,
    graph_layout="kk",
    colorscale=None,
    showscale=True,
    factor_size=3,
    edge_linecolor="rgb(180,180,180)",
    edge_linewidth=1.5,
    node_linecolor="rgb(255,255,255)",
    node_linewidth=1.0,
):
    """Generate Plotly data structures that represent the mapper graph
    Parameters
    ----------
    kmgraph: dict representing the mapper graph,
        returned by the function get_mapper_graph()
    graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
        or 'fr' (fruchterman-reingold)
    colorscale: a Plotly colorscale(colormap) to color graph nodes
    showscale: boolean to display or not the colorbar
    factor_size: a factor for the node size
    Returns
    -------
    The plotly traces (dicts) representing the graph edges and nodes
    """
    if not colorscale:
        colorscale = default_colorscale
    # define an igraph.Graph instance of n_nodes
    n_nodes = len(kmgraph["nodes"])
    if n_nodes == 0:
        raise ValueError("Your graph has 0 nodes")
    G = ig.Graph(n=n_nodes)
    links = [(e["source"], e["target"]) for e in kmgraph["links"]]
    G.add_edges(links)
    # Node positions computed by the chosen igraph layout algorithm.
    layt = G.layout(graph_layout)
    hover_text = [node["name"] for node in kmgraph["nodes"]]
    color_vals = [node["color"] for node in kmgraph["nodes"]]
    # FIX: use the builtin ``int`` dtype — the ``np.int`` alias was
    # deprecated in numpy 1.20 and removed in numpy 1.24, where it
    # raises AttributeError.
    node_size = np.array(
        [factor_size * node["size"] for node in kmgraph["nodes"]], dtype=int
    )
    Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)
    # Edges drawn as one polyline trace with None-separated segments.
    edge_trace = dict(
        type="scatter",
        x=Xe,
        y=Ye,
        mode="lines",
        line=dict(color=edge_linecolor, width=edge_linewidth),
        hoverinfo="none",
    )
    # Nodes as a marker trace; hover shows the node name.
    node_trace = dict(
        type="scatter",
        x=Xn,
        y=Yn,
        mode="markers",
        marker=dict(
            size=node_size.tolist(),
            color=color_vals,
            opacity=1.0,
            colorscale=colorscale,
            showscale=showscale,
            line=dict(color=node_linecolor, width=node_linewidth),
            colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
        ),
        text=hover_text,
        hoverinfo="text",
    )
    return [edge_trace, node_trace]
def get_kmgraph_meta(mapper_summary):
    """Extract info from mapper summary to be displayed below the graph plot."""
    custom = mapper_summary["custom_meta"]
    # Cover parameters first, then graph statistics on a second line.
    pieces = [
        "<b>N_cubes:</b> {}".format(custom["n_cubes"]),
        " <b>Perc_overlap:</b> {}".format(custom["perc_overlap"]),
        "<br><b>Nodes:</b> {}".format(mapper_summary["n_nodes"]),
        " <b>Edges:</b> {}".format(mapper_summary["n_edges"]),
        " <b>Total samples:</b> {}".format(mapper_summary["n_total"]),
        " <b>Unique_samples:</b> {}".format(mapper_summary["n_unique"]),
    ]
    return "".join(pieces)
def plot_layout(
    title="TDA KMapper",
    width=600,
    height=600,
    bgcolor="rgba(255, 255, 255, 1)",
    annotation_text=None,
    annotation_x=0,
    annotation_y=-0.01,
    top=100,
    left=60,
    right=60,
    bottom=60,
):
    """Build the Plotly layout dict for the mapper graph figure.

    Parameters
    ----------
    width, height: integers setting the plot window size
    bgcolor: rgba or hex color code for the background color
    annotation_text: string of meta data to display below the plot
    annotation_x, annotation_y: paper coordinates of the annotation;
        a negative y places it underneath the plotting area
    top, left, right, bottom: margins in pixels
    """
    layout = {
        "title": title,
        "font": dict(size=12),
        "showlegend": False,
        "autosize": False,
        "width": width,
        "height": height,
        "xaxis": dict(visible=False),
        "yaxis": dict(visible=False),
        "hovermode": "closest",
        "plot_bgcolor": bgcolor,
        "margin": dict(t=top, b=bottom, l=left, r=right),
    }
    # Attach the optional annotation below the plot.
    if annotation_text is not None:
        layout["annotations"] = [
            dict(
                showarrow=False,
                text=annotation_text,
                xref="paper",
                yref="paper",
                x=annotation_x,
                y=annotation_y,
                align="left",
                xanchor="left",
                yanchor="top",
                font=dict(size=12),
            )
        ]
    return layout
def node_hist_fig(
    node_color_distribution,
    title="Graph Node Distribution",
    width=400,
    height=300,
    top=60,
    left=25,
    bottom=60,
    right=25,
    bgcolor="rgb(240,240,240)",
    y_gridcolor="white",
):
    """Build the Plotly FigureWidget showing the node color histogram.

    Parameters
    ----------
    node_color_distribution: list of dicts describing the build_histogram;
        each entry provides "height", "perc" and "color" keys
    width, height: integers - width and height of the histogram FigureWidget
    left, top, right, bottom: ints; number of pixels around the FigureWidget
    bgcolor: rgb or hex color code for the figure background color
    y_gridcolor: rgb or hex color code for the yaxis gridlines

    Returns
    -------
    FigureWidget object representing the histogram of the graph nodes
    """
    heights = [entry["height"] for entry in node_color_distribution]
    bar_colors = [entry["color"] for entry in node_color_distribution]
    percent_labels = ["{}%".format(entry["perc"]) for entry in node_color_distribution]
    bar_trace = go.Bar(
        y=heights,
        marker=dict(color=bar_colors),
        text=percent_labels,
        hoverinfo="y+text",
    )
    fig_layout = dict(
        title=title,
        width=width,
        height=height,
        font=dict(size=12),
        xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
        yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
        bargap=0.01,
        margin=dict(l=left, r=right, b=bottom, t=top),
        hovermode="x",
        plot_bgcolor=bgcolor,
    )
    return go.FigureWidget(data=[bar_trace], layout=fig_layout)
def summary_fig(
    mapper_summary,
    width=600,
    height=500,
    top=60,
    left=20,
    bottom=60,
    right=20,
    bgcolor="rgb(240,240,240)",
):
    """Define a dummy figure that displays info on the algorithms and
    sklearn class instances or methods used.
    Returns a FigureWidget object representing the figure
    """
    text = _text_mapper_summary(mapper_summary)
    # One "text"-mode scatter trace; the second (empty) label only
    # anchors the drawing region so the text sits at the top-left.
    data = [
        dict(
            type="scatter",
            x=[0, width],
            y=[height, 0],
            mode="text",
            text=[text, ""],
            textposition="bottom right",
            hoverinfo="none",
        )
    ]
    # Both axes hidden: the figure is used purely as a text panel.
    layout = dict(
        title="Algorithms and scikit-learn objects/methods",
        width=width,
        height=height,
        font=dict(size=12),
        xaxis=dict(visible=False),
        yaxis=dict(visible=False, range=[0, height + 5]),
        margin=dict(t=top, b=bottom, l=left, r=right),
        plot_bgcolor=bgcolor,
    )
    return go.FigureWidget(data=data, layout=layout)
def hovering_widgets(
    kmgraph,
    graph_fw,
    ctooltips=False,
    width=400,
    height=300,
    top=100,
    left=50,
    bgcolor="rgb(240,240,240)",
    y_gridcolor="white",
    member_textbox_width=200,
):
    """Define the widgets that display the distribution of each node on hover
    and the members of each node.

    Parameters
    ----------
    kmgraph: the kepler-mapper graph dict returned by ``get_mapper_graph()``
    graph_fw: the FigureWidget representing the graph
    ctooltips: boolean; if True the node["custom_tooltips"] are shown in
        member_textbox, otherwise node["member_ids"] are
    width, height, top, left: size and position of the hovered-node
        distribution figure
    bgcolor, y_gridcolor: colors forwarded to ``node_hist_fig``
    member_textbox_width: pixel width of the members textarea

    Returns
    -------
    an ipywidgets VBox containing the graph figure, the figure of the
    hovered node distribution, and the textboxes displaying the cluster
    size and member_ids or custom tooltips for hovered node members
    """
    # Seed the widgets with the first node so they show data before any
    # hover event has fired.
    fnode = kmgraph["nodes"][0]
    fwc = node_hist_fig(
        fnode["distribution"],
        title="Cluster Member Distribution",
        width=width,
        height=height,
        top=top,
        left=left,
        bgcolor=bgcolor,
        y_gridcolor=y_gridcolor,
    )
    clust_textbox = ipw.Text(
        value="{:d}".format(fnode["cluster"]["size"]),
        description="Cluster size:",
        disabled=False,
        continuous_update=True,
    )
    clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")
    member_textbox = ipw.Textarea(
        value=", ".join(str(x) for x in fnode["member_ids"])
        if not ctooltips
        else ", ".join(str(x) for x in fnode["custom_tooltips"]),
        description="Members:",
        disabled=False,
        continuous_update=True,
    )
    member_textbox.layout = dict(
        margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
    )
    def do_on_hover(trace, points, state):
        # Plotly hover callback: refresh the histogram and the two
        # textboxes with the data of the node under the cursor.
        if not points.point_inds:
            return
        ind = points.point_inds[0]  # get the index of the hovered node
        node = kmgraph["nodes"][ind]
        # on hover do:
        with fwc.batch_update():  # update data in the cluster member histogr
            fwc.data[0].text = [
                "{:.1f}%".format(d["perc"]) for d in node["distribution"]
            ]
            fwc.data[0].y = [d["height"] for d in node["distribution"]]
            fwc.data[0].marker.color = [d["color"] for d in node["distribution"]]
        clust_textbox.value = "{:d}".format(node["cluster"]["size"])
        member_textbox.value = (
            ", ".join(str(x) for x in node["member_ids"])
            if not ctooltips
            else ", ".join(str(x) for x in node["custom_tooltips"])
        )
    # data[1] is the node trace (data[0] holds the edges); attach the
    # hover listener to the nodes only.
    trace = graph_fw.data[1]
    trace.on_hover(do_on_hover)
    return ipw.VBox([ipw.HBox([graph_fw, fwc]), clust_textbox, member_textbox])
def _get_plotly_data(E, coords):
    """Split igraph layout coordinates into the x/y sequences Plotly needs.

    E : the list of tuples representing the graph edges
    coords : list of node coordinates assigned by igraph.Layout
    Returns (Xnodes, Ynodes, Xedges, Yedges); the edge lists insert a
    ``None`` after each segment so Plotly draws disconnected line pieces.
    """
    N = len(coords)
    Xnodes = [coords[k][0] for k in range(N)]  # x-coordinates of nodes
    Ynodes = [coords[k][1] for k in range(N)]  # y-coordinates of nodes
    Xedges = []
    Yedges = []
    for e in E:
        # Each edge contributes its endpoint coords plus a None separator.
        Xedges.extend([coords[e[0]][0], coords[e[1]][0], None])
        Yedges.extend([coords[e[0]][1], coords[e[1]][1], None])
    return Xnodes, Ynodes, Xedges, Yedges
def _text_mapper_summary(mapper_summary):
    """Format the "custom_meta" entries as an HTML snippet (projection,
    clusterer, scaler and — when recorded — the color function)."""
    d = mapper_summary["custom_meta"]
    text = "<br><b>Projection: </b>" + d["projection"]
    text += (
        "<br><b>Clusterer: </b>" + d["clusterer"] + "<br><b>Scaler: </b>" + d["scaler"]
    )
    # The color function name is optional metadata.
    if "color_function" in d.keys():
        text += "<br><b>Color function: </b>" + d["color_function"]
    return text
def _hover_format(member_ids, custom_tooltips, X, X_names, lens, lens_names):
    """Return the hover tooltip text for a node: the cluster size as a string."""
    cluster_data = _format_cluster_statistics(member_ids, X, X_names)
    tooltip = ""
    # NOTE(review): the selected tooltips are computed but not used in the
    # returned text — presumably kept for parity with the HTML visualizer.
    custom_tooltips = (
        custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
    )
    val_size = cluster_data["size"]
    tooltip += "{val_size}".format(**locals())
    return tooltip
|
scikit-tda/kepler-mapper | kmapper/plotlyviz.py | summary_fig | python | def summary_fig(
mapper_summary,
width=600,
height=500,
top=60,
left=20,
bottom=60,
right=20,
bgcolor="rgb(240,240,240)",
):
"""Define a dummy figure that displays info on the algorithms and
sklearn class instances or methods used
Returns a FigureWidget object representing the figure
"""
text = _text_mapper_summary(mapper_summary)
data = [
dict(
type="scatter",
x=[0, width],
y=[height, 0],
mode="text",
text=[text, ""],
textposition="bottom right",
hoverinfo="none",
)
]
layout = dict(
title="Algorithms and scikit-learn objects/methods",
width=width,
height=height,
font=dict(size=12),
xaxis=dict(visible=False),
yaxis=dict(visible=False, range=[0, height + 5]),
margin=dict(t=top, b=bottom, l=left, r=right),
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=data, layout=layout) | Define a dummy figure that displays info on the algorithms and
sklearn class instances or methods used
Returns a FigureWidget object representing the figure | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/plotlyviz.py#L543-L583 | [
"def _text_mapper_summary(mapper_summary):\n\n d = mapper_summary[\"custom_meta\"]\n text = \"<br><b>Projection: </b>\" + d[\"projection\"]\n text += (\n \"<br><b>Clusterer: </b>\" + d[\"clusterer\"] + \"<br><b>Scaler: </b>\" + d[\"scaler\"]\n )\n if \"color_function\" in d.keys():\n te... | from __future__ import division
import numpy as np
from .visuals import (
init_color_function,
_size_node,
_format_projection_statistics,
_format_cluster_statistics,
_color_function,
format_meta,
_to_html_format,
_map_val2color,
graph_data_distribution,
build_histogram,
_tooltip_components,
)
try:
import igraph as ig
import plotly.graph_objs as go
import ipywidgets as ipw
import plotly.io as pio
except ImportError:
print(
"""To use the plotly visualization tools, you must have the packages python-igraph, plotly, and ipywidgets installed in your environment."""
""" It looks like at least one of these is missing. Please install again with"""
"""\n\n\t`pip install python-igraph plotly ipywidgets`\n\nand try again"""
)
raise
# Default node colormap: an 11-stop approximation of matplotlib's Viridis,
# expressed as the [fraction, rgb-string] stop pairs Plotly expects.
default_colorscale = [
    [0.0, "rgb(68, 1, 84)"],  # Viridis
    [0.1, "rgb(72, 35, 116)"],
    [0.2, "rgb(64, 67, 135)"],
    [0.3, "rgb(52, 94, 141)"],
    [0.4, "rgb(41, 120, 142)"],
    [0.5, "rgb(32, 144, 140)"],
    [0.6, "rgb(34, 167, 132)"],
    [0.7, "rgb(68, 190, 112)"],
    [0.8, "rgb(121, 209, 81)"],
    [0.9, "rgb(189, 222, 38)"],
    [1.0, "rgb(253, 231, 36)"],
]
def mpl_to_plotly(cmap, n_entries):
    """Convert a matplotlib colormap into a Plotly colorscale.

    Parameters
    ----------
    cmap : callable
        A matplotlib colormap: maps a float in [0, 1] to an RGBA tuple
        of floats in [0, 1].
    n_entries : int
        Number of color stops to sample (must be >= 2).

    Returns
    -------
    list of ``[fraction, "rgb(r, g, b)"]`` pairs with fractions evenly
    spaced over [0, 1] (rounded to two decimals).
    """
    h = 1.0 / (n_entries - 1)
    pl_colorscale = []
    for k in range(n_entries):
        # Scale RGB channels to 0-255 and cast to plain Python ints.
        # FIX: formatting numpy scalars through tuple repr is not stable
        # across numpy versions — numpy 2.x renders them as
        # "np.uint8(..)", corrupting the rgb strings.
        C = [int(c) for c in map(np.uint8, np.array(cmap(k * h)[:3]) * 255)]
        pl_colorscale.append([round(k * h, 2), "rgb({}, {}, {})".format(*C)])
    return pl_colorscale
def plotlyviz(
    scomplex,
    colorscale=None,
    title="Kepler Mapper",
    graph_layout="kk",
    color_function=None,
    color_function_name=None,
    dashboard=False,
    graph_data=False,
    factor_size=3,
    edge_linewidth=1.5,
    node_linecolor="rgb(200,200,200)",
    width=600,
    height=500,
    bgcolor="rgba(240, 240, 240, 0.95)",
    left=10,
    bottom=35,
    summary_height=300,
    summary_width=600,
    summary_left=20,
    summary_right=20,
    hist_left=25,
    hist_right=25,
    member_textbox_width=800,
    filename=None,
):
    """
    Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
    The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
    to the colorbar (the name of the color function, if any),
    and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
    To display more info on the generated kmapper-graph, define two more FigureWidget(s):
    the global node distribution figure, and a dummy figure
    that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
    A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
    we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.
    Parameters
    -----------
    scomplex: dict
        Simplicial complex is the output from the KeplerMapper `map` method.
    title: str
        Title of output graphic
    graph_layout: igraph layout;
        recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
    colorscale:
        Plotly colorscale(colormap) to color graph nodes
    dashboard: bool, default is False
        If true, display complete dashboard of node information
    graph_data: bool, default is False
        If true, display graph metadata
    factor_size: double, default is 3
        a factor for the node size
    edge_linewidth : double, default is 1.5
    node_linecolor: color str, default is "rgb(200,200,200)"
    width: int, default is 600,
    height: int, default is 500,
    bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
    left: int, default is 10,
    bottom: int, default is 35,
    summary_height: int, default is 300,
    summary_width: int, default is 600,
    summary_left: int, default is 20,
    summary_right: int, default is 20,
    hist_left: int, default is 25,
    hist_right: int, default is 25,
    member_textbox_width: int, default is 800,
    filename: str, default is None
        if filename is given, the graphic will be saved to that file.
    Returns
    ---------
    result: plotly.FigureWidget
        A FigureWidget that can be shown or edited. See the Plotly Demo notebook for examples of use.
    """
    if not colorscale:
        colorscale = default_colorscale
    # Build the json-style graph, its text summary and the global
    # node-color distribution from the simplicial complex.
    kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
        scomplex,
        colorscale=colorscale,
        color_function=color_function,
        color_function_name=color_function_name,
    )
    annotation = get_kmgraph_meta(mapper_summary)
    plgraph_data = plotly_graph(
        kmgraph,
        graph_layout=graph_layout,
        colorscale=colorscale,
        factor_size=factor_size,
        edge_linewidth=edge_linewidth,
        node_linecolor=node_linecolor,
    )
    layout = plot_layout(
        title=title,
        width=width,
        height=height,
        annotation_text=annotation,
        bgcolor=bgcolor,
        left=left,
        bottom=bottom,
    )
    result = go.FigureWidget(data=plgraph_data, layout=layout)
    if color_function_name:
        # data[1] is the node trace; label its colorbar with the color
        # function name. batch_update sends both changes in one message.
        with result.batch_update():
            result.data[1].marker.colorbar.title = color_function_name
            result.data[1].marker.colorbar.titlefont.size = 10
    if dashboard or graph_data:
        # Companion FigureWidgets: the global color histogram and a
        # text-only panel summarizing the algorithms/parameters used.
        fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
        fw_summary = summary_fig(
            mapper_summary,
            width=summary_width,
            height=summary_height,
            left=summary_left,
            right=summary_right,
        )
        fw_graph = result
        result = hovering_widgets(
            kmgraph, fw_graph, member_textbox_width=member_textbox_width
        )
        if graph_data:
            result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
    if filename:
        # Static image export via plotly.io.
        pio.write_image(result, filename)
    return result
def scomplex_to_graph(
    simplicial_complex,
    color_function,
    X,
    X_names,
    lens,
    lens_names,
    custom_tooltips,
    colorscale,
):
    """Convert a KeplerMapper simplicial complex into a json-style dict
    with "nodes" and "links" lists for the Plotly visualizations."""
    json_dict = {"nodes": [], "links": []}
    # Map each string node id to its integer position; link entries
    # reference nodes through these integer indices.
    node_id_to_num = {}
    for i, (node_id, member_ids) in enumerate(simplicial_complex["nodes"].items()):
        node_id_to_num[node_id] = i
        projection_stats, cluster_stats, member_histogram = _tooltip_components(
            member_ids, X, X_names, lens, lens_names, color_function, i, colorscale
        )
        n = {
            "id": i,
            "name": node_id,
            "member_ids": member_ids,
            "color": _color_function(member_ids, color_function),
            "size": _size_node(member_ids),
            "cluster": cluster_stats,
            "distribution": member_histogram,
            "projection": projection_stats,
            "custom_tooltips": custom_tooltips,
        }
        json_dict["nodes"].append(n)
    # One link entry per (source, target) pair recorded in the complex.
    for i, (node_id, linked_node_ids) in enumerate(simplicial_complex["links"].items()):
        for linked_node_id in linked_node_ids:
            lnk = {
                "source": node_id_to_num[node_id],
                "target": node_id_to_num[linked_node_id],
            }
            json_dict["links"].append(lnk)
    return json_dict
def get_mapper_graph(
    simplicial_complex,
    color_function=None,
    color_function_name=None,
    colorscale=None,
    custom_tooltips=None,
    custom_meta=None,
    X=None,
    X_names=None,
    lens=None,
    lens_names=None,
):
    """Generate data for mapper graph visualization and annotation.
    Parameters
    ----------
    simplicial_complex : dict
        Simplicial complex is the output from the KeplerMapper `map` method.
    Returns
    -------
    the graph dictionary in a json representation, the mapper summary
    and the node_distribution
    Example
    -------
    >>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)
    """
    if not colorscale:
        colorscale = default_colorscale
    if not len(simplicial_complex["nodes"]) > 0:
        raise Exception(
            "A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
        )
    # Normalize/derive the per-sample color function before it is used
    # for node colors and histograms.
    color_function = init_color_function(simplicial_complex, color_function)
    if X_names is None:
        X_names = []
    if lens_names is None:
        lens_names = []
    json_graph = scomplex_to_graph(
        simplicial_complex,
        color_function,
        X,
        X_names,
        lens,
        lens_names,
        custom_tooltips,
        colorscale=colorscale,
    )
    # Global distribution of node colors, shown alongside the graph.
    colorf_distribution = graph_data_distribution(
        simplicial_complex, color_function, colorscale
    )
    mapper_summary = format_meta(
        simplicial_complex,
        color_function_name=color_function_name,
        custom_meta=custom_meta,
    )
    return json_graph, mapper_summary, colorf_distribution
def plotly_graph(
kmgraph,
graph_layout="kk",
colorscale=None,
showscale=True,
factor_size=3,
edge_linecolor="rgb(180,180,180)",
edge_linewidth=1.5,
node_linecolor="rgb(255,255,255)",
node_linewidth=1.0,
):
"""Generate Plotly data structures that represent the mapper graph
Parameters
----------
kmgraph: dict representing the mapper graph,
returned by the function get_mapper_graph()
graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
or 'fr' (fruchterman-reingold)
colorscale: a Plotly colorscale(colormap) to color graph nodes
showscale: boolean to display or not the colorbar
factor_size: a factor for the node size
Returns
-------
The plotly traces (dicts) representing the graph edges and nodes
"""
if not colorscale:
colorscale = default_colorscale
# define an igraph.Graph instance of n_nodes
n_nodes = len(kmgraph["nodes"])
if n_nodes == 0:
raise ValueError("Your graph has 0 nodes")
G = ig.Graph(n=n_nodes)
links = [(e["source"], e["target"]) for e in kmgraph["links"]]
G.add_edges(links)
layt = G.layout(graph_layout)
hover_text = [node["name"] for node in kmgraph["nodes"]]
color_vals = [node["color"] for node in kmgraph["nodes"]]
node_size = np.array(
[factor_size * node["size"] for node in kmgraph["nodes"]], dtype=np.int
)
Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)
edge_trace = dict(
type="scatter",
x=Xe,
y=Ye,
mode="lines",
line=dict(color=edge_linecolor, width=edge_linewidth),
hoverinfo="none",
)
node_trace = dict(
type="scatter",
x=Xn,
y=Yn,
mode="markers",
marker=dict(
size=node_size.tolist(),
color=color_vals,
opacity=1.0,
colorscale=colorscale,
showscale=showscale,
line=dict(color=node_linecolor, width=node_linewidth),
colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
),
text=hover_text,
hoverinfo="text",
)
return [edge_trace, node_trace]
def get_kmgraph_meta(mapper_summary):
""" Extract info from mapper summary to be displayed below the graph plot
"""
d = mapper_summary["custom_meta"]
meta = (
"<b>N_cubes:</b> "
+ str(d["n_cubes"])
+ " <b>Perc_overlap:</b> "
+ str(d["perc_overlap"])
)
meta += (
"<br><b>Nodes:</b> "
+ str(mapper_summary["n_nodes"])
+ " <b>Edges:</b> "
+ str(mapper_summary["n_edges"])
+ " <b>Total samples:</b> "
+ str(mapper_summary["n_total"])
+ " <b>Unique_samples:</b> "
+ str(mapper_summary["n_unique"])
)
return meta
def plot_layout(
title="TDA KMapper",
width=600,
height=600,
bgcolor="rgba(255, 255, 255, 1)",
annotation_text=None,
annotation_x=0,
annotation_y=-0.01,
top=100,
left=60,
right=60,
bottom=60,
):
"""Set the plotly layout
Parameters
----------
width, height: integers
setting width and height of plot window
bgcolor: string,
rgba or hex color code for the background color
annotation_text: string
meta data to be displayed
annotation_x & annotation_y:
The coordinates of the point where we insert the annotation; the negative sign for y coord points output that annotation is inserted below the plot
"""
pl_layout = dict(
title=title,
font=dict(size=12),
showlegend=False,
autosize=False,
width=width,
height=height,
xaxis=dict(visible=False),
yaxis=dict(visible=False),
hovermode="closest",
plot_bgcolor=bgcolor,
margin=dict(t=top, b=bottom, l=left, r=right),
)
if annotation_text is None:
return pl_layout
else:
annotations = [
dict(
showarrow=False,
text=annotation_text,
xref="paper",
yref="paper",
x=annotation_x,
y=annotation_y,
align="left",
xanchor="left",
yanchor="top",
font=dict(size=12),
)
]
pl_layout.update(annotations=annotations)
return pl_layout
def node_hist_fig(
node_color_distribution,
title="Graph Node Distribution",
width=400,
height=300,
top=60,
left=25,
bottom=60,
right=25,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
):
"""Define the plotly plot representing the node histogram
Parameters
----------
node_color_distribution: list of dicts describing the build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes
"""
text = [
"{perc}%".format(**locals())
for perc in [d["perc"] for d in node_color_distribution]
]
pl_hist = go.Bar(
y=[d["height"] for d in node_color_distribution],
marker=dict(color=[d["color"] for d in node_color_distribution]),
text=text,
hoverinfo="y+text",
)
hist_layout = dict(
title=title,
width=width,
height=height,
font=dict(size=12),
xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
bargap=0.01,
margin=dict(l=left, r=right, b=bottom, t=top),
hovermode="x",
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=[pl_hist], layout=hist_layout)
def summary_fig(
mapper_summary,
width=600,
height=500,
top=60,
left=20,
bottom=60,
right=20,
bgcolor="rgb(240,240,240)",
):
"""Define a dummy figure that displays info on the algorithms and
sklearn class instances or methods used
Returns a FigureWidget object representing the figure
"""
text = _text_mapper_summary(mapper_summary)
data = [
dict(
type="scatter",
x=[0, width],
y=[height, 0],
mode="text",
text=[text, ""],
textposition="bottom right",
hoverinfo="none",
)
]
layout = dict(
title="Algorithms and scikit-learn objects/methods",
width=width,
height=height,
font=dict(size=12),
xaxis=dict(visible=False),
yaxis=dict(visible=False, range=[0, height + 5]),
margin=dict(t=top, b=bottom, l=left, r=right),
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=data, layout=layout)
def hovering_widgets(
kmgraph,
graph_fw,
ctooltips=False,
width=400,
height=300,
top=100,
left=50,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
member_textbox_width=200,
):
"""Defines the widgets that display the distribution of each node on hover
and the members of each nodes
Parameters
----------
kmgraph: the kepler-mapper graph dict returned by `get_mapper_graph()``
graph_fw: the FigureWidget representing the graph
ctooltips: boolean; if True/False the node["custom_tooltips"]/"member_ids"
are passed to member_textbox
width, height, top refer to the figure
size and position of the hovered node distribution
Returns
-------
a box containing the graph figure, the figure of the hovered node
distribution, and the textboxes displaying the cluster size and member_ids
or custom tooltips for hovered node members
"""
fnode = kmgraph["nodes"][0]
fwc = node_hist_fig(
fnode["distribution"],
title="Cluster Member Distribution",
width=width,
height=height,
top=top,
left=left,
bgcolor=bgcolor,
y_gridcolor=y_gridcolor,
)
clust_textbox = ipw.Text(
value="{:d}".format(fnode["cluster"]["size"]),
description="Cluster size:",
disabled=False,
continuous_update=True,
)
clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")
member_textbox = ipw.Textarea(
value=", ".join(str(x) for x in fnode["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in fnode["custom_tooltips"]),
description="Members:",
disabled=False,
continuous_update=True,
)
member_textbox.layout = dict(
margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
)
def do_on_hover(trace, points, state):
if not points.point_inds:
return
ind = points.point_inds[0] # get the index of the hovered node
node = kmgraph["nodes"][ind]
# on hover do:
with fwc.batch_update(): # update data in the cluster member histogr
fwc.data[0].text = [
"{:.1f}%".format(d["perc"]) for d in node["distribution"]
]
fwc.data[0].y = [d["height"] for d in node["distribution"]]
fwc.data[0].marker.color = [d["color"] for d in node["distribution"]]
clust_textbox.value = "{:d}".format(node["cluster"]["size"])
member_textbox.value = (
", ".join(str(x) for x in node["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in node["custom_tooltips"])
)
trace = graph_fw.data[1]
trace.on_hover(do_on_hover)
return ipw.VBox([ipw.HBox([graph_fw, fwc]), clust_textbox, member_textbox])
def _get_plotly_data(E, coords):
# E : the list of tuples representing the graph edges
# coords: list of node coordinates assigned by igraph.Layout
N = len(coords)
Xnodes = [coords[k][0] for k in range(N)] # x-coordinates of nodes
Ynodes = [coords[k][1] for k in range(N)] # y-coordnates of nodes
Xedges = []
Yedges = []
for e in E:
Xedges.extend([coords[e[0]][0], coords[e[1]][0], None])
Yedges.extend([coords[e[0]][1], coords[e[1]][1], None])
return Xnodes, Ynodes, Xedges, Yedges
def _text_mapper_summary(mapper_summary):
d = mapper_summary["custom_meta"]
text = "<br><b>Projection: </b>" + d["projection"]
text += (
"<br><b>Clusterer: </b>" + d["clusterer"] + "<br><b>Scaler: </b>" + d["scaler"]
)
if "color_function" in d.keys():
text += "<br><b>Color function: </b>" + d["color_function"]
return text
def _hover_format(member_ids, custom_tooltips, X, X_names, lens, lens_names):
cluster_data = _format_cluster_statistics(member_ids, X, X_names)
tooltip = ""
custom_tooltips = (
custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
)
val_size = cluster_data["size"]
tooltip += "{val_size}".format(**locals())
return tooltip
|
scikit-tda/kepler-mapper | kmapper/plotlyviz.py | hovering_widgets | python | def hovering_widgets(
kmgraph,
graph_fw,
ctooltips=False,
width=400,
height=300,
top=100,
left=50,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
member_textbox_width=200,
):
"""Defines the widgets that display the distribution of each node on hover
and the members of each nodes
Parameters
----------
kmgraph: the kepler-mapper graph dict returned by `get_mapper_graph()``
graph_fw: the FigureWidget representing the graph
ctooltips: boolean; if True/False the node["custom_tooltips"]/"member_ids"
are passed to member_textbox
width, height, top refer to the figure
size and position of the hovered node distribution
Returns
-------
a box containing the graph figure, the figure of the hovered node
distribution, and the textboxes displaying the cluster size and member_ids
or custom tooltips for hovered node members
"""
fnode = kmgraph["nodes"][0]
fwc = node_hist_fig(
fnode["distribution"],
title="Cluster Member Distribution",
width=width,
height=height,
top=top,
left=left,
bgcolor=bgcolor,
y_gridcolor=y_gridcolor,
)
clust_textbox = ipw.Text(
value="{:d}".format(fnode["cluster"]["size"]),
description="Cluster size:",
disabled=False,
continuous_update=True,
)
clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")
member_textbox = ipw.Textarea(
value=", ".join(str(x) for x in fnode["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in fnode["custom_tooltips"]),
description="Members:",
disabled=False,
continuous_update=True,
)
member_textbox.layout = dict(
margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
)
def do_on_hover(trace, points, state):
if not points.point_inds:
return
ind = points.point_inds[0] # get the index of the hovered node
node = kmgraph["nodes"][ind]
# on hover do:
with fwc.batch_update(): # update data in the cluster member histogr
fwc.data[0].text = [
"{:.1f}%".format(d["perc"]) for d in node["distribution"]
]
fwc.data[0].y = [d["height"] for d in node["distribution"]]
fwc.data[0].marker.color = [d["color"] for d in node["distribution"]]
clust_textbox.value = "{:d}".format(node["cluster"]["size"])
member_textbox.value = (
", ".join(str(x) for x in node["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in node["custom_tooltips"])
)
trace = graph_fw.data[1]
trace.on_hover(do_on_hover)
return ipw.VBox([ipw.HBox([graph_fw, fwc]), clust_textbox, member_textbox]) | Defines the widgets that display the distribution of each node on hover
and the members of each nodes
Parameters
----------
kmgraph: the kepler-mapper graph dict returned by `get_mapper_graph()``
graph_fw: the FigureWidget representing the graph
ctooltips: boolean; if True/False the node["custom_tooltips"]/"member_ids"
are passed to member_textbox
width, height, top refer to the figure
size and position of the hovered node distribution
Returns
-------
a box containing the graph figure, the figure of the hovered node
distribution, and the textboxes displaying the cluster size and member_ids
or custom tooltips for hovered node members | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/plotlyviz.py#L586-L672 | [
"def node_hist_fig(\n node_color_distribution,\n title=\"Graph Node Distribution\",\n width=400,\n height=300,\n top=60,\n left=25,\n bottom=60,\n right=25,\n bgcolor=\"rgb(240,240,240)\",\n y_gridcolor=\"white\",\n):\n \"\"\"Define the plotly plot representing the node histogram\n\... | from __future__ import division
import numpy as np
from .visuals import (
init_color_function,
_size_node,
_format_projection_statistics,
_format_cluster_statistics,
_color_function,
format_meta,
_to_html_format,
_map_val2color,
graph_data_distribution,
build_histogram,
_tooltip_components,
)
try:
import igraph as ig
import plotly.graph_objs as go
import ipywidgets as ipw
import plotly.io as pio
except ImportError:
print(
"""To use the plotly visualization tools, you must have the packages python-igraph, plotly, and ipywidgets installed in your environment."""
""" It looks like at least one of these is missing. Please install again with"""
"""\n\n\t`pip install python-igraph plotly ipywidgets`\n\nand try again"""
)
raise
default_colorscale = [
[0.0, "rgb(68, 1, 84)"], # Viridis
[0.1, "rgb(72, 35, 116)"],
[0.2, "rgb(64, 67, 135)"],
[0.3, "rgb(52, 94, 141)"],
[0.4, "rgb(41, 120, 142)"],
[0.5, "rgb(32, 144, 140)"],
[0.6, "rgb(34, 167, 132)"],
[0.7, "rgb(68, 190, 112)"],
[0.8, "rgb(121, 209, 81)"],
[0.9, "rgb(189, 222, 38)"],
[1.0, "rgb(253, 231, 36)"],
]
def mpl_to_plotly(cmap, n_entries):
h = 1.0 / (n_entries - 1)
pl_colorscale = []
for k in range(n_entries):
C = list(map(np.uint8, np.array(cmap(k * h)[:3]) * 255))
pl_colorscale.append(
[round(k * h, 2), "rgb" + str((C[0], C[1], C[2]))]
) # Python 2.7+
# pl_colorscale.append([round(k*h, 2), f'rgb({C[0]}, {C[1]}, {C[2]})']) # Python 3.6+
return pl_colorscale
def plotlyviz(
scomplex,
colorscale=None,
title="Kepler Mapper",
graph_layout="kk",
color_function=None,
color_function_name=None,
dashboard=False,
graph_data=False,
factor_size=3,
edge_linewidth=1.5,
node_linecolor="rgb(200,200,200)",
width=600,
height=500,
bgcolor="rgba(240, 240, 240, 0.95)",
left=10,
bottom=35,
summary_height=300,
summary_width=600,
summary_left=20,
summary_right=20,
hist_left=25,
hist_right=25,
member_textbox_width=800,
filename=None,
):
"""
Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
to the colorbar (the name of the color function, if any),
and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
To display more info on the generated kmapper-graph, define two more FigureWidget(s):
the global node distribution figure, and a dummy figure
that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.
Parameters
-----------
scomplex: dict
Simplicial complex is the output from the KeplerMapper `map` method.
title: str
Title of output graphic
graph_layout: igraph layout;
recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
colorscale:
Plotly colorscale(colormap) to color graph nodes
dashboard: bool, default is False
If true, display complete dashboard of node information
graph_data: bool, default is False
If true, display graph metadata
factor_size: double, default is 3
a factor for the node size
edge_linewidth : double, default is 1.5
node_linecolor: color str, default is "rgb(200,200,200)"
width: int, default is 600,
height: int, default is 500,
bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
left: int, default is 10,
bottom: int, default is 35,
summary_height: int, default is 300,
summary_width: int, default is 600,
summary_left: int, default is 20,
summary_right: int, default is 20,
hist_left: int, default is 25,
hist_right: int, default is 25,
member_textbox_width: int, default is 800,
filename: str, default is None
if filename is given, the graphic will be saved to that file.
Returns
---------
result: plotly.FigureWidget
A FigureWidget that can be shown or editted. See the Plotly Demo notebook for examples of use.
"""
if not colorscale:
colorscale = default_colorscale
kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
scomplex,
colorscale=colorscale,
color_function=color_function,
color_function_name=color_function_name,
)
annotation = get_kmgraph_meta(mapper_summary)
plgraph_data = plotly_graph(
kmgraph,
graph_layout=graph_layout,
colorscale=colorscale,
factor_size=factor_size,
edge_linewidth=edge_linewidth,
node_linecolor=node_linecolor,
)
layout = plot_layout(
title=title,
width=width,
height=height,
annotation_text=annotation,
bgcolor=bgcolor,
left=left,
bottom=bottom,
)
result = go.FigureWidget(data=plgraph_data, layout=layout)
if color_function_name:
with result.batch_update():
result.data[1].marker.colorbar.title = color_function_name
result.data[1].marker.colorbar.titlefont.size = 10
if dashboard or graph_data:
fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
fw_summary = summary_fig(
mapper_summary,
width=summary_width,
height=summary_height,
left=summary_left,
right=summary_right,
)
fw_graph = result
result = hovering_widgets(
kmgraph, fw_graph, member_textbox_width=member_textbox_width
)
if graph_data:
result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
if filename:
pio.write_image(result, filename)
return result
def scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale,
):
json_dict = {"nodes": [], "links": []}
node_id_to_num = {}
for i, (node_id, member_ids) in enumerate(simplicial_complex["nodes"].items()):
node_id_to_num[node_id] = i
projection_stats, cluster_stats, member_histogram = _tooltip_components(
member_ids, X, X_names, lens, lens_names, color_function, i, colorscale
)
n = {
"id": i,
"name": node_id,
"member_ids": member_ids,
"color": _color_function(member_ids, color_function),
"size": _size_node(member_ids),
"cluster": cluster_stats,
"distribution": member_histogram,
"projection": projection_stats,
"custom_tooltips": custom_tooltips,
}
json_dict["nodes"].append(n)
for i, (node_id, linked_node_ids) in enumerate(simplicial_complex["links"].items()):
for linked_node_id in linked_node_ids:
lnk = {
"source": node_id_to_num[node_id],
"target": node_id_to_num[linked_node_id],
}
json_dict["links"].append(lnk)
return json_dict
def get_mapper_graph(
simplicial_complex,
color_function=None,
color_function_name=None,
colorscale=None,
custom_tooltips=None,
custom_meta=None,
X=None,
X_names=None,
lens=None,
lens_names=None,
):
"""Generate data for mapper graph visualization and annotation.
Parameters
----------
simplicial_complex : dict
Simplicial complex is the output from the KeplerMapper `map` method.
Returns
-------
the graph dictionary in a json representation, the mapper summary
and the node_distribution
Example
-------
>>> kmgraph, mapper_summary, n_distribution = get_mapper_graph(simplicial_complex)
"""
if not colorscale:
colorscale = default_colorscale
if not len(simplicial_complex["nodes"]) > 0:
raise Exception(
"A mapper graph should have more than 0 nodes. This might be because your clustering algorithm might be too sensitive and be classifying all points as noise."
)
color_function = init_color_function(simplicial_complex, color_function)
if X_names is None:
X_names = []
if lens_names is None:
lens_names = []
json_graph = scomplex_to_graph(
simplicial_complex,
color_function,
X,
X_names,
lens,
lens_names,
custom_tooltips,
colorscale=colorscale,
)
colorf_distribution = graph_data_distribution(
simplicial_complex, color_function, colorscale
)
mapper_summary = format_meta(
simplicial_complex,
color_function_name=color_function_name,
custom_meta=custom_meta,
)
return json_graph, mapper_summary, colorf_distribution
def plotly_graph(
kmgraph,
graph_layout="kk",
colorscale=None,
showscale=True,
factor_size=3,
edge_linecolor="rgb(180,180,180)",
edge_linewidth=1.5,
node_linecolor="rgb(255,255,255)",
node_linewidth=1.0,
):
"""Generate Plotly data structures that represent the mapper graph
Parameters
----------
kmgraph: dict representing the mapper graph,
returned by the function get_mapper_graph()
graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
or 'fr' (fruchterman-reingold)
colorscale: a Plotly colorscale(colormap) to color graph nodes
showscale: boolean to display or not the colorbar
factor_size: a factor for the node size
Returns
-------
The plotly traces (dicts) representing the graph edges and nodes
"""
if not colorscale:
colorscale = default_colorscale
# define an igraph.Graph instance of n_nodes
n_nodes = len(kmgraph["nodes"])
if n_nodes == 0:
raise ValueError("Your graph has 0 nodes")
G = ig.Graph(n=n_nodes)
links = [(e["source"], e["target"]) for e in kmgraph["links"]]
G.add_edges(links)
layt = G.layout(graph_layout)
hover_text = [node["name"] for node in kmgraph["nodes"]]
color_vals = [node["color"] for node in kmgraph["nodes"]]
node_size = np.array(
[factor_size * node["size"] for node in kmgraph["nodes"]], dtype=np.int
)
Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)
edge_trace = dict(
type="scatter",
x=Xe,
y=Ye,
mode="lines",
line=dict(color=edge_linecolor, width=edge_linewidth),
hoverinfo="none",
)
node_trace = dict(
type="scatter",
x=Xn,
y=Yn,
mode="markers",
marker=dict(
size=node_size.tolist(),
color=color_vals,
opacity=1.0,
colorscale=colorscale,
showscale=showscale,
line=dict(color=node_linecolor, width=node_linewidth),
colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
),
text=hover_text,
hoverinfo="text",
)
return [edge_trace, node_trace]
def get_kmgraph_meta(mapper_summary):
""" Extract info from mapper summary to be displayed below the graph plot
"""
d = mapper_summary["custom_meta"]
meta = (
"<b>N_cubes:</b> "
+ str(d["n_cubes"])
+ " <b>Perc_overlap:</b> "
+ str(d["perc_overlap"])
)
meta += (
"<br><b>Nodes:</b> "
+ str(mapper_summary["n_nodes"])
+ " <b>Edges:</b> "
+ str(mapper_summary["n_edges"])
+ " <b>Total samples:</b> "
+ str(mapper_summary["n_total"])
+ " <b>Unique_samples:</b> "
+ str(mapper_summary["n_unique"])
)
return meta
def plot_layout(
title="TDA KMapper",
width=600,
height=600,
bgcolor="rgba(255, 255, 255, 1)",
annotation_text=None,
annotation_x=0,
annotation_y=-0.01,
top=100,
left=60,
right=60,
bottom=60,
):
"""Set the plotly layout
Parameters
----------
width, height: integers
setting width and height of plot window
bgcolor: string,
rgba or hex color code for the background color
annotation_text: string
meta data to be displayed
annotation_x & annotation_y:
The coordinates of the point where we insert the annotation; the negative sign for y coord points output that annotation is inserted below the plot
"""
pl_layout = dict(
title=title,
font=dict(size=12),
showlegend=False,
autosize=False,
width=width,
height=height,
xaxis=dict(visible=False),
yaxis=dict(visible=False),
hovermode="closest",
plot_bgcolor=bgcolor,
margin=dict(t=top, b=bottom, l=left, r=right),
)
if annotation_text is None:
return pl_layout
else:
annotations = [
dict(
showarrow=False,
text=annotation_text,
xref="paper",
yref="paper",
x=annotation_x,
y=annotation_y,
align="left",
xanchor="left",
yanchor="top",
font=dict(size=12),
)
]
pl_layout.update(annotations=annotations)
return pl_layout
def node_hist_fig(
node_color_distribution,
title="Graph Node Distribution",
width=400,
height=300,
top=60,
left=25,
bottom=60,
right=25,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
):
"""Define the plotly plot representing the node histogram
Parameters
----------
node_color_distribution: list of dicts describing the build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes
"""
text = [
"{perc}%".format(**locals())
for perc in [d["perc"] for d in node_color_distribution]
]
pl_hist = go.Bar(
y=[d["height"] for d in node_color_distribution],
marker=dict(color=[d["color"] for d in node_color_distribution]),
text=text,
hoverinfo="y+text",
)
hist_layout = dict(
title=title,
width=width,
height=height,
font=dict(size=12),
xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
bargap=0.01,
margin=dict(l=left, r=right, b=bottom, t=top),
hovermode="x",
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=[pl_hist], layout=hist_layout)
def summary_fig(
mapper_summary,
width=600,
height=500,
top=60,
left=20,
bottom=60,
right=20,
bgcolor="rgb(240,240,240)",
):
"""Define a dummy figure that displays info on the algorithms and
sklearn class instances or methods used
Returns a FigureWidget object representing the figure
"""
text = _text_mapper_summary(mapper_summary)
data = [
dict(
type="scatter",
x=[0, width],
y=[height, 0],
mode="text",
text=[text, ""],
textposition="bottom right",
hoverinfo="none",
)
]
layout = dict(
title="Algorithms and scikit-learn objects/methods",
width=width,
height=height,
font=dict(size=12),
xaxis=dict(visible=False),
yaxis=dict(visible=False, range=[0, height + 5]),
margin=dict(t=top, b=bottom, l=left, r=right),
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=data, layout=layout)
def hovering_widgets(
kmgraph,
graph_fw,
ctooltips=False,
width=400,
height=300,
top=100,
left=50,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
member_textbox_width=200,
):
"""Defines the widgets that display the distribution of each node on hover
and the members of each nodes
Parameters
----------
kmgraph: the kepler-mapper graph dict returned by `get_mapper_graph()``
graph_fw: the FigureWidget representing the graph
ctooltips: boolean; if True/False the node["custom_tooltips"]/"member_ids"
are passed to member_textbox
width, height, top refer to the figure
size and position of the hovered node distribution
Returns
-------
a box containing the graph figure, the figure of the hovered node
distribution, and the textboxes displaying the cluster size and member_ids
or custom tooltips for hovered node members
"""
fnode = kmgraph["nodes"][0]
fwc = node_hist_fig(
fnode["distribution"],
title="Cluster Member Distribution",
width=width,
height=height,
top=top,
left=left,
bgcolor=bgcolor,
y_gridcolor=y_gridcolor,
)
clust_textbox = ipw.Text(
value="{:d}".format(fnode["cluster"]["size"]),
description="Cluster size:",
disabled=False,
continuous_update=True,
)
clust_textbox.layout = dict(margin="10px 10px 10px 10px", width="200px")
member_textbox = ipw.Textarea(
value=", ".join(str(x) for x in fnode["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in fnode["custom_tooltips"]),
description="Members:",
disabled=False,
continuous_update=True,
)
member_textbox.layout = dict(
margin="5px 5px 5px 10px", width=str(member_textbox_width) + "px"
)
def do_on_hover(trace, points, state):
if not points.point_inds:
return
ind = points.point_inds[0] # get the index of the hovered node
node = kmgraph["nodes"][ind]
# on hover do:
with fwc.batch_update(): # update data in the cluster member histogr
fwc.data[0].text = [
"{:.1f}%".format(d["perc"]) for d in node["distribution"]
]
fwc.data[0].y = [d["height"] for d in node["distribution"]]
fwc.data[0].marker.color = [d["color"] for d in node["distribution"]]
clust_textbox.value = "{:d}".format(node["cluster"]["size"])
member_textbox.value = (
", ".join(str(x) for x in node["member_ids"])
if not ctooltips
else ", ".join(str(x) for x in node["custom_tooltips"])
)
trace = graph_fw.data[1]
trace.on_hover(do_on_hover)
return ipw.VBox([ipw.HBox([graph_fw, fwc]), clust_textbox, member_textbox])
def _get_plotly_data(E, coords):
# E : the list of tuples representing the graph edges
# coords: list of node coordinates assigned by igraph.Layout
N = len(coords)
Xnodes = [coords[k][0] for k in range(N)] # x-coordinates of nodes
Ynodes = [coords[k][1] for k in range(N)] # y-coordnates of nodes
Xedges = []
Yedges = []
for e in E:
Xedges.extend([coords[e[0]][0], coords[e[1]][0], None])
Yedges.extend([coords[e[0]][1], coords[e[1]][1], None])
return Xnodes, Ynodes, Xedges, Yedges
def _text_mapper_summary(mapper_summary):
d = mapper_summary["custom_meta"]
text = "<br><b>Projection: </b>" + d["projection"]
text += (
"<br><b>Clusterer: </b>" + d["clusterer"] + "<br><b>Scaler: </b>" + d["scaler"]
)
if "color_function" in d.keys():
text += "<br><b>Color function: </b>" + d["color_function"]
return text
def _hover_format(member_ids, custom_tooltips, X, X_names, lens, lens_names):
cluster_data = _format_cluster_statistics(member_ids, X, X_names)
tooltip = ""
custom_tooltips = (
custom_tooltips[member_ids] if custom_tooltips is not None else member_ids
)
val_size = cluster_data["size"]
tooltip += "{val_size}".format(**locals())
return tooltip
|
scikit-tda/kepler-mapper | kmapper/nerve.py | GraphNerve.compute | python | def compute(self, nodes):
result = defaultdict(list)
# Create links when clusters from different hypercubes have members with the same sample id.
candidates = itertools.combinations(nodes.keys(), 2)
for candidate in candidates:
# if there are non-unique members in the union
if (
len(set(nodes[candidate[0]]).intersection(nodes[candidate[1]]))
>= self.min_intersection
):
result[candidate[0]].append(candidate[1])
edges = [[x, end] for x in result for end in result[x]]
simplices = [[n] for n in nodes] + edges
return result, simplices | Helper function to find edges of the overlapping clusters.
Parameters
----------
nodes:
A dictionary with entires `{node id}:{list of ids in node}`
Returns
-------
edges:
A 1-skeleton of the nerve (intersecting nodes)
simplicies:
Complete list of simplices | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/nerve.py#L35-L67 | null | class GraphNerve(Nerve):
""" Creates the 1-skeleton of the Mapper complex.
Parameters
-----------
min_intersection: int, default is 1
Minimum intersection considered when computing the nerve. An edge will be created only when the intersection between two nodes is greater than or equal to `min_intersection`
"""
def __init__(self, min_intersection=1):
self.min_intersection = min_intersection
def __repr__(self):
return "GraphNerve(min_intersection={})".format(self.min_intersection)
|
scikit-tda/kepler-mapper | kmapper/cover.py | Cover.fit | python | def fit(self, data):
# TODO: support indexing into any columns
di = np.array(range(1, data.shape[1]))
indexless_data = data[:, di]
n_dims = indexless_data.shape[1]
# support different values along each dimension
## -- is a list, needs to be array
## -- is a singleton, needs repeating
if isinstance(self.n_cubes, Iterable):
n_cubes = np.array(self.n_cubes)
assert (
len(n_cubes) == n_dims
), "Custom cubes in each dimension must match number of dimensions"
else:
n_cubes = np.repeat(self.n_cubes, n_dims)
if isinstance(self.perc_overlap, Iterable):
perc_overlap = np.array(self.perc_overlap)
assert (
len(perc_overlap) == n_dims
), "Custom cubes in each dimension must match number of dimensions"
else:
perc_overlap = np.repeat(self.perc_overlap, n_dims)
assert all(0.0 <= p <= 1.0 for p in perc_overlap), (
"Each overlap percentage must be between 0.0 and 1.0., not %s"
% perc_overlap
)
bounds = self._compute_bounds(indexless_data)
ranges = bounds[1] - bounds[0]
# (n-1)/n |range|
inner_range = ((n_cubes - 1) / n_cubes) * ranges
inset = (ranges - inner_range) / 2
# |range| / (2n ( 1 - p))
radius = ranges / (2 * (n_cubes) * (1 - perc_overlap))
# centers are fixed w.r.t perc_overlap
zip_items = list(bounds) # work around 2.7,3.4 weird behavior
zip_items.extend([n_cubes, inset])
centers_per_dimension = [
np.linspace(b + r, c - r, num=n) for b, c, n, r in zip(*zip_items)
]
centers = [np.array(c) for c in product(*centers_per_dimension)]
self.centers_ = centers
self.radius_ = radius
self.inset_ = inset
self.inner_range_ = inner_range
self.bounds_ = bounds
self.di_ = di
if self.verbose > 0:
print(
" - Cover - centers: %s\ninner_range: %s\nradius: %s"
% (self.centers_, self.inner_range_, self.radius_)
)
return centers | Fit a cover on the data. This method constructs centers and radii in each dimension given the `perc_overlap` and `n_cube`.
Parameters
============
data: array-like
Data to apply the cover to. Warning: First column must be an index column.
Returns
========
centers: list of arrays
A list of centers for each cube | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/cover.py#L135-L214 | [
"def _compute_bounds(self, data):\n\n # If self.limits is array-like\n if isinstance(self.limits, np.ndarray):\n # limits_array is used so we can change the values of self.limits from None to the min/max\n limits_array = np.zeros(self.limits.shape)\n limits_array[:, 0] = np.min(data, axis... | class Cover:
"""Helper class that defines the default covering scheme
It calculates the cover based on the following formula for overlap. (https://arxiv.org/pdf/1706.00204.pdf)
::
|cube[i] intersection cube[i+1]|
overlap = --------------------------------------
|cube[i]|
Parameters
============
n_cubes: int
Number of hypercubes along each dimension. Sometimes referred to as resolution.
perc_overlap: float
Amount of overlap between adjacent cubes calculated only along 1 dimension.
limits: Numpy Array (n_dim,2)
(lower bound, upper bound) for every dimension
If a value is set to `np.float('inf')`, the bound will be assumed to be the min/max value of the dimension
Also, if `limits == None`, the limits are defined by the maximum and minimum value of the lens for all dimensions.
i.e. `[[min_1, max_1], [min_2, max_2], [min_3, max_3]]`
Example
---------
::
>>> import numpy as np
>>> from kmapper.cover import Cover
>>> data = np.random.random((100,2))
>>> cov = Cover(n_cubes=15, perc_overlap=0.75)
>>> cube_centers = cov.fit(data)
>>> cov.transform_single(data, cube_centers[0])
array([[0.3594448 , 0.07428465],
[0.14490332, 0.01395559],
[0.94988668, 0.03983579],
[0.73517978, 0.09420806],
[0.16903735, 0.06901085],
[0.81578595, 0.10708731],
[0.26923572, 0.12216203],
[0.89203167, 0.0711279 ],
[0.80442115, 0.10220901],
[0.33210782, 0.04365007],
[0.52207707, 0.05892861],
[0.26589744, 0.08502856],
[0.02360067, 0.1263653 ],
[0.29855631, 0.01209373]])
>>> hyper_cubes = cov.transform(data, cube_centers)
"""
def __init__(self, n_cubes=10, perc_overlap=0.5, limits=None, verbose=0):
self.centers_ = None
self.radius_ = None
self.inset_ = None
self.inner_range_ = None
self.bounds_ = None
self.di_ = None
self.n_cubes = n_cubes
self.perc_overlap = perc_overlap
self.limits = limits
self.verbose = verbose
# Check limits can actually be handled and are set appropriately
assert isinstance(
self.limits, (list, np.ndarray, type(None))
), "limits should either be an array or None"
if isinstance(self.limits, (list, np.ndarray)):
self.limits = np.array(self.limits)
assert self.limits.shape[1] == 2, "limits should be (n_dim,2) in shape"
def __repr__(self):
return "Cover(n_cubes=%s, perc_overlap=%s, limits=%s, verbose=%s)" % (
self.n_cubes,
self.perc_overlap,
self.limits,
self.verbose,
)
def _compute_bounds(self, data):
# If self.limits is array-like
if isinstance(self.limits, np.ndarray):
# limits_array is used so we can change the values of self.limits from None to the min/max
limits_array = np.zeros(self.limits.shape)
limits_array[:, 0] = np.min(data, axis=0)
limits_array[:, 1] = np.max(data, axis=0)
limits_array[self.limits != np.float("inf")] = 0
self.limits[self.limits == np.float("inf")] = 0
bounds_arr = self.limits + limits_array
""" bounds_arr[i,j] = self.limits[i,j] if self.limits[i,j] == inf
bounds_arr[i,j] = max/min(data[i]) if self.limits == inf """
bounds = (bounds_arr[:, 0], bounds_arr[:, 1])
# Check new bounds are actually sensible - do they cover the range of values in the dataset?
if not (
(np.min(data, axis=0) >= bounds_arr[:, 0]).all()
or (np.max(data, axis=0) <= bounds_arr[:, 1]).all()
):
warnings.warn(
"The limits given do not cover the entire range of the lens functions\n"
+ "Actual Minima: %s\tInput Minima: %s\n"
% (np.min(data, axis=0), bounds_arr[:, 0])
+ "Actual Maxima: %s\tInput Maxima: %s\n"
% (np.max(data, axis=0), bounds_arr[:, 1])
)
else: # It must be None, as we checked to see if it is array-like or None in __init__
bounds = (np.min(data, axis=0), np.max(data, axis=0))
return bounds
def transform_single(self, data, center, i=0):
""" Compute entries of `data` in hypercube centered at `center`
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
center: array-like
Center points for the cube. Cube is found as all data in `[center-self.radius_, center+self.radius_]`
i: int, default 0
Optional counter to aid in verbose debugging.
"""
lowerbounds, upperbounds = center - self.radius_, center + self.radius_
# Slice the hypercube
entries = (data[:, self.di_] >= lowerbounds) & (
data[:, self.di_] <= upperbounds
)
hypercube = data[np.invert(np.any(entries == False, axis=1))]
if self.verbose > 1:
print(
"There are %s points in cube %s/%s"
% (hypercube.shape[0], i + 1, len(self.centers_))
)
return hypercube
def transform(self, data, centers=None):
""" Find entries of all hypercubes. If `centers=None`, then use `self.centers_` as computed in `self.fit`.
Empty hypercubes are removed from the result
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
centers: list of array-like
Center points for all cubes as returned by `self.fit`. Default is to use `self.centers_`.
Returns
=========
hypercubes: list of array-like
list of entries in each hypercube in `data`.
"""
centers = centers or self.centers_
hypercubes = [
self.transform_single(data, cube, i) for i, cube in enumerate(centers)
]
# Clean out any empty cubes (common in high dimensions)
hypercubes = [cube for cube in hypercubes if len(cube)]
return hypercubes
def fit_transform(self, data):
self.fit(data)
return self.transform(data)
|
scikit-tda/kepler-mapper | kmapper/cover.py | Cover.transform_single | python | def transform_single(self, data, center, i=0):
lowerbounds, upperbounds = center - self.radius_, center + self.radius_
# Slice the hypercube
entries = (data[:, self.di_] >= lowerbounds) & (
data[:, self.di_] <= upperbounds
)
hypercube = data[np.invert(np.any(entries == False, axis=1))]
if self.verbose > 1:
print(
"There are %s points in cube %s/%s"
% (hypercube.shape[0], i + 1, len(self.centers_))
)
return hypercube | Compute entries of `data` in hypercube centered at `center`
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
center: array-like
Center points for the cube. Cube is found as all data in `[center-self.radius_, center+self.radius_]`
i: int, default 0
Optional counter to aid in verbose debugging. | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/cover.py#L216-L244 | null | class Cover:
"""Helper class that defines the default covering scheme
It calculates the cover based on the following formula for overlap. (https://arxiv.org/pdf/1706.00204.pdf)
::
|cube[i] intersection cube[i+1]|
overlap = --------------------------------------
|cube[i]|
Parameters
============
n_cubes: int
Number of hypercubes along each dimension. Sometimes referred to as resolution.
perc_overlap: float
Amount of overlap between adjacent cubes calculated only along 1 dimension.
limits: Numpy Array (n_dim,2)
(lower bound, upper bound) for every dimension
If a value is set to `np.float('inf')`, the bound will be assumed to be the min/max value of the dimension
Also, if `limits == None`, the limits are defined by the maximum and minimum value of the lens for all dimensions.
i.e. `[[min_1, max_1], [min_2, max_2], [min_3, max_3]]`
Example
---------
::
>>> import numpy as np
>>> from kmapper.cover import Cover
>>> data = np.random.random((100,2))
>>> cov = Cover(n_cubes=15, perc_overlap=0.75)
>>> cube_centers = cov.fit(data)
>>> cov.transform_single(data, cube_centers[0])
array([[0.3594448 , 0.07428465],
[0.14490332, 0.01395559],
[0.94988668, 0.03983579],
[0.73517978, 0.09420806],
[0.16903735, 0.06901085],
[0.81578595, 0.10708731],
[0.26923572, 0.12216203],
[0.89203167, 0.0711279 ],
[0.80442115, 0.10220901],
[0.33210782, 0.04365007],
[0.52207707, 0.05892861],
[0.26589744, 0.08502856],
[0.02360067, 0.1263653 ],
[0.29855631, 0.01209373]])
>>> hyper_cubes = cov.transform(data, cube_centers)
"""
def __init__(self, n_cubes=10, perc_overlap=0.5, limits=None, verbose=0):
self.centers_ = None
self.radius_ = None
self.inset_ = None
self.inner_range_ = None
self.bounds_ = None
self.di_ = None
self.n_cubes = n_cubes
self.perc_overlap = perc_overlap
self.limits = limits
self.verbose = verbose
# Check limits can actually be handled and are set appropriately
assert isinstance(
self.limits, (list, np.ndarray, type(None))
), "limits should either be an array or None"
if isinstance(self.limits, (list, np.ndarray)):
self.limits = np.array(self.limits)
assert self.limits.shape[1] == 2, "limits should be (n_dim,2) in shape"
def __repr__(self):
return "Cover(n_cubes=%s, perc_overlap=%s, limits=%s, verbose=%s)" % (
self.n_cubes,
self.perc_overlap,
self.limits,
self.verbose,
)
def _compute_bounds(self, data):
# If self.limits is array-like
if isinstance(self.limits, np.ndarray):
# limits_array is used so we can change the values of self.limits from None to the min/max
limits_array = np.zeros(self.limits.shape)
limits_array[:, 0] = np.min(data, axis=0)
limits_array[:, 1] = np.max(data, axis=0)
limits_array[self.limits != np.float("inf")] = 0
self.limits[self.limits == np.float("inf")] = 0
bounds_arr = self.limits + limits_array
""" bounds_arr[i,j] = self.limits[i,j] if self.limits[i,j] == inf
bounds_arr[i,j] = max/min(data[i]) if self.limits == inf """
bounds = (bounds_arr[:, 0], bounds_arr[:, 1])
# Check new bounds are actually sensible - do they cover the range of values in the dataset?
if not (
(np.min(data, axis=0) >= bounds_arr[:, 0]).all()
or (np.max(data, axis=0) <= bounds_arr[:, 1]).all()
):
warnings.warn(
"The limits given do not cover the entire range of the lens functions\n"
+ "Actual Minima: %s\tInput Minima: %s\n"
% (np.min(data, axis=0), bounds_arr[:, 0])
+ "Actual Maxima: %s\tInput Maxima: %s\n"
% (np.max(data, axis=0), bounds_arr[:, 1])
)
else: # It must be None, as we checked to see if it is array-like or None in __init__
bounds = (np.min(data, axis=0), np.max(data, axis=0))
return bounds
def fit(self, data):
""" Fit a cover on the data. This method constructs centers and radii in each dimension given the `perc_overlap` and `n_cube`.
Parameters
============
data: array-like
Data to apply the cover to. Warning: First column must be an index column.
Returns
========
centers: list of arrays
A list of centers for each cube
"""
# TODO: support indexing into any columns
di = np.array(range(1, data.shape[1]))
indexless_data = data[:, di]
n_dims = indexless_data.shape[1]
# support different values along each dimension
## -- is a list, needs to be array
## -- is a singleton, needs repeating
if isinstance(self.n_cubes, Iterable):
n_cubes = np.array(self.n_cubes)
assert (
len(n_cubes) == n_dims
), "Custom cubes in each dimension must match number of dimensions"
else:
n_cubes = np.repeat(self.n_cubes, n_dims)
if isinstance(self.perc_overlap, Iterable):
perc_overlap = np.array(self.perc_overlap)
assert (
len(perc_overlap) == n_dims
), "Custom cubes in each dimension must match number of dimensions"
else:
perc_overlap = np.repeat(self.perc_overlap, n_dims)
assert all(0.0 <= p <= 1.0 for p in perc_overlap), (
"Each overlap percentage must be between 0.0 and 1.0., not %s"
% perc_overlap
)
bounds = self._compute_bounds(indexless_data)
ranges = bounds[1] - bounds[0]
# (n-1)/n |range|
inner_range = ((n_cubes - 1) / n_cubes) * ranges
inset = (ranges - inner_range) / 2
# |range| / (2n ( 1 - p))
radius = ranges / (2 * (n_cubes) * (1 - perc_overlap))
# centers are fixed w.r.t perc_overlap
zip_items = list(bounds) # work around 2.7,3.4 weird behavior
zip_items.extend([n_cubes, inset])
centers_per_dimension = [
np.linspace(b + r, c - r, num=n) for b, c, n, r in zip(*zip_items)
]
centers = [np.array(c) for c in product(*centers_per_dimension)]
self.centers_ = centers
self.radius_ = radius
self.inset_ = inset
self.inner_range_ = inner_range
self.bounds_ = bounds
self.di_ = di
if self.verbose > 0:
print(
" - Cover - centers: %s\ninner_range: %s\nradius: %s"
% (self.centers_, self.inner_range_, self.radius_)
)
return centers
def transform(self, data, centers=None):
""" Find entries of all hypercubes. If `centers=None`, then use `self.centers_` as computed in `self.fit`.
Empty hypercubes are removed from the result
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
centers: list of array-like
Center points for all cubes as returned by `self.fit`. Default is to use `self.centers_`.
Returns
=========
hypercubes: list of array-like
list of entries in each hypercobe in `data`.
"""
centers = centers or self.centers_
hypercubes = [
self.transform_single(data, cube, i) for i, cube in enumerate(centers)
]
# Clean out any empty cubes (common in high dimensions)
hypercubes = [cube for cube in hypercubes if len(cube)]
return hypercubes
def fit_transform(self, data):
self.fit(data)
return self.transform(data)
|
scikit-tda/kepler-mapper | kmapper/cover.py | Cover.transform | python | def transform(self, data, centers=None):
centers = centers or self.centers_
hypercubes = [
self.transform_single(data, cube, i) for i, cube in enumerate(centers)
]
# Clean out any empty cubes (common in high dimensions)
hypercubes = [cube for cube in hypercubes if len(cube)]
return hypercubes | Find entries of all hypercubes. If `centers=None`, then use `self.centers_` as computed in `self.fit`.
Empty hypercubes are removed from the result
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
centers: list of array-like
Center points for all cubes as returned by `self.fit`. Default is to use `self.centers_`.
Returns
=========
hypercubes: list of array-like
list of entries in each hypercobe in `data`. | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/cover.py#L246-L273 | null | class Cover:
"""Helper class that defines the default covering scheme
It calculates the cover based on the following formula for overlap. (https://arxiv.org/pdf/1706.00204.pdf)
::
|cube[i] intersection cube[i+1]|
overlap = --------------------------------------
|cube[i]|
Parameters
============
n_cubes: int
Number of hypercubes along each dimension. Sometimes referred to as resolution.
perc_overlap: float
Amount of overlap between adjacent cubes calculated only along 1 dimension.
limits: Numpy Array (n_dim,2)
(lower bound, upper bound) for every dimension
If a value is set to `np.float('inf')`, the bound will be assumed to be the min/max value of the dimension
Also, if `limits == None`, the limits are defined by the maximum and minimum value of the lens for all dimensions.
i.e. `[[min_1, max_1], [min_2, max_2], [min_3, max_3]]`
Example
---------
::
>>> import numpy as np
>>> from kmapper.cover import Cover
>>> data = np.random.random((100,2))
>>> cov = Cover(n_cubes=15, perc_overlap=0.75)
>>> cube_centers = cov.fit(data)
>>> cov.transform_single(data, cube_centers[0])
array([[0.3594448 , 0.07428465],
[0.14490332, 0.01395559],
[0.94988668, 0.03983579],
[0.73517978, 0.09420806],
[0.16903735, 0.06901085],
[0.81578595, 0.10708731],
[0.26923572, 0.12216203],
[0.89203167, 0.0711279 ],
[0.80442115, 0.10220901],
[0.33210782, 0.04365007],
[0.52207707, 0.05892861],
[0.26589744, 0.08502856],
[0.02360067, 0.1263653 ],
[0.29855631, 0.01209373]])
>>> hyper_cubes = cov.transform(data, cube_centers)
"""
def __init__(self, n_cubes=10, perc_overlap=0.5, limits=None, verbose=0):
self.centers_ = None
self.radius_ = None
self.inset_ = None
self.inner_range_ = None
self.bounds_ = None
self.di_ = None
self.n_cubes = n_cubes
self.perc_overlap = perc_overlap
self.limits = limits
self.verbose = verbose
# Check limits can actually be handled and are set appropriately
assert isinstance(
self.limits, (list, np.ndarray, type(None))
), "limits should either be an array or None"
if isinstance(self.limits, (list, np.ndarray)):
self.limits = np.array(self.limits)
assert self.limits.shape[1] == 2, "limits should be (n_dim,2) in shape"
def __repr__(self):
return "Cover(n_cubes=%s, perc_overlap=%s, limits=%s, verbose=%s)" % (
self.n_cubes,
self.perc_overlap,
self.limits,
self.verbose,
)
def _compute_bounds(self, data):
# If self.limits is array-like
if isinstance(self.limits, np.ndarray):
# limits_array is used so we can change the values of self.limits from None to the min/max
limits_array = np.zeros(self.limits.shape)
limits_array[:, 0] = np.min(data, axis=0)
limits_array[:, 1] = np.max(data, axis=0)
limits_array[self.limits != np.float("inf")] = 0
self.limits[self.limits == np.float("inf")] = 0
bounds_arr = self.limits + limits_array
""" bounds_arr[i,j] = self.limits[i,j] if self.limits[i,j] == inf
bounds_arr[i,j] = max/min(data[i]) if self.limits == inf """
bounds = (bounds_arr[:, 0], bounds_arr[:, 1])
# Check new bounds are actually sensible - do they cover the range of values in the dataset?
if not (
(np.min(data, axis=0) >= bounds_arr[:, 0]).all()
or (np.max(data, axis=0) <= bounds_arr[:, 1]).all()
):
warnings.warn(
"The limits given do not cover the entire range of the lens functions\n"
+ "Actual Minima: %s\tInput Minima: %s\n"
% (np.min(data, axis=0), bounds_arr[:, 0])
+ "Actual Maxima: %s\tInput Maxima: %s\n"
% (np.max(data, axis=0), bounds_arr[:, 1])
)
else: # It must be None, as we checked to see if it is array-like or None in __init__
bounds = (np.min(data, axis=0), np.max(data, axis=0))
return bounds
def fit(self, data):
""" Fit a cover on the data. This method constructs centers and radii in each dimension given the `perc_overlap` and `n_cube`.
Parameters
============
data: array-like
Data to apply the cover to. Warning: First column must be an index column.
Returns
========
centers: list of arrays
A list of centers for each cube
"""
# TODO: support indexing into any columns
di = np.array(range(1, data.shape[1]))
indexless_data = data[:, di]
n_dims = indexless_data.shape[1]
# support different values along each dimension
## -- is a list, needs to be array
## -- is a singleton, needs repeating
if isinstance(self.n_cubes, Iterable):
n_cubes = np.array(self.n_cubes)
assert (
len(n_cubes) == n_dims
), "Custom cubes in each dimension must match number of dimensions"
else:
n_cubes = np.repeat(self.n_cubes, n_dims)
if isinstance(self.perc_overlap, Iterable):
perc_overlap = np.array(self.perc_overlap)
assert (
len(perc_overlap) == n_dims
), "Custom cubes in each dimension must match number of dimensions"
else:
perc_overlap = np.repeat(self.perc_overlap, n_dims)
assert all(0.0 <= p <= 1.0 for p in perc_overlap), (
"Each overlap percentage must be between 0.0 and 1.0., not %s"
% perc_overlap
)
bounds = self._compute_bounds(indexless_data)
ranges = bounds[1] - bounds[0]
# (n-1)/n |range|
inner_range = ((n_cubes - 1) / n_cubes) * ranges
inset = (ranges - inner_range) / 2
# |range| / (2n ( 1 - p))
radius = ranges / (2 * (n_cubes) * (1 - perc_overlap))
# centers are fixed w.r.t perc_overlap
zip_items = list(bounds) # work around 2.7,3.4 weird behavior
zip_items.extend([n_cubes, inset])
centers_per_dimension = [
np.linspace(b + r, c - r, num=n) for b, c, n, r in zip(*zip_items)
]
centers = [np.array(c) for c in product(*centers_per_dimension)]
self.centers_ = centers
self.radius_ = radius
self.inset_ = inset
self.inner_range_ = inner_range
self.bounds_ = bounds
self.di_ = di
if self.verbose > 0:
print(
" - Cover - centers: %s\ninner_range: %s\nradius: %s"
% (self.centers_, self.inner_range_, self.radius_)
)
return centers
def transform_single(self, data, center, i=0):
""" Compute entries of `data` in hypercube centered at `center`
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
center: array-like
Center points for the cube. Cube is found as all data in `[center-self.radius_, center+self.radius_]`
i: int, default 0
Optional counter to aid in verbose debugging.
"""
lowerbounds, upperbounds = center - self.radius_, center + self.radius_
# Slice the hypercube
entries = (data[:, self.di_] >= lowerbounds) & (
data[:, self.di_] <= upperbounds
)
hypercube = data[np.invert(np.any(entries == False, axis=1))]
if self.verbose > 1:
print(
"There are %s points in cube %s/%s"
% (hypercube.shape[0], i + 1, len(self.centers_))
)
return hypercube
def transform(self, data, centers=None):
""" Find entries of all hypercubes. If `centers=None`, then use `self.centers_` as computed in `self.fit`.
Empty hypercubes are removed from the result
Parameters
===========
data: array-like
Data to find in entries in cube. Warning: first column must be index column.
centers: list of array-like
Center points for all cubes as returned by `self.fit`. Default is to use `self.centers_`.
Returns
=========
hypercubes: list of array-like
list of entries in each hypercobe in `data`.
"""
centers = centers or self.centers_
hypercubes = [
self.transform_single(data, cube, i) for i, cube in enumerate(centers)
]
# Clean out any empty cubes (common in high dimensions)
hypercubes = [cube for cube in hypercubes if len(cube)]
return hypercubes
def fit_transform(self, data):
self.fit(data)
return self.transform(data)
|
def to_networkx(graph):
    """Convert a Mapper 1-complex to a networkx graph.

    Parameters
    -----------
    graph: dictionary, graph object returned from `kmapper.map`

    Returns
    --------
    g: graph as networkx.Graph() object
    """
    # Imported here so networkx is not a hard dependency of the package.
    import networkx as nx

    g = nx.Graph()
    g.add_nodes_from(graph["nodes"].keys())
    nx.set_node_attributes(g, dict(graph["nodes"]), "membership")
    g.add_edges_from(
        [start, end] for start, ends in graph["links"].items() for end in ends
    )
    return g
Parameters
-----------
graph: dictionary, graph object returned from `kmapper.map`
Returns
--------
g: graph as networkx.Graph() object | train | https://github.com/scikit-tda/kepler-mapper/blob/d4ed39f6392b0a134dd573d7d9c4aa65fbef3a7d/kmapper/adapter.py#L8-L35 | null | """ Adapt Mapper format into other common formats.
- networkx
"""
to_nx = to_networkx
|
def git_exec(self, command, **kwargs):
    """Run a git command through GitPython, echoing it when verbose.

    ``command`` is mutated in place: the git executable is prepended.
    In fake mode nothing is executed and a dummy result is returned.
    """
    from .cli import verbose_echo

    command.insert(0, self.git)
    # 'no_verbose' suppresses echoing when git output isn't helpful to user.
    verbose = False if kwargs.pop('no_verbose', False) else self.verbose
    verbose_echo(' '.join(command), verbose, self.fake)
    if self.fake:
        # Mimic GitPython's return shape without touching the repo.
        return (0, '', '') if 'with_extended_output' in kwargs else ''
    return self.repo.git.execute(command, **kwargs)
"def verbose_echo(str, verbose=False, fake=False):\n \"\"\"Selectively output ``str``, with special formatting if ``fake`` is True\"\"\"\n verbose = fake or verbose\n\n if verbose:\n color = crayons.green\n prefix = ''\n if fake:\n color = crayons.red\n prefix = '... | class SCMRepo(object):
git = None
repo = None
remote = None
verbose = False
fake = False
stash_index = None
def __init__(self):
self.git = os.environ.get('GIT_PYTHON_GIT_EXECUTABLE', 'git')
try:
self.repo = Repo(search_parent_directories=True)
self.remote = self.get_remote()
except InvalidGitRepositoryError:
self.repo = None
def repo_check(self, require_remote=False):
if self.repo is None:
click.echo('Not a git repository.')
sys.exit(128)
# TODO: no remote fail
if not self.repo.remotes and require_remote:
click.echo('No git remotes configured. Please add one.')
sys.exit(128)
# TODO: You're in a merge state.
def stash_log(self, sync=False):
if self.repo.is_dirty():
status_log(self.stash_it, 'Saving local changes.', sync=sync)
def unstash_log(self, sync=False):
self.stash_index = self.unstash_index(sync=sync)
if self.stash_index:
status_log(self.unstash_it, 'Restoring local changes.', sync=sync)
def unstash_index(self, sync=False, branch=None):
"""Returns an unstash index if one is available."""
stash_list = self.git_exec(['stash', 'list'], no_verbose=True)
if branch is None:
branch = self.get_current_branch_name()
for stash in stash_list.splitlines():
verb = 'syncing' if sync else 'switching'
if (
(('Legit' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
) or
(('GitHub' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
)
):
return stash[7]
def stash_it(self, sync=False):
msg = 'syncing branch' if sync else 'switching branches'
return self.git_exec(
['stash', 'save', '--include-untracked', LEGIT_TEMPLATE.format(msg)])
def unstash_it(self, sync=False):
"""
Unstashes changes from current branch for branch sync.
Requires prior code setting self.stash_index.
"""
if self.stash_index is not None:
return self.git_exec(
['stash', 'pop', 'stash@{{{0}}}'.format(self.stash_index)])
def smart_pull(self):
"""
'git log --merges origin/master..master'
"""
branch = self.get_current_branch_name()
self.git_exec(['fetch', self.remote.name])
return self.smart_merge('{0}/{1}'.format(self.remote.name, branch),
self.smart_merge_enabled())
def smart_merge_enabled(self):
reader = self.repo.config_reader()
if reader.has_option('legit', 'smartMerge'):
return reader.getboolean('legit', 'smartMerge')
else:
return True
def smart_merge(self, branch, allow_rebase=True):
from_branch = self.get_current_branch_name()
merges = self.git_exec(
['log', '--merges', '{0}..{1}'.format(branch, from_branch)])
if allow_rebase:
verb = 'merge' if merges.count('commit') else 'rebase'
else:
if self.pull_rebase():
verb = 'rebase'
else:
verb = 'merge'
if verb != 'rebase' and self.pull_ff_only():
return self.git_exec([verb, '--ff-only', branch])
else:
try:
return self.git_exec([verb, branch])
except GitCommandError as why:
log = self.git_exec([verb, '--abort'])
abort('Merge failed. Reverting.',
log='{0}\n{1}'.format(why, log), type='merge')
def pull_rebase(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'rebase'):
return reader.getboolean('pull', 'rebase')
else:
return False
def pull_ff_only(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'ff'):
if reader.get('pull', 'ff') == 'only':
return True
else:
return False
else:
return False
def push(self, branch=None):
if branch is None:
return self.git_exec(['push'])
else:
return self.git_exec(['push', self.remote.name, branch])
def checkout_branch(self, branch):
"""Checks out given branch."""
_, stdout, stderr = self.git_exec(
['checkout', branch],
with_extended_output=True)
return '\n'.join([stderr, stdout])
def unpublish_branch(self, branch):
"""Unpublishes given branch."""
try:
return self.git_exec(
['push', self.remote.name, ':{0}'.format(branch)])
except GitCommandError:
_, _, log = self.git_exec(
['fetch', self.remote.name, '--prune'],
with_extended_output=True)
abort('Unpublish failed. Fetching.', log=log, type='unpublish')
def publish_branch(self, branch):
"""Publishes given branch."""
return self.git_exec(
['push', '-u', self.remote.name, branch])
def undo(self, hard=False):
"""Makes last commit not exist"""
if not self.fake:
return self.repo.git.reset('HEAD^', working_tree=hard)
else:
click.echo(crayons.red('Faked! >>> git reset {}{}'
.format('--hard ' if hard else '', 'HEAD^')))
return 0
def get_remote(self):
self.repo_check()
reader = self.repo.config_reader()
# If there is no remote option in legit section, return default
if reader.has_option('legit', 'remote'):
remote_name = reader.get('legit', 'remote')
if remote_name not in [r.name for r in self.repo.remotes]:
if fallback_enabled(reader):
return self.get_default_remote()
else:
click.echo('Remote "{0}" does not exist!'.format(remote_name))
will_aborted = click.confirm(
'\nPress `Y` to abort now,\n' +
'`n` to use default remote and turn fallback on for this repo:')
if will_aborted:
click.echo('\nAborted. Please update your git configuration.')
sys.exit(64) # EX_USAGE
else:
writer = self.repo.config_writer()
writer.set_value('legit', 'remoteFallback', 'true')
click.echo('\n`legit.RemoteFallback` changed to true for current repo.')
return self.get_default_remote()
else:
return self.repo.remote(remote_name)
else:
return self.get_default_remote()
def get_default_remote(self):
if len(self.repo.remotes) == 0:
return None
else:
return self.repo.remotes[0]
def get_current_branch_name(self):
"""Returns current branch name"""
return self.repo.head.ref.name
def fuzzy_match_branch(self, branch):
if not branch:
return False
all_branches = self.get_branch_names()
if branch in all_branches:
return branch
def branch_fuzzy_match(b):
return b.startswith(branch)
possible_branches = list(filter(branch_fuzzy_match, all_branches))
if len(possible_branches) == 1:
return possible_branches[0]
return branch
def get_branches(self, local=True, remote_branches=True):
"""Returns a list of local and remote branches."""
if not self.repo.remotes:
remote_branches = False
branches = []
if remote_branches:
# Remote refs.
try:
for b in self.remote.refs:
name = '/'.join(b.name.split('/')[1:])
if name not in legit_settings.forbidden_branches:
branches.append(Branch(name, is_published=True))
except (IndexError, AssertionError):
pass
if local:
# Local refs.
for b in [h.name for h in self.repo.heads]:
if (not remote_branches) or (b not in [br.name for br in branches]):
if b not in legit_settings.forbidden_branches:
branches.append(Branch(b, is_published=False))
return sorted(branches, key=attrgetter('name'))
def get_branch_names(self, local=True, remote_branches=True):
branches = self.get_branches(local=local, remote_branches=remote_branches)
return [b.name for b in branches]
def display_available_branches(self):
"""Displays available branches."""
if not self.repo.remotes:
remote_branches = False
else:
remote_branches = True
branches = self.get_branches(local=True, remote_branches=remote_branches)
if not branches:
click.echo(crayons.red('No branches available'))
return
branch_col = len(max([b.name for b in branches], key=len)) + 1
for branch in branches:
try:
branch_is_selected = (branch.name == self.get_current_branch_name())
except TypeError:
branch_is_selected = False
marker = '*' if branch_is_selected else ' '
color = colored.green if branch_is_selected else colored.yellow
pub = '(published)' if branch.is_published else '(unpublished)'
click.echo(columns(
[colored.red(marker), 2],
[color(branch.name, bold=True), branch_col],
[black(pub), 14]
))
|
def unstash_index(self, sync=False, branch=None):
    """Return the index of a Legit-created stash entry, if one exists.

    Scans ``git stash list`` for an entry created by Legit (or the older
    "GitHub" tooling) on the given branch for the matching operation
    ("syncing" vs. "switching").

    :param sync: look for a "syncing" stash instead of a "switching" one.
    :param branch: branch name to match; defaults to the current branch.
    :returns: the stash index as a string (e.g. ``'0'``, ``'12'``), or
        ``None`` when no matching stash entry is found.
    """
    stash_list = self.git_exec(['stash', 'list'], no_verbose=True)

    if branch is None:
        branch = self.get_current_branch_name()

    # Hoisted out of the loop: these don't change per stash entry.
    verb = 'syncing' if sync else 'switching'
    on_branch = 'On {0}:'.format(branch)

    for stash in stash_list.splitlines():
        if ((('Legit' in stash) or ('GitHub' in stash))
                and on_branch in stash
                and verb in stash):
            # Entries look like "stash@{N}: ...".  The previous code
            # returned stash[7] (one character), which is wrong for
            # indices >= 10; parse the full number between the braces.
            return stash[stash.index('{') + 1:stash.index('}')]
"def git_exec(self, command, **kwargs):\n \"\"\"Execute git commands\"\"\"\n from .cli import verbose_echo\n\n command.insert(0, self.git)\n if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user\n verbose = False\n else:\n verbose = self.verbose\n verbose_... | class SCMRepo(object):
# Class-level defaults; instances overwrite these in __init__.
git = None          # name/path of the git executable
repo = None         # GitPython Repo object, or None outside a repository
remote = None       # remote resolved by get_remote() during __init__
verbose = False     # when True, git commands are echoed before running
fake = False        # when True, commands are logged but not executed
stash_index = None  # stash entry index recorded by unstash_log()

def __init__(self):
    """Locate the git executable and bind the enclosing repository, if any."""
    self.git = os.environ.get('GIT_PYTHON_GIT_EXECUTABLE', 'git')
    try:
        self.repo = Repo(search_parent_directories=True)
        self.remote = self.get_remote()
    except InvalidGitRepositoryError:
        # Not inside a git repository; repo_check() guards later use.
        self.repo = None
def git_exec(self, command, **kwargs):
    """Execute git commands

    :param command: git argument list; the git executable is prepended
        in place (NOTE: this mutates the caller's list).
    :param kwargs: forwarded to ``repo.git.execute``; the extra
        ``no_verbose`` flag suppresses command echoing for this call.
    :returns: the command output, or a ``(status, stdout, stderr)`` tuple
        when ``with_extended_output`` is requested; in fake mode, an
        empty result of the matching shape.
    """
    # Local import — presumably avoids a circular import with cli; confirm.
    from .cli import verbose_echo

    command.insert(0, self.git)
    if kwargs.pop('no_verbose', False):  # used when git output isn't helpful to user
        verbose = False
    else:
        verbose = self.verbose
    verbose_echo(' '.join(command), verbose, self.fake)

    if not self.fake:
        result = self.repo.git.execute(command, **kwargs)
    else:
        # Fake mode: return an empty result shaped like the real one.
        if 'with_extended_output' in kwargs:
            result = (0, '', '')
        else:
            result = ''
    return result
def repo_check(self, require_remote=False):
    """Exit with git's conventional status 128 unless a usable repo exists."""
    if self.repo is None:
        click.echo('Not a git repository.')
        sys.exit(128)

    # TODO: no remote fail
    if require_remote and not self.repo.remotes:
        click.echo('No git remotes configured. Please add one.')
        sys.exit(128)

    # TODO: You're in a merge state.
def stash_log(self, sync=False):
    """Stash dirty working-tree changes, logging progress via status_log."""
    if not self.repo.is_dirty():
        return
    status_log(self.stash_it, 'Saving local changes.', sync=sync)
def unstash_log(self, sync=False):
    """Record a matching Legit stash index and, if found, restore it."""
    # unstash_it reads self.stash_index, so it must be set first.
    self.stash_index = self.unstash_index(sync=sync)

    if self.stash_index:
        status_log(self.unstash_it, 'Restoring local changes.', sync=sync)
def stash_it(self, sync=False):
    """Stash all local changes (including untracked) with a Legit message."""
    msg = 'syncing branch' if sync else 'switching branches'
    command = ['stash', 'save', '--include-untracked',
               LEGIT_TEMPLATE.format(msg)]
    return self.git_exec(command)
def unstash_it(self, sync=False):
    """
    Unstashes changes from current branch for branch sync.
    Requires prior code setting self.stash_index.
    """
    # Pop the exact entry found by unstash_index; no-op when none recorded.
    if self.stash_index is not None:
        return self.git_exec(
            ['stash', 'pop', 'stash@{{{0}}}'.format(self.stash_index)])
def smart_pull(self):
    """Fetch the tracked remote, then smart-merge its copy of this branch.

    (Merge-detection idea: 'git log --merges origin/master..master'.)
    """
    branch = self.get_current_branch_name()
    self.git_exec(['fetch', self.remote.name])
    return self.smart_merge('{0}/{1}'.format(self.remote.name, branch),
                            self.smart_merge_enabled())
def smart_merge_enabled(self):
    """Read ``legit.smartMerge`` from git config; defaults to True when unset."""
    reader = self.repo.config_reader()
    if not reader.has_option('legit', 'smartMerge'):
        return True
    return reader.getboolean('legit', 'smartMerge')
def smart_merge(self, branch, allow_rebase=True):
    """Merge or rebase *branch* into the current branch.

    Rebases when allowed and the local commits since *branch* contain no
    merge commits; otherwise merges (or rebases when ``pull.rebase`` is
    set).  Honours ``pull.ff = only`` for merges.  On failure, aborts
    the half-applied operation and exits via ``abort``.
    """
    from_branch = self.get_current_branch_name()

    merges = self.git_exec(
        ['log', '--merges', '{0}..{1}'.format(branch, from_branch)])

    if allow_rebase:
        # Rebase only when there are no local merge commits to preserve.
        verb = 'merge' if merges.count('commit') else 'rebase'
    else:
        if self.pull_rebase():
            verb = 'rebase'
        else:
            verb = 'merge'

    if verb != 'rebase' and self.pull_ff_only():
        return self.git_exec([verb, '--ff-only', branch])
    else:
        try:
            return self.git_exec([verb, branch])
        except GitCommandError as why:
            # Roll back the failed merge/rebase before bailing out.
            log = self.git_exec([verb, '--abort'])
            abort('Merge failed. Reverting.',
                  log='{0}\n{1}'.format(why, log), type='merge')
def pull_rebase(self):
    """Return the ``pull.rebase`` git setting; False when unset."""
    reader = self.repo.config_reader()
    has_setting = reader.has_option('pull', 'rebase')
    return reader.getboolean('pull', 'rebase') if has_setting else False
def pull_ff_only(self):
    """True when git config has ``pull.ff = only``; False otherwise."""
    reader = self.repo.config_reader()
    if not reader.has_option('pull', 'ff'):
        return False
    return reader.get('pull', 'ff') == 'only'
def push(self, branch=None):
    """Push to the tracked remote; the current branch when *branch* is None."""
    args = ['push'] if branch is None else ['push', self.remote.name, branch]
    return self.git_exec(args)
def checkout_branch(self, branch):
    """Checks out given branch.

    :returns: the checkout's stderr and stdout, joined by a newline.
    """
    _, stdout, stderr = self.git_exec(
        ['checkout', branch],
        with_extended_output=True)
    return '\n'.join([stderr, stdout])
def unpublish_branch(self, branch):
    """Unpublishes given branch (deletes it on the tracked remote)."""
    try:
        # Pushing an empty source ref deletes the branch on the remote.
        return self.git_exec(
            ['push', self.remote.name, ':{0}'.format(branch)])
    except GitCommandError:
        # Prune stale remote refs, then exit via abort with the fetch log.
        _, _, log = self.git_exec(
            ['fetch', self.remote.name, '--prune'],
            with_extended_output=True)
        abort('Unpublish failed. Fetching.', log=log, type='unpublish')
def publish_branch(self, branch):
    """Publishes given branch to the tracked remote with upstream tracking."""
    command = ['push', '-u', self.remote.name, branch]
    return self.git_exec(command)
def undo(self, hard=False):
    """Makes last commit not exist (soft reset unless *hard* is True)."""
    if self.fake:
        click.echo(crayons.red('Faked! >>> git reset {}{}'
                               .format('--hard ' if hard else '', 'HEAD^')))
        return 0
    return self.repo.git.reset('HEAD^', working_tree=hard)
def get_remote(self):
    """Resolve the remote Legit should use.

    Honours the ``legit.remote`` git-config option.  When that option
    names a non-existent remote, either falls back to the default remote
    (if ``legit.remoteFallback`` is enabled) or interactively asks the
    user to abort or to enable the fallback for this repo.  Exits with
    status 64 (EX_USAGE) on abort.
    """
    self.repo_check()
    reader = self.repo.config_reader()

    # If there is no remote option in legit section, return default
    if reader.has_option('legit', 'remote'):
        remote_name = reader.get('legit', 'remote')
        if remote_name not in [r.name for r in self.repo.remotes]:
            if fallback_enabled(reader):
                return self.get_default_remote()
            else:
                click.echo('Remote "{0}" does not exist!'.format(remote_name))
                will_aborted = click.confirm(
                    '\nPress `Y` to abort now,\n' +
                    '`n` to use default remote and turn fallback on for this repo:')
                if will_aborted:
                    click.echo('\nAborted. Please update your git configuration.')
                    sys.exit(64)  # EX_USAGE
                else:
                    # Persist the fallback choice so we don't ask again.
                    writer = self.repo.config_writer()
                    writer.set_value('legit', 'remoteFallback', 'true')
                    click.echo('\n`legit.RemoteFallback` changed to true for current repo.')
                    return self.get_default_remote()
        else:
            return self.repo.remote(remote_name)
    else:
        return self.get_default_remote()
def get_default_remote(self):
    """Return the repository's first remote, or None when there are none."""
    remotes = self.repo.remotes
    return remotes[0] if len(remotes) else None
def get_current_branch_name(self):
    """Name of the branch HEAD currently points to."""
    head_ref = self.repo.head.ref
    return head_ref.name
def fuzzy_match_branch(self, branch):
    """Resolve *branch* against the known branch names.

    Returns False for empty input, the name itself on an exact hit, the
    unique prefix match when exactly one branch starts with *branch*,
    and otherwise the input unchanged.
    """
    if not branch:
        return False

    names = self.get_branch_names()
    if branch in names:
        return branch

    candidates = [name for name in names if name.startswith(branch)]
    if len(candidates) == 1:
        return candidates[0]
    return branch
def get_branches(self, local=True, remote_branches=True):
    """Returns a list of local and remote branches.

    :param local: include local heads.
    :param remote_branches: include branches from the tracked remote
        (forced off when the repo has no remotes).
    :returns: Branch objects sorted by name; remote branches are marked
        published, local-only ones unpublished.
    """
    if not self.repo.remotes:
        remote_branches = False

    branches = []
    if remote_branches:
        # Remote refs.
        try:
            for b in self.remote.refs:
                # Strip the leading "<remote>/" segment from the ref name.
                name = '/'.join(b.name.split('/')[1:])
                if name not in legit_settings.forbidden_branches:
                    branches.append(Branch(name, is_published=True))
        except (IndexError, AssertionError):
            # No usable remote refs; fall through to local heads only.
            pass
    if local:
        # Local refs.
        for b in [h.name for h in self.repo.heads]:
            # Skip heads already listed from the remote (those are published).
            if (not remote_branches) or (b not in [br.name for br in branches]):
                if b not in legit_settings.forbidden_branches:
                    branches.append(Branch(b, is_published=False))

    return sorted(branches, key=attrgetter('name'))
def get_branch_names(self, local=True, remote_branches=True):
    """Names of the branches returned by get_branches with the same filters."""
    return [b.name for b in
            self.get_branches(local=local, remote_branches=remote_branches)]
def display_available_branches(self):
    """Displays available branches, marking the current and published ones."""
    if not self.repo.remotes:
        remote_branches = False
    else:
        remote_branches = True
    branches = self.get_branches(local=True, remote_branches=remote_branches)

    if not branches:
        click.echo(crayons.red('No branches available'))
        return

    # Column width: widest branch name plus one space of padding.
    branch_col = len(max([b.name for b in branches], key=len)) + 1

    for branch in branches:
        try:
            branch_is_selected = (branch.name == self.get_current_branch_name())
        except TypeError:
            # NOTE(review): TypeError presumably raised for a detached
            # HEAD (head.ref has no branch) — confirm against GitPython.
            branch_is_selected = False

        marker = '*' if branch_is_selected else ' '
        color = colored.green if branch_is_selected else colored.yellow
        pub = '(published)' if branch.is_published else '(unpublished)'

        click.echo(columns(
            [colored.red(marker), 2],
            [color(branch.name, bold=True), branch_col],
            [black(pub), 14]
        ))
|
def unstash_it(self, sync=False):
    """Pop the stash entry recorded in ``self.stash_index``, if any.

    Requires prior code (``unstash_log``) to have set ``self.stash_index``;
    does nothing when no stash index was recorded.
    """
    if self.stash_index is None:
        return None
    ref = 'stash@{{{0}}}'.format(self.stash_index)
    return self.git_exec(['stash', 'pop', ref])
"def git_exec(self, command, **kwargs):\n \"\"\"Execute git commands\"\"\"\n from .cli import verbose_echo\n\n command.insert(0, self.git)\n if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user\n verbose = False\n else:\n verbose = self.verbose\n verbose_... | class SCMRepo(object):
git = None
repo = None
remote = None
verbose = False
fake = False
stash_index = None
def __init__(self):
self.git = os.environ.get('GIT_PYTHON_GIT_EXECUTABLE', 'git')
try:
self.repo = Repo(search_parent_directories=True)
self.remote = self.get_remote()
except InvalidGitRepositoryError:
self.repo = None
def git_exec(self, command, **kwargs):
"""Execute git commands"""
from .cli import verbose_echo
command.insert(0, self.git)
if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user
verbose = False
else:
verbose = self.verbose
verbose_echo(' '.join(command), verbose, self.fake)
if not self.fake:
result = self.repo.git.execute(command, **kwargs)
else:
if 'with_extended_output' in kwargs:
result = (0, '', '')
else:
result = ''
return result
def repo_check(self, require_remote=False):
if self.repo is None:
click.echo('Not a git repository.')
sys.exit(128)
# TODO: no remote fail
if not self.repo.remotes and require_remote:
click.echo('No git remotes configured. Please add one.')
sys.exit(128)
# TODO: You're in a merge state.
def stash_log(self, sync=False):
if self.repo.is_dirty():
status_log(self.stash_it, 'Saving local changes.', sync=sync)
def unstash_log(self, sync=False):
self.stash_index = self.unstash_index(sync=sync)
if self.stash_index:
status_log(self.unstash_it, 'Restoring local changes.', sync=sync)
def unstash_index(self, sync=False, branch=None):
"""Returns an unstash index if one is available."""
stash_list = self.git_exec(['stash', 'list'], no_verbose=True)
if branch is None:
branch = self.get_current_branch_name()
for stash in stash_list.splitlines():
verb = 'syncing' if sync else 'switching'
if (
(('Legit' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
) or
(('GitHub' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
)
):
return stash[7]
def stash_it(self, sync=False):
msg = 'syncing branch' if sync else 'switching branches'
return self.git_exec(
['stash', 'save', '--include-untracked', LEGIT_TEMPLATE.format(msg)])
def smart_pull(self):
"""
'git log --merges origin/master..master'
"""
branch = self.get_current_branch_name()
self.git_exec(['fetch', self.remote.name])
return self.smart_merge('{0}/{1}'.format(self.remote.name, branch),
self.smart_merge_enabled())
def smart_merge_enabled(self):
reader = self.repo.config_reader()
if reader.has_option('legit', 'smartMerge'):
return reader.getboolean('legit', 'smartMerge')
else:
return True
def smart_merge(self, branch, allow_rebase=True):
from_branch = self.get_current_branch_name()
merges = self.git_exec(
['log', '--merges', '{0}..{1}'.format(branch, from_branch)])
if allow_rebase:
verb = 'merge' if merges.count('commit') else 'rebase'
else:
if self.pull_rebase():
verb = 'rebase'
else:
verb = 'merge'
if verb != 'rebase' and self.pull_ff_only():
return self.git_exec([verb, '--ff-only', branch])
else:
try:
return self.git_exec([verb, branch])
except GitCommandError as why:
log = self.git_exec([verb, '--abort'])
abort('Merge failed. Reverting.',
log='{0}\n{1}'.format(why, log), type='merge')
def pull_rebase(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'rebase'):
return reader.getboolean('pull', 'rebase')
else:
return False
def pull_ff_only(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'ff'):
if reader.get('pull', 'ff') == 'only':
return True
else:
return False
else:
return False
def push(self, branch=None):
if branch is None:
return self.git_exec(['push'])
else:
return self.git_exec(['push', self.remote.name, branch])
def checkout_branch(self, branch):
"""Checks out given branch."""
_, stdout, stderr = self.git_exec(
['checkout', branch],
with_extended_output=True)
return '\n'.join([stderr, stdout])
def unpublish_branch(self, branch):
"""Unpublishes given branch."""
try:
return self.git_exec(
['push', self.remote.name, ':{0}'.format(branch)])
except GitCommandError:
_, _, log = self.git_exec(
['fetch', self.remote.name, '--prune'],
with_extended_output=True)
abort('Unpublish failed. Fetching.', log=log, type='unpublish')
def publish_branch(self, branch):
"""Publishes given branch."""
return self.git_exec(
['push', '-u', self.remote.name, branch])
def undo(self, hard=False):
"""Makes last commit not exist"""
if not self.fake:
return self.repo.git.reset('HEAD^', working_tree=hard)
else:
click.echo(crayons.red('Faked! >>> git reset {}{}'
.format('--hard ' if hard else '', 'HEAD^')))
return 0
def get_remote(self):
self.repo_check()
reader = self.repo.config_reader()
# If there is no remote option in legit section, return default
if reader.has_option('legit', 'remote'):
remote_name = reader.get('legit', 'remote')
if remote_name not in [r.name for r in self.repo.remotes]:
if fallback_enabled(reader):
return self.get_default_remote()
else:
click.echo('Remote "{0}" does not exist!'.format(remote_name))
will_aborted = click.confirm(
'\nPress `Y` to abort now,\n' +
'`n` to use default remote and turn fallback on for this repo:')
if will_aborted:
click.echo('\nAborted. Please update your git configuration.')
sys.exit(64) # EX_USAGE
else:
writer = self.repo.config_writer()
writer.set_value('legit', 'remoteFallback', 'true')
click.echo('\n`legit.RemoteFallback` changed to true for current repo.')
return self.get_default_remote()
else:
return self.repo.remote(remote_name)
else:
return self.get_default_remote()
def get_default_remote(self):
if len(self.repo.remotes) == 0:
return None
else:
return self.repo.remotes[0]
def get_current_branch_name(self):
"""Returns current branch name"""
return self.repo.head.ref.name
def fuzzy_match_branch(self, branch):
if not branch:
return False
all_branches = self.get_branch_names()
if branch in all_branches:
return branch
def branch_fuzzy_match(b):
return b.startswith(branch)
possible_branches = list(filter(branch_fuzzy_match, all_branches))
if len(possible_branches) == 1:
return possible_branches[0]
return branch
def get_branches(self, local=True, remote_branches=True):
"""Returns a list of local and remote branches."""
if not self.repo.remotes:
remote_branches = False
branches = []
if remote_branches:
# Remote refs.
try:
for b in self.remote.refs:
name = '/'.join(b.name.split('/')[1:])
if name not in legit_settings.forbidden_branches:
branches.append(Branch(name, is_published=True))
except (IndexError, AssertionError):
pass
if local:
# Local refs.
for b in [h.name for h in self.repo.heads]:
if (not remote_branches) or (b not in [br.name for br in branches]):
if b not in legit_settings.forbidden_branches:
branches.append(Branch(b, is_published=False))
return sorted(branches, key=attrgetter('name'))
def get_branch_names(self, local=True, remote_branches=True):
branches = self.get_branches(local=local, remote_branches=remote_branches)
return [b.name for b in branches]
def display_available_branches(self):
"""Displays available branches."""
if not self.repo.remotes:
remote_branches = False
else:
remote_branches = True
branches = self.get_branches(local=True, remote_branches=remote_branches)
if not branches:
click.echo(crayons.red('No branches available'))
return
branch_col = len(max([b.name for b in branches], key=len)) + 1
for branch in branches:
try:
branch_is_selected = (branch.name == self.get_current_branch_name())
except TypeError:
branch_is_selected = False
marker = '*' if branch_is_selected else ' '
color = colored.green if branch_is_selected else colored.yellow
pub = '(published)' if branch.is_published else '(unpublished)'
click.echo(columns(
[colored.red(marker), 2],
[color(branch.name, bold=True), branch_col],
[black(pub), 14]
))
|
def smart_pull(self):
    """Fetch the tracked remote, then smart-merge its copy of this branch.

    (Merge-detection idea: 'git log --merges origin/master..master'.)
    """
    current = self.get_current_branch_name()
    remote_name = self.remote.name
    self.git_exec(['fetch', remote_name])
    upstream = '{0}/{1}'.format(remote_name, current)
    return self.smart_merge(upstream, self.smart_merge_enabled())
"def git_exec(self, command, **kwargs):\n \"\"\"Execute git commands\"\"\"\n from .cli import verbose_echo\n\n command.insert(0, self.git)\n if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user\n verbose = False\n else:\n verbose = self.verbose\n verbose_... | class SCMRepo(object):
git = None
repo = None
remote = None
verbose = False
fake = False
stash_index = None
def __init__(self):
self.git = os.environ.get('GIT_PYTHON_GIT_EXECUTABLE', 'git')
try:
self.repo = Repo(search_parent_directories=True)
self.remote = self.get_remote()
except InvalidGitRepositoryError:
self.repo = None
def git_exec(self, command, **kwargs):
"""Execute git commands"""
from .cli import verbose_echo
command.insert(0, self.git)
if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user
verbose = False
else:
verbose = self.verbose
verbose_echo(' '.join(command), verbose, self.fake)
if not self.fake:
result = self.repo.git.execute(command, **kwargs)
else:
if 'with_extended_output' in kwargs:
result = (0, '', '')
else:
result = ''
return result
def repo_check(self, require_remote=False):
if self.repo is None:
click.echo('Not a git repository.')
sys.exit(128)
# TODO: no remote fail
if not self.repo.remotes and require_remote:
click.echo('No git remotes configured. Please add one.')
sys.exit(128)
# TODO: You're in a merge state.
def stash_log(self, sync=False):
if self.repo.is_dirty():
status_log(self.stash_it, 'Saving local changes.', sync=sync)
def unstash_log(self, sync=False):
self.stash_index = self.unstash_index(sync=sync)
if self.stash_index:
status_log(self.unstash_it, 'Restoring local changes.', sync=sync)
def unstash_index(self, sync=False, branch=None):
"""Returns an unstash index if one is available."""
stash_list = self.git_exec(['stash', 'list'], no_verbose=True)
if branch is None:
branch = self.get_current_branch_name()
for stash in stash_list.splitlines():
verb = 'syncing' if sync else 'switching'
if (
(('Legit' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
) or
(('GitHub' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
)
):
return stash[7]
def stash_it(self, sync=False):
msg = 'syncing branch' if sync else 'switching branches'
return self.git_exec(
['stash', 'save', '--include-untracked', LEGIT_TEMPLATE.format(msg)])
def unstash_it(self, sync=False):
"""
Unstashes changes from current branch for branch sync.
Requires prior code setting self.stash_index.
"""
if self.stash_index is not None:
return self.git_exec(
['stash', 'pop', 'stash@{{{0}}}'.format(self.stash_index)])
def smart_merge_enabled(self):
reader = self.repo.config_reader()
if reader.has_option('legit', 'smartMerge'):
return reader.getboolean('legit', 'smartMerge')
else:
return True
def smart_merge(self, branch, allow_rebase=True):
from_branch = self.get_current_branch_name()
merges = self.git_exec(
['log', '--merges', '{0}..{1}'.format(branch, from_branch)])
if allow_rebase:
verb = 'merge' if merges.count('commit') else 'rebase'
else:
if self.pull_rebase():
verb = 'rebase'
else:
verb = 'merge'
if verb != 'rebase' and self.pull_ff_only():
return self.git_exec([verb, '--ff-only', branch])
else:
try:
return self.git_exec([verb, branch])
except GitCommandError as why:
log = self.git_exec([verb, '--abort'])
abort('Merge failed. Reverting.',
log='{0}\n{1}'.format(why, log), type='merge')
def pull_rebase(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'rebase'):
return reader.getboolean('pull', 'rebase')
else:
return False
def pull_ff_only(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'ff'):
if reader.get('pull', 'ff') == 'only':
return True
else:
return False
else:
return False
def push(self, branch=None):
if branch is None:
return self.git_exec(['push'])
else:
return self.git_exec(['push', self.remote.name, branch])
def checkout_branch(self, branch):
"""Checks out given branch."""
_, stdout, stderr = self.git_exec(
['checkout', branch],
with_extended_output=True)
return '\n'.join([stderr, stdout])
def unpublish_branch(self, branch):
"""Unpublishes given branch."""
try:
return self.git_exec(
['push', self.remote.name, ':{0}'.format(branch)])
except GitCommandError:
_, _, log = self.git_exec(
['fetch', self.remote.name, '--prune'],
with_extended_output=True)
abort('Unpublish failed. Fetching.', log=log, type='unpublish')
def publish_branch(self, branch):
"""Publishes given branch."""
return self.git_exec(
['push', '-u', self.remote.name, branch])
def undo(self, hard=False):
"""Makes last commit not exist"""
if not self.fake:
return self.repo.git.reset('HEAD^', working_tree=hard)
else:
click.echo(crayons.red('Faked! >>> git reset {}{}'
.format('--hard ' if hard else '', 'HEAD^')))
return 0
def get_remote(self):
self.repo_check()
reader = self.repo.config_reader()
# If there is no remote option in legit section, return default
if reader.has_option('legit', 'remote'):
remote_name = reader.get('legit', 'remote')
if remote_name not in [r.name for r in self.repo.remotes]:
if fallback_enabled(reader):
return self.get_default_remote()
else:
click.echo('Remote "{0}" does not exist!'.format(remote_name))
will_aborted = click.confirm(
'\nPress `Y` to abort now,\n' +
'`n` to use default remote and turn fallback on for this repo:')
if will_aborted:
click.echo('\nAborted. Please update your git configuration.')
sys.exit(64) # EX_USAGE
else:
writer = self.repo.config_writer()
writer.set_value('legit', 'remoteFallback', 'true')
click.echo('\n`legit.RemoteFallback` changed to true for current repo.')
return self.get_default_remote()
else:
return self.repo.remote(remote_name)
else:
return self.get_default_remote()
def get_default_remote(self):
if len(self.repo.remotes) == 0:
return None
else:
return self.repo.remotes[0]
def get_current_branch_name(self):
"""Returns current branch name"""
return self.repo.head.ref.name
def fuzzy_match_branch(self, branch):
if not branch:
return False
all_branches = self.get_branch_names()
if branch in all_branches:
return branch
def branch_fuzzy_match(b):
return b.startswith(branch)
possible_branches = list(filter(branch_fuzzy_match, all_branches))
if len(possible_branches) == 1:
return possible_branches[0]
return branch
def get_branches(self, local=True, remote_branches=True):
"""Returns a list of local and remote branches."""
if not self.repo.remotes:
remote_branches = False
branches = []
if remote_branches:
# Remote refs.
try:
for b in self.remote.refs:
name = '/'.join(b.name.split('/')[1:])
if name not in legit_settings.forbidden_branches:
branches.append(Branch(name, is_published=True))
except (IndexError, AssertionError):
pass
if local:
# Local refs.
for b in [h.name for h in self.repo.heads]:
if (not remote_branches) or (b not in [br.name for br in branches]):
if b not in legit_settings.forbidden_branches:
branches.append(Branch(b, is_published=False))
return sorted(branches, key=attrgetter('name'))
def get_branch_names(self, local=True, remote_branches=True):
branches = self.get_branches(local=local, remote_branches=remote_branches)
return [b.name for b in branches]
def display_available_branches(self):
"""Displays available branches."""
if not self.repo.remotes:
remote_branches = False
else:
remote_branches = True
branches = self.get_branches(local=True, remote_branches=remote_branches)
if not branches:
click.echo(crayons.red('No branches available'))
return
branch_col = len(max([b.name for b in branches], key=len)) + 1
for branch in branches:
try:
branch_is_selected = (branch.name == self.get_current_branch_name())
except TypeError:
branch_is_selected = False
marker = '*' if branch_is_selected else ' '
color = colored.green if branch_is_selected else colored.yellow
pub = '(published)' if branch.is_published else '(unpublished)'
click.echo(columns(
[colored.red(marker), 2],
[color(branch.name, bold=True), branch_col],
[black(pub), 14]
))
|
def checkout_branch(self, branch):
    """Checks out given branch; returns stderr and stdout joined by newline."""
    result = self.git_exec(['checkout', branch],
                           with_extended_output=True)
    _, out, err = result
    return '\n'.join([err, out])
"def git_exec(self, command, **kwargs):\n \"\"\"Execute git commands\"\"\"\n from .cli import verbose_echo\n\n command.insert(0, self.git)\n if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user\n verbose = False\n else:\n verbose = self.verbose\n verbose_... | class SCMRepo(object):
git = None
repo = None
remote = None
verbose = False
fake = False
stash_index = None
def __init__(self):
self.git = os.environ.get('GIT_PYTHON_GIT_EXECUTABLE', 'git')
try:
self.repo = Repo(search_parent_directories=True)
self.remote = self.get_remote()
except InvalidGitRepositoryError:
self.repo = None
def git_exec(self, command, **kwargs):
"""Execute git commands"""
from .cli import verbose_echo
command.insert(0, self.git)
if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user
verbose = False
else:
verbose = self.verbose
verbose_echo(' '.join(command), verbose, self.fake)
if not self.fake:
result = self.repo.git.execute(command, **kwargs)
else:
if 'with_extended_output' in kwargs:
result = (0, '', '')
else:
result = ''
return result
def repo_check(self, require_remote=False):
if self.repo is None:
click.echo('Not a git repository.')
sys.exit(128)
# TODO: no remote fail
if not self.repo.remotes and require_remote:
click.echo('No git remotes configured. Please add one.')
sys.exit(128)
# TODO: You're in a merge state.
def stash_log(self, sync=False):
if self.repo.is_dirty():
status_log(self.stash_it, 'Saving local changes.', sync=sync)
def unstash_log(self, sync=False):
self.stash_index = self.unstash_index(sync=sync)
if self.stash_index:
status_log(self.unstash_it, 'Restoring local changes.', sync=sync)
def unstash_index(self, sync=False, branch=None):
"""Returns an unstash index if one is available."""
stash_list = self.git_exec(['stash', 'list'], no_verbose=True)
if branch is None:
branch = self.get_current_branch_name()
for stash in stash_list.splitlines():
verb = 'syncing' if sync else 'switching'
if (
(('Legit' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
) or
(('GitHub' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
)
):
return stash[7]
def stash_it(self, sync=False):
msg = 'syncing branch' if sync else 'switching branches'
return self.git_exec(
['stash', 'save', '--include-untracked', LEGIT_TEMPLATE.format(msg)])
def unstash_it(self, sync=False):
"""
Unstashes changes from current branch for branch sync.
Requires prior code setting self.stash_index.
"""
if self.stash_index is not None:
return self.git_exec(
['stash', 'pop', 'stash@{{{0}}}'.format(self.stash_index)])
def smart_pull(self):
"""
'git log --merges origin/master..master'
"""
branch = self.get_current_branch_name()
self.git_exec(['fetch', self.remote.name])
return self.smart_merge('{0}/{1}'.format(self.remote.name, branch),
self.smart_merge_enabled())
def smart_merge_enabled(self):
reader = self.repo.config_reader()
if reader.has_option('legit', 'smartMerge'):
return reader.getboolean('legit', 'smartMerge')
else:
return True
def smart_merge(self, branch, allow_rebase=True):
from_branch = self.get_current_branch_name()
merges = self.git_exec(
['log', '--merges', '{0}..{1}'.format(branch, from_branch)])
if allow_rebase:
verb = 'merge' if merges.count('commit') else 'rebase'
else:
if self.pull_rebase():
verb = 'rebase'
else:
verb = 'merge'
if verb != 'rebase' and self.pull_ff_only():
return self.git_exec([verb, '--ff-only', branch])
else:
try:
return self.git_exec([verb, branch])
except GitCommandError as why:
log = self.git_exec([verb, '--abort'])
abort('Merge failed. Reverting.',
log='{0}\n{1}'.format(why, log), type='merge')
def pull_rebase(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'rebase'):
return reader.getboolean('pull', 'rebase')
else:
return False
def pull_ff_only(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'ff'):
if reader.get('pull', 'ff') == 'only':
return True
else:
return False
else:
return False
def push(self, branch=None):
if branch is None:
return self.git_exec(['push'])
else:
return self.git_exec(['push', self.remote.name, branch])
def unpublish_branch(self, branch):
"""Unpublishes given branch."""
try:
return self.git_exec(
['push', self.remote.name, ':{0}'.format(branch)])
except GitCommandError:
_, _, log = self.git_exec(
['fetch', self.remote.name, '--prune'],
with_extended_output=True)
abort('Unpublish failed. Fetching.', log=log, type='unpublish')
def publish_branch(self, branch):
"""Publishes given branch."""
return self.git_exec(
['push', '-u', self.remote.name, branch])
def undo(self, hard=False):
"""Makes last commit not exist"""
if not self.fake:
return self.repo.git.reset('HEAD^', working_tree=hard)
else:
click.echo(crayons.red('Faked! >>> git reset {}{}'
.format('--hard ' if hard else '', 'HEAD^')))
return 0
def get_remote(self):
self.repo_check()
reader = self.repo.config_reader()
# If there is no remote option in legit section, return default
if reader.has_option('legit', 'remote'):
remote_name = reader.get('legit', 'remote')
if remote_name not in [r.name for r in self.repo.remotes]:
if fallback_enabled(reader):
return self.get_default_remote()
else:
click.echo('Remote "{0}" does not exist!'.format(remote_name))
will_aborted = click.confirm(
'\nPress `Y` to abort now,\n' +
'`n` to use default remote and turn fallback on for this repo:')
if will_aborted:
click.echo('\nAborted. Please update your git configuration.')
sys.exit(64) # EX_USAGE
else:
writer = self.repo.config_writer()
writer.set_value('legit', 'remoteFallback', 'true')
click.echo('\n`legit.RemoteFallback` changed to true for current repo.')
return self.get_default_remote()
else:
return self.repo.remote(remote_name)
else:
return self.get_default_remote()
def get_default_remote(self):
if len(self.repo.remotes) == 0:
return None
else:
return self.repo.remotes[0]
def get_current_branch_name(self):
"""Returns current branch name"""
return self.repo.head.ref.name
def fuzzy_match_branch(self, branch):
if not branch:
return False
all_branches = self.get_branch_names()
if branch in all_branches:
return branch
def branch_fuzzy_match(b):
return b.startswith(branch)
possible_branches = list(filter(branch_fuzzy_match, all_branches))
if len(possible_branches) == 1:
return possible_branches[0]
return branch
def get_branches(self, local=True, remote_branches=True):
"""Returns a list of local and remote branches."""
if not self.repo.remotes:
remote_branches = False
branches = []
if remote_branches:
# Remote refs.
try:
for b in self.remote.refs:
name = '/'.join(b.name.split('/')[1:])
if name not in legit_settings.forbidden_branches:
branches.append(Branch(name, is_published=True))
except (IndexError, AssertionError):
pass
if local:
# Local refs.
for b in [h.name for h in self.repo.heads]:
if (not remote_branches) or (b not in [br.name for br in branches]):
if b not in legit_settings.forbidden_branches:
branches.append(Branch(b, is_published=False))
return sorted(branches, key=attrgetter('name'))
def get_branch_names(self, local=True, remote_branches=True):
branches = self.get_branches(local=local, remote_branches=remote_branches)
return [b.name for b in branches]
def display_available_branches(self):
"""Displays available branches."""
if not self.repo.remotes:
remote_branches = False
else:
remote_branches = True
branches = self.get_branches(local=True, remote_branches=remote_branches)
if not branches:
click.echo(crayons.red('No branches available'))
return
branch_col = len(max([b.name for b in branches], key=len)) + 1
for branch in branches:
try:
branch_is_selected = (branch.name == self.get_current_branch_name())
except TypeError:
branch_is_selected = False
marker = '*' if branch_is_selected else ' '
color = colored.green if branch_is_selected else colored.yellow
pub = '(published)' if branch.is_published else '(unpublished)'
click.echo(columns(
[colored.red(marker), 2],
[color(branch.name, bold=True), branch_col],
[black(pub), 14]
))
|
kennethreitz/legit | legit/scm.py | SCMRepo.unpublish_branch | python | def unpublish_branch(self, branch):
try:
return self.git_exec(
['push', self.remote.name, ':{0}'.format(branch)])
except GitCommandError:
_, _, log = self.git_exec(
['fetch', self.remote.name, '--prune'],
with_extended_output=True)
abort('Unpublish failed. Fetching.', log=log, type='unpublish') | Unpublishes given branch. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L202-L212 | [
"def abort(message, log=None, type=None):\n\n a = Aborted()\n a.message = message\n a.log = log\n\n legit_settings.abort_handler(a, type=type)\n",
"def git_exec(self, command, **kwargs):\n \"\"\"Execute git commands\"\"\"\n from .cli import verbose_echo\n\n command.insert(0, self.git)\n if... | class SCMRepo(object):
git = None
repo = None
remote = None
verbose = False
fake = False
stash_index = None
def __init__(self):
self.git = os.environ.get('GIT_PYTHON_GIT_EXECUTABLE', 'git')
try:
self.repo = Repo(search_parent_directories=True)
self.remote = self.get_remote()
except InvalidGitRepositoryError:
self.repo = None
def git_exec(self, command, **kwargs):
"""Execute git commands"""
from .cli import verbose_echo
command.insert(0, self.git)
if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user
verbose = False
else:
verbose = self.verbose
verbose_echo(' '.join(command), verbose, self.fake)
if not self.fake:
result = self.repo.git.execute(command, **kwargs)
else:
if 'with_extended_output' in kwargs:
result = (0, '', '')
else:
result = ''
return result
def repo_check(self, require_remote=False):
if self.repo is None:
click.echo('Not a git repository.')
sys.exit(128)
# TODO: no remote fail
if not self.repo.remotes and require_remote:
click.echo('No git remotes configured. Please add one.')
sys.exit(128)
# TODO: You're in a merge state.
def stash_log(self, sync=False):
if self.repo.is_dirty():
status_log(self.stash_it, 'Saving local changes.', sync=sync)
def unstash_log(self, sync=False):
self.stash_index = self.unstash_index(sync=sync)
if self.stash_index:
status_log(self.unstash_it, 'Restoring local changes.', sync=sync)
def unstash_index(self, sync=False, branch=None):
"""Returns an unstash index if one is available."""
stash_list = self.git_exec(['stash', 'list'], no_verbose=True)
if branch is None:
branch = self.get_current_branch_name()
for stash in stash_list.splitlines():
verb = 'syncing' if sync else 'switching'
if (
(('Legit' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
) or
(('GitHub' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
)
):
return stash[7]
def stash_it(self, sync=False):
msg = 'syncing branch' if sync else 'switching branches'
return self.git_exec(
['stash', 'save', '--include-untracked', LEGIT_TEMPLATE.format(msg)])
def unstash_it(self, sync=False):
"""
Unstashes changes from current branch for branch sync.
Requires prior code setting self.stash_index.
"""
if self.stash_index is not None:
return self.git_exec(
['stash', 'pop', 'stash@{{{0}}}'.format(self.stash_index)])
def smart_pull(self):
"""
'git log --merges origin/master..master'
"""
branch = self.get_current_branch_name()
self.git_exec(['fetch', self.remote.name])
return self.smart_merge('{0}/{1}'.format(self.remote.name, branch),
self.smart_merge_enabled())
def smart_merge_enabled(self):
reader = self.repo.config_reader()
if reader.has_option('legit', 'smartMerge'):
return reader.getboolean('legit', 'smartMerge')
else:
return True
def smart_merge(self, branch, allow_rebase=True):
from_branch = self.get_current_branch_name()
merges = self.git_exec(
['log', '--merges', '{0}..{1}'.format(branch, from_branch)])
if allow_rebase:
verb = 'merge' if merges.count('commit') else 'rebase'
else:
if self.pull_rebase():
verb = 'rebase'
else:
verb = 'merge'
if verb != 'rebase' and self.pull_ff_only():
return self.git_exec([verb, '--ff-only', branch])
else:
try:
return self.git_exec([verb, branch])
except GitCommandError as why:
log = self.git_exec([verb, '--abort'])
abort('Merge failed. Reverting.',
log='{0}\n{1}'.format(why, log), type='merge')
def pull_rebase(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'rebase'):
return reader.getboolean('pull', 'rebase')
else:
return False
def pull_ff_only(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'ff'):
if reader.get('pull', 'ff') == 'only':
return True
else:
return False
else:
return False
def push(self, branch=None):
if branch is None:
return self.git_exec(['push'])
else:
return self.git_exec(['push', self.remote.name, branch])
def checkout_branch(self, branch):
"""Checks out given branch."""
_, stdout, stderr = self.git_exec(
['checkout', branch],
with_extended_output=True)
return '\n'.join([stderr, stdout])
def publish_branch(self, branch):
"""Publishes given branch."""
return self.git_exec(
['push', '-u', self.remote.name, branch])
def undo(self, hard=False):
"""Makes last commit not exist"""
if not self.fake:
return self.repo.git.reset('HEAD^', working_tree=hard)
else:
click.echo(crayons.red('Faked! >>> git reset {}{}'
.format('--hard ' if hard else '', 'HEAD^')))
return 0
def get_remote(self):
self.repo_check()
reader = self.repo.config_reader()
# If there is no remote option in legit section, return default
if reader.has_option('legit', 'remote'):
remote_name = reader.get('legit', 'remote')
if remote_name not in [r.name for r in self.repo.remotes]:
if fallback_enabled(reader):
return self.get_default_remote()
else:
click.echo('Remote "{0}" does not exist!'.format(remote_name))
will_aborted = click.confirm(
'\nPress `Y` to abort now,\n' +
'`n` to use default remote and turn fallback on for this repo:')
if will_aborted:
click.echo('\nAborted. Please update your git configuration.')
sys.exit(64) # EX_USAGE
else:
writer = self.repo.config_writer()
writer.set_value('legit', 'remoteFallback', 'true')
click.echo('\n`legit.RemoteFallback` changed to true for current repo.')
return self.get_default_remote()
else:
return self.repo.remote(remote_name)
else:
return self.get_default_remote()
def get_default_remote(self):
if len(self.repo.remotes) == 0:
return None
else:
return self.repo.remotes[0]
def get_current_branch_name(self):
"""Returns current branch name"""
return self.repo.head.ref.name
def fuzzy_match_branch(self, branch):
if not branch:
return False
all_branches = self.get_branch_names()
if branch in all_branches:
return branch
def branch_fuzzy_match(b):
return b.startswith(branch)
possible_branches = list(filter(branch_fuzzy_match, all_branches))
if len(possible_branches) == 1:
return possible_branches[0]
return branch
def get_branches(self, local=True, remote_branches=True):
"""Returns a list of local and remote branches."""
if not self.repo.remotes:
remote_branches = False
branches = []
if remote_branches:
# Remote refs.
try:
for b in self.remote.refs:
name = '/'.join(b.name.split('/')[1:])
if name not in legit_settings.forbidden_branches:
branches.append(Branch(name, is_published=True))
except (IndexError, AssertionError):
pass
if local:
# Local refs.
for b in [h.name for h in self.repo.heads]:
if (not remote_branches) or (b not in [br.name for br in branches]):
if b not in legit_settings.forbidden_branches:
branches.append(Branch(b, is_published=False))
return sorted(branches, key=attrgetter('name'))
def get_branch_names(self, local=True, remote_branches=True):
branches = self.get_branches(local=local, remote_branches=remote_branches)
return [b.name for b in branches]
def display_available_branches(self):
"""Displays available branches."""
if not self.repo.remotes:
remote_branches = False
else:
remote_branches = True
branches = self.get_branches(local=True, remote_branches=remote_branches)
if not branches:
click.echo(crayons.red('No branches available'))
return
branch_col = len(max([b.name for b in branches], key=len)) + 1
for branch in branches:
try:
branch_is_selected = (branch.name == self.get_current_branch_name())
except TypeError:
branch_is_selected = False
marker = '*' if branch_is_selected else ' '
color = colored.green if branch_is_selected else colored.yellow
pub = '(published)' if branch.is_published else '(unpublished)'
click.echo(columns(
[colored.red(marker), 2],
[color(branch.name, bold=True), branch_col],
[black(pub), 14]
))
|
kennethreitz/legit | legit/scm.py | SCMRepo.undo | python | def undo(self, hard=False):
if not self.fake:
return self.repo.git.reset('HEAD^', working_tree=hard)
else:
click.echo(crayons.red('Faked! >>> git reset {}{}'
.format('--hard ' if hard else '', 'HEAD^')))
return 0 | Makes last commit not exist | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L220-L228 | null | class SCMRepo(object):
git = None
repo = None
remote = None
verbose = False
fake = False
stash_index = None
def __init__(self):
self.git = os.environ.get('GIT_PYTHON_GIT_EXECUTABLE', 'git')
try:
self.repo = Repo(search_parent_directories=True)
self.remote = self.get_remote()
except InvalidGitRepositoryError:
self.repo = None
def git_exec(self, command, **kwargs):
"""Execute git commands"""
from .cli import verbose_echo
command.insert(0, self.git)
if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user
verbose = False
else:
verbose = self.verbose
verbose_echo(' '.join(command), verbose, self.fake)
if not self.fake:
result = self.repo.git.execute(command, **kwargs)
else:
if 'with_extended_output' in kwargs:
result = (0, '', '')
else:
result = ''
return result
def repo_check(self, require_remote=False):
if self.repo is None:
click.echo('Not a git repository.')
sys.exit(128)
# TODO: no remote fail
if not self.repo.remotes and require_remote:
click.echo('No git remotes configured. Please add one.')
sys.exit(128)
# TODO: You're in a merge state.
def stash_log(self, sync=False):
if self.repo.is_dirty():
status_log(self.stash_it, 'Saving local changes.', sync=sync)
def unstash_log(self, sync=False):
self.stash_index = self.unstash_index(sync=sync)
if self.stash_index:
status_log(self.unstash_it, 'Restoring local changes.', sync=sync)
def unstash_index(self, sync=False, branch=None):
"""Returns an unstash index if one is available."""
stash_list = self.git_exec(['stash', 'list'], no_verbose=True)
if branch is None:
branch = self.get_current_branch_name()
for stash in stash_list.splitlines():
verb = 'syncing' if sync else 'switching'
if (
(('Legit' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
) or
(('GitHub' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
)
):
return stash[7]
def stash_it(self, sync=False):
msg = 'syncing branch' if sync else 'switching branches'
return self.git_exec(
['stash', 'save', '--include-untracked', LEGIT_TEMPLATE.format(msg)])
def unstash_it(self, sync=False):
"""
Unstashes changes from current branch for branch sync.
Requires prior code setting self.stash_index.
"""
if self.stash_index is not None:
return self.git_exec(
['stash', 'pop', 'stash@{{{0}}}'.format(self.stash_index)])
def smart_pull(self):
"""
'git log --merges origin/master..master'
"""
branch = self.get_current_branch_name()
self.git_exec(['fetch', self.remote.name])
return self.smart_merge('{0}/{1}'.format(self.remote.name, branch),
self.smart_merge_enabled())
def smart_merge_enabled(self):
reader = self.repo.config_reader()
if reader.has_option('legit', 'smartMerge'):
return reader.getboolean('legit', 'smartMerge')
else:
return True
def smart_merge(self, branch, allow_rebase=True):
from_branch = self.get_current_branch_name()
merges = self.git_exec(
['log', '--merges', '{0}..{1}'.format(branch, from_branch)])
if allow_rebase:
verb = 'merge' if merges.count('commit') else 'rebase'
else:
if self.pull_rebase():
verb = 'rebase'
else:
verb = 'merge'
if verb != 'rebase' and self.pull_ff_only():
return self.git_exec([verb, '--ff-only', branch])
else:
try:
return self.git_exec([verb, branch])
except GitCommandError as why:
log = self.git_exec([verb, '--abort'])
abort('Merge failed. Reverting.',
log='{0}\n{1}'.format(why, log), type='merge')
def pull_rebase(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'rebase'):
return reader.getboolean('pull', 'rebase')
else:
return False
def pull_ff_only(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'ff'):
if reader.get('pull', 'ff') == 'only':
return True
else:
return False
else:
return False
def push(self, branch=None):
if branch is None:
return self.git_exec(['push'])
else:
return self.git_exec(['push', self.remote.name, branch])
def checkout_branch(self, branch):
"""Checks out given branch."""
_, stdout, stderr = self.git_exec(
['checkout', branch],
with_extended_output=True)
return '\n'.join([stderr, stdout])
def unpublish_branch(self, branch):
"""Unpublishes given branch."""
try:
return self.git_exec(
['push', self.remote.name, ':{0}'.format(branch)])
except GitCommandError:
_, _, log = self.git_exec(
['fetch', self.remote.name, '--prune'],
with_extended_output=True)
abort('Unpublish failed. Fetching.', log=log, type='unpublish')
def publish_branch(self, branch):
"""Publishes given branch."""
return self.git_exec(
['push', '-u', self.remote.name, branch])
def get_remote(self):
self.repo_check()
reader = self.repo.config_reader()
# If there is no remote option in legit section, return default
if reader.has_option('legit', 'remote'):
remote_name = reader.get('legit', 'remote')
if remote_name not in [r.name for r in self.repo.remotes]:
if fallback_enabled(reader):
return self.get_default_remote()
else:
click.echo('Remote "{0}" does not exist!'.format(remote_name))
will_aborted = click.confirm(
'\nPress `Y` to abort now,\n' +
'`n` to use default remote and turn fallback on for this repo:')
if will_aborted:
click.echo('\nAborted. Please update your git configuration.')
sys.exit(64) # EX_USAGE
else:
writer = self.repo.config_writer()
writer.set_value('legit', 'remoteFallback', 'true')
click.echo('\n`legit.RemoteFallback` changed to true for current repo.')
return self.get_default_remote()
else:
return self.repo.remote(remote_name)
else:
return self.get_default_remote()
def get_default_remote(self):
if len(self.repo.remotes) == 0:
return None
else:
return self.repo.remotes[0]
def get_current_branch_name(self):
"""Returns current branch name"""
return self.repo.head.ref.name
def fuzzy_match_branch(self, branch):
if not branch:
return False
all_branches = self.get_branch_names()
if branch in all_branches:
return branch
def branch_fuzzy_match(b):
return b.startswith(branch)
possible_branches = list(filter(branch_fuzzy_match, all_branches))
if len(possible_branches) == 1:
return possible_branches[0]
return branch
def get_branches(self, local=True, remote_branches=True):
"""Returns a list of local and remote branches."""
if not self.repo.remotes:
remote_branches = False
branches = []
if remote_branches:
# Remote refs.
try:
for b in self.remote.refs:
name = '/'.join(b.name.split('/')[1:])
if name not in legit_settings.forbidden_branches:
branches.append(Branch(name, is_published=True))
except (IndexError, AssertionError):
pass
if local:
# Local refs.
for b in [h.name for h in self.repo.heads]:
if (not remote_branches) or (b not in [br.name for br in branches]):
if b not in legit_settings.forbidden_branches:
branches.append(Branch(b, is_published=False))
return sorted(branches, key=attrgetter('name'))
def get_branch_names(self, local=True, remote_branches=True):
branches = self.get_branches(local=local, remote_branches=remote_branches)
return [b.name for b in branches]
def display_available_branches(self):
"""Displays available branches."""
if not self.repo.remotes:
remote_branches = False
else:
remote_branches = True
branches = self.get_branches(local=True, remote_branches=remote_branches)
if not branches:
click.echo(crayons.red('No branches available'))
return
branch_col = len(max([b.name for b in branches], key=len)) + 1
for branch in branches:
try:
branch_is_selected = (branch.name == self.get_current_branch_name())
except TypeError:
branch_is_selected = False
marker = '*' if branch_is_selected else ' '
color = colored.green if branch_is_selected else colored.yellow
pub = '(published)' if branch.is_published else '(unpublished)'
click.echo(columns(
[colored.red(marker), 2],
[color(branch.name, bold=True), branch_col],
[black(pub), 14]
))
|
kennethreitz/legit | legit/scm.py | SCMRepo.get_branches | python | def get_branches(self, local=True, remote_branches=True):
if not self.repo.remotes:
remote_branches = False
branches = []
if remote_branches:
# Remote refs.
try:
for b in self.remote.refs:
name = '/'.join(b.name.split('/')[1:])
if name not in legit_settings.forbidden_branches:
branches.append(Branch(name, is_published=True))
except (IndexError, AssertionError):
pass
if local:
# Local refs.
for b in [h.name for h in self.repo.heads]:
if (not remote_branches) or (b not in [br.name for br in branches]):
if b not in legit_settings.forbidden_branches:
branches.append(Branch(b, is_published=False))
return sorted(branches, key=attrgetter('name')) | Returns a list of local and remote branches. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L289-L318 | null | class SCMRepo(object):
git = None
repo = None
remote = None
verbose = False
fake = False
stash_index = None
def __init__(self):
self.git = os.environ.get('GIT_PYTHON_GIT_EXECUTABLE', 'git')
try:
self.repo = Repo(search_parent_directories=True)
self.remote = self.get_remote()
except InvalidGitRepositoryError:
self.repo = None
def git_exec(self, command, **kwargs):
"""Execute git commands"""
from .cli import verbose_echo
command.insert(0, self.git)
if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user
verbose = False
else:
verbose = self.verbose
verbose_echo(' '.join(command), verbose, self.fake)
if not self.fake:
result = self.repo.git.execute(command, **kwargs)
else:
if 'with_extended_output' in kwargs:
result = (0, '', '')
else:
result = ''
return result
def repo_check(self, require_remote=False):
if self.repo is None:
click.echo('Not a git repository.')
sys.exit(128)
# TODO: no remote fail
if not self.repo.remotes and require_remote:
click.echo('No git remotes configured. Please add one.')
sys.exit(128)
# TODO: You're in a merge state.
def stash_log(self, sync=False):
if self.repo.is_dirty():
status_log(self.stash_it, 'Saving local changes.', sync=sync)
def unstash_log(self, sync=False):
self.stash_index = self.unstash_index(sync=sync)
if self.stash_index:
status_log(self.unstash_it, 'Restoring local changes.', sync=sync)
def unstash_index(self, sync=False, branch=None):
"""Returns an unstash index if one is available."""
stash_list = self.git_exec(['stash', 'list'], no_verbose=True)
if branch is None:
branch = self.get_current_branch_name()
for stash in stash_list.splitlines():
verb = 'syncing' if sync else 'switching'
if (
(('Legit' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
) or
(('GitHub' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
)
):
return stash[7]
def stash_it(self, sync=False):
msg = 'syncing branch' if sync else 'switching branches'
return self.git_exec(
['stash', 'save', '--include-untracked', LEGIT_TEMPLATE.format(msg)])
def unstash_it(self, sync=False):
"""
Unstashes changes from current branch for branch sync.
Requires prior code setting self.stash_index.
"""
if self.stash_index is not None:
return self.git_exec(
['stash', 'pop', 'stash@{{{0}}}'.format(self.stash_index)])
def smart_pull(self):
"""
'git log --merges origin/master..master'
"""
branch = self.get_current_branch_name()
self.git_exec(['fetch', self.remote.name])
return self.smart_merge('{0}/{1}'.format(self.remote.name, branch),
self.smart_merge_enabled())
def smart_merge_enabled(self):
reader = self.repo.config_reader()
if reader.has_option('legit', 'smartMerge'):
return reader.getboolean('legit', 'smartMerge')
else:
return True
def smart_merge(self, branch, allow_rebase=True):
from_branch = self.get_current_branch_name()
merges = self.git_exec(
['log', '--merges', '{0}..{1}'.format(branch, from_branch)])
if allow_rebase:
verb = 'merge' if merges.count('commit') else 'rebase'
else:
if self.pull_rebase():
verb = 'rebase'
else:
verb = 'merge'
if verb != 'rebase' and self.pull_ff_only():
return self.git_exec([verb, '--ff-only', branch])
else:
try:
return self.git_exec([verb, branch])
except GitCommandError as why:
log = self.git_exec([verb, '--abort'])
abort('Merge failed. Reverting.',
log='{0}\n{1}'.format(why, log), type='merge')
def pull_rebase(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'rebase'):
return reader.getboolean('pull', 'rebase')
else:
return False
def pull_ff_only(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'ff'):
if reader.get('pull', 'ff') == 'only':
return True
else:
return False
else:
return False
def push(self, branch=None):
if branch is None:
return self.git_exec(['push'])
else:
return self.git_exec(['push', self.remote.name, branch])
def checkout_branch(self, branch):
"""Checks out given branch."""
_, stdout, stderr = self.git_exec(
['checkout', branch],
with_extended_output=True)
return '\n'.join([stderr, stdout])
def unpublish_branch(self, branch):
"""Unpublishes given branch."""
try:
return self.git_exec(
['push', self.remote.name, ':{0}'.format(branch)])
except GitCommandError:
_, _, log = self.git_exec(
['fetch', self.remote.name, '--prune'],
with_extended_output=True)
abort('Unpublish failed. Fetching.', log=log, type='unpublish')
def publish_branch(self, branch):
"""Publishes given branch."""
return self.git_exec(
['push', '-u', self.remote.name, branch])
def undo(self, hard=False):
"""Makes last commit not exist"""
if not self.fake:
return self.repo.git.reset('HEAD^', working_tree=hard)
else:
click.echo(crayons.red('Faked! >>> git reset {}{}'
.format('--hard ' if hard else '', 'HEAD^')))
return 0
def get_remote(self):
self.repo_check()
reader = self.repo.config_reader()
# If there is no remote option in legit section, return default
if reader.has_option('legit', 'remote'):
remote_name = reader.get('legit', 'remote')
if remote_name not in [r.name for r in self.repo.remotes]:
if fallback_enabled(reader):
return self.get_default_remote()
else:
click.echo('Remote "{0}" does not exist!'.format(remote_name))
will_aborted = click.confirm(
'\nPress `Y` to abort now,\n' +
'`n` to use default remote and turn fallback on for this repo:')
if will_aborted:
click.echo('\nAborted. Please update your git configuration.')
sys.exit(64) # EX_USAGE
else:
writer = self.repo.config_writer()
writer.set_value('legit', 'remoteFallback', 'true')
click.echo('\n`legit.RemoteFallback` changed to true for current repo.')
return self.get_default_remote()
else:
return self.repo.remote(remote_name)
else:
return self.get_default_remote()
def get_default_remote(self):
if len(self.repo.remotes) == 0:
return None
else:
return self.repo.remotes[0]
def get_current_branch_name(self):
"""Returns current branch name"""
return self.repo.head.ref.name
def fuzzy_match_branch(self, branch):
if not branch:
return False
all_branches = self.get_branch_names()
if branch in all_branches:
return branch
def branch_fuzzy_match(b):
return b.startswith(branch)
possible_branches = list(filter(branch_fuzzy_match, all_branches))
if len(possible_branches) == 1:
return possible_branches[0]
return branch
def get_branch_names(self, local=True, remote_branches=True):
branches = self.get_branches(local=local, remote_branches=remote_branches)
return [b.name for b in branches]
def display_available_branches(self):
"""Displays available branches."""
if not self.repo.remotes:
remote_branches = False
else:
remote_branches = True
branches = self.get_branches(local=True, remote_branches=remote_branches)
if not branches:
click.echo(crayons.red('No branches available'))
return
branch_col = len(max([b.name for b in branches], key=len)) + 1
for branch in branches:
try:
branch_is_selected = (branch.name == self.get_current_branch_name())
except TypeError:
branch_is_selected = False
marker = '*' if branch_is_selected else ' '
color = colored.green if branch_is_selected else colored.yellow
pub = '(published)' if branch.is_published else '(unpublished)'
click.echo(columns(
[colored.red(marker), 2],
[color(branch.name, bold=True), branch_col],
[black(pub), 14]
))
|
kennethreitz/legit | legit/scm.py | SCMRepo.display_available_branches | python | def display_available_branches(self):
if not self.repo.remotes:
remote_branches = False
else:
remote_branches = True
branches = self.get_branches(local=True, remote_branches=remote_branches)
if not branches:
click.echo(crayons.red('No branches available'))
return
branch_col = len(max([b.name for b in branches], key=len)) + 1
for branch in branches:
try:
branch_is_selected = (branch.name == self.get_current_branch_name())
except TypeError:
branch_is_selected = False
marker = '*' if branch_is_selected else ' '
color = colored.green if branch_is_selected else colored.yellow
pub = '(published)' if branch.is_published else '(unpublished)'
click.echo(columns(
[colored.red(marker), 2],
[color(branch.name, bold=True), branch_col],
[black(pub), 14]
)) | Displays available branches. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/scm.py#L326-L356 | [
"def black(s, **kwargs):\n if legit_settings.allow_black_foreground:\n return crayons.black(s, **kwargs)\n else:\n return s.encode('utf-8')\n",
"def get_current_branch_name(self):\n \"\"\"Returns current branch name\"\"\"\n\n return self.repo.head.ref.name\n",
"def get_branches(self, l... | class SCMRepo(object):
git = None
repo = None
remote = None
verbose = False
fake = False
stash_index = None
def __init__(self):
self.git = os.environ.get('GIT_PYTHON_GIT_EXECUTABLE', 'git')
try:
self.repo = Repo(search_parent_directories=True)
self.remote = self.get_remote()
except InvalidGitRepositoryError:
self.repo = None
def git_exec(self, command, **kwargs):
"""Execute git commands"""
from .cli import verbose_echo
command.insert(0, self.git)
if kwargs.pop('no_verbose', False): # used when git output isn't helpful to user
verbose = False
else:
verbose = self.verbose
verbose_echo(' '.join(command), verbose, self.fake)
if not self.fake:
result = self.repo.git.execute(command, **kwargs)
else:
if 'with_extended_output' in kwargs:
result = (0, '', '')
else:
result = ''
return result
def repo_check(self, require_remote=False):
if self.repo is None:
click.echo('Not a git repository.')
sys.exit(128)
# TODO: no remote fail
if not self.repo.remotes and require_remote:
click.echo('No git remotes configured. Please add one.')
sys.exit(128)
# TODO: You're in a merge state.
def stash_log(self, sync=False):
if self.repo.is_dirty():
status_log(self.stash_it, 'Saving local changes.', sync=sync)
def unstash_log(self, sync=False):
self.stash_index = self.unstash_index(sync=sync)
if self.stash_index:
status_log(self.unstash_it, 'Restoring local changes.', sync=sync)
def unstash_index(self, sync=False, branch=None):
"""Returns an unstash index if one is available."""
stash_list = self.git_exec(['stash', 'list'], no_verbose=True)
if branch is None:
branch = self.get_current_branch_name()
for stash in stash_list.splitlines():
verb = 'syncing' if sync else 'switching'
if (
(('Legit' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
) or
(('GitHub' in stash) and
('On {0}:'.format(branch) in stash) and
(verb in stash)
)
):
return stash[7]
def stash_it(self, sync=False):
msg = 'syncing branch' if sync else 'switching branches'
return self.git_exec(
['stash', 'save', '--include-untracked', LEGIT_TEMPLATE.format(msg)])
def unstash_it(self, sync=False):
"""
Unstashes changes from current branch for branch sync.
Requires prior code setting self.stash_index.
"""
if self.stash_index is not None:
return self.git_exec(
['stash', 'pop', 'stash@{{{0}}}'.format(self.stash_index)])
def smart_pull(self):
"""
'git log --merges origin/master..master'
"""
branch = self.get_current_branch_name()
self.git_exec(['fetch', self.remote.name])
return self.smart_merge('{0}/{1}'.format(self.remote.name, branch),
self.smart_merge_enabled())
def smart_merge_enabled(self):
reader = self.repo.config_reader()
if reader.has_option('legit', 'smartMerge'):
return reader.getboolean('legit', 'smartMerge')
else:
return True
def smart_merge(self, branch, allow_rebase=True):
from_branch = self.get_current_branch_name()
merges = self.git_exec(
['log', '--merges', '{0}..{1}'.format(branch, from_branch)])
if allow_rebase:
verb = 'merge' if merges.count('commit') else 'rebase'
else:
if self.pull_rebase():
verb = 'rebase'
else:
verb = 'merge'
if verb != 'rebase' and self.pull_ff_only():
return self.git_exec([verb, '--ff-only', branch])
else:
try:
return self.git_exec([verb, branch])
except GitCommandError as why:
log = self.git_exec([verb, '--abort'])
abort('Merge failed. Reverting.',
log='{0}\n{1}'.format(why, log), type='merge')
def pull_rebase(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'rebase'):
return reader.getboolean('pull', 'rebase')
else:
return False
def pull_ff_only(self):
reader = self.repo.config_reader()
if reader.has_option('pull', 'ff'):
if reader.get('pull', 'ff') == 'only':
return True
else:
return False
else:
return False
def push(self, branch=None):
if branch is None:
return self.git_exec(['push'])
else:
return self.git_exec(['push', self.remote.name, branch])
def checkout_branch(self, branch):
"""Checks out given branch."""
_, stdout, stderr = self.git_exec(
['checkout', branch],
with_extended_output=True)
return '\n'.join([stderr, stdout])
def unpublish_branch(self, branch):
"""Unpublishes given branch."""
try:
return self.git_exec(
['push', self.remote.name, ':{0}'.format(branch)])
except GitCommandError:
_, _, log = self.git_exec(
['fetch', self.remote.name, '--prune'],
with_extended_output=True)
abort('Unpublish failed. Fetching.', log=log, type='unpublish')
def publish_branch(self, branch):
"""Publishes given branch."""
return self.git_exec(
['push', '-u', self.remote.name, branch])
def undo(self, hard=False):
"""Makes last commit not exist"""
if not self.fake:
return self.repo.git.reset('HEAD^', working_tree=hard)
else:
click.echo(crayons.red('Faked! >>> git reset {}{}'
.format('--hard ' if hard else '', 'HEAD^')))
return 0
def get_remote(self):
self.repo_check()
reader = self.repo.config_reader()
# If there is no remote option in legit section, return default
if reader.has_option('legit', 'remote'):
remote_name = reader.get('legit', 'remote')
if remote_name not in [r.name for r in self.repo.remotes]:
if fallback_enabled(reader):
return self.get_default_remote()
else:
click.echo('Remote "{0}" does not exist!'.format(remote_name))
will_aborted = click.confirm(
'\nPress `Y` to abort now,\n' +
'`n` to use default remote and turn fallback on for this repo:')
if will_aborted:
click.echo('\nAborted. Please update your git configuration.')
sys.exit(64) # EX_USAGE
else:
writer = self.repo.config_writer()
writer.set_value('legit', 'remoteFallback', 'true')
click.echo('\n`legit.RemoteFallback` changed to true for current repo.')
return self.get_default_remote()
else:
return self.repo.remote(remote_name)
else:
return self.get_default_remote()
def get_default_remote(self):
if len(self.repo.remotes) == 0:
return None
else:
return self.repo.remotes[0]
def get_current_branch_name(self):
"""Returns current branch name"""
return self.repo.head.ref.name
def fuzzy_match_branch(self, branch):
if not branch:
return False
all_branches = self.get_branch_names()
if branch in all_branches:
return branch
def branch_fuzzy_match(b):
return b.startswith(branch)
possible_branches = list(filter(branch_fuzzy_match, all_branches))
if len(possible_branches) == 1:
return possible_branches[0]
return branch
def get_branches(self, local=True, remote_branches=True):
"""Returns a list of local and remote branches."""
if not self.repo.remotes:
remote_branches = False
branches = []
if remote_branches:
# Remote refs.
try:
for b in self.remote.refs:
name = '/'.join(b.name.split('/')[1:])
if name not in legit_settings.forbidden_branches:
branches.append(Branch(name, is_published=True))
except (IndexError, AssertionError):
pass
if local:
# Local refs.
for b in [h.name for h in self.repo.heads]:
if (not remote_branches) or (b not in [br.name for br in branches]):
if b not in legit_settings.forbidden_branches:
branches.append(Branch(b, is_published=False))
return sorted(branches, key=attrgetter('name'))
def get_branch_names(self, local=True, remote_branches=True):
branches = self.get_branches(local=local, remote_branches=remote_branches)
return [b.name for b in branches]
|
kennethreitz/legit | legit/utils.py | status_log | python | def status_log(func, message, *args, **kwargs):
click.echo(message)
log = func(*args, **kwargs)
if log:
out = []
for line in log.split('\n'):
if not line.startswith('#'):
out.append(line)
click.echo(black('\n'.join(out))) | Emits header message, executes a callable, and echoes the return strings. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/utils.py#L8-L20 | [
"def black(s, **kwargs):\n if legit_settings.allow_black_foreground:\n return crayons.black(s, **kwargs)\n else:\n return s.encode('utf-8')\n",
"def unstash_it(self, sync=False):\n \"\"\"\n Unstashes changes from current branch for branch sync.\n Requires prior code setting self.stash... | import click
from clint.textui import colored, columns
import crayons
from .settings import legit_settings
def verbose_echo(str, verbose=False, fake=False):
"""Selectively output ``str``, with special formatting if ``fake`` is True"""
verbose = fake or verbose
if verbose:
color = crayons.green
prefix = ''
if fake:
color = crayons.red
prefix = 'Faked!'
click.echo(color('{} >>> {}'.format(prefix, str)))
def output_aliases(aliases):
"""Display git aliases"""
for alias in aliases:
cmd = '!legit ' + alias
click.echo(columns([colored.yellow('git ' + alias), 20], [cmd, None]))
def order_manually(sub_commands):
"""Order sub-commands for display"""
order = [
"switch",
"sync",
"publish",
"unpublish",
"undo",
"branches",
]
ordered = []
commands = dict(zip([cmd for cmd in sub_commands], sub_commands))
for k in order:
ordered.append(commands.get(k, ""))
if k in commands:
del commands[k]
# Add commands not present in `order` above
for k in commands:
ordered.append(commands[k])
return ordered
def format_help(help):
"""Format the help string."""
help = help.replace('Options:', str(black('Options:', bold=True)))
help = help.replace('Usage: legit', str('Usage: {0}'.format(black('legit', bold=True))))
help = help.replace(' switch', str(crayons.green(' switch', bold=True)))
help = help.replace(' sync', str(crayons.green(' sync', bold=True)))
help = help.replace(' publish', str(crayons.green(' publish', bold=True)))
help = help.replace(' unpublish', str(crayons.green(' unpublish', bold=True)))
help = help.replace(' undo', str(crayons.green(' undo', bold=True)))
help = help.replace(' branches', str(crayons.yellow(' branches', bold=True)))
additional_help = \
"""Usage Examples:
Switch to specific branch:
$ {0}
Sync current branch with remote:
$ {1}
Sync current code with a specific remote branch:
$ {2}
Publish current branch to remote:
$ {3}
Publish to a specific branch to remote:
$ {4}
Unpublish a specific branch from remote:
$ {5}
Commands:""".format(
crayons.red('legit switch <branch>'),
crayons.red('legit sync'),
crayons.red('legit sync <branch>'),
crayons.red('legit publish'),
crayons.red('legit publish <branch>'),
crayons.red('legit unpublish <branch>'),
)
help = help.replace('Commands:', additional_help)
return help
def black(s, **kwargs):
if legit_settings.allow_black_foreground:
return crayons.black(s, **kwargs)
else:
return s.encode('utf-8')
|
kennethreitz/legit | legit/utils.py | verbose_echo | python | def verbose_echo(str, verbose=False, fake=False):
verbose = fake or verbose
if verbose:
color = crayons.green
prefix = ''
if fake:
color = crayons.red
prefix = 'Faked!'
click.echo(color('{} >>> {}'.format(prefix, str))) | Selectively output ``str``, with special formatting if ``fake`` is True | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/utils.py#L23-L33 | null | import click
from clint.textui import colored, columns
import crayons
from .settings import legit_settings
def status_log(func, message, *args, **kwargs):
"""Emits header message, executes a callable, and echoes the return strings."""
click.echo(message)
log = func(*args, **kwargs)
if log:
out = []
for line in log.split('\n'):
if not line.startswith('#'):
out.append(line)
click.echo(black('\n'.join(out)))
def output_aliases(aliases):
"""Display git aliases"""
for alias in aliases:
cmd = '!legit ' + alias
click.echo(columns([colored.yellow('git ' + alias), 20], [cmd, None]))
def order_manually(sub_commands):
"""Order sub-commands for display"""
order = [
"switch",
"sync",
"publish",
"unpublish",
"undo",
"branches",
]
ordered = []
commands = dict(zip([cmd for cmd in sub_commands], sub_commands))
for k in order:
ordered.append(commands.get(k, ""))
if k in commands:
del commands[k]
# Add commands not present in `order` above
for k in commands:
ordered.append(commands[k])
return ordered
def format_help(help):
"""Format the help string."""
help = help.replace('Options:', str(black('Options:', bold=True)))
help = help.replace('Usage: legit', str('Usage: {0}'.format(black('legit', bold=True))))
help = help.replace(' switch', str(crayons.green(' switch', bold=True)))
help = help.replace(' sync', str(crayons.green(' sync', bold=True)))
help = help.replace(' publish', str(crayons.green(' publish', bold=True)))
help = help.replace(' unpublish', str(crayons.green(' unpublish', bold=True)))
help = help.replace(' undo', str(crayons.green(' undo', bold=True)))
help = help.replace(' branches', str(crayons.yellow(' branches', bold=True)))
additional_help = \
"""Usage Examples:
Switch to specific branch:
$ {0}
Sync current branch with remote:
$ {1}
Sync current code with a specific remote branch:
$ {2}
Publish current branch to remote:
$ {3}
Publish to a specific branch to remote:
$ {4}
Unpublish a specific branch from remote:
$ {5}
Commands:""".format(
crayons.red('legit switch <branch>'),
crayons.red('legit sync'),
crayons.red('legit sync <branch>'),
crayons.red('legit publish'),
crayons.red('legit publish <branch>'),
crayons.red('legit unpublish <branch>'),
)
help = help.replace('Commands:', additional_help)
return help
def black(s, **kwargs):
if legit_settings.allow_black_foreground:
return crayons.black(s, **kwargs)
else:
return s.encode('utf-8')
|
kennethreitz/legit | legit/utils.py | output_aliases | python | def output_aliases(aliases):
for alias in aliases:
cmd = '!legit ' + alias
click.echo(columns([colored.yellow('git ' + alias), 20], [cmd, None])) | Display git aliases | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/utils.py#L36-L40 | null | import click
from clint.textui import colored, columns
import crayons
from .settings import legit_settings
def status_log(func, message, *args, **kwargs):
"""Emits header message, executes a callable, and echoes the return strings."""
click.echo(message)
log = func(*args, **kwargs)
if log:
out = []
for line in log.split('\n'):
if not line.startswith('#'):
out.append(line)
click.echo(black('\n'.join(out)))
def verbose_echo(str, verbose=False, fake=False):
"""Selectively output ``str``, with special formatting if ``fake`` is True"""
verbose = fake or verbose
if verbose:
color = crayons.green
prefix = ''
if fake:
color = crayons.red
prefix = 'Faked!'
click.echo(color('{} >>> {}'.format(prefix, str)))
def order_manually(sub_commands):
"""Order sub-commands for display"""
order = [
"switch",
"sync",
"publish",
"unpublish",
"undo",
"branches",
]
ordered = []
commands = dict(zip([cmd for cmd in sub_commands], sub_commands))
for k in order:
ordered.append(commands.get(k, ""))
if k in commands:
del commands[k]
# Add commands not present in `order` above
for k in commands:
ordered.append(commands[k])
return ordered
def format_help(help):
"""Format the help string."""
help = help.replace('Options:', str(black('Options:', bold=True)))
help = help.replace('Usage: legit', str('Usage: {0}'.format(black('legit', bold=True))))
help = help.replace(' switch', str(crayons.green(' switch', bold=True)))
help = help.replace(' sync', str(crayons.green(' sync', bold=True)))
help = help.replace(' publish', str(crayons.green(' publish', bold=True)))
help = help.replace(' unpublish', str(crayons.green(' unpublish', bold=True)))
help = help.replace(' undo', str(crayons.green(' undo', bold=True)))
help = help.replace(' branches', str(crayons.yellow(' branches', bold=True)))
additional_help = \
"""Usage Examples:
Switch to specific branch:
$ {0}
Sync current branch with remote:
$ {1}
Sync current code with a specific remote branch:
$ {2}
Publish current branch to remote:
$ {3}
Publish to a specific branch to remote:
$ {4}
Unpublish a specific branch from remote:
$ {5}
Commands:""".format(
crayons.red('legit switch <branch>'),
crayons.red('legit sync'),
crayons.red('legit sync <branch>'),
crayons.red('legit publish'),
crayons.red('legit publish <branch>'),
crayons.red('legit unpublish <branch>'),
)
help = help.replace('Commands:', additional_help)
return help
def black(s, **kwargs):
if legit_settings.allow_black_foreground:
return crayons.black(s, **kwargs)
else:
return s.encode('utf-8')
|
kennethreitz/legit | legit/cli.py | cli | python | def cli(ctx, verbose, fake, install, uninstall, config):
# Create a repo object and remember it as as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_scm decorator.
ctx.obj = SCMRepo()
ctx.obj.fake = fake
ctx.obj.verbose = fake or verbose
if install:
do_install(ctx, verbose, fake)
ctx.exit()
elif uninstall:
do_uninstall(ctx, verbose, fake)
ctx.exit()
elif config:
do_edit_settings(fake)
ctx.exit()
else:
if ctx.invoked_subcommand is None:
# Display help to user if no commands were passed.
click.echo(format_help(ctx.get_help())) | legit command line interface | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L82-L103 | [
"def format_help(help):\n \"\"\"Format the help string.\"\"\"\n help = help.replace('Options:', str(black('Options:', bold=True)))\n\n help = help.replace('Usage: legit', str('Usage: {0}'.format(black('legit', bold=True))))\n\n help = help.replace(' switch', str(crayons.green(' switch', bold=True)))\n... | # -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import click
from clint import resources
from clint.textui import columns
import crayons
from .core import __version__
from .scm import SCMRepo
from .settings import legit_settings
from .utils import black, format_help, order_manually, output_aliases, status_log, verbose_echo
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
pass_scm = click.make_pass_decorator(SCMRepo)
class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def list_commands(self, ctx):
"""Override for showing commands in particular order"""
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)]
def get_command(self, ctx, cmd_name):
"""Override to handle command aliases"""
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
cmd_name = self.command_aliases.get(cmd_name, "")
return click.Group.get_command(self, ctx, cmd_name)
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.obj:
# legit main help
click.echo(format_help(ctx.get_help()))
else:
# legit sub-command help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return click.Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.')
@click.group(cls=LegitGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name=black('legit', bold=True), version=__version__)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--install', is_flag=True, help='Install legit git aliases.')
@click.option('--uninstall', is_flag=True, help='Uninstall legit git aliases.')
@click.option('--config', is_flag=True, help='Edit legit configuration file.')
@click.pass_context
@cli.command(short_help='Switches to specified branch.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def switch(scm, to_branch, verbose, fake):
"""Switches from one branch to another, safely stashing and restoring local changes.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
if to_branch is None:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to switch to')
scm.stash_log()
status_log(scm.checkout_branch, 'Switching to {0}.'.format(
crayons.yellow(to_branch)), to_branch)
scm.unstash_log()
@cli.command(short_help='Synchronizes the given branch with remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@click.pass_context
def sync(ctx, scm, to_branch, verbose, fake):
"""Stashes unstaged changes, Fetches remote data, Performs smart
pull+merge, Pushes local commits up, and Unstashes changes.
Defaults to current branch.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
if to_branch:
# Optional branch specifier.
branch = scm.fuzzy_match_branch(to_branch)
if branch:
is_external = True
original_branch = scm.get_current_branch_name()
else:
raise click.BadArgumentUsage(
"Branch {0} does not exist. Use an existing branch."
.format(crayons.yellow(branch)))
else:
# Sync current branch.
branch = scm.get_current_branch_name()
is_external = False
if branch in scm.get_branch_names(local=False):
if is_external:
ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake)
scm.stash_log(sync=True)
status_log(scm.smart_pull, 'Pulling commits from the server.')
status_log(scm.push, 'Pushing commits to the server.', branch)
scm.unstash_log(sync=True)
if is_external:
ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake)
else:
raise click.BadArgumentUsage(
"Branch {0} is not published. Publish before syncing."
.format(crayons.yellow(branch)))
@cli.command(short_help='Publishes specified branch to the remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def publish(scm, to_branch, verbose, fake):
"""Pushes an unpublished branch to a remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(to_branch)
if not branch:
branch = scm.get_current_branch_name()
scm.display_available_branches()
if to_branch is None:
click.echo("Using current branch {0}".format(crayons.yellow(branch)))
else:
click.echo(
"Branch {0} not found, using current branch {1}"
.format(crayons.red(to_branch), crayons.yellow(branch)))
branch_names = scm.get_branch_names(local=False)
if branch in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is already published. Use a branch that is not published."
.format(crayons.yellow(branch)))
status_log(scm.publish_branch, 'Publishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command(short_help='Removes specified branch from the remote.')
@click.argument('published_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def unpublish(scm, published_branch, verbose, fake):
"""Removes a published branch from the remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(published_branch)
if not branch:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to unpublish')
branch_names = scm.get_branch_names(local=False)
if branch not in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is not published. Use a branch that is published."
.format(crayons.yellow(branch)))
status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command()
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--hard', is_flag=True, help='Discard local changes.')
@pass_scm
def undo(scm, verbose, fake, hard):
"""Removes the last commit from history."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
status_log(scm.undo, 'Last commit removed from history.', hard)
@cli.command()
@pass_scm
def branches(scm):
"""Displays a list of branches."""
scm.repo_check()
scm.display_available_branches()
def do_install(ctx, verbose, fake):
"""Installs legit git aliases."""
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo("\nAliases installed.")
else:
click.echo("\nAliases will not be installed.")
def do_uninstall(ctx, verbose, fake):
"""Uninstalls legit git aliases, including deprecated legit sub-commands."""
aliases = cli.list_commands(ctx)
# Add deprecated aliases
aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
for alias in aliases:
system_command = 'git config --global --unset-all alias.{0}'.format(alias)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo('\nThe following git aliases are uninstalled:\n')
output_aliases(aliases)
def do_edit_settings(fake):
"""Opens legit settings in editor."""
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path)
def handle_abort(aborted, type=None):
click.echo('{0} {1}'.format(crayons.red('Error:'), aborted.message))
click.echo(str(aborted.log))
if type == 'merge':
click.echo('Unfortunately, there was a merge conflict.'
' It has to be merged manually.')
elif type == 'unpublish':
click.echo(
'''It seems that the remote branch is deleted.
If `legit branches` still shows it as published,
then probably the branch has been deleted at the remote by someone else.
You can run `git fetch --prune` to update remote information.
''')
raise click.Abort
legit_settings.abort_handler = handle_abort
|
kennethreitz/legit | legit/cli.py | switch | python | def switch(scm, to_branch, verbose, fake):
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
if to_branch is None:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to switch to')
scm.stash_log()
status_log(scm.checkout_branch, 'Switching to {0}.'.format(
crayons.yellow(to_branch)), to_branch)
scm.unstash_log() | Switches from one branch to another, safely stashing and restoring local changes. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L111-L126 | [
"def status_log(func, message, *args, **kwargs):\n \"\"\"Emits header message, executes a callable, and echoes the return strings.\"\"\"\n\n click.echo(message)\n log = func(*args, **kwargs)\n\n if log:\n out = []\n\n for line in log.split('\\n'):\n if not line.startswith('#'):\... | # -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import click
from clint import resources
from clint.textui import columns
import crayons
from .core import __version__
from .scm import SCMRepo
from .settings import legit_settings
from .utils import black, format_help, order_manually, output_aliases, status_log, verbose_echo
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
pass_scm = click.make_pass_decorator(SCMRepo)
class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def list_commands(self, ctx):
"""Override for showing commands in particular order"""
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)]
def get_command(self, ctx, cmd_name):
"""Override to handle command aliases"""
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
cmd_name = self.command_aliases.get(cmd_name, "")
return click.Group.get_command(self, ctx, cmd_name)
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.obj:
# legit main help
click.echo(format_help(ctx.get_help()))
else:
# legit sub-command help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return click.Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.')
@click.group(cls=LegitGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name=black('legit', bold=True), version=__version__)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--install', is_flag=True, help='Install legit git aliases.')
@click.option('--uninstall', is_flag=True, help='Uninstall legit git aliases.')
@click.option('--config', is_flag=True, help='Edit legit configuration file.')
@click.pass_context
def cli(ctx, verbose, fake, install, uninstall, config):
"""legit command line interface"""
# Create a repo object and remember it as as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_scm decorator.
ctx.obj = SCMRepo()
ctx.obj.fake = fake
ctx.obj.verbose = fake or verbose
if install:
do_install(ctx, verbose, fake)
ctx.exit()
elif uninstall:
do_uninstall(ctx, verbose, fake)
ctx.exit()
elif config:
do_edit_settings(fake)
ctx.exit()
else:
if ctx.invoked_subcommand is None:
# Display help to user if no commands were passed.
click.echo(format_help(ctx.get_help()))
@cli.command(short_help='Switches to specified branch.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@cli.command(short_help='Synchronizes the given branch with remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@click.pass_context
def sync(ctx, scm, to_branch, verbose, fake):
"""Stashes unstaged changes, Fetches remote data, Performs smart
pull+merge, Pushes local commits up, and Unstashes changes.
Defaults to current branch.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
if to_branch:
# Optional branch specifier.
branch = scm.fuzzy_match_branch(to_branch)
if branch:
is_external = True
original_branch = scm.get_current_branch_name()
else:
raise click.BadArgumentUsage(
"Branch {0} does not exist. Use an existing branch."
.format(crayons.yellow(branch)))
else:
# Sync current branch.
branch = scm.get_current_branch_name()
is_external = False
if branch in scm.get_branch_names(local=False):
if is_external:
ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake)
scm.stash_log(sync=True)
status_log(scm.smart_pull, 'Pulling commits from the server.')
status_log(scm.push, 'Pushing commits to the server.', branch)
scm.unstash_log(sync=True)
if is_external:
ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake)
else:
raise click.BadArgumentUsage(
"Branch {0} is not published. Publish before syncing."
.format(crayons.yellow(branch)))
@cli.command(short_help='Publishes specified branch to the remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def publish(scm, to_branch, verbose, fake):
"""Pushes an unpublished branch to a remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(to_branch)
if not branch:
branch = scm.get_current_branch_name()
scm.display_available_branches()
if to_branch is None:
click.echo("Using current branch {0}".format(crayons.yellow(branch)))
else:
click.echo(
"Branch {0} not found, using current branch {1}"
.format(crayons.red(to_branch), crayons.yellow(branch)))
branch_names = scm.get_branch_names(local=False)
if branch in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is already published. Use a branch that is not published."
.format(crayons.yellow(branch)))
status_log(scm.publish_branch, 'Publishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command(short_help='Removes specified branch from the remote.')
@click.argument('published_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def unpublish(scm, published_branch, verbose, fake):
"""Removes a published branch from the remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(published_branch)
if not branch:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to unpublish')
branch_names = scm.get_branch_names(local=False)
if branch not in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is not published. Use a branch that is published."
.format(crayons.yellow(branch)))
status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command()
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--hard', is_flag=True, help='Discard local changes.')
@pass_scm
def undo(scm, verbose, fake, hard):
"""Removes the last commit from history."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
status_log(scm.undo, 'Last commit removed from history.', hard)
@cli.command()
@pass_scm
def branches(scm):
"""Displays a list of branches."""
scm.repo_check()
scm.display_available_branches()
def do_install(ctx, verbose, fake):
"""Installs legit git aliases."""
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo("\nAliases installed.")
else:
click.echo("\nAliases will not be installed.")
def do_uninstall(ctx, verbose, fake):
"""Uninstalls legit git aliases, including deprecated legit sub-commands."""
aliases = cli.list_commands(ctx)
# Add deprecated aliases
aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
for alias in aliases:
system_command = 'git config --global --unset-all alias.{0}'.format(alias)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo('\nThe following git aliases are uninstalled:\n')
output_aliases(aliases)
def do_edit_settings(fake):
"""Opens legit settings in editor."""
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path)
def handle_abort(aborted, type=None):
click.echo('{0} {1}'.format(crayons.red('Error:'), aborted.message))
click.echo(str(aborted.log))
if type == 'merge':
click.echo('Unfortunately, there was a merge conflict.'
' It has to be merged manually.')
elif type == 'unpublish':
click.echo(
'''It seems that the remote branch is deleted.
If `legit branches` still shows it as published,
then probably the branch has been deleted at the remote by someone else.
You can run `git fetch --prune` to update remote information.
''')
raise click.Abort
legit_settings.abort_handler = handle_abort
|
kennethreitz/legit | legit/cli.py | sync | python | def sync(ctx, scm, to_branch, verbose, fake):
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
if to_branch:
# Optional branch specifier.
branch = scm.fuzzy_match_branch(to_branch)
if branch:
is_external = True
original_branch = scm.get_current_branch_name()
else:
raise click.BadArgumentUsage(
"Branch {0} does not exist. Use an existing branch."
.format(crayons.yellow(branch)))
else:
# Sync current branch.
branch = scm.get_current_branch_name()
is_external = False
if branch in scm.get_branch_names(local=False):
if is_external:
ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake)
scm.stash_log(sync=True)
status_log(scm.smart_pull, 'Pulling commits from the server.')
status_log(scm.push, 'Pushing commits to the server.', branch)
scm.unstash_log(sync=True)
if is_external:
ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake)
else:
raise click.BadArgumentUsage(
"Branch {0} is not published. Publish before syncing."
.format(crayons.yellow(branch))) | Stashes unstaged changes, Fetches remote data, Performs smart
pull+merge, Pushes local commits up, and Unstashes changes.
Defaults to current branch. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L135-L173 | [
"def status_log(func, message, *args, **kwargs):\n \"\"\"Emits header message, executes a callable, and echoes the return strings.\"\"\"\n\n click.echo(message)\n log = func(*args, **kwargs)\n\n if log:\n out = []\n\n for line in log.split('\\n'):\n if not line.startswith('#'):\... | # -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import click
from clint import resources
from clint.textui import columns
import crayons
from .core import __version__
from .scm import SCMRepo
from .settings import legit_settings
from .utils import black, format_help, order_manually, output_aliases, status_log, verbose_echo
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
pass_scm = click.make_pass_decorator(SCMRepo)
class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def list_commands(self, ctx):
"""Override for showing commands in particular order"""
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)]
def get_command(self, ctx, cmd_name):
"""Override to handle command aliases"""
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
cmd_name = self.command_aliases.get(cmd_name, "")
return click.Group.get_command(self, ctx, cmd_name)
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.obj:
# legit main help
click.echo(format_help(ctx.get_help()))
else:
# legit sub-command help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return click.Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.')
@click.group(cls=LegitGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name=black('legit', bold=True), version=__version__)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--install', is_flag=True, help='Install legit git aliases.')
@click.option('--uninstall', is_flag=True, help='Uninstall legit git aliases.')
@click.option('--config', is_flag=True, help='Edit legit configuration file.')
@click.pass_context
def cli(ctx, verbose, fake, install, uninstall, config):
"""legit command line interface"""
# Create a repo object and remember it as as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_scm decorator.
ctx.obj = SCMRepo()
ctx.obj.fake = fake
ctx.obj.verbose = fake or verbose
if install:
do_install(ctx, verbose, fake)
ctx.exit()
elif uninstall:
do_uninstall(ctx, verbose, fake)
ctx.exit()
elif config:
do_edit_settings(fake)
ctx.exit()
else:
if ctx.invoked_subcommand is None:
# Display help to user if no commands were passed.
click.echo(format_help(ctx.get_help()))
@cli.command(short_help='Switches to specified branch.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def switch(scm, to_branch, verbose, fake):
"""Switches from one branch to another, safely stashing and restoring local changes.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
if to_branch is None:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to switch to')
scm.stash_log()
status_log(scm.checkout_branch, 'Switching to {0}.'.format(
crayons.yellow(to_branch)), to_branch)
scm.unstash_log()
@cli.command(short_help='Synchronizes the given branch with remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@click.pass_context
@cli.command(short_help='Publishes specified branch to the remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def publish(scm, to_branch, verbose, fake):
"""Pushes an unpublished branch to a remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(to_branch)
if not branch:
branch = scm.get_current_branch_name()
scm.display_available_branches()
if to_branch is None:
click.echo("Using current branch {0}".format(crayons.yellow(branch)))
else:
click.echo(
"Branch {0} not found, using current branch {1}"
.format(crayons.red(to_branch), crayons.yellow(branch)))
branch_names = scm.get_branch_names(local=False)
if branch in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is already published. Use a branch that is not published."
.format(crayons.yellow(branch)))
status_log(scm.publish_branch, 'Publishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command(short_help='Removes specified branch from the remote.')
@click.argument('published_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def unpublish(scm, published_branch, verbose, fake):
"""Removes a published branch from the remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(published_branch)
if not branch:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to unpublish')
branch_names = scm.get_branch_names(local=False)
if branch not in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is not published. Use a branch that is published."
.format(crayons.yellow(branch)))
status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command()
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--hard', is_flag=True, help='Discard local changes.')
@pass_scm
def undo(scm, verbose, fake, hard):
"""Removes the last commit from history."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
status_log(scm.undo, 'Last commit removed from history.', hard)
@cli.command()
@pass_scm
def branches(scm):
"""Displays a list of branches."""
scm.repo_check()
scm.display_available_branches()
def do_install(ctx, verbose, fake):
"""Installs legit git aliases."""
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo("\nAliases installed.")
else:
click.echo("\nAliases will not be installed.")
def do_uninstall(ctx, verbose, fake):
"""Uninstalls legit git aliases, including deprecated legit sub-commands."""
aliases = cli.list_commands(ctx)
# Add deprecated aliases
aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
for alias in aliases:
system_command = 'git config --global --unset-all alias.{0}'.format(alias)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo('\nThe following git aliases are uninstalled:\n')
output_aliases(aliases)
def do_edit_settings(fake):
"""Opens legit settings in editor."""
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path)
def handle_abort(aborted, type=None):
click.echo('{0} {1}'.format(crayons.red('Error:'), aborted.message))
click.echo(str(aborted.log))
if type == 'merge':
click.echo('Unfortunately, there was a merge conflict.'
' It has to be merged manually.')
elif type == 'unpublish':
click.echo(
'''It seems that the remote branch is deleted.
If `legit branches` still shows it as published,
then probably the branch has been deleted at the remote by someone else.
You can run `git fetch --prune` to update remote information.
''')
raise click.Abort
legit_settings.abort_handler = handle_abort
|
kennethreitz/legit | legit/cli.py | publish | python | def publish(scm, to_branch, verbose, fake):
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(to_branch)
if not branch:
branch = scm.get_current_branch_name()
scm.display_available_branches()
if to_branch is None:
click.echo("Using current branch {0}".format(crayons.yellow(branch)))
else:
click.echo(
"Branch {0} not found, using current branch {1}"
.format(crayons.red(to_branch), crayons.yellow(branch)))
branch_names = scm.get_branch_names(local=False)
if branch in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is already published. Use a branch that is not published."
.format(crayons.yellow(branch)))
status_log(scm.publish_branch, 'Publishing {0}.'.format(
crayons.yellow(branch)), branch) | Pushes an unpublished branch to a remote repository. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L181-L207 | [
"def status_log(func, message, *args, **kwargs):\n \"\"\"Emits header message, executes a callable, and echoes the return strings.\"\"\"\n\n click.echo(message)\n log = func(*args, **kwargs)\n\n if log:\n out = []\n\n for line in log.split('\\n'):\n if not line.startswith('#'):\... | # -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import click
from clint import resources
from clint.textui import columns
import crayons
from .core import __version__
from .scm import SCMRepo
from .settings import legit_settings
from .utils import black, format_help, order_manually, output_aliases, status_log, verbose_echo
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
pass_scm = click.make_pass_decorator(SCMRepo)
class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def list_commands(self, ctx):
"""Override for showing commands in particular order"""
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)]
def get_command(self, ctx, cmd_name):
"""Override to handle command aliases"""
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
cmd_name = self.command_aliases.get(cmd_name, "")
return click.Group.get_command(self, ctx, cmd_name)
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.obj:
# legit main help
click.echo(format_help(ctx.get_help()))
else:
# legit sub-command help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return click.Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.')
@click.group(cls=LegitGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name=black('legit', bold=True), version=__version__)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--install', is_flag=True, help='Install legit git aliases.')
@click.option('--uninstall', is_flag=True, help='Uninstall legit git aliases.')
@click.option('--config', is_flag=True, help='Edit legit configuration file.')
@click.pass_context
def cli(ctx, verbose, fake, install, uninstall, config):
"""legit command line interface"""
# Create a repo object and remember it as as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_scm decorator.
ctx.obj = SCMRepo()
ctx.obj.fake = fake
ctx.obj.verbose = fake or verbose
if install:
do_install(ctx, verbose, fake)
ctx.exit()
elif uninstall:
do_uninstall(ctx, verbose, fake)
ctx.exit()
elif config:
do_edit_settings(fake)
ctx.exit()
else:
if ctx.invoked_subcommand is None:
# Display help to user if no commands were passed.
click.echo(format_help(ctx.get_help()))
@cli.command(short_help='Switches to specified branch.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def switch(scm, to_branch, verbose, fake):
"""Switches from one branch to another, safely stashing and restoring local changes.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
if to_branch is None:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to switch to')
scm.stash_log()
status_log(scm.checkout_branch, 'Switching to {0}.'.format(
crayons.yellow(to_branch)), to_branch)
scm.unstash_log()
@cli.command(short_help='Synchronizes the given branch with remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@click.pass_context
def sync(ctx, scm, to_branch, verbose, fake):
"""Stashes unstaged changes, Fetches remote data, Performs smart
pull+merge, Pushes local commits up, and Unstashes changes.
Defaults to current branch.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
if to_branch:
# Optional branch specifier.
branch = scm.fuzzy_match_branch(to_branch)
if branch:
is_external = True
original_branch = scm.get_current_branch_name()
else:
raise click.BadArgumentUsage(
"Branch {0} does not exist. Use an existing branch."
.format(crayons.yellow(branch)))
else:
# Sync current branch.
branch = scm.get_current_branch_name()
is_external = False
if branch in scm.get_branch_names(local=False):
if is_external:
ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake)
scm.stash_log(sync=True)
status_log(scm.smart_pull, 'Pulling commits from the server.')
status_log(scm.push, 'Pushing commits to the server.', branch)
scm.unstash_log(sync=True)
if is_external:
ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake)
else:
raise click.BadArgumentUsage(
"Branch {0} is not published. Publish before syncing."
.format(crayons.yellow(branch)))
@cli.command(short_help='Publishes specified branch to the remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@cli.command(short_help='Removes specified branch from the remote.')
@click.argument('published_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def unpublish(scm, published_branch, verbose, fake):
"""Removes a published branch from the remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(published_branch)
if not branch:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to unpublish')
branch_names = scm.get_branch_names(local=False)
if branch not in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is not published. Use a branch that is published."
.format(crayons.yellow(branch)))
status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command()
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--hard', is_flag=True, help='Discard local changes.')
@pass_scm
def undo(scm, verbose, fake, hard):
"""Removes the last commit from history."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
status_log(scm.undo, 'Last commit removed from history.', hard)
@cli.command()
@pass_scm
def branches(scm):
"""Displays a list of branches."""
scm.repo_check()
scm.display_available_branches()
def do_install(ctx, verbose, fake):
"""Installs legit git aliases."""
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo("\nAliases installed.")
else:
click.echo("\nAliases will not be installed.")
def do_uninstall(ctx, verbose, fake):
"""Uninstalls legit git aliases, including deprecated legit sub-commands."""
aliases = cli.list_commands(ctx)
# Add deprecated aliases
aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
for alias in aliases:
system_command = 'git config --global --unset-all alias.{0}'.format(alias)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo('\nThe following git aliases are uninstalled:\n')
output_aliases(aliases)
def do_edit_settings(fake):
"""Opens legit settings in editor."""
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path)
def handle_abort(aborted, type=None):
click.echo('{0} {1}'.format(crayons.red('Error:'), aborted.message))
click.echo(str(aborted.log))
if type == 'merge':
click.echo('Unfortunately, there was a merge conflict.'
' It has to be merged manually.')
elif type == 'unpublish':
click.echo(
'''It seems that the remote branch is deleted.
If `legit branches` still shows it as published,
then probably the branch has been deleted at the remote by someone else.
You can run `git fetch --prune` to update remote information.
''')
raise click.Abort
legit_settings.abort_handler = handle_abort
|
kennethreitz/legit | legit/cli.py | unpublish | python | def unpublish(scm, published_branch, verbose, fake):
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(published_branch)
if not branch:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to unpublish')
branch_names = scm.get_branch_names(local=False)
if branch not in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is not published. Use a branch that is published."
.format(crayons.yellow(branch)))
status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
crayons.yellow(branch)), branch) | Removes a published branch from the remote repository. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L215-L235 | [
"def status_log(func, message, *args, **kwargs):\n \"\"\"Emits header message, executes a callable, and echoes the return strings.\"\"\"\n\n click.echo(message)\n log = func(*args, **kwargs)\n\n if log:\n out = []\n\n for line in log.split('\\n'):\n if not line.startswith('#'):\... | # -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import click
from clint import resources
from clint.textui import columns
import crayons
from .core import __version__
from .scm import SCMRepo
from .settings import legit_settings
from .utils import black, format_help, order_manually, output_aliases, status_log, verbose_echo
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
pass_scm = click.make_pass_decorator(SCMRepo)
class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def list_commands(self, ctx):
"""Override for showing commands in particular order"""
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)]
def get_command(self, ctx, cmd_name):
"""Override to handle command aliases"""
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
cmd_name = self.command_aliases.get(cmd_name, "")
return click.Group.get_command(self, ctx, cmd_name)
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.obj:
# legit main help
click.echo(format_help(ctx.get_help()))
else:
# legit sub-command help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return click.Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.')
@click.group(cls=LegitGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name=black('legit', bold=True), version=__version__)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--install', is_flag=True, help='Install legit git aliases.')
@click.option('--uninstall', is_flag=True, help='Uninstall legit git aliases.')
@click.option('--config', is_flag=True, help='Edit legit configuration file.')
@click.pass_context
def cli(ctx, verbose, fake, install, uninstall, config):
"""legit command line interface"""
# Create a repo object and remember it as as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_scm decorator.
ctx.obj = SCMRepo()
ctx.obj.fake = fake
ctx.obj.verbose = fake or verbose
if install:
do_install(ctx, verbose, fake)
ctx.exit()
elif uninstall:
do_uninstall(ctx, verbose, fake)
ctx.exit()
elif config:
do_edit_settings(fake)
ctx.exit()
else:
if ctx.invoked_subcommand is None:
# Display help to user if no commands were passed.
click.echo(format_help(ctx.get_help()))
@cli.command(short_help='Switches to specified branch.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def switch(scm, to_branch, verbose, fake):
"""Switches from one branch to another, safely stashing and restoring local changes.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
if to_branch is None:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to switch to')
scm.stash_log()
status_log(scm.checkout_branch, 'Switching to {0}.'.format(
crayons.yellow(to_branch)), to_branch)
scm.unstash_log()
@cli.command(short_help='Synchronizes the given branch with remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@click.pass_context
def sync(ctx, scm, to_branch, verbose, fake):
"""Stashes unstaged changes, Fetches remote data, Performs smart
pull+merge, Pushes local commits up, and Unstashes changes.
Defaults to current branch.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
if to_branch:
# Optional branch specifier.
branch = scm.fuzzy_match_branch(to_branch)
if branch:
is_external = True
original_branch = scm.get_current_branch_name()
else:
raise click.BadArgumentUsage(
"Branch {0} does not exist. Use an existing branch."
.format(crayons.yellow(branch)))
else:
# Sync current branch.
branch = scm.get_current_branch_name()
is_external = False
if branch in scm.get_branch_names(local=False):
if is_external:
ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake)
scm.stash_log(sync=True)
status_log(scm.smart_pull, 'Pulling commits from the server.')
status_log(scm.push, 'Pushing commits to the server.', branch)
scm.unstash_log(sync=True)
if is_external:
ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake)
else:
raise click.BadArgumentUsage(
"Branch {0} is not published. Publish before syncing."
.format(crayons.yellow(branch)))
@cli.command(short_help='Publishes specified branch to the remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def publish(scm, to_branch, verbose, fake):
"""Pushes an unpublished branch to a remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(to_branch)
if not branch:
branch = scm.get_current_branch_name()
scm.display_available_branches()
if to_branch is None:
click.echo("Using current branch {0}".format(crayons.yellow(branch)))
else:
click.echo(
"Branch {0} not found, using current branch {1}"
.format(crayons.red(to_branch), crayons.yellow(branch)))
branch_names = scm.get_branch_names(local=False)
if branch in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is already published. Use a branch that is not published."
.format(crayons.yellow(branch)))
status_log(scm.publish_branch, 'Publishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command(short_help='Removes specified branch from the remote.')
@click.argument('published_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@cli.command()
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--hard', is_flag=True, help='Discard local changes.')
@pass_scm
def undo(scm, verbose, fake, hard):
"""Removes the last commit from history."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
status_log(scm.undo, 'Last commit removed from history.', hard)
@cli.command()
@pass_scm
def branches(scm):
"""Displays a list of branches."""
scm.repo_check()
scm.display_available_branches()
def do_install(ctx, verbose, fake):
"""Installs legit git aliases."""
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo("\nAliases installed.")
else:
click.echo("\nAliases will not be installed.")
def do_uninstall(ctx, verbose, fake):
"""Uninstalls legit git aliases, including deprecated legit sub-commands."""
aliases = cli.list_commands(ctx)
# Add deprecated aliases
aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
for alias in aliases:
system_command = 'git config --global --unset-all alias.{0}'.format(alias)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo('\nThe following git aliases are uninstalled:\n')
output_aliases(aliases)
def do_edit_settings(fake):
"""Opens legit settings in editor."""
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path)
def handle_abort(aborted, type=None):
click.echo('{0} {1}'.format(crayons.red('Error:'), aborted.message))
click.echo(str(aborted.log))
if type == 'merge':
click.echo('Unfortunately, there was a merge conflict.'
' It has to be merged manually.')
elif type == 'unpublish':
click.echo(
'''It seems that the remote branch is deleted.
If `legit branches` still shows it as published,
then probably the branch has been deleted at the remote by someone else.
You can run `git fetch --prune` to update remote information.
''')
raise click.Abort
legit_settings.abort_handler = handle_abort
|
kennethreitz/legit | legit/cli.py | undo | python | def undo(scm, verbose, fake, hard):
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
status_log(scm.undo, 'Last commit removed from history.', hard) | Removes the last commit from history. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L243-L250 | [
"def status_log(func, message, *args, **kwargs):\n \"\"\"Emits header message, executes a callable, and echoes the return strings.\"\"\"\n\n click.echo(message)\n log = func(*args, **kwargs)\n\n if log:\n out = []\n\n for line in log.split('\\n'):\n if not line.startswith('#'):\... | # -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import click
from clint import resources
from clint.textui import columns
import crayons
from .core import __version__
from .scm import SCMRepo
from .settings import legit_settings
from .utils import black, format_help, order_manually, output_aliases, status_log, verbose_echo
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
pass_scm = click.make_pass_decorator(SCMRepo)
class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def list_commands(self, ctx):
"""Override for showing commands in particular order"""
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)]
def get_command(self, ctx, cmd_name):
"""Override to handle command aliases"""
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
cmd_name = self.command_aliases.get(cmd_name, "")
return click.Group.get_command(self, ctx, cmd_name)
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.obj:
# legit main help
click.echo(format_help(ctx.get_help()))
else:
# legit sub-command help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return click.Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.')
@click.group(cls=LegitGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name=black('legit', bold=True), version=__version__)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--install', is_flag=True, help='Install legit git aliases.')
@click.option('--uninstall', is_flag=True, help='Uninstall legit git aliases.')
@click.option('--config', is_flag=True, help='Edit legit configuration file.')
@click.pass_context
def cli(ctx, verbose, fake, install, uninstall, config):
"""legit command line interface"""
# Create a repo object and remember it as as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_scm decorator.
ctx.obj = SCMRepo()
ctx.obj.fake = fake
ctx.obj.verbose = fake or verbose
if install:
do_install(ctx, verbose, fake)
ctx.exit()
elif uninstall:
do_uninstall(ctx, verbose, fake)
ctx.exit()
elif config:
do_edit_settings(fake)
ctx.exit()
else:
if ctx.invoked_subcommand is None:
# Display help to user if no commands were passed.
click.echo(format_help(ctx.get_help()))
@cli.command(short_help='Switches to specified branch.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def switch(scm, to_branch, verbose, fake):
"""Switches from one branch to another, safely stashing and restoring local changes.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
if to_branch is None:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to switch to')
scm.stash_log()
status_log(scm.checkout_branch, 'Switching to {0}.'.format(
crayons.yellow(to_branch)), to_branch)
scm.unstash_log()
@cli.command(short_help='Synchronizes the given branch with remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@click.pass_context
def sync(ctx, scm, to_branch, verbose, fake):
"""Stashes unstaged changes, Fetches remote data, Performs smart
pull+merge, Pushes local commits up, and Unstashes changes.
Defaults to current branch.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
if to_branch:
# Optional branch specifier.
branch = scm.fuzzy_match_branch(to_branch)
if branch:
is_external = True
original_branch = scm.get_current_branch_name()
else:
raise click.BadArgumentUsage(
"Branch {0} does not exist. Use an existing branch."
.format(crayons.yellow(branch)))
else:
# Sync current branch.
branch = scm.get_current_branch_name()
is_external = False
if branch in scm.get_branch_names(local=False):
if is_external:
ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake)
scm.stash_log(sync=True)
status_log(scm.smart_pull, 'Pulling commits from the server.')
status_log(scm.push, 'Pushing commits to the server.', branch)
scm.unstash_log(sync=True)
if is_external:
ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake)
else:
raise click.BadArgumentUsage(
"Branch {0} is not published. Publish before syncing."
.format(crayons.yellow(branch)))
@cli.command(short_help='Publishes specified branch to the remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def publish(scm, to_branch, verbose, fake):
"""Pushes an unpublished branch to a remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(to_branch)
if not branch:
branch = scm.get_current_branch_name()
scm.display_available_branches()
if to_branch is None:
click.echo("Using current branch {0}".format(crayons.yellow(branch)))
else:
click.echo(
"Branch {0} not found, using current branch {1}"
.format(crayons.red(to_branch), crayons.yellow(branch)))
branch_names = scm.get_branch_names(local=False)
if branch in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is already published. Use a branch that is not published."
.format(crayons.yellow(branch)))
status_log(scm.publish_branch, 'Publishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command(short_help='Removes specified branch from the remote.')
@click.argument('published_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def unpublish(scm, published_branch, verbose, fake):
"""Removes a published branch from the remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(published_branch)
if not branch:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to unpublish')
branch_names = scm.get_branch_names(local=False)
if branch not in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is not published. Use a branch that is published."
.format(crayons.yellow(branch)))
status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command()
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--hard', is_flag=True, help='Discard local changes.')
@pass_scm
@cli.command()
@pass_scm
def branches(scm):
"""Displays a list of branches."""
scm.repo_check()
scm.display_available_branches()
def do_install(ctx, verbose, fake):
"""Installs legit git aliases."""
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo("\nAliases installed.")
else:
click.echo("\nAliases will not be installed.")
def do_uninstall(ctx, verbose, fake):
"""Uninstalls legit git aliases, including deprecated legit sub-commands."""
aliases = cli.list_commands(ctx)
# Add deprecated aliases
aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
for alias in aliases:
system_command = 'git config --global --unset-all alias.{0}'.format(alias)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo('\nThe following git aliases are uninstalled:\n')
output_aliases(aliases)
def do_edit_settings(fake):
"""Opens legit settings in editor."""
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path)
def handle_abort(aborted, type=None):
click.echo('{0} {1}'.format(crayons.red('Error:'), aborted.message))
click.echo(str(aborted.log))
if type == 'merge':
click.echo('Unfortunately, there was a merge conflict.'
' It has to be merged manually.')
elif type == 'unpublish':
click.echo(
'''It seems that the remote branch is deleted.
If `legit branches` still shows it as published,
then probably the branch has been deleted at the remote by someone else.
You can run `git fetch --prune` to update remote information.
''')
raise click.Abort
legit_settings.abort_handler = handle_abort
|
kennethreitz/legit | legit/cli.py | do_install | python | def do_install(ctx, verbose, fake):
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo("\nAliases installed.")
else:
click.echo("\nAliases will not be installed.") | Installs legit git aliases. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L262-L278 | [
"def output_aliases(aliases):\n \"\"\"Display git aliases\"\"\"\n for alias in aliases:\n cmd = '!legit ' + alias\n click.echo(columns([colored.yellow('git ' + alias), 20], [cmd, None]))\n",
"def verbose_echo(str, verbose=False, fake=False):\n \"\"\"Selectively output ``str``, with special ... | # -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import click
from clint import resources
from clint.textui import columns
import crayons
from .core import __version__
from .scm import SCMRepo
from .settings import legit_settings
from .utils import black, format_help, order_manually, output_aliases, status_log, verbose_echo
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
pass_scm = click.make_pass_decorator(SCMRepo)
class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def list_commands(self, ctx):
"""Override for showing commands in particular order"""
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)]
def get_command(self, ctx, cmd_name):
"""Override to handle command aliases"""
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
cmd_name = self.command_aliases.get(cmd_name, "")
return click.Group.get_command(self, ctx, cmd_name)
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.obj:
# legit main help
click.echo(format_help(ctx.get_help()))
else:
# legit sub-command help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return click.Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.')
@click.group(cls=LegitGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name=black('legit', bold=True), version=__version__)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--install', is_flag=True, help='Install legit git aliases.')
@click.option('--uninstall', is_flag=True, help='Uninstall legit git aliases.')
@click.option('--config', is_flag=True, help='Edit legit configuration file.')
@click.pass_context
def cli(ctx, verbose, fake, install, uninstall, config):
"""legit command line interface"""
# Create a repo object and remember it as as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_scm decorator.
ctx.obj = SCMRepo()
ctx.obj.fake = fake
ctx.obj.verbose = fake or verbose
if install:
do_install(ctx, verbose, fake)
ctx.exit()
elif uninstall:
do_uninstall(ctx, verbose, fake)
ctx.exit()
elif config:
do_edit_settings(fake)
ctx.exit()
else:
if ctx.invoked_subcommand is None:
# Display help to user if no commands were passed.
click.echo(format_help(ctx.get_help()))
@cli.command(short_help='Switches to specified branch.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def switch(scm, to_branch, verbose, fake):
"""Switches from one branch to another, safely stashing and restoring local changes.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
if to_branch is None:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to switch to')
scm.stash_log()
status_log(scm.checkout_branch, 'Switching to {0}.'.format(
crayons.yellow(to_branch)), to_branch)
scm.unstash_log()
@cli.command(short_help='Synchronizes the given branch with remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@click.pass_context
def sync(ctx, scm, to_branch, verbose, fake):
"""Stashes unstaged changes, Fetches remote data, Performs smart
pull+merge, Pushes local commits up, and Unstashes changes.
Defaults to current branch.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
if to_branch:
# Optional branch specifier.
branch = scm.fuzzy_match_branch(to_branch)
if branch:
is_external = True
original_branch = scm.get_current_branch_name()
else:
raise click.BadArgumentUsage(
"Branch {0} does not exist. Use an existing branch."
.format(crayons.yellow(branch)))
else:
# Sync current branch.
branch = scm.get_current_branch_name()
is_external = False
if branch in scm.get_branch_names(local=False):
if is_external:
ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake)
scm.stash_log(sync=True)
status_log(scm.smart_pull, 'Pulling commits from the server.')
status_log(scm.push, 'Pushing commits to the server.', branch)
scm.unstash_log(sync=True)
if is_external:
ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake)
else:
raise click.BadArgumentUsage(
"Branch {0} is not published. Publish before syncing."
.format(crayons.yellow(branch)))
@cli.command(short_help='Publishes specified branch to the remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def publish(scm, to_branch, verbose, fake):
"""Pushes an unpublished branch to a remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(to_branch)
if not branch:
branch = scm.get_current_branch_name()
scm.display_available_branches()
if to_branch is None:
click.echo("Using current branch {0}".format(crayons.yellow(branch)))
else:
click.echo(
"Branch {0} not found, using current branch {1}"
.format(crayons.red(to_branch), crayons.yellow(branch)))
branch_names = scm.get_branch_names(local=False)
if branch in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is already published. Use a branch that is not published."
.format(crayons.yellow(branch)))
status_log(scm.publish_branch, 'Publishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command(short_help='Removes specified branch from the remote.')
@click.argument('published_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def unpublish(scm, published_branch, verbose, fake):
"""Removes a published branch from the remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(published_branch)
if not branch:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to unpublish')
branch_names = scm.get_branch_names(local=False)
if branch not in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is not published. Use a branch that is published."
.format(crayons.yellow(branch)))
status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command()
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--hard', is_flag=True, help='Discard local changes.')
@pass_scm
def undo(scm, verbose, fake, hard):
"""Removes the last commit from history."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
status_log(scm.undo, 'Last commit removed from history.', hard)
@cli.command()
@pass_scm
def branches(scm):
"""Displays a list of branches."""
scm.repo_check()
scm.display_available_branches()
def do_uninstall(ctx, verbose, fake):
"""Uninstalls legit git aliases, including deprecated legit sub-commands."""
aliases = cli.list_commands(ctx)
# Add deprecated aliases
aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
for alias in aliases:
system_command = 'git config --global --unset-all alias.{0}'.format(alias)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo('\nThe following git aliases are uninstalled:\n')
output_aliases(aliases)
def do_edit_settings(fake):
"""Opens legit settings in editor."""
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path)
def handle_abort(aborted, type=None):
click.echo('{0} {1}'.format(crayons.red('Error:'), aborted.message))
click.echo(str(aborted.log))
if type == 'merge':
click.echo('Unfortunately, there was a merge conflict.'
' It has to be merged manually.')
elif type == 'unpublish':
click.echo(
'''It seems that the remote branch is deleted.
If `legit branches` still shows it as published,
then probably the branch has been deleted at the remote by someone else.
You can run `git fetch --prune` to update remote information.
''')
raise click.Abort
legit_settings.abort_handler = handle_abort
|
kennethreitz/legit | legit/cli.py | do_uninstall | python | def do_uninstall(ctx, verbose, fake):
aliases = cli.list_commands(ctx)
# Add deprecated aliases
aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
for alias in aliases:
system_command = 'git config --global --unset-all alias.{0}'.format(alias)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo('\nThe following git aliases are uninstalled:\n')
output_aliases(aliases) | Uninstalls legit git aliases, including deprecated legit sub-commands. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L281-L293 | [
"def output_aliases(aliases):\n \"\"\"Display git aliases\"\"\"\n for alias in aliases:\n cmd = '!legit ' + alias\n click.echo(columns([colored.yellow('git ' + alias), 20], [cmd, None]))\n",
"def verbose_echo(str, verbose=False, fake=False):\n \"\"\"Selectively output ``str``, with special ... | # -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import click
from clint import resources
from clint.textui import columns
import crayons
from .core import __version__
from .scm import SCMRepo
from .settings import legit_settings
from .utils import black, format_help, order_manually, output_aliases, status_log, verbose_echo
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
pass_scm = click.make_pass_decorator(SCMRepo)
class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def list_commands(self, ctx):
"""Override for showing commands in particular order"""
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)]
def get_command(self, ctx, cmd_name):
"""Override to handle command aliases"""
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
cmd_name = self.command_aliases.get(cmd_name, "")
return click.Group.get_command(self, ctx, cmd_name)
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.obj:
# legit main help
click.echo(format_help(ctx.get_help()))
else:
# legit sub-command help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return click.Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.')
@click.group(cls=LegitGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name=black('legit', bold=True), version=__version__)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--install', is_flag=True, help='Install legit git aliases.')
@click.option('--uninstall', is_flag=True, help='Uninstall legit git aliases.')
@click.option('--config', is_flag=True, help='Edit legit configuration file.')
@click.pass_context
def cli(ctx, verbose, fake, install, uninstall, config):
"""legit command line interface"""
# Create a repo object and remember it as as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_scm decorator.
ctx.obj = SCMRepo()
ctx.obj.fake = fake
ctx.obj.verbose = fake or verbose
if install:
do_install(ctx, verbose, fake)
ctx.exit()
elif uninstall:
do_uninstall(ctx, verbose, fake)
ctx.exit()
elif config:
do_edit_settings(fake)
ctx.exit()
else:
if ctx.invoked_subcommand is None:
# Display help to user if no commands were passed.
click.echo(format_help(ctx.get_help()))
@cli.command(short_help='Switches to specified branch.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def switch(scm, to_branch, verbose, fake):
"""Switches from one branch to another, safely stashing and restoring local changes.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
if to_branch is None:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to switch to')
scm.stash_log()
status_log(scm.checkout_branch, 'Switching to {0}.'.format(
crayons.yellow(to_branch)), to_branch)
scm.unstash_log()
@cli.command(short_help='Synchronizes the given branch with remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@click.pass_context
def sync(ctx, scm, to_branch, verbose, fake):
"""Stashes unstaged changes, Fetches remote data, Performs smart
pull+merge, Pushes local commits up, and Unstashes changes.
Defaults to current branch.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
if to_branch:
# Optional branch specifier.
branch = scm.fuzzy_match_branch(to_branch)
if branch:
is_external = True
original_branch = scm.get_current_branch_name()
else:
raise click.BadArgumentUsage(
"Branch {0} does not exist. Use an existing branch."
.format(crayons.yellow(branch)))
else:
# Sync current branch.
branch = scm.get_current_branch_name()
is_external = False
if branch in scm.get_branch_names(local=False):
if is_external:
ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake)
scm.stash_log(sync=True)
status_log(scm.smart_pull, 'Pulling commits from the server.')
status_log(scm.push, 'Pushing commits to the server.', branch)
scm.unstash_log(sync=True)
if is_external:
ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake)
else:
raise click.BadArgumentUsage(
"Branch {0} is not published. Publish before syncing."
.format(crayons.yellow(branch)))
@cli.command(short_help='Publishes specified branch to the remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def publish(scm, to_branch, verbose, fake):
"""Pushes an unpublished branch to a remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(to_branch)
if not branch:
branch = scm.get_current_branch_name()
scm.display_available_branches()
if to_branch is None:
click.echo("Using current branch {0}".format(crayons.yellow(branch)))
else:
click.echo(
"Branch {0} not found, using current branch {1}"
.format(crayons.red(to_branch), crayons.yellow(branch)))
branch_names = scm.get_branch_names(local=False)
if branch in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is already published. Use a branch that is not published."
.format(crayons.yellow(branch)))
status_log(scm.publish_branch, 'Publishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command(short_help='Removes specified branch from the remote.')
@click.argument('published_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def unpublish(scm, published_branch, verbose, fake):
"""Removes a published branch from the remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(published_branch)
if not branch:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to unpublish')
branch_names = scm.get_branch_names(local=False)
if branch not in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is not published. Use a branch that is published."
.format(crayons.yellow(branch)))
status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command()
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--hard', is_flag=True, help='Discard local changes.')
@pass_scm
def undo(scm, verbose, fake, hard):
"""Removes the last commit from history."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
status_log(scm.undo, 'Last commit removed from history.', hard)
@cli.command()
@pass_scm
def branches(scm):
"""Displays a list of branches."""
scm.repo_check()
scm.display_available_branches()
def do_install(ctx, verbose, fake):
"""Installs legit git aliases."""
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo("\nAliases installed.")
else:
click.echo("\nAliases will not be installed.")
def do_edit_settings(fake):
"""Opens legit settings in editor."""
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path)
def handle_abort(aborted, type=None):
click.echo('{0} {1}'.format(crayons.red('Error:'), aborted.message))
click.echo(str(aborted.log))
if type == 'merge':
click.echo('Unfortunately, there was a merge conflict.'
' It has to be merged manually.')
elif type == 'unpublish':
click.echo(
'''It seems that the remote branch is deleted.
If `legit branches` still shows it as published,
then probably the branch has been deleted at the remote by someone else.
You can run `git fetch --prune` to update remote information.
''')
raise click.Abort
legit_settings.abort_handler = handle_abort
|
kennethreitz/legit | legit/cli.py | do_edit_settings | python | def do_edit_settings(fake):
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path) | Opens legit settings in editor. | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L296-L310 | null | # -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import click
from clint import resources
from clint.textui import columns
import crayons
from .core import __version__
from .scm import SCMRepo
from .settings import legit_settings
from .utils import black, format_help, order_manually, output_aliases, status_log, verbose_echo
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
pass_scm = click.make_pass_decorator(SCMRepo)
class LegitGroup(click.Group):
    """Custom Group class with specially sorted command list"""

    # Short aliases accepted on the command line for the full sub-command
    # names; resolved in get_command when the literal name doesn't match.
    command_aliases = {
        'pub': 'publish',
        'sw': 'switch',
        'sy': 'sync',
        'unp': 'unpublish',
        'un': 'undo',
    }

    def list_commands(self, ctx):
        """Override for showing commands in particular order"""
        commands = super(LegitGroup, self).list_commands(ctx)
        # order_manually imposes the display order used in help output.
        return [cmd for cmd in order_manually(commands)]

    def get_command(self, ctx, cmd_name):
        """Override to handle command aliases"""
        rv = click.Group.get_command(self, ctx, cmd_name)
        if rv is not None:
            return rv
        # Unknown names map to "" which Click resolves to None.
        cmd_name = self.command_aliases.get(cmd_name, "")
        return click.Group.get_command(self, ctx, cmd_name)

    def get_help_option(self, ctx):
        """Override for showing formatted main help via --help and -h options"""
        help_options = self.get_help_option_names(ctx)
        if not help_options or not self.add_help_option:
            return

        def show_help(ctx, param, value):
            # Eager flag callback; skipped during resilient (completion) parsing.
            if value and not ctx.resilient_parsing:
                if not ctx.obj:
                    # legit main help
                    click.echo(format_help(ctx.get_help()))
                else:
                    # legit sub-command help
                    click.echo(ctx.get_help(), color=ctx.color)
                ctx.exit()

        return click.Option(
            help_options,
            is_flag=True,
            is_eager=True,
            expose_value=False,
            callback=show_help,
            help='Show this message and exit.')
@click.group(cls=LegitGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name=black('legit', bold=True), version=__version__)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--install', is_flag=True, help='Install legit git aliases.')
@click.option('--uninstall', is_flag=True, help='Uninstall legit git aliases.')
@click.option('--config', is_flag=True, help='Edit legit configuration file.')
@click.pass_context
def cli(ctx, verbose, fake, install, uninstall, config):
    """legit command line interface"""
    # Create a repo object and remember it as the context object. From
    # this point onwards other commands can refer to it by using the
    # @pass_scm decorator.
    ctx.obj = SCMRepo()
    ctx.obj.fake = fake
    # --fake implies --verbose so that the skipped git commands are shown.
    ctx.obj.verbose = fake or verbose

    # The three maintenance flags are mutually exclusive shortcuts that
    # bypass sub-command dispatch entirely.
    if install:
        do_install(ctx, verbose, fake)
        ctx.exit()
    elif uninstall:
        do_uninstall(ctx, verbose, fake)
        ctx.exit()
    elif config:
        do_edit_settings(fake)
        ctx.exit()
    else:
        if ctx.invoked_subcommand is None:
            # Display help to user if no commands were passed.
            click.echo(format_help(ctx.get_help()))
@cli.command(short_help='Switches to specified branch.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def switch(scm, to_branch, verbose, fake):
    """Switches from one branch to another, safely stashing and restoring local changes.
    """
    scm.fake = fake
    scm.verbose = fake or verbose
    scm.repo_check()

    if to_branch is None:
        scm.display_available_branches()
        raise click.BadArgumentUsage('Please specify a branch to switch to')

    # Stash local changes, check out the target branch, then restore them.
    scm.stash_log()
    status_log(scm.checkout_branch, 'Switching to {0}.'.format(
        crayons.yellow(to_branch)), to_branch)
    scm.unstash_log()
@cli.command(short_help='Synchronizes the given branch with remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@click.pass_context
def sync(ctx, scm, to_branch, verbose, fake):
    """Stashes unstaged changes, Fetches remote data, Performs smart
    pull+merge, Pushes local commits up, and Unstashes changes.

    Defaults to current branch.
    """
    scm.fake = fake
    scm.verbose = fake or verbose
    scm.repo_check(require_remote=True)

    if to_branch:
        # Optional branch specifier.
        branch = scm.fuzzy_match_branch(to_branch)
        if branch:
            is_external = True
            original_branch = scm.get_current_branch_name()
        else:
            # BUG FIX: report the name the user actually typed. `branch` is
            # the falsy fuzzy-match result here, so the original message
            # rendered as "Branch None does not exist".
            raise click.BadArgumentUsage(
                "Branch {0} does not exist. Use an existing branch."
                .format(crayons.yellow(to_branch)))
    else:
        # Sync current branch.
        branch = scm.get_current_branch_name()
        is_external = False

    if branch in scm.get_branch_names(local=False):
        if is_external:
            # Temporarily move onto the branch being synced.
            ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake)

        scm.stash_log(sync=True)
        status_log(scm.smart_pull, 'Pulling commits from the server.')
        status_log(scm.push, 'Pushing commits to the server.', branch)
        scm.unstash_log(sync=True)

        if is_external:
            # Return to the branch the user started on.
            ctx.invoke(switch, to_branch=original_branch, verbose=verbose,
                       fake=fake)
    else:
        raise click.BadArgumentUsage(
            "Branch {0} is not published. Publish before syncing."
            .format(crayons.yellow(branch)))
@cli.command(short_help='Publishes specified branch to the remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def publish(scm, to_branch, verbose, fake):
    """Pushes an unpublished branch to a remote repository."""
    scm.fake = fake
    scm.verbose = fake or verbose
    scm.repo_check(require_remote=True)

    branch = scm.fuzzy_match_branch(to_branch)
    if not branch:
        # No branch given, or no match found: fall back to the current one.
        branch = scm.get_current_branch_name()
        scm.display_available_branches()
        if to_branch is None:
            message = "Using current branch {0}".format(crayons.yellow(branch))
        else:
            message = (
                "Branch {0} not found, using current branch {1}"
                .format(crayons.red(to_branch), crayons.yellow(branch)))
        click.echo(message)

    # Refuse to publish a branch the remote already has.
    if branch in scm.get_branch_names(local=False):
        raise click.BadArgumentUsage(
            "Branch {0} is already published. Use a branch that is not published."
            .format(crayons.yellow(branch)))

    status_log(scm.publish_branch, 'Publishing {0}.'.format(
        crayons.yellow(branch)), branch)
@cli.command(short_help='Removes specified branch from the remote.')
@click.argument('published_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def unpublish(scm, published_branch, verbose, fake):
    """Removes a published branch from the remote repository."""
    scm.fake = fake
    scm.verbose = fake or verbose
    scm.repo_check(require_remote=True)

    branch = scm.fuzzy_match_branch(published_branch)
    if not branch:
        scm.display_available_branches()
        raise click.BadArgumentUsage('Please specify a branch to unpublish')

    # Can't unpublish a branch the remote doesn't have.
    branch_names = scm.get_branch_names(local=False)
    if branch not in branch_names:
        raise click.BadArgumentUsage(
            "Branch {0} is not published. Use a branch that is published."
            .format(crayons.yellow(branch)))

    status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
        crayons.yellow(branch)), branch)
@cli.command()
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--hard', is_flag=True, help='Discard local changes.')
@pass_scm
def undo(scm, verbose, fake, hard):
    """Removes the last commit from history."""
    scm.fake = fake
    scm.verbose = fake or verbose
    scm.repo_check()

    # hard=True additionally discards the working-tree changes.
    status_log(scm.undo, 'Last commit removed from history.', hard)
@cli.command()
@pass_scm
def branches(scm):
    """Displays a list of branches."""
    scm.repo_check()
    scm.display_available_branches()
def do_install(ctx, verbose, fake):
    """Installs legit git aliases."""
    click.echo('The following git aliases will be installed:\n')
    aliases = cli.list_commands(ctx)
    output_aliases(aliases)

    # Confirmation defaults to yes while faking, so a dry run walks the
    # whole flow without prompting the user to opt in.
    if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
        for alias in aliases:
            cmd = '!legit ' + alias
            system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
            verbose_echo(system_command, verbose, fake)
            if not fake:
                os.system(system_command)
        if not fake:
            click.echo("\nAliases installed.")
    else:
        click.echo("\nAliases will not be installed.")
def do_uninstall(ctx, verbose, fake):
    """Uninstalls legit git aliases, including deprecated legit sub-commands."""
    aliases = cli.list_commands(ctx)
    # Add deprecated aliases
    aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
    for alias in aliases:
        system_command = 'git config --global --unset-all alias.{0}'.format(alias)
        verbose_echo(system_command, verbose, fake)
        if not fake:
            os.system(system_command)
    if not fake:
        click.echo('\nThe following git aliases are uninstalled:\n')
        output_aliases(aliases)
def handle_abort(aborted, type=None):
    """Report an aborted git operation to the user and exit via click.Abort."""
    click.echo('{0} {1}'.format(crayons.red('Error:'), aborted.message))
    click.echo(str(aborted.log))

    # Extra guidance for the failure modes we recognise; other types only
    # get the generic error/log output above.
    hints = {
        'merge': ('Unfortunately, there was a merge conflict.'
                  ' It has to be merged manually.'),
        'unpublish': '''It seems that the remote branch is deleted.
If `legit branches` still shows it as published,
then probably the branch has been deleted at the remote by someone else.
You can run `git fetch --prune` to update remote information.
''',
    }
    if type in hints:
        click.echo(hints[type])

    raise click.Abort
legit_settings.abort_handler = handle_abort
|
kennethreitz/legit | legit/cli.py | LegitGroup.list_commands | python | def list_commands(self, ctx):
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)] | Override for showing commands in particular order | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L37-L40 | [
"def order_manually(sub_commands):\n \"\"\"Order sub-commands for display\"\"\"\n order = [\n \"switch\",\n \"sync\",\n \"publish\",\n \"unpublish\",\n \"undo\",\n \"branches\",\n ]\n ordered = []\n commands = dict(zip([cmd for cmd in sub_commands], sub_comma... | class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def get_command(self, ctx, cmd_name):
    """Override to handle command aliases"""
    rv = click.Group.get_command(self, ctx, cmd_name)
    if rv is not None:
        return rv
    # Fall back to the alias table; unknown names map to "" which Click
    # resolves to None.
    cmd_name = self.command_aliases.get(cmd_name, "")
    return click.Group.get_command(self, ctx, cmd_name)
def get_help_option(self, ctx):
    """Override for showing formatted main help via --help and -h options"""
    help_options = self.get_help_option_names(ctx)
    if not help_options or not self.add_help_option:
        return

    def show_help(ctx, param, value):
        # Eager flag callback; skipped during resilient (completion) parsing.
        if value and not ctx.resilient_parsing:
            if not ctx.obj:
                # legit main help
                click.echo(format_help(ctx.get_help()))
            else:
                # legit sub-command help
                click.echo(ctx.get_help(), color=ctx.color)
            ctx.exit()

    return click.Option(
        help_options,
        is_flag=True,
        is_eager=True,
        expose_value=False,
        callback=show_help,
        help='Show this message and exit.')
|
kennethreitz/legit | legit/cli.py | LegitGroup.get_command | python | def get_command(self, ctx, cmd_name):
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
cmd_name = self.command_aliases.get(cmd_name, "")
return click.Group.get_command(self, ctx, cmd_name) | Override to handle command aliases | train | https://github.com/kennethreitz/legit/blob/699802c5be665bd358456a940953b5c1d8672754/legit/cli.py#L42-L48 | null | class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def list_commands(self, ctx):
"""Override for showing commands in particular order"""
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)]
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.obj:
# legit main help
click.echo(format_help(ctx.get_help()))
else:
# legit sub-command help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return click.Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.')
|
github/octodns | octodns/manager.py | Manager.compare | python | def compare(self, a, b, zone):
'''
Compare zone data between 2 sources.
Note: only things supported by both sources will be considered
'''
self.log.info('compare: a=%s, b=%s, zone=%s', a, b, zone)
try:
a = [self.providers[source] for source in a]
b = [self.providers[source] for source in b]
except KeyError as e:
raise Exception('Unknown source: {}'.format(e.args[0]))
sub_zones = self.configured_sub_zones(zone)
za = Zone(zone, sub_zones)
for source in a:
source.populate(za)
zb = Zone(zone, sub_zones)
for source in b:
source.populate(zb)
return zb.changes(za, _AggregateTarget(a + b)) | Compare zone data between 2 sources.
Note: only things supported by both sources will be considered | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/manager.py#L335-L358 | [
"def configured_sub_zones(self, zone_name):\n # Reversed pieces of the zone name\n pieces = zone_name[:-1].split('.')[::-1]\n # Point where at the root of the tree\n where = self.zone_tree\n # Until we've hit the bottom of this zone\n try:\n while pieces:\n # Point where at the v... | class Manager(object):
log = logging.getLogger('Manager')
@classmethod
def _plan_keyer(cls, p):
    # Sort key for a (target, plan) pair: length of the zone name of the
    # plan's first change, or 0 for an empty plan. Longer zone names are
    # deeper (child) zones, so sorting descending puts children first.
    plan = p[1]
    return len(plan.changes[0].record.zone.name) if plan.changes else 0
def __init__(self, config_file, max_workers=None, include_meta=False):
    """Load config_file and build the providers, zone tree and plan outputs."""
    self.log.info('__init__: config_file=%s', config_file)

    # Read our config file
    with open(config_file, 'r') as fh:
        self.config = safe_load(fh, enforce_order=False)

    manager_config = self.config.get('manager', {})
    # Explicit max_workers argument wins over the config file value.
    max_workers = manager_config.get('max_workers', 1) \
        if max_workers is None else max_workers
    self.log.info('__init__: max_workers=%d', max_workers)
    if max_workers > 1:
        self._executor = ThreadPoolExecutor(max_workers=max_workers)
    else:
        # Single worker: run submitted work inline on the main thread.
        self._executor = MainThreadExecutor()

    self.include_meta = include_meta or manager_config.get('include_meta',
                                                           False)
    # NOTE(review): this log label says max_workers but reports
    # include_meta -- looks like a copy/paste slip in the label.
    self.log.info('__init__: max_workers=%s', self.include_meta)

    self.log.debug('__init__: configuring providers')
    self.providers = {}
    for provider_name, provider_config in self.config['providers'].items():
        # Get our class and remove it from the provider_config
        try:
            _class = provider_config.pop('class')
        except KeyError:
            self.log.exception('Invalid provider class')
            raise Exception('Provider {} is missing class'
                            .format(provider_name))
        _class = self._get_named_class('provider', _class)
        kwargs = self._build_kwargs(provider_config)
        try:
            self.providers[provider_name] = _class(provider_name, **kwargs)
        except TypeError:
            self.log.exception('Invalid provider config')
            raise Exception('Incorrect provider config for {}'
                            .format(provider_name))

    # Build a nested dict of reversed zone-name labels so sub-zone
    # relationships can be looked up later (see configured_sub_zones).
    zone_tree = {}
    # sort by reversed strings so that parent zones always come first
    for name in sorted(self.config['zones'].keys(), key=lambda s: s[::-1]):
        # ignore trailing dots, and reverse
        pieces = name[:-1].split('.')[::-1]
        # where starts out at the top
        where = zone_tree
        # for all the pieces
        for piece in pieces:
            try:
                where = where[piece]
                # our current piece already exists, just point where at
                # it's value
            except KeyError:
                # our current piece doesn't exist, create it
                where[piece] = {}
                # and then point where at it's newly created value
                where = where[piece]
    self.zone_tree = zone_tree

    self.plan_outputs = {}
    # Default to logging plans when no plan_outputs are configured.
    plan_outputs = manager_config.get('plan_outputs', {
        'logger': {
            'class': 'octodns.provider.plan.PlanLogger',
            'level': 'info'
        }
    })
    for plan_output_name, plan_output_config in plan_outputs.items():
        try:
            _class = plan_output_config.pop('class')
        except KeyError:
            self.log.exception('Invalid plan_output class')
            raise Exception('plan_output {} is missing class'
                            .format(plan_output_name))
        _class = self._get_named_class('plan_output', _class)
        kwargs = self._build_kwargs(plan_output_config)
        try:
            self.plan_outputs[plan_output_name] = \
                _class(plan_output_name, **kwargs)
        except TypeError:
            self.log.exception('Invalid plan_output config')
            raise Exception('Incorrect plan_output config for {}'
                            .format(plan_output_name))
def _get_named_class(self, _type, _class):
    """Resolve a dotted 'module.ClassName' string to the class object.

    _type is only used in error messages (e.g. 'provider', 'plan_output').
    Raises Exception when the module can't be imported or lacks the class.
    """
    try:
        module_name, class_name = _class.rsplit('.', 1)
        module = import_module(module_name)
    except (ImportError, ValueError):
        # ValueError covers a bare name with no '.' to split on.
        self.log.exception('_get_{}_class: Unable to import '
                           'module %s', _class)
        raise Exception('Unknown {} class: {}'.format(_type, _class))
    try:
        return getattr(module, class_name)
    except AttributeError:
        self.log.exception('_get_{}_class: Unable to get class %s '
                           'from module %s', class_name, module)
        raise Exception('Unknown {} class: {}'.format(_type, _class))
def _build_kwargs(self, source):
    """Copy a config dict, expanding 'env/NAME' string values from os.environ."""
    # Build up the arguments we need to pass to the provider
    kwargs = {}
    for k, v in source.items():
        try:
            if v.startswith('env/'):
                try:
                    env_var = v[4:]
                    v = environ[env_var]
                except KeyError:
                    self.log.exception('Invalid provider config')
                    raise Exception('Incorrect provider config, '
                                    'missing env var {}'
                                    .format(env_var))
        except AttributeError:
            # Non-string values have no startswith; pass them through as-is.
            pass
        kwargs[k] = v

    return kwargs
def configured_sub_zones(self, zone_name):
# Reversed pieces of the zone name
pieces = zone_name[:-1].split('.')[::-1]
# Point where at the root of the tree
where = self.zone_tree
# Until we've hit the bottom of this zone
try:
while pieces:
# Point where at the value of our current piece
where = where[pieces.pop(0)]
except KeyError:
self.log.debug('configured_sub_zones: unknown zone, %s, no subs',
zone_name)
return set()
# We're not pointed at the dict for our name, the keys of which will be
# any subzones
sub_zone_names = where.keys()
self.log.debug('configured_sub_zones: subs=%s', sub_zone_names)
return set(sub_zone_names)
def _populate_and_plan(self, zone_name, sources, targets):
    """Populate zone_name from sources, then plan it against each target.

    Returns a list of (target, plan) pairs; targets with empty plans are
    omitted.
    """
    self.log.debug('sync: populating, zone=%s', zone_name)

    zone = Zone(zone_name,
                sub_zones=self.configured_sub_zones(zone_name))
    for source in sources:
        source.populate(zone)

    self.log.debug('sync: planning, zone=%s', zone_name)

    plans = []
    for target in targets:
        if self.include_meta:
            # Record which provider wrote the zone in an octodns-meta TXT.
            meta = Record.new(zone, 'octodns-meta', {
                'type': 'TXT',
                'ttl': 60,
                'value': 'provider={}'.format(target.id)
            })
            zone.add_record(meta, replace=True)
        plan = target.plan(zone)
        if plan:
            plans.append((target, plan))

    return plans
def sync(self, eligible_zones=[], eligible_targets=[], dry_run=True,
         force=False):
    """Plan all configured zones and, unless dry_run, apply the changes.

    Returns the total number of changes applied (0 for a dry run).
    NOTE(review): mutable default arguments; harmless here because they
    are only iterated, never mutated.
    """
    self.log.info('sync: eligible_zones=%s, eligible_targets=%s, '
                  'dry_run=%s, force=%s', eligible_zones, eligible_targets,
                  dry_run, force)

    zones = self.config['zones'].items()

    if eligible_zones:
        zones = filter(lambda d: d[0] in eligible_zones, zones)

    futures = []
    for zone_name, config in zones:
        self.log.info('sync: zone=%s', zone_name)

        try:
            sources = config['sources']
        except KeyError:
            raise Exception('Zone {} is missing sources'.format(zone_name))

        try:
            targets = config['targets']
        except KeyError:
            raise Exception('Zone {} is missing targets'.format(zone_name))

        if eligible_targets:
            targets = filter(lambda d: d in eligible_targets, targets)

        if not targets:
            # Don't bother planning (and more importantly populating) zones
            # when we don't have any eligible targets, waste of
            # time/resources
            self.log.info('sync: no eligible targets, skipping')
            continue

        self.log.info('sync: sources=%s -> targets=%s', sources, targets)

        try:
            # Resolve source names to provider objects.
            sources = [self.providers[source] for source in sources]
        except KeyError:
            raise Exception('Zone {}, unknown source: {}'.format(zone_name,
                                                                 source))

        try:
            # Resolve target names, requiring each to support targeting.
            trgs = []
            for target in targets:
                trg = self.providers[target]
                if not isinstance(trg, BaseProvider):
                    raise Exception('{} - "{}" does not support targeting'
                                    .format(trg, target))
                trgs.append(trg)
            targets = trgs
        except KeyError:
            raise Exception('Zone {}, unknown target: {}'.format(zone_name,
                                                                 target))

        # Fan planning out to the executor (threads or inline).
        futures.append(self._executor.submit(self._populate_and_plan,
                                             zone_name, sources, targets))

    # Wait on all results and unpack/flatten them in to a list of target &
    # plan pairs.
    plans = [p for f in futures for p in f.result()]

    # Best effort sort plans children first so that we create/update
    # children zones before parents which should allow us to more safely
    # extract things into sub-zones. Combining a child back into a parent
    # can't really be done all that safely in general so we'll optimize for
    # this direction.
    plans.sort(key=self._plan_keyer, reverse=True)

    for output in self.plan_outputs.values():
        output.run(plans=plans, log=self.log)

    if not force:
        self.log.debug('sync: checking safety')
        for target, plan in plans:
            plan.raise_if_unsafe()

    if dry_run:
        return 0

    total_changes = 0
    self.log.debug('sync: applying')
    zones = self.config['zones']
    for target, plan in plans:
        zone_name = plan.existing.name
        # Zones flagged always-dry-run in config are planned but never
        # applied.
        if zones[zone_name].get('always-dry-run', False):
            self.log.info('sync: zone=%s skipping always-dry-run',
                          zone_name)
            continue
        total_changes += target.apply(plan)

    self.log.info('sync: %d total changes', total_changes)
    return total_changes
def dump(self, zone, output_dir, lenient, split, source, *sources):
    '''
    Dump zone data from the specified source
    '''
    self.log.info('dump: zone=%s, sources=%s', zone, sources)

    # We broke out source to force at least one to be passed, add it to any
    # others we got.
    sources = [source] + list(sources)

    try:
        sources = [self.providers[s] for s in sources]
    except KeyError as e:
        raise Exception('Unknown source: {}'.format(e.args[0]))

    # split controls one-file-per-record vs a single YAML file.
    clz = YamlProvider
    if split:
        clz = SplitYamlProvider
    target = clz('dump', output_dir)

    zone = Zone(zone, self.configured_sub_zones(zone))
    for source in sources:
        source.populate(zone, lenient=lenient)

    plan = target.plan(zone)
    if plan is None:
        # plan can be None when nothing differs; substitute an empty Plan
        # so apply() can be called unconditionally.
        plan = Plan(zone, zone, [], False)
    target.apply(plan)
def validate_configs(self):
    """Populate every configured zone from its file-backed sources.

    Only YamlProvider sources are exercised; zones with missing or unknown
    sources raise Exception.
    """
    for zone_name, config in self.config['zones'].items():
        zone = Zone(zone_name, self.configured_sub_zones(zone_name))

        try:
            sources = config['sources']
        except KeyError:
            raise Exception('Zone {} is missing sources'.format(zone_name))

        try:
            sources = [self.providers[source] for source in sources]
        except KeyError:
            raise Exception('Zone {}, unknown source: {}'.format(zone_name,
                                                                 source))

        for source in sources:
            # Only local YAML sources are validated; remote providers are
            # skipped here.
            if isinstance(source, YamlProvider):
                source.populate(zone)
|
github/octodns | octodns/manager.py | Manager.dump | python | def dump(self, zone, output_dir, lenient, split, source, *sources):
'''
Dump zone data from the specified source
'''
self.log.info('dump: zone=%s, sources=%s', zone, sources)
# We broke out source to force at least one to be passed, add it to any
# others we got.
sources = [source] + list(sources)
try:
sources = [self.providers[s] for s in sources]
except KeyError as e:
raise Exception('Unknown source: {}'.format(e.args[0]))
clz = YamlProvider
if split:
clz = SplitYamlProvider
target = clz('dump', output_dir)
zone = Zone(zone, self.configured_sub_zones(zone))
for source in sources:
source.populate(zone, lenient=lenient)
plan = target.plan(zone)
if plan is None:
plan = Plan(zone, zone, [], False)
target.apply(plan) | Dump zone data from the specified source | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/manager.py#L360-L387 | [
"def plan(self, desired):\n self.log.info('plan: desired=%s', desired.name)\n\n existing = Zone(desired.name, desired.sub_zones)\n exists = self.populate(existing, target=True, lenient=True)\n if exists is None:\n # If your code gets this warning see Source.populate for more\n # informatio... | class Manager(object):
log = logging.getLogger('Manager')
@classmethod
def _plan_keyer(cls, p):
plan = p[1]
return len(plan.changes[0].record.zone.name) if plan.changes else 0
def __init__(self, config_file, max_workers=None, include_meta=False):
self.log.info('__init__: config_file=%s', config_file)
# Read our config file
with open(config_file, 'r') as fh:
self.config = safe_load(fh, enforce_order=False)
manager_config = self.config.get('manager', {})
max_workers = manager_config.get('max_workers', 1) \
if max_workers is None else max_workers
self.log.info('__init__: max_workers=%d', max_workers)
if max_workers > 1:
self._executor = ThreadPoolExecutor(max_workers=max_workers)
else:
self._executor = MainThreadExecutor()
self.include_meta = include_meta or manager_config.get('include_meta',
False)
self.log.info('__init__: max_workers=%s', self.include_meta)
self.log.debug('__init__: configuring providers')
self.providers = {}
for provider_name, provider_config in self.config['providers'].items():
# Get our class and remove it from the provider_config
try:
_class = provider_config.pop('class')
except KeyError:
self.log.exception('Invalid provider class')
raise Exception('Provider {} is missing class'
.format(provider_name))
_class = self._get_named_class('provider', _class)
kwargs = self._build_kwargs(provider_config)
try:
self.providers[provider_name] = _class(provider_name, **kwargs)
except TypeError:
self.log.exception('Invalid provider config')
raise Exception('Incorrect provider config for {}'
.format(provider_name))
zone_tree = {}
# sort by reversed strings so that parent zones always come first
for name in sorted(self.config['zones'].keys(), key=lambda s: s[::-1]):
# ignore trailing dots, and reverse
pieces = name[:-1].split('.')[::-1]
# where starts out at the top
where = zone_tree
# for all the pieces
for piece in pieces:
try:
where = where[piece]
# our current piece already exists, just point where at
# it's value
except KeyError:
# our current piece doesn't exist, create it
where[piece] = {}
# and then point where at it's newly created value
where = where[piece]
self.zone_tree = zone_tree
self.plan_outputs = {}
plan_outputs = manager_config.get('plan_outputs', {
'logger': {
'class': 'octodns.provider.plan.PlanLogger',
'level': 'info'
}
})
for plan_output_name, plan_output_config in plan_outputs.items():
try:
_class = plan_output_config.pop('class')
except KeyError:
self.log.exception('Invalid plan_output class')
raise Exception('plan_output {} is missing class'
.format(plan_output_name))
_class = self._get_named_class('plan_output', _class)
kwargs = self._build_kwargs(plan_output_config)
try:
self.plan_outputs[plan_output_name] = \
_class(plan_output_name, **kwargs)
except TypeError:
self.log.exception('Invalid plan_output config')
raise Exception('Incorrect plan_output config for {}'
.format(plan_output_name))
def _get_named_class(self, _type, _class):
try:
module_name, class_name = _class.rsplit('.', 1)
module = import_module(module_name)
except (ImportError, ValueError):
self.log.exception('_get_{}_class: Unable to import '
'module %s', _class)
raise Exception('Unknown {} class: {}'.format(_type, _class))
try:
return getattr(module, class_name)
except AttributeError:
self.log.exception('_get_{}_class: Unable to get class %s '
'from module %s', class_name, module)
raise Exception('Unknown {} class: {}'.format(_type, _class))
def _build_kwargs(self, source):
# Build up the arguments we need to pass to the provider
kwargs = {}
for k, v in source.items():
try:
if v.startswith('env/'):
try:
env_var = v[4:]
v = environ[env_var]
except KeyError:
self.log.exception('Invalid provider config')
raise Exception('Incorrect provider config, '
'missing env var {}'
.format(env_var))
except AttributeError:
pass
kwargs[k] = v
return kwargs
def configured_sub_zones(self, zone_name):
# Reversed pieces of the zone name
pieces = zone_name[:-1].split('.')[::-1]
# Point where at the root of the tree
where = self.zone_tree
# Until we've hit the bottom of this zone
try:
while pieces:
# Point where at the value of our current piece
where = where[pieces.pop(0)]
except KeyError:
self.log.debug('configured_sub_zones: unknown zone, %s, no subs',
zone_name)
return set()
# We're not pointed at the dict for our name, the keys of which will be
# any subzones
sub_zone_names = where.keys()
self.log.debug('configured_sub_zones: subs=%s', sub_zone_names)
return set(sub_zone_names)
def _populate_and_plan(self, zone_name, sources, targets):
self.log.debug('sync: populating, zone=%s', zone_name)
zone = Zone(zone_name,
sub_zones=self.configured_sub_zones(zone_name))
for source in sources:
source.populate(zone)
self.log.debug('sync: planning, zone=%s', zone_name)
plans = []
for target in targets:
if self.include_meta:
meta = Record.new(zone, 'octodns-meta', {
'type': 'TXT',
'ttl': 60,
'value': 'provider={}'.format(target.id)
})
zone.add_record(meta, replace=True)
plan = target.plan(zone)
if plan:
plans.append((target, plan))
return plans
def sync(self, eligible_zones=[], eligible_targets=[], dry_run=True,
force=False):
self.log.info('sync: eligible_zones=%s, eligible_targets=%s, '
'dry_run=%s, force=%s', eligible_zones, eligible_targets,
dry_run, force)
zones = self.config['zones'].items()
if eligible_zones:
zones = filter(lambda d: d[0] in eligible_zones, zones)
futures = []
for zone_name, config in zones:
self.log.info('sync: zone=%s', zone_name)
try:
sources = config['sources']
except KeyError:
raise Exception('Zone {} is missing sources'.format(zone_name))
try:
targets = config['targets']
except KeyError:
raise Exception('Zone {} is missing targets'.format(zone_name))
if eligible_targets:
targets = filter(lambda d: d in eligible_targets, targets)
if not targets:
# Don't bother planning (and more importantly populating) zones
# when we don't have any eligible targets, waste of
# time/resources
self.log.info('sync: no eligible targets, skipping')
continue
self.log.info('sync: sources=%s -> targets=%s', sources, targets)
try:
sources = [self.providers[source] for source in sources]
except KeyError:
raise Exception('Zone {}, unknown source: {}'.format(zone_name,
source))
try:
trgs = []
for target in targets:
trg = self.providers[target]
if not isinstance(trg, BaseProvider):
raise Exception('{} - "{}" does not support targeting'
.format(trg, target))
trgs.append(trg)
targets = trgs
except KeyError:
raise Exception('Zone {}, unknown target: {}'.format(zone_name,
target))
futures.append(self._executor.submit(self._populate_and_plan,
zone_name, sources, targets))
# Wait on all results and unpack/flatten them in to a list of target &
# plan pairs.
plans = [p for f in futures for p in f.result()]
# Best effort sort plans children first so that we create/update
# children zones before parents which should allow us to more safely
# extract things into sub-zones. Combining a child back into a parent
# can't really be done all that safely in general so we'll optimize for
# this direction.
plans.sort(key=self._plan_keyer, reverse=True)
for output in self.plan_outputs.values():
output.run(plans=plans, log=self.log)
if not force:
self.log.debug('sync: checking safety')
for target, plan in plans:
plan.raise_if_unsafe()
if dry_run:
return 0
total_changes = 0
self.log.debug('sync: applying')
zones = self.config['zones']
for target, plan in plans:
zone_name = plan.existing.name
if zones[zone_name].get('always-dry-run', False):
self.log.info('sync: zone=%s skipping always-dry-run',
zone_name)
continue
total_changes += target.apply(plan)
self.log.info('sync: %d total changes', total_changes)
return total_changes
def compare(self, a, b, zone):
    '''
    Compare zone data between 2 sources.

    Note: only things supported by both sources will be considered
    '''
    self.log.info('compare: a=%s, b=%s, zone=%s', a, b, zone)

    try:
        # a and b are lists of provider names; resolve them to objects.
        a = [self.providers[source] for source in a]
        b = [self.providers[source] for source in b]
    except KeyError as e:
        raise Exception('Unknown source: {}'.format(e.args[0]))

    sub_zones = self.configured_sub_zones(zone)
    za = Zone(zone, sub_zones)
    for source in a:
        source.populate(za)

    zb = Zone(zone, sub_zones)
    for source in b:
        source.populate(zb)

    # _AggregateTarget limits the diff to record types every provider in
    # a + b supports.
    return zb.changes(za, _AggregateTarget(a + b))
def validate_configs(self):
for zone_name, config in self.config['zones'].items():
zone = Zone(zone_name, self.configured_sub_zones(zone_name))
try:
sources = config['sources']
except KeyError:
raise Exception('Zone {} is missing sources'.format(zone_name))
try:
sources = [self.providers[source] for source in sources]
except KeyError:
raise Exception('Zone {}, unknown source: {}'.format(zone_name,
source))
for source in sources:
if isinstance(source, YamlProvider):
source.populate(zone)
|
github/octodns | octodns/provider/dyn.py | _CachingDynZone.flush_zone | python | def flush_zone(cls, zone_name):
'''Flushes the zone cache, if there is one'''
cls.log.debug('flush_zone: zone_name=%s', zone_name)
try:
del cls._cache[zone_name]
except KeyError:
pass | Flushes the zone cache, if there is one | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/dyn.py#L156-L162 | null | class _CachingDynZone(DynZone):
log = getLogger('_CachingDynZone')
_cache = {}
@classmethod
def get(cls, zone_name, create=False):
    """Fetch (and memoize) the Dyn zone for zone_name.

    Returns None when the zone doesn't exist and create is False;
    otherwise creates the zone on demand.
    """
    cls.log.debug('get: zone_name=%s, create=%s', zone_name, create)

    # This works in dyn zone names, without the trailing .

    try:
        dyn_zone = cls._cache[zone_name]
        cls.log.debug('get: cache hit')
    except KeyError:
        cls.log.debug('get: cache miss')
        try:
            dyn_zone = _CachingDynZone(zone_name)
            cls.log.debug('get: fetched')
        except DynectGetError:
            # Zone doesn't exist at Dyn.
            if not create:
                cls.log.debug("get: doesn't exist")
                return None
            # this value shouldn't really matter, it's not tied to
            # whois or anything
            hostname = 'hostmaster@{}'.format(zone_name[:-1])
            # Try again with the params necessary to create
            dyn_zone = _CachingDynZone(zone_name, ttl=3600,
                                       contact=hostname,
                                       serial_style='increment')
            cls.log.debug('get: created')
        cls._cache[zone_name] = dyn_zone

    return dyn_zone
@classmethod
def __init__(self, zone_name, *args, **kwargs):
super(_CachingDynZone, self).__init__(zone_name, *args, **kwargs)
self.flush_cache()
def flush_cache(self):
self._cached_records = None
def get_all_records(self):
if self._cached_records is None:
self._cached_records = \
super(_CachingDynZone, self).get_all_records()
return self._cached_records
def publish(self):
super(_CachingDynZone, self).publish()
self.flush_cache()
|
github/octodns | octodns/provider/azuredns.py | _AzureRecord._equals | python | def _equals(self, b):
'''Checks whether two records are equal by comparing all fields.
:param b: Another _AzureRecord object
:type b: _AzureRecord
:type return: bool
'''
def parse_dict(params):
vals = []
for char in params:
if char != 'ttl':
list_records = params[char]
try:
for record in list_records:
vals.append(record.__dict__)
except:
vals.append(list_records.__dict__)
vals.sort()
return vals
return (self.resource_group == b.resource_group) & \
(self.zone_name == b.zone_name) & \
(self.record_type == b.record_type) & \
(self.params['ttl'] == b.params['ttl']) & \
(parse_dict(self.params) == parse_dict(b.params)) & \
(self.relative_record_set_name == b.relative_record_set_name) | Checks whether two records are equal by comparing all fields.
:param b: Another _AzureRecord object
:type b: _AzureRecord
:type return: bool | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/azuredns.py#L171-L196 | null | class _AzureRecord(object):
'''Wrapper for OctoDNS record for AzureProvider to make dns_client calls.
azuredns.py:
class: octodns.provider.azuredns._AzureRecord
An _AzureRecord is easily accessible to Azure DNS Management library
functions and is used to wrap all relevant data to create a record in
Azure.
'''
TYPE_MAP = {
'A': ARecord,
'AAAA': AaaaRecord,
'CAA': CaaRecord,
'CNAME': CnameRecord,
'MX': MxRecord,
'SRV': SrvRecord,
'NS': NsRecord,
'PTR': PtrRecord,
'TXT': TxtRecord
}
def __init__(self, resource_group, record, delete=False):
'''Constructor for _AzureRecord.
Notes on Azure records: An Azure record set has the form
RecordSet(name=<...>, type=<...>, arecords=[...], aaaa_records, ..)
When constructing an azure record as done in self._apply_Create,
the argument parameters for an A record would be
parameters={'ttl': <int>, 'arecords': [ARecord(<str ip>),]}.
As another example for CNAME record:
parameters={'ttl': <int>, 'cname_record': CnameRecord(<str>)}.
Below, key_name and class_name are the dictionary key and Azure
Record class respectively.
:param resource_group: The name of resource group in Azure
:type resource_group: str
:param record: An OctoDNS record
:type record: ..record.Record
:param delete: If true, omit data parsing; not needed to delete
:type delete: bool
:type return: _AzureRecord
'''
self.resource_group = resource_group
self.zone_name = record.zone.name[:len(record.zone.name) - 1]
self.relative_record_set_name = record.name or '@'
self.record_type = record._type
if delete:
return
# Refer to function docstring for key_name and class_name.
format_u_s = '' if record._type == 'A' else '_'
key_name = '{}{}records'.format(self.record_type, format_u_s).lower()
if record._type == 'CNAME':
key_name = key_name[:len(key_name) - 1]
azure_class = self.TYPE_MAP[self.record_type]
self.params = getattr(self, '_params_for_{}'.format(record._type))
self.params = self.params(record.data, key_name, azure_class)
self.params['ttl'] = record.ttl
def _params_for_A(self, data, key_name, azure_class):
try:
values = data['values']
except KeyError:
values = [data['value']]
return {key_name: [azure_class(ipv4_address=v) for v in values]}
def _params_for_AAAA(self, data, key_name, azure_class):
try:
values = data['values']
except KeyError:
values = [data['value']]
return {key_name: [azure_class(ipv6_address=v) for v in values]}
def _params_for_CAA(self, data, key_name, azure_class):
params = []
if 'values' in data:
for vals in data['values']:
params.append(azure_class(flags=vals['flags'],
tag=vals['tag'],
value=vals['value']))
else: # Else there is a singular data point keyed by 'value'.
params.append(azure_class(flags=data['value']['flags'],
tag=data['value']['tag'],
value=data['value']['value']))
return {key_name: params}
def _params_for_CNAME(self, data, key_name, azure_class):
return {key_name: azure_class(cname=data['value'])}
def _params_for_MX(self, data, key_name, azure_class):
params = []
if 'values' in data:
for vals in data['values']:
params.append(azure_class(preference=vals['preference'],
exchange=vals['exchange']))
else: # Else there is a singular data point keyed by 'value'.
params.append(azure_class(preference=data['value']['preference'],
exchange=data['value']['exchange']))
return {key_name: params}
def _params_for_SRV(self, data, key_name, azure_class):
params = []
if 'values' in data:
for vals in data['values']:
params.append(azure_class(priority=vals['priority'],
weight=vals['weight'],
port=vals['port'],
target=vals['target']))
else: # Else there is a singular data point keyed by 'value'.
params.append(azure_class(priority=data['value']['priority'],
weight=data['value']['weight'],
port=data['value']['port'],
target=data['value']['target']))
return {key_name: params}
def _params_for_NS(self, data, key_name, azure_class):
try:
values = data['values']
except KeyError:
values = [data['value']]
return {key_name: [azure_class(nsdname=v) for v in values]}
def _params_for_PTR(self, data, key_name, azure_class):
try:
values = data['values']
except KeyError:
values = [data['value']]
return {key_name: [azure_class(ptrdname=v) for v in values]}
def _params_for_TXT(self, data, key_name, azure_class):
try: # API for TxtRecord has list of str, even for singleton
values = [unescape_semicolon(v) for v in data['values']]
except KeyError:
values = [unescape_semicolon(data['value'])]
return {key_name: [azure_class(value=[v]) for v in values]}
def __str__(self):
'''String representation of an _AzureRecord.
:type return: str
'''
string = 'Zone: {}; '.format(self.zone_name)
string += 'Name: {}; '.format(self.relative_record_set_name)
string += 'Type: {}; '.format(self.record_type)
if not hasattr(self, 'params'):
return string
string += 'Ttl: {}; '.format(self.params['ttl'])
for char in self.params:
if char != 'ttl':
try:
for rec in self.params[char]:
string += 'Record: {}; '.format(rec.__dict__)
except:
string += 'Record: {}; '.format(self.params[char].__dict__)
return string
|
github/octodns | octodns/provider/azuredns.py | AzureProvider._check_zone | python | def _check_zone(self, name, create=False):
'''Checks whether a zone specified in a source exist in Azure server.
Note that Azure zones omit end '.' eg: contoso.com vs contoso.com.
Returns the name if it exists.
:param name: Name of a zone to checks
:type name: str
:param create: If True, creates the zone of that name.
:type create: bool
:type return: str or None
'''
self.log.debug('_check_zone: name=%s', name)
try:
if name in self._azure_zones:
return name
self._dns_client.zones.get(self._resource_group, name)
self._azure_zones.add(name)
return name
except CloudError as err:
msg = 'The Resource \'Microsoft.Network/dnszones/{}\''.format(name)
msg += ' under resource group \'{}\''.format(self._resource_group)
msg += ' was not found.'
if msg == err.message:
# Then the only error is that the zone doesn't currently exist
if create:
self.log.debug('_check_zone:no matching zone; creating %s',
name)
create_zone = self._dns_client.zones.create_or_update
create_zone(self._resource_group, name,
Zone(location='global'))
return name
else:
return
raise | Checks whether a zone specified in a source exist in Azure server.
Note that Azure zones omit end '.' eg: contoso.com vs contoso.com.
Returns the name if it exists.
:param name: Name of a zone to checks
:type name: str
:param create: If True, creates the zone of that name.
:type create: bool
:type return: str or None | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/azuredns.py#L306-L341 | null | class AzureProvider(BaseProvider):
'''
Azure DNS Provider
azuredns.py:
class: octodns.provider.azuredns.AzureProvider
# Current support of authentication of access to Azure services only
# includes using a Service Principal:
# https://docs.microsoft.com/en-us/azure/azure-resource-manager/
# resource-group-create-service-principal-portal
# The Azure Active Directory Application ID (aka client ID):
client_id:
# Authentication Key Value: (note this should be secret)
key:
# Directory ID (aka tenant ID):
directory_id:
# Subscription ID:
sub_id:
# Resource Group name:
resource_group:
# All are required to authenticate.
Example config file with variables:
"
---
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: ./config (example path to directory of zone files)
azuredns:
class: octodns.provider.azuredns.AzureProvider
client_id: env/AZURE_APPLICATION_ID
key: env/AZURE_AUTHENTICATION_KEY
directory_id: env/AZURE_DIRECTORY_ID
sub_id: env/AZURE_SUBSCRIPTION_ID
resource_group: 'TestResource1'
zones:
example.com.:
sources:
- config
targets:
- azuredns
"
The first four variables above can be hidden in environment variables
and octoDNS will automatically search for them in the shell. It is
possible to also hard-code into the config file: eg, resource_group.
'''
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SRV',
'TXT'))
def __init__(self, id, client_id, key, directory_id, sub_id,
resource_group, *args, **kwargs):
self.log = logging.getLogger('AzureProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, client_id=%s, '
'key=***, directory_id:%s', id, client_id, directory_id)
super(AzureProvider, self).__init__(id, *args, **kwargs)
credentials = ServicePrincipalCredentials(
client_id, secret=key, tenant=directory_id
)
self._dns_client = DnsManagementClient(credentials, sub_id)
self._resource_group = resource_group
self._azure_zones = set()
def _populate_zones(self):
self.log.debug('azure_zones: loading')
list_zones = self._dns_client.zones.list_by_resource_group
for zone in list_zones(self._resource_group):
self._azure_zones.add(zone.name)
def populate(self, zone, target=False, lenient=False):
'''Required function of manager.py to collect records from zone.
Special notes for Azure.
Azure zone names omit final '.'
Azure root records names are represented by '@'. OctoDNS uses ''
Azure records created through online interface may have null values
(eg, no IP address for A record).
Azure online interface allows constructing records with null values
which are destroyed by _apply.
Specific quirks such as these are responsible for any non-obvious
parsing in this function and the functions '_params_for_*'.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Checks if Azure is source or target of config.
Currently only supports as a target. Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void
'''
self.log.debug('populate: name=%s', zone.name)
exists = False
before = len(zone.records)
zone_name = zone.name[:len(zone.name) - 1]
self._populate_zones()
self._check_zone(zone_name)
_records = set()
records = self._dns_client.record_sets.list_by_dns_zone
if self._check_zone(zone_name):
exists = True
for azrecord in records(self._resource_group, zone_name):
if _parse_azure_type(azrecord.type) in self.SUPPORTS:
_records.add(azrecord)
for azrecord in _records:
record_name = azrecord.name if azrecord.name != '@' else ''
typ = _parse_azure_type(azrecord.type)
data = getattr(self, '_data_for_{}'.format(typ))
data = data(azrecord)
data['type'] = typ
data['ttl'] = azrecord.ttl
record = Record.new(zone, record_name, data, source=self)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _data_for_A(self, azrecord):
return {'values': [ar.ipv4_address for ar in azrecord.arecords]}
def _data_for_AAAA(self, azrecord):
return {'values': [ar.ipv6_address for ar in azrecord.aaaa_records]}
def _data_for_CAA(self, azrecord):
return {'values': [{'flags': ar.flags,
'tag': ar.tag,
'value': ar.value}
for ar in azrecord.caa_records]}
def _data_for_CNAME(self, azrecord):
'''Parsing data from Azure DNS Client record call
:param azrecord: a return of a call to list azure records
:type azrecord: azure.mgmt.dns.models.RecordSet
:type return: dict
CNAME and PTR both use the catch block to catch possible empty
records. Refer to population comment.
'''
try:
return {'value': _check_endswith_dot(azrecord.cname_record.cname)}
except:
return {'value': '.'}
def _data_for_MX(self, azrecord):
return {'values': [{'preference': ar.preference,
'exchange': ar.exchange}
for ar in azrecord.mx_records]}
def _data_for_NS(self, azrecord):
vals = [ar.nsdname for ar in azrecord.ns_records]
return {'values': [_check_endswith_dot(val) for val in vals]}
def _data_for_PTR(self, azrecord):
try:
ptrdname = azrecord.ptr_records[0].ptrdname
return {'value': _check_endswith_dot(ptrdname)}
except:
return {'value': '.'}
def _data_for_SRV(self, azrecord):
return {'values': [{'priority': ar.priority, 'weight': ar.weight,
'port': ar.port, 'target': ar.target}
for ar in azrecord.srv_records]}
def _data_for_TXT(self, azrecord):
return {'values': [escape_semicolon(reduce((lambda a, b: a + b),
ar.value))
for ar in azrecord.txt_records]}
def _apply_Create(self, change):
'''A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void
'''
ar = _AzureRecord(self._resource_group, change.new)
create = self._dns_client.record_sets.create_or_update
create(resource_group_name=ar.resource_group,
zone_name=ar.zone_name,
relative_record_set_name=ar.relative_record_set_name,
record_type=ar.record_type,
parameters=ar.params)
self.log.debug('* Success Create/Update: {}'.format(ar))
_apply_Update = _apply_Create
def _apply_Delete(self, change):
ar = _AzureRecord(self._resource_group, change.existing, delete=True)
delete = self._dns_client.record_sets.delete
delete(self._resource_group, ar.zone_name, ar.relative_record_set_name,
ar.record_type)
self.log.debug('* Success Delete: {}'.format(ar))
def _apply(self, plan):
'''Required function of manager.py to actually apply a record change.
:param plan: Contains the zones and changes to be made
:type plan: octodns.provider.base.Plan
:type return: void
'''
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
azure_zone_name = desired.name[:len(desired.name) - 1]
self._check_zone(azure_zone_name, create=True)
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name))(change)
|
github/octodns | octodns/provider/azuredns.py | AzureProvider.populate | python | def populate(self, zone, target=False, lenient=False):
'''Required function of manager.py to collect records from zone.
Special notes for Azure.
Azure zone names omit final '.'
Azure root records names are represented by '@'. OctoDNS uses ''
Azure records created through online interface may have null values
(eg, no IP address for A record).
Azure online interface allows constructing records with null values
which are destroyed by _apply.
Specific quirks such as these are responsible for any non-obvious
parsing in this function and the functions '_params_for_*'.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Checks if Azure is source or target of config.
Currently only supports as a target. Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void
'''
self.log.debug('populate: name=%s', zone.name)
exists = False
before = len(zone.records)
zone_name = zone.name[:len(zone.name) - 1]
self._populate_zones()
self._check_zone(zone_name)
_records = set()
records = self._dns_client.record_sets.list_by_dns_zone
if self._check_zone(zone_name):
exists = True
for azrecord in records(self._resource_group, zone_name):
if _parse_azure_type(azrecord.type) in self.SUPPORTS:
_records.add(azrecord)
for azrecord in _records:
record_name = azrecord.name if azrecord.name != '@' else ''
typ = _parse_azure_type(azrecord.type)
data = getattr(self, '_data_for_{}'.format(typ))
data = data(azrecord)
data['type'] = typ
data['ttl'] = azrecord.ttl
record = Record.new(zone, record_name, data, source=self)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists | Required function of manager.py to collect records from zone.
Special notes for Azure.
Azure zone names omit final '.'
Azure root records names are represented by '@'. OctoDNS uses ''
Azure records created through online interface may have null values
(eg, no IP address for A record).
Azure online interface allows constructing records with null values
which are destroyed by _apply.
Specific quirks such as these are responsible for any non-obvious
parsing in this function and the functions '_params_for_*'.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Checks if Azure is source or target of config.
Currently only supports as a target. Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/azuredns.py#L343-L395 | [
"def _parse_azure_type(string):\n '''Converts string representing an Azure RecordSet type to usual type.\n\n :param string: the Azure type. eg: <Microsoft.Network/dnszones/A>\n :type string: str\n\n :type return: str\n '''\n return string.split('/')[len(string.split('/')) - 1]\n",
"... | class AzureProvider(BaseProvider):
'''
Azure DNS Provider
azuredns.py:
class: octodns.provider.azuredns.AzureProvider
# Current support of authentication of access to Azure services only
# includes using a Service Principal:
# https://docs.microsoft.com/en-us/azure/azure-resource-manager/
# resource-group-create-service-principal-portal
# The Azure Active Directory Application ID (aka client ID):
client_id:
# Authentication Key Value: (note this should be secret)
key:
# Directory ID (aka tenant ID):
directory_id:
# Subscription ID:
sub_id:
# Resource Group name:
resource_group:
# All are required to authenticate.
Example config file with variables:
"
---
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: ./config (example path to directory of zone files)
azuredns:
class: octodns.provider.azuredns.AzureProvider
client_id: env/AZURE_APPLICATION_ID
key: env/AZURE_AUTHENTICATION_KEY
directory_id: env/AZURE_DIRECTORY_ID
sub_id: env/AZURE_SUBSCRIPTION_ID
resource_group: 'TestResource1'
zones:
example.com.:
sources:
- config
targets:
- azuredns
"
The first four variables above can be hidden in environment variables
and octoDNS will automatically search for them in the shell. It is
possible to also hard-code into the config file: eg, resource_group.
'''
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SRV',
'TXT'))
def __init__(self, id, client_id, key, directory_id, sub_id,
resource_group, *args, **kwargs):
self.log = logging.getLogger('AzureProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, client_id=%s, '
'key=***, directory_id:%s', id, client_id, directory_id)
super(AzureProvider, self).__init__(id, *args, **kwargs)
credentials = ServicePrincipalCredentials(
client_id, secret=key, tenant=directory_id
)
self._dns_client = DnsManagementClient(credentials, sub_id)
self._resource_group = resource_group
self._azure_zones = set()
def _populate_zones(self):
self.log.debug('azure_zones: loading')
list_zones = self._dns_client.zones.list_by_resource_group
for zone in list_zones(self._resource_group):
self._azure_zones.add(zone.name)
def _check_zone(self, name, create=False):
'''Checks whether a zone specified in a source exist in Azure server.
Note that Azure zones omit end '.' eg: contoso.com vs contoso.com.
Returns the name if it exists.
:param name: Name of a zone to checks
:type name: str
:param create: If True, creates the zone of that name.
:type create: bool
:type return: str or None
'''
self.log.debug('_check_zone: name=%s', name)
try:
if name in self._azure_zones:
return name
self._dns_client.zones.get(self._resource_group, name)
self._azure_zones.add(name)
return name
except CloudError as err:
msg = 'The Resource \'Microsoft.Network/dnszones/{}\''.format(name)
msg += ' under resource group \'{}\''.format(self._resource_group)
msg += ' was not found.'
if msg == err.message:
# Then the only error is that the zone doesn't currently exist
if create:
self.log.debug('_check_zone:no matching zone; creating %s',
name)
create_zone = self._dns_client.zones.create_or_update
create_zone(self._resource_group, name,
Zone(location='global'))
return name
else:
return
raise
def _data_for_A(self, azrecord):
return {'values': [ar.ipv4_address for ar in azrecord.arecords]}
def _data_for_AAAA(self, azrecord):
return {'values': [ar.ipv6_address for ar in azrecord.aaaa_records]}
def _data_for_CAA(self, azrecord):
return {'values': [{'flags': ar.flags,
'tag': ar.tag,
'value': ar.value}
for ar in azrecord.caa_records]}
def _data_for_CNAME(self, azrecord):
'''Parsing data from Azure DNS Client record call
:param azrecord: a return of a call to list azure records
:type azrecord: azure.mgmt.dns.models.RecordSet
:type return: dict
CNAME and PTR both use the catch block to catch possible empty
records. Refer to population comment.
'''
try:
return {'value': _check_endswith_dot(azrecord.cname_record.cname)}
except:
return {'value': '.'}
def _data_for_MX(self, azrecord):
return {'values': [{'preference': ar.preference,
'exchange': ar.exchange}
for ar in azrecord.mx_records]}
def _data_for_NS(self, azrecord):
vals = [ar.nsdname for ar in azrecord.ns_records]
return {'values': [_check_endswith_dot(val) for val in vals]}
def _data_for_PTR(self, azrecord):
try:
ptrdname = azrecord.ptr_records[0].ptrdname
return {'value': _check_endswith_dot(ptrdname)}
except:
return {'value': '.'}
def _data_for_SRV(self, azrecord):
return {'values': [{'priority': ar.priority, 'weight': ar.weight,
'port': ar.port, 'target': ar.target}
for ar in azrecord.srv_records]}
def _data_for_TXT(self, azrecord):
return {'values': [escape_semicolon(reduce((lambda a, b: a + b),
ar.value))
for ar in azrecord.txt_records]}
def _apply_Create(self, change):
'''A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void
'''
ar = _AzureRecord(self._resource_group, change.new)
create = self._dns_client.record_sets.create_or_update
create(resource_group_name=ar.resource_group,
zone_name=ar.zone_name,
relative_record_set_name=ar.relative_record_set_name,
record_type=ar.record_type,
parameters=ar.params)
self.log.debug('* Success Create/Update: {}'.format(ar))
_apply_Update = _apply_Create
def _apply_Delete(self, change):
ar = _AzureRecord(self._resource_group, change.existing, delete=True)
delete = self._dns_client.record_sets.delete
delete(self._resource_group, ar.zone_name, ar.relative_record_set_name,
ar.record_type)
self.log.debug('* Success Delete: {}'.format(ar))
def _apply(self, plan):
'''Required function of manager.py to actually apply a record change.
:param plan: Contains the zones and changes to be made
:type plan: octodns.provider.base.Plan
:type return: void
'''
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
azure_zone_name = desired.name[:len(desired.name) - 1]
self._check_zone(azure_zone_name, create=True)
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name))(change)
|
github/octodns | octodns/provider/azuredns.py | AzureProvider._apply_Create | python | def _apply_Create(self, change):
'''A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void
'''
ar = _AzureRecord(self._resource_group, change.new)
create = self._dns_client.record_sets.create_or_update
create(resource_group_name=ar.resource_group,
zone_name=ar.zone_name,
relative_record_set_name=ar.relative_record_set_name,
record_type=ar.record_type,
parameters=ar.params)
self.log.debug('* Success Create/Update: {}'.format(ar)) | A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/azuredns.py#L450-L467 | null | class AzureProvider(BaseProvider):
'''
Azure DNS Provider
azuredns.py:
class: octodns.provider.azuredns.AzureProvider
# Current support of authentication of access to Azure services only
# includes using a Service Principal:
# https://docs.microsoft.com/en-us/azure/azure-resource-manager/
# resource-group-create-service-principal-portal
# The Azure Active Directory Application ID (aka client ID):
client_id:
# Authentication Key Value: (note this should be secret)
key:
# Directory ID (aka tenant ID):
directory_id:
# Subscription ID:
sub_id:
# Resource Group name:
resource_group:
# All are required to authenticate.
Example config file with variables:
"
---
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: ./config (example path to directory of zone files)
azuredns:
class: octodns.provider.azuredns.AzureProvider
client_id: env/AZURE_APPLICATION_ID
key: env/AZURE_AUTHENTICATION_KEY
directory_id: env/AZURE_DIRECTORY_ID
sub_id: env/AZURE_SUBSCRIPTION_ID
resource_group: 'TestResource1'
zones:
example.com.:
sources:
- config
targets:
- azuredns
"
The first four variables above can be hidden in environment variables
and octoDNS will automatically search for them in the shell. It is
possible to also hard-code into the config file: eg, resource_group.
'''
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SRV',
'TXT'))
def __init__(self, id, client_id, key, directory_id, sub_id,
resource_group, *args, **kwargs):
self.log = logging.getLogger('AzureProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, client_id=%s, '
'key=***, directory_id:%s', id, client_id, directory_id)
super(AzureProvider, self).__init__(id, *args, **kwargs)
credentials = ServicePrincipalCredentials(
client_id, secret=key, tenant=directory_id
)
self._dns_client = DnsManagementClient(credentials, sub_id)
self._resource_group = resource_group
self._azure_zones = set()
def _populate_zones(self):
self.log.debug('azure_zones: loading')
list_zones = self._dns_client.zones.list_by_resource_group
for zone in list_zones(self._resource_group):
self._azure_zones.add(zone.name)
def _check_zone(self, name, create=False):
'''Checks whether a zone specified in a source exist in Azure server.
Note that Azure zones omit end '.' eg: contoso.com vs contoso.com.
Returns the name if it exists.
:param name: Name of a zone to checks
:type name: str
:param create: If True, creates the zone of that name.
:type create: bool
:type return: str or None
'''
self.log.debug('_check_zone: name=%s', name)
try:
if name in self._azure_zones:
return name
self._dns_client.zones.get(self._resource_group, name)
self._azure_zones.add(name)
return name
except CloudError as err:
msg = 'The Resource \'Microsoft.Network/dnszones/{}\''.format(name)
msg += ' under resource group \'{}\''.format(self._resource_group)
msg += ' was not found.'
if msg == err.message:
# Then the only error is that the zone doesn't currently exist
if create:
self.log.debug('_check_zone:no matching zone; creating %s',
name)
create_zone = self._dns_client.zones.create_or_update
create_zone(self._resource_group, name,
Zone(location='global'))
return name
else:
return
raise
def populate(self, zone, target=False, lenient=False):
'''Required function of manager.py to collect records from zone.
Special notes for Azure.
Azure zone names omit final '.'
Azure root records names are represented by '@'. OctoDNS uses ''
Azure records created through online interface may have null values
(eg, no IP address for A record).
Azure online interface allows constructing records with null values
which are destroyed by _apply.
Specific quirks such as these are responsible for any non-obvious
parsing in this function and the functions '_params_for_*'.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Checks if Azure is source or target of config.
Currently only supports as a target. Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void
'''
self.log.debug('populate: name=%s', zone.name)
exists = False
before = len(zone.records)
zone_name = zone.name[:len(zone.name) - 1]
self._populate_zones()
self._check_zone(zone_name)
_records = set()
records = self._dns_client.record_sets.list_by_dns_zone
if self._check_zone(zone_name):
exists = True
for azrecord in records(self._resource_group, zone_name):
if _parse_azure_type(azrecord.type) in self.SUPPORTS:
_records.add(azrecord)
for azrecord in _records:
record_name = azrecord.name if azrecord.name != '@' else ''
typ = _parse_azure_type(azrecord.type)
data = getattr(self, '_data_for_{}'.format(typ))
data = data(azrecord)
data['type'] = typ
data['ttl'] = azrecord.ttl
record = Record.new(zone, record_name, data, source=self)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _data_for_A(self, azrecord):
return {'values': [ar.ipv4_address for ar in azrecord.arecords]}
def _data_for_AAAA(self, azrecord):
return {'values': [ar.ipv6_address for ar in azrecord.aaaa_records]}
def _data_for_CAA(self, azrecord):
return {'values': [{'flags': ar.flags,
'tag': ar.tag,
'value': ar.value}
for ar in azrecord.caa_records]}
def _data_for_CNAME(self, azrecord):
'''Parsing data from Azure DNS Client record call
:param azrecord: a return of a call to list azure records
:type azrecord: azure.mgmt.dns.models.RecordSet
:type return: dict
CNAME and PTR both use the catch block to catch possible empty
records. Refer to population comment.
'''
try:
return {'value': _check_endswith_dot(azrecord.cname_record.cname)}
except:
return {'value': '.'}
def _data_for_MX(self, azrecord):
return {'values': [{'preference': ar.preference,
'exchange': ar.exchange}
for ar in azrecord.mx_records]}
def _data_for_NS(self, azrecord):
vals = [ar.nsdname for ar in azrecord.ns_records]
return {'values': [_check_endswith_dot(val) for val in vals]}
def _data_for_PTR(self, azrecord):
try:
ptrdname = azrecord.ptr_records[0].ptrdname
return {'value': _check_endswith_dot(ptrdname)}
except:
return {'value': '.'}
def _data_for_SRV(self, azrecord):
return {'values': [{'priority': ar.priority, 'weight': ar.weight,
'port': ar.port, 'target': ar.target}
for ar in azrecord.srv_records]}
    def _data_for_TXT(self, azrecord):
        # Azure splits long TXT values into multiple chunks (ar.value is a
        # list of strings); reduce concatenates the chunks back into one
        # string, and escape_semicolon protects ';' so the value survives
        # octoDNS record parsing.
        return {'values': [escape_semicolon(reduce((lambda a, b: a + b),
                                            ar.value))
                           for ar in azrecord.txt_records]}
_apply_Update = _apply_Create
def _apply_Delete(self, change):
ar = _AzureRecord(self._resource_group, change.existing, delete=True)
delete = self._dns_client.record_sets.delete
delete(self._resource_group, ar.zone_name, ar.relative_record_set_name,
ar.record_type)
self.log.debug('* Success Delete: {}'.format(ar))
def _apply(self, plan):
'''Required function of manager.py to actually apply a record change.
:param plan: Contains the zones and changes to be made
:type plan: octodns.provider.base.Plan
:type return: void
'''
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
azure_zone_name = desired.name[:len(desired.name) - 1]
self._check_zone(azure_zone_name, create=True)
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name))(change)
|
github/octodns | octodns/provider/azuredns.py | AzureProvider._apply | python | def _apply(self, plan):
'''Required function of manager.py to actually apply a record change.
:param plan: Contains the zones and changes to be made
:type plan: octodns.provider.base.Plan
:type return: void
'''
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
azure_zone_name = desired.name[:len(desired.name) - 1]
self._check_zone(azure_zone_name, create=True)
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name))(change) | Required function of manager.py to actually apply a record change.
:param plan: Contains the zones and changes to be made
:type plan: octodns.provider.base.Plan
:type return: void | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/azuredns.py#L480-L498 | null | class AzureProvider(BaseProvider):
'''
Azure DNS Provider
azuredns.py:
class: octodns.provider.azuredns.AzureProvider
# Current support of authentication of access to Azure services only
# includes using a Service Principal:
# https://docs.microsoft.com/en-us/azure/azure-resource-manager/
# resource-group-create-service-principal-portal
# The Azure Active Directory Application ID (aka client ID):
client_id:
# Authentication Key Value: (note this should be secret)
key:
# Directory ID (aka tenant ID):
directory_id:
# Subscription ID:
sub_id:
# Resource Group name:
resource_group:
# All are required to authenticate.
Example config file with variables:
"
---
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: ./config (example path to directory of zone files)
azuredns:
class: octodns.provider.azuredns.AzureProvider
client_id: env/AZURE_APPLICATION_ID
key: env/AZURE_AUTHENTICATION_KEY
directory_id: env/AZURE_DIRECTORY_ID
sub_id: env/AZURE_SUBSCRIPTION_ID
resource_group: 'TestResource1'
zones:
example.com.:
sources:
- config
targets:
- azuredns
"
The first four variables above can be hidden in environment variables
and octoDNS will automatically search for them in the shell. It is
possible to also hard-code into the config file: eg, resource_group.
'''
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SRV',
'TXT'))
def __init__(self, id, client_id, key, directory_id, sub_id,
resource_group, *args, **kwargs):
self.log = logging.getLogger('AzureProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, client_id=%s, '
'key=***, directory_id:%s', id, client_id, directory_id)
super(AzureProvider, self).__init__(id, *args, **kwargs)
credentials = ServicePrincipalCredentials(
client_id, secret=key, tenant=directory_id
)
self._dns_client = DnsManagementClient(credentials, sub_id)
self._resource_group = resource_group
self._azure_zones = set()
def _populate_zones(self):
self.log.debug('azure_zones: loading')
list_zones = self._dns_client.zones.list_by_resource_group
for zone in list_zones(self._resource_group):
self._azure_zones.add(zone.name)
    def _check_zone(self, name, create=False):
        '''Checks whether a zone specified in a source exists in Azure server.
        Note that Azure zones omit end '.' eg: contoso.com vs contoso.com.
        Returns the name if it exists.
        :param name: Name of a zone to check
        :type name: str
        :param create: If True, creates the zone of that name.
        :type create: bool
        :type return: str or None
        '''
        self.log.debug('_check_zone: name=%s', name)
        try:
            # Fast path: the zone was already seen by a previous list/get.
            if name in self._azure_zones:
                return name
            # Ask the service directly; raises CloudError when missing.
            self._dns_client.zones.get(self._resource_group, name)
            self._azure_zones.add(name)
            return name
        except CloudError as err:
            # The SDK only signals "zone not found" through the error text,
            # so rebuild the expected message and compare it before deciding
            # the error is benign.
            msg = 'The Resource \'Microsoft.Network/dnszones/{}\''.format(name)
            msg += ' under resource group \'{}\''.format(self._resource_group)
            msg += ' was not found.'
            if msg == err.message:
                # Then the only error is that the zone doesn't currently exist
                if create:
                    self.log.debug('_check_zone:no matching zone; creating %s',
                                   name)
                    create_zone = self._dns_client.zones.create_or_update
                    create_zone(self._resource_group, name,
                                Zone(location='global'))
                    return name
                else:
                    return
            # Any other CloudError is a real failure; surface it.
            raise
def populate(self, zone, target=False, lenient=False):
'''Required function of manager.py to collect records from zone.
Special notes for Azure.
Azure zone names omit final '.'
Azure root records names are represented by '@'. OctoDNS uses ''
Azure records created through online interface may have null values
(eg, no IP address for A record).
Azure online interface allows constructing records with null values
which are destroyed by _apply.
Specific quirks such as these are responsible for any non-obvious
parsing in this function and the functions '_params_for_*'.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Checks if Azure is source or target of config.
Currently only supports as a target. Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void
'''
self.log.debug('populate: name=%s', zone.name)
exists = False
before = len(zone.records)
zone_name = zone.name[:len(zone.name) - 1]
self._populate_zones()
self._check_zone(zone_name)
_records = set()
records = self._dns_client.record_sets.list_by_dns_zone
if self._check_zone(zone_name):
exists = True
for azrecord in records(self._resource_group, zone_name):
if _parse_azure_type(azrecord.type) in self.SUPPORTS:
_records.add(azrecord)
for azrecord in _records:
record_name = azrecord.name if azrecord.name != '@' else ''
typ = _parse_azure_type(azrecord.type)
data = getattr(self, '_data_for_{}'.format(typ))
data = data(azrecord)
data['type'] = typ
data['ttl'] = azrecord.ttl
record = Record.new(zone, record_name, data, source=self)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _data_for_A(self, azrecord):
return {'values': [ar.ipv4_address for ar in azrecord.arecords]}
def _data_for_AAAA(self, azrecord):
return {'values': [ar.ipv6_address for ar in azrecord.aaaa_records]}
def _data_for_CAA(self, azrecord):
return {'values': [{'flags': ar.flags,
'tag': ar.tag,
'value': ar.value}
for ar in azrecord.caa_records]}
def _data_for_CNAME(self, azrecord):
'''Parsing data from Azure DNS Client record call
:param azrecord: a return of a call to list azure records
:type azrecord: azure.mgmt.dns.models.RecordSet
:type return: dict
CNAME and PTR both use the catch block to catch possible empty
records. Refer to population comment.
'''
try:
return {'value': _check_endswith_dot(azrecord.cname_record.cname)}
except:
return {'value': '.'}
def _data_for_MX(self, azrecord):
return {'values': [{'preference': ar.preference,
'exchange': ar.exchange}
for ar in azrecord.mx_records]}
def _data_for_NS(self, azrecord):
vals = [ar.nsdname for ar in azrecord.ns_records]
return {'values': [_check_endswith_dot(val) for val in vals]}
def _data_for_PTR(self, azrecord):
try:
ptrdname = azrecord.ptr_records[0].ptrdname
return {'value': _check_endswith_dot(ptrdname)}
except:
return {'value': '.'}
def _data_for_SRV(self, azrecord):
return {'values': [{'priority': ar.priority, 'weight': ar.weight,
'port': ar.port, 'target': ar.target}
for ar in azrecord.srv_records]}
def _data_for_TXT(self, azrecord):
return {'values': [escape_semicolon(reduce((lambda a, b: a + b),
ar.value))
for ar in azrecord.txt_records]}
def _apply_Create(self, change):
'''A record from change must be created.
:param change: a change object
:type change: octodns.record.Change
:type return: void
'''
ar = _AzureRecord(self._resource_group, change.new)
create = self._dns_client.record_sets.create_or_update
create(resource_group_name=ar.resource_group,
zone_name=ar.zone_name,
relative_record_set_name=ar.relative_record_set_name,
record_type=ar.record_type,
parameters=ar.params)
self.log.debug('* Success Create/Update: {}'.format(ar))
_apply_Update = _apply_Create
def _apply_Delete(self, change):
ar = _AzureRecord(self._resource_group, change.existing, delete=True)
delete = self._dns_client.record_sets.delete
delete(self._resource_group, ar.zone_name, ar.relative_record_set_name,
ar.record_type)
self.log.debug('* Success Delete: {}'.format(ar))
|
github/octodns | octodns/provider/base.py | BaseProvider.apply | python | def apply(self, plan):
'''
Submits actual planned changes to the provider. Returns the number of
changes made
'''
if self.apply_disabled:
self.log.info('apply: disabled')
return 0
self.log.info('apply: making changes')
self._apply(plan)
return len(plan.changes) | Submits actual planned changes to the provider. Returns the number of
changes made | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/base.py#L83-L94 | [
"def _apply(self, plan):\n raise NotImplementedError('Abstract base class, _apply method '\n 'missing')\n",
"def _apply(self, plan):\n desired = plan.desired\n changes = plan.changes\n self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,\n len... | class BaseProvider(BaseSource):
def __init__(self, id, apply_disabled=False,
update_pcent_threshold=Plan.MAX_SAFE_UPDATE_PCENT,
delete_pcent_threshold=Plan.MAX_SAFE_DELETE_PCENT):
super(BaseProvider, self).__init__(id)
self.log.debug('__init__: id=%s, apply_disabled=%s, '
'update_pcent_threshold=%.2f, '
'delete_pcent_threshold=%.2f',
id,
apply_disabled,
update_pcent_threshold,
delete_pcent_threshold)
self.apply_disabled = apply_disabled
self.update_pcent_threshold = update_pcent_threshold
self.delete_pcent_threshold = delete_pcent_threshold
def _include_change(self, change):
'''
An opportunity for providers to filter out false positives due to
peculiarities in their implementation. E.g. minimum TTLs.
'''
return True
def _extra_changes(self, existing, desired, changes):
'''
An opportunity for providers to add extra changes to the plan that are
necessary to update ancillary record data or configure the zone. E.g.
base NS records.
'''
return []
def plan(self, desired):
self.log.info('plan: desired=%s', desired.name)
existing = Zone(desired.name, desired.sub_zones)
exists = self.populate(existing, target=True, lenient=True)
if exists is None:
# If your code gets this warning see Source.populate for more
# information
self.log.warn('Provider %s used in target mode did not return '
'exists', self.id)
# compute the changes at the zone/record level
changes = existing.changes(desired, self)
# allow the provider to filter out false positives
before = len(changes)
changes = filter(self._include_change, changes)
after = len(changes)
if before != after:
self.log.info('plan: filtered out %s changes', before - after)
# allow the provider to add extra changes it needs
extra = self._extra_changes(existing=existing, desired=desired,
changes=changes)
if extra:
self.log.info('plan: extra changes\n %s', '\n '
.join([unicode(c) for c in extra]))
changes += extra
if changes:
plan = Plan(existing, desired, changes, exists,
self.update_pcent_threshold,
self.delete_pcent_threshold)
self.log.info('plan: %s', plan)
return plan
self.log.info('plan: No changes')
return None
def _apply(self, plan):
raise NotImplementedError('Abstract base class, _apply method '
'missing')
|
github/octodns | octodns/provider/route53.py | Route53Provider._gen_mods | python | def _gen_mods(self, action, records, existing_rrsets):
'''
Turns `_Route53*`s in to `change_resource_record_sets` `Changes`
'''
return [r.mod(action, existing_rrsets) for r in records] | Turns `_Route53*`s in to `change_resource_record_sets` `Changes` | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/route53.py#L960-L964 | null | class Route53Provider(BaseProvider):
'''
AWS Route53 Provider
route53:
class: octodns.provider.route53.Route53Provider
# The AWS access key id
access_key_id:
# The AWS secret access key
secret_access_key:
# The AWS session token (optional)
# Only needed if using temporary security credentials
session_token:
Alternatively, you may leave out access_key_id, secret_access_key
and session_token.
This will result in boto3 deciding authentication dynamically.
In general the account used will need full permissions on Route53.
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS', 'PTR',
'SPF', 'SRV', 'TXT'))
# This should be bumped when there are underlying changes made to the
# health check config.
HEALTH_CHECK_VERSION = '0001'
def __init__(self, id, access_key_id=None, secret_access_key=None,
max_changes=1000, client_max_attempts=None,
session_token=None, *args, **kwargs):
self.max_changes = max_changes
_msg = 'access_key_id={}, secret_access_key=***, ' \
'session_token=***'.format(access_key_id)
use_fallback_auth = access_key_id is None and \
secret_access_key is None and session_token is None
if use_fallback_auth:
_msg = 'auth=fallback'
self.log = logging.getLogger('Route53Provider[{}]'.format(id))
self.log.debug('__init__: id=%s, %s', id, _msg)
super(Route53Provider, self).__init__(id, *args, **kwargs)
config = None
if client_max_attempts is not None:
self.log.info('__init__: setting max_attempts to %d',
client_max_attempts)
config = Config(retries={'max_attempts': client_max_attempts})
if use_fallback_auth:
self._conn = client('route53', config=config)
else:
self._conn = client('route53', aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
aws_session_token=session_token,
config=config)
self._r53_zones = None
self._r53_rrsets = {}
self._health_checks = None
@property
def r53_zones(self):
if self._r53_zones is None:
self.log.debug('r53_zones: loading')
zones = {}
more = True
start = {}
while more:
resp = self._conn.list_hosted_zones(**start)
for z in resp['HostedZones']:
zones[z['Name']] = z['Id']
more = resp['IsTruncated']
start['Marker'] = resp.get('NextMarker', None)
self._r53_zones = zones
return self._r53_zones
def _get_zone_id(self, name, create=False):
self.log.debug('_get_zone_id: name=%s', name)
if name in self.r53_zones:
id = self.r53_zones[name]
self.log.debug('_get_zone_id: id=%s', id)
return id
if create:
ref = uuid4().hex
self.log.debug('_get_zone_id: no matching zone, creating, '
'ref=%s', ref)
resp = self._conn.create_hosted_zone(Name=name,
CallerReference=ref)
self.r53_zones[name] = id = resp['HostedZone']['Id']
return id
return None
    def _parse_geo(self, rrset):
        # Extract octoDNS's geo code from a Route53 rrset: a continent code,
        # "continent-country", or "continent-country-subdivision". Returns
        # None when the rrset has no GeoLocation or is the '*' catch-all.
        try:
            loc = rrset['GeoLocation']
        except KeyError:
            # No geo loc
            return
        try:
            return loc['ContinentCode']
        except KeyError:
            # Must be country
            cc = loc['CountryCode']
            if cc == '*':
                # This is the default
                return
            # cca_to_ctca2 appears to map a 2-letter country code to its
            # continent code -- TODO confirm against the imported helper.
            cn = cca_to_ctca2(cc)
            try:
                return '{}-{}-{}'.format(cn, cc, loc['SubdivisionCode'])
            except KeyError:
                return '{}-{}'.format(cn, cc)
def _data_for_geo(self, rrset):
ret = {
'type': rrset['Type'],
'values': [v['Value'] for v in rrset['ResourceRecords']],
'ttl': int(rrset['TTL'])
}
geo = self._parse_geo(rrset)
if geo:
ret['geo'] = geo
return ret
_data_for_A = _data_for_geo
_data_for_AAAA = _data_for_geo
def _data_for_CAA(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
flags, tag, value = rr['Value'].split(' ')
values.append({
'flags': flags,
'tag': tag,
'value': value[1:-1],
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _data_for_single(self, rrset):
return {
'type': rrset['Type'],
'value': rrset['ResourceRecords'][0]['Value'],
'ttl': int(rrset['TTL'])
}
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
_fix_semicolons = re.compile(r'(?<!\\);')
def _data_for_quoted(self, rrset):
return {
'type': rrset['Type'],
'values': [self._fix_semicolons.sub('\\;', rr['Value'][1:-1])
for rr in rrset['ResourceRecords']],
'ttl': int(rrset['TTL'])
}
_data_for_TXT = _data_for_quoted
_data_for_SPF = _data_for_quoted
def _data_for_MX(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
preference, exchange = rr['Value'].split(' ')
values.append({
'preference': preference,
'exchange': exchange,
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _data_for_NAPTR(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
order, preference, flags, service, regexp, replacement = \
rr['Value'].split(' ')
flags = flags[1:-1]
service = service[1:-1]
regexp = regexp[1:-1]
values.append({
'order': order,
'preference': preference,
'flags': flags,
'service': service,
'regexp': regexp,
'replacement': replacement,
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _data_for_NS(self, rrset):
return {
'type': rrset['Type'],
'values': [v['Value'] for v in rrset['ResourceRecords']],
'ttl': int(rrset['TTL'])
}
def _data_for_SRV(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
priority, weight, port, target = rr['Value'].split(' ')
values.append({
'priority': priority,
'weight': weight,
'port': port,
'target': target,
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _load_records(self, zone_id):
if zone_id not in self._r53_rrsets:
self.log.debug('_load_records: zone_id=%s loading', zone_id)
rrsets = []
more = True
start = {}
while more:
resp = \
self._conn.list_resource_record_sets(HostedZoneId=zone_id,
**start)
rrsets += resp['ResourceRecordSets']
more = resp['IsTruncated']
if more:
start = {
'StartRecordName': resp['NextRecordName'],
'StartRecordType': resp['NextRecordType'],
}
try:
start['StartRecordIdentifier'] = \
resp['NextRecordIdentifier']
except KeyError:
pass
self._r53_rrsets[zone_id] = rrsets
return self._r53_rrsets[zone_id]
def _data_for_dynamic(self, name, _type, rrsets):
# This converts a bunch of RRSets into their corresponding dynamic
# Record. It's used by populate.
pools = defaultdict(lambda: {'values': []})
# Data to build our rules will be collected here and "converted" into
# their final form below
rules = defaultdict(lambda: {'pool': None, 'geos': []})
# Base/empty data
data = {
'dynamic': {
'pools': pools,
'rules': [],
}
}
# For all the rrsets that comprise this dynamic record
for rrset in rrsets:
name = rrset['Name']
if '-pool.' in name:
# This is a pool rrset
pool_name = _parse_pool_name(name)
if pool_name == 'default':
# default becomes the base for the record and its
# value(s) will fill the non-dynamic values
data_for = getattr(self, '_data_for_{}'.format(_type))
data.update(data_for(rrset))
elif rrset['Failover'] == 'SECONDARY':
# This is a failover record, we'll ignore PRIMARY, but
# SECONDARY will tell us what the pool's fallback is
fallback_name = \
_parse_pool_name(rrset['AliasTarget']['DNSName'])
# Don't care about default fallbacks, anything else
# we'll record
if fallback_name != 'default':
pools[pool_name]['fallback'] = fallback_name
elif 'GeoLocation' in rrset:
# These are rules
_id = rrset['SetIdentifier']
# We record rule index as the first part of set-id, the 2nd
# part just ensures uniqueness across geos and is ignored
i = int(_id.split('-', 1)[0])
target_pool = _parse_pool_name(rrset['AliasTarget']['DNSName'])
# Record the pool
rules[i]['pool'] = target_pool
# Record geo if we have one
geo = self._parse_geo(rrset)
if geo:
rules[i]['geos'].append(geo)
else:
# These are the pool value(s)
# Grab the pool name out of the SetIdentifier, format looks
# like ...-000 where 000 is a zero-padded index for the value
# it's ignored only used to make sure the value is unique
pool_name = rrset['SetIdentifier'][:-4]
value = rrset['ResourceRecords'][0]['Value']
pools[pool_name]['values'].append({
'value': value,
'weight': rrset['Weight'],
})
# Convert our map of rules into an ordered list now that we have all
# the data
for _, rule in sorted(rules.items()):
r = {
'pool': rule['pool'],
}
geos = sorted(rule['geos'])
if geos:
r['geos'] = geos
data['dynamic']['rules'].append(r)
return data
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
before = len(zone.records)
exists = False
zone_id = self._get_zone_id(zone.name)
if zone_id:
exists = True
records = defaultdict(lambda: defaultdict(list))
dynamic = defaultdict(lambda: defaultdict(list))
for rrset in self._load_records(zone_id):
record_name = zone.hostname_from_fqdn(rrset['Name'])
record_name = _octal_replace(record_name)
record_type = rrset['Type']
if record_type not in self.SUPPORTS:
# Skip stuff we don't support
continue
if record_name.startswith('_octodns-'):
# Part of a dynamic record
try:
record_name = record_name.split('.', 1)[1]
except IndexError:
record_name = ''
dynamic[record_name][record_type].append(rrset)
continue
elif 'AliasTarget' in rrset:
if rrset['AliasTarget']['DNSName'].startswith('_octodns-'):
# Part of a dynamic record
dynamic[record_name][record_type].append(rrset)
else:
# Alias records are Route53 specific and are not
# portable, so we need to skip them
self.log.warning("%s is an Alias record. Skipping..."
% rrset['Name'])
continue
# A basic record (potentially including geo)
data = getattr(self, '_data_for_{}'.format(record_type))(rrset)
records[record_name][record_type].append(data)
# Convert the dynamic rrsets to Records
for name, types in dynamic.items():
for _type, rrsets in types.items():
data = self._data_for_dynamic(name, _type, rrsets)
record = Record.new(zone, name, data, source=self,
lenient=lenient)
zone.add_record(record, lenient=lenient)
# Convert the basic (potentially with geo) rrsets to records
for name, types in records.items():
for _type, data in types.items():
if len(data) > 1:
# Multiple data indicates a record with GeoDNS, convert
# them data into the format we need
geo = {}
for d in data:
try:
geo[d['geo']] = d['values']
except KeyError:
primary = d
data = primary
data['geo'] = geo
else:
data = data[0]
record = Record.new(zone, name, data, source=self,
lenient=lenient)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
@property
def health_checks(self):
if self._health_checks is None:
# need to do the first load
self.log.debug('health_checks: loading')
checks = {}
more = True
start = {}
while more:
resp = self._conn.list_health_checks(**start)
for health_check in resp['HealthChecks']:
# our format for CallerReference is dddd:hex-uuid
ref = health_check.get('CallerReference', 'xxxxx')
if len(ref) > 4 and ref[4] != ':':
# ignore anything else
continue
checks[health_check['Id']] = health_check
more = resp['IsTruncated']
start['Marker'] = resp.get('NextMarker', None)
self._health_checks = checks
# We've got a cached version use it
return self._health_checks
def _healthcheck_measure_latency(self, record):
return record._octodns.get('route53', {}) \
.get('healthcheck', {}) \
.get('measure_latency', True)
def _health_check_equivilent(self, host, path, protocol, port,
measure_latency, health_check, value=None):
config = health_check['HealthCheckConfig']
# So interestingly Route53 normalizes IPAddress which will cause us to
        # fail to see things as equivalent. To work around this we'll compare
        # ip_address's returned objects for equivalence
# E.g 2001:4860:4860::8842 -> 2001:4860:4860:0:0:0:0:8842
if value:
value = ip_address(unicode(value))
config_ip_address = ip_address(unicode(config['IPAddress']))
else:
# No value so give this a None to match value's
config_ip_address = None
return host == config['FullyQualifiedDomainName'] and \
path == config['ResourcePath'] and protocol == config['Type'] \
and port == config['Port'] and \
measure_latency == config['MeasureLatency'] and \
value == config_ip_address
def get_health_check_id(self, record, value, create):
# fqdn & the first value are special, we use them to match up health
# checks to their records. Route53 health checks check a single ip and
# we're going to assume that ips are interchangeable to avoid
# health-checking each one independently
fqdn = record.fqdn
self.log.debug('get_health_check_id: fqdn=%s, type=%s, value=%s',
fqdn, record._type, value)
try:
ip_address(unicode(value))
# We're working with an IP, host is the Host header
healthcheck_host = record.healthcheck_host
except (AddressValueError, ValueError):
# This isn't an IP, host is the value, value should be None
healthcheck_host = value
value = None
healthcheck_path = record.healthcheck_path
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port
healthcheck_latency = self._healthcheck_measure_latency(record)
# we're looking for a healthcheck with the current version & our record
# type, we'll ignore anything else
expected_ref = '{}:{}:{}:'.format(self.HEALTH_CHECK_VERSION,
record._type, record.fqdn)
for id, health_check in self.health_checks.items():
if not health_check['CallerReference'].startswith(expected_ref):
# not match, ignore
continue
if self._health_check_equivilent(healthcheck_host,
healthcheck_path,
healthcheck_protocol,
healthcheck_port,
healthcheck_latency,
health_check,
value=value):
# this is the health check we're looking for
self.log.debug('get_health_check_id: found match id=%s', id)
return id
if not create:
# no existing matches and not allowed to create, return none
self.log.debug('get_health_check_id: no matches, no create')
return
# no existing matches, we need to create a new health check
config = {
'EnableSNI': healthcheck_protocol == 'HTTPS',
'FailureThreshold': 6,
'FullyQualifiedDomainName': healthcheck_host,
'MeasureLatency': healthcheck_latency,
'Port': healthcheck_port,
'RequestInterval': 10,
'ResourcePath': healthcheck_path,
'Type': healthcheck_protocol,
}
if value:
config['IPAddress'] = value
ref = '{}:{}:{}:{}'.format(self.HEALTH_CHECK_VERSION, record._type,
record.fqdn, uuid4().hex[:12])
resp = self._conn.create_health_check(CallerReference=ref,
HealthCheckConfig=config)
health_check = resp['HealthCheck']
id = health_check['Id']
# Set a Name for the benefit of the UI
name = '{}:{} - {}'.format(record.fqdn, record._type,
value or healthcheck_host)
self._conn.change_tags_for_resource(ResourceType='healthcheck',
ResourceId=id,
AddTags=[{
'Key': 'Name',
'Value': name,
}])
# Manually add it to our cache
health_check['Tags'] = {
'Name': name
}
# store the new health check so that we'll be able to find it in the
# future
self._health_checks[id] = health_check
self.log.info('get_health_check_id: created id=%s, host=%s, '
'path=%s, protocol=%s, port=%d, measure_latency=%r, '
'value=%s', id, healthcheck_host, healthcheck_path,
healthcheck_protocol, healthcheck_port,
healthcheck_latency, value)
return id
def _gc_health_checks(self, record, new):
if record._type not in ('A', 'AAAA'):
return
self.log.debug('_gc_health_checks: record=%s', record)
# Find the health checks we're using for the new route53 records
in_use = set()
for r in new:
hc_id = getattr(r, 'health_check_id', False)
if hc_id:
in_use.add(hc_id)
self.log.debug('_gc_health_checks: in_use=%s', in_use)
# Now we need to run through ALL the health checks looking for those
# that apply to this record, deleting any that do and are no longer in
# use
expected_re = re.compile(r'^\d\d\d\d:{}:{}:'
.format(record._type, record.fqdn))
        # UNTIL 1.0: we'll clean out the previous version of Route53 health
# checks as best as we can.
expected_legacy_host = record.fqdn[:-1]
expected_legacy = '0000:{}:'.format(record._type)
for id, health_check in self.health_checks.items():
ref = health_check['CallerReference']
if expected_re.match(ref) and id not in in_use:
# this is a health check for this record, but not one we're
# planning to use going forward
self.log.info('_gc_health_checks: deleting id=%s', id)
self._conn.delete_health_check(HealthCheckId=id)
elif ref.startswith(expected_legacy):
config = health_check['HealthCheckConfig']
if expected_legacy_host == config['FullyQualifiedDomainName']:
self.log.info('_gc_health_checks: deleting legacy id=%s',
id)
self._conn.delete_health_check(HealthCheckId=id)
def _gen_records(self, record, zone_id, creating=False):
'''
Turns an octodns.Record into one or more `_Route53*`s
'''
return _Route53Record.new(self, record, zone_id, creating)
def _mod_Create(self, change, zone_id, existing_rrsets):
# New is the stuff that needs to be created
new_records = self._gen_records(change.new, zone_id, creating=True)
# Now is a good time to clear out any unused health checks since we
# know what we'll be using going forward
self._gc_health_checks(change.new, new_records)
return self._gen_mods('CREATE', new_records, existing_rrsets)
def _mod_Update(self, change, zone_id, existing_rrsets):
# See comments in _Route53Record for how the set math is made to do our
# bidding here.
existing_records = self._gen_records(change.existing, zone_id,
creating=False)
new_records = self._gen_records(change.new, zone_id, creating=True)
# Now is a good time to clear out any unused health checks since we
# know what we'll be using going forward
self._gc_health_checks(change.new, new_records)
# Things in existing, but not new are deletes
deletes = existing_records - new_records
# Things in new, but not existing are the creates
creates = new_records - existing_records
# Things in both need updating, we could optimize this and filter out
# things that haven't actually changed, but that's for another day.
# We can't use set math here b/c we won't be able to control which of
# the two objects will be in the result and we need to ensure it's the
# new one.
upserts = set()
for new_record in new_records:
if new_record in existing_records:
upserts.add(new_record)
return self._gen_mods('DELETE', deletes, existing_rrsets) + \
self._gen_mods('CREATE', creates, existing_rrsets) + \
self._gen_mods('UPSERT', upserts, existing_rrsets)
def _mod_Delete(self, change, zone_id, existing_rrsets):
# Existing is the thing that needs to be deleted
existing_records = self._gen_records(change.existing, zone_id,
creating=False)
# Now is a good time to clear out all the health checks since we know
# we're done with them
self._gc_health_checks(change.existing, [])
return self._gen_mods('DELETE', existing_records, existing_rrsets)
def _extra_changes_update_needed(self, record, rrset):
healthcheck_host = record.healthcheck_host
healthcheck_path = record.healthcheck_path
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port
healthcheck_latency = self._healthcheck_measure_latency(record)
try:
health_check_id = rrset['HealthCheckId']
health_check = self.health_checks[health_check_id]
caller_ref = health_check['CallerReference']
if caller_ref.startswith(self.HEALTH_CHECK_VERSION):
if self._health_check_equivilent(healthcheck_host,
healthcheck_path,
healthcheck_protocol,
healthcheck_port,
healthcheck_latency,
health_check):
# it has the right health check
return False
except (IndexError, KeyError):
# no health check id or one that isn't the right version
pass
# no good, doesn't have the right health check, needs an update
self.log.info('_extra_changes_update_needed: health-check caused '
'update of %s:%s', record.fqdn, record._type)
return True
def _extra_changes_geo_needs_update(self, zone_id, record):
    '''
    Return True if a geo record with no pending change still needs an
    update because its health check is outdated or mismatched.
    '''
    # OK this is a record we don't have change for that does have geo
    # information. We need to look and see if it needs to be updated b/c of
    # a health check version bump or other mismatch
    self.log.debug('_extra_changes_geo_needs_update: inspecting=%s, %s',
                   record.fqdn, record._type)
    fqdn = record.fqdn
    # loop through all the r53 rrsets
    for rrset in self._load_records(zone_id):
        # only non-default ('*' country) geo rrsets for this fqdn/type are
        # health-checked, so only they can trigger an extra update
        if fqdn == rrset['Name'] and record._type == rrset['Type'] and \
           rrset.get('GeoLocation', {}).get('CountryCode', False) != '*' \
           and self._extra_changes_update_needed(record, rrset):
            # no good, doesn't have the right health check, needs an update
            self.log.info('_extra_changes_geo_needs_update: health-check '
                          'caused update of %s:%s', record.fqdn,
                          record._type)
            return True
    return False
def _extra_changes_dynamic_needs_update(self, zone_id, record):
    '''
    Return True if a dynamic record with no pending change still needs
    an update because one of its pool-value rrsets has an outdated or
    mismatched health check.
    '''
    # OK this is a record we don't have change for that does have dynamic
    # information. We need to look and see if it needs to be updated b/c of
    # a health check version bump or other mismatch
    self.log.debug('_extra_changes_dynamic_needs_update: inspecting=%s, '
                   '%s', record.fqdn, record._type)
    fqdn = record.fqdn
    # loop through all the r53 rrsets
    for rrset in self._load_records(zone_id):
        name = rrset['Name']
        # only the '_octodns-<pool>-value.' rrsets carry health checks;
        # the '-default-' pool is intentionally unchecked
        if record._type == rrset['Type'] and name.endswith(fqdn) and \
           name.startswith('_octodns-') and '-value.' in name and \
           '-default-' not in name and \
           self._extra_changes_update_needed(record, rrset):
            # no good, doesn't have the right health check, needs an update
            self.log.info('_extra_changes_dynamic_needs_update: '
                          'health-check caused update of %s:%s',
                          record.fqdn, record._type)
            return True
    return False
def _extra_changes(self, desired, changes, **kwargs):
    '''
    Return extra Update changes for geo/dynamic records whose values are
    unchanged but whose Route53 health checks need refreshing.
    '''
    self.log.debug('_extra_changes: desired=%s', desired.name)
    zone_id = self._get_zone_id(desired.name)
    if not zone_id:
        # zone doesn't exist so no extras to worry about
        return []
    # we'll skip extra checking for anything we're already going to change
    changed = set([c.record for c in changes])
    # ok, now it's time for the reason we're here, we need to go over all
    # the desired records
    extras = []
    for record in desired.records:
        if record in changed:
            # already have a change for it, skipping
            continue
        if getattr(record, 'geo', False):
            if self._extra_changes_geo_needs_update(zone_id, record):
                extras.append(Update(record, record))
        elif getattr(record, 'dynamic', False):
            if self._extra_changes_dynamic_needs_update(zone_id, record):
                extras.append(Update(record, record))
    return extras
def _apply(self, plan):
    '''
    Apply the plan's changes to Route53, batching the generated mods so
    each change_resource_record_sets request stays under the service's
    per-request limit (self.max_changes).
    '''
    desired = plan.desired
    changes = plan.changes
    self.log.info('_apply: zone=%s, len(changes)=%d', desired.name,
                  len(changes))
    batch = []
    batch_rs_count = 0
    zone_id = self._get_zone_id(desired.name, True)
    existing_rrsets = self._load_records(zone_id)
    for c in changes:
        # Generate the mods for this change
        mod_type = getattr(self, '_mod_{}'.format(c.__class__.__name__))
        mods = mod_type(c, zone_id, existing_rrsets)
        # Order our mods to make sure targets exist before alises point to
        # them and we CRUD in the desired order
        mods.sort(key=_mod_keyer)
        mods_rs_count = sum(
            [len(m['ResourceRecordSet'].get('ResourceRecords', ''))
             for m in mods]
        )
        if mods_rs_count > self.max_changes:
            # a single mod resulted in too many ResourceRecords changes
            raise Exception('Too many modifications: {}'
                            .format(mods_rs_count))
        # r53 limits changesets to 1000 entries
        # NOTE(review): '<' (not '<=') caps batches one below max_changes;
        # conservative by one entry — confirm whether that is intentional
        if (batch_rs_count + mods_rs_count) < self.max_changes:
            # append to the batch
            batch += mods
            batch_rs_count += mods_rs_count
        else:
            self.log.info('_apply: sending change request for batch of '
                          '%d mods, %d ResourceRecords', len(batch),
                          batch_rs_count)
            # send the batch
            self._really_apply(batch, zone_id)
            # start a new batch with the leftovers
            batch = mods
            batch_rs_count = mods_rs_count
    # the way the above process works there will always be something left
    # over in batch to process. In the case that we submit a batch up there
    # it was always the case that there was something pushing us over
    # max_changes and thus left over to submit.
    self.log.info('_apply: sending change request for batch of %d mods,'
                  ' %d ResourceRecords', len(batch),
                  batch_rs_count)
    self._really_apply(batch, zone_id)
def _really_apply(self, batch, zone_id):
    '''
    Submit one batch of mods to Route53 as a single change set.
    '''
    # tag the change set with a unique comment so it can be traced later
    comment = 'Change: {}'.format(uuid4().hex)
    change_batch = {
        'Comment': comment,
        'Changes': batch,
    }
    self.log.debug('_really_apply: sending change request, comment=%s',
                   comment)
    resp = self._conn.change_resource_record_sets(
        HostedZoneId=zone_id, ChangeBatch=change_batch)
    self.log.debug('_really_apply: change info=%s', resp['ChangeInfo'])
|
github/octodns | octodns/provider/route53.py | Route53Provider._gen_records | python | def _gen_records(self, record, zone_id, creating=False):
'''
Turns an octodns.Record into one or more `_Route53*`s
'''
return _Route53Record.new(self, record, zone_id, creating) | Turns an octodns.Record into one or more `_Route53*`s | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/route53.py#L1144-L1148 | null | class Route53Provider(BaseProvider):
'''
AWS Route53 Provider
route53:
class: octodns.provider.route53.Route53Provider
# The AWS access key id
access_key_id:
# The AWS secret access key
secret_access_key:
# The AWS session token (optional)
# Only needed if using temporary security credentials
session_token:
Alternatively, you may leave out access_key_id, secret_access_key
and session_token.
This will result in boto3 deciding authentication dynamically.
In general the account used will need full permissions on Route53.
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS', 'PTR',
'SPF', 'SRV', 'TXT'))
# This should be bumped when there are underlying changes made to the
# health check config.
HEALTH_CHECK_VERSION = '0001'
def __init__(self, id, access_key_id=None, secret_access_key=None,
max_changes=1000, client_max_attempts=None,
session_token=None, *args, **kwargs):
self.max_changes = max_changes
_msg = 'access_key_id={}, secret_access_key=***, ' \
'session_token=***'.format(access_key_id)
use_fallback_auth = access_key_id is None and \
secret_access_key is None and session_token is None
if use_fallback_auth:
_msg = 'auth=fallback'
self.log = logging.getLogger('Route53Provider[{}]'.format(id))
self.log.debug('__init__: id=%s, %s', id, _msg)
super(Route53Provider, self).__init__(id, *args, **kwargs)
config = None
if client_max_attempts is not None:
self.log.info('__init__: setting max_attempts to %d',
client_max_attempts)
config = Config(retries={'max_attempts': client_max_attempts})
if use_fallback_auth:
self._conn = client('route53', config=config)
else:
self._conn = client('route53', aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
aws_session_token=session_token,
config=config)
self._r53_zones = None
self._r53_rrsets = {}
self._health_checks = None
@property
def r53_zones(self):
if self._r53_zones is None:
self.log.debug('r53_zones: loading')
zones = {}
more = True
start = {}
while more:
resp = self._conn.list_hosted_zones(**start)
for z in resp['HostedZones']:
zones[z['Name']] = z['Id']
more = resp['IsTruncated']
start['Marker'] = resp.get('NextMarker', None)
self._r53_zones = zones
return self._r53_zones
def _get_zone_id(self, name, create=False):
self.log.debug('_get_zone_id: name=%s', name)
if name in self.r53_zones:
id = self.r53_zones[name]
self.log.debug('_get_zone_id: id=%s', id)
return id
if create:
ref = uuid4().hex
self.log.debug('_get_zone_id: no matching zone, creating, '
'ref=%s', ref)
resp = self._conn.create_hosted_zone(Name=name,
CallerReference=ref)
self.r53_zones[name] = id = resp['HostedZone']['Id']
return id
return None
def _parse_geo(self, rrset):
try:
loc = rrset['GeoLocation']
except KeyError:
# No geo loc
return
try:
return loc['ContinentCode']
except KeyError:
# Must be country
cc = loc['CountryCode']
if cc == '*':
# This is the default
return
cn = cca_to_ctca2(cc)
try:
return '{}-{}-{}'.format(cn, cc, loc['SubdivisionCode'])
except KeyError:
return '{}-{}'.format(cn, cc)
def _data_for_geo(self, rrset):
    '''
    Convert an A/AAAA rrset into octoDNS record data, attaching the
    parsed geo code when the rrset is geo-located.
    '''
    ret = {
        'type': rrset['Type'],
        'values': [v['Value'] for v in rrset['ResourceRecords']],
        'ttl': int(rrset['TTL'])
    }
    geo = self._parse_geo(rrset)
    if geo:
        ret['geo'] = geo
    return ret

# A and AAAA are the geo-capable types
_data_for_A = _data_for_geo
_data_for_AAAA = _data_for_geo
def _data_for_CAA(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
flags, tag, value = rr['Value'].split(' ')
values.append({
'flags': flags,
'tag': tag,
'value': value[1:-1],
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _data_for_single(self, rrset):
return {
'type': rrset['Type'],
'value': rrset['ResourceRecords'][0]['Value'],
'ttl': int(rrset['TTL'])
}
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
# matches any ';' that is not already escaped with a backslash
_fix_semicolons = re.compile(r'(?<!\\);')

def _data_for_quoted(self, rrset):
    '''
    Convert a quoted-value rrset (TXT/SPF) into octoDNS record data,
    stripping the surrounding quotes and escaping bare semicolons.
    '''
    return {
        'type': rrset['Type'],
        'values': [self._fix_semicolons.sub('\\;', rr['Value'][1:-1])
                   for rr in rrset['ResourceRecords']],
        'ttl': int(rrset['TTL'])
    }

_data_for_TXT = _data_for_quoted
_data_for_SPF = _data_for_quoted
def _data_for_MX(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
preference, exchange = rr['Value'].split(' ')
values.append({
'preference': preference,
'exchange': exchange,
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _data_for_NAPTR(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
order, preference, flags, service, regexp, replacement = \
rr['Value'].split(' ')
flags = flags[1:-1]
service = service[1:-1]
regexp = regexp[1:-1]
values.append({
'order': order,
'preference': preference,
'flags': flags,
'service': service,
'regexp': regexp,
'replacement': replacement,
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _data_for_NS(self, rrset):
return {
'type': rrset['Type'],
'values': [v['Value'] for v in rrset['ResourceRecords']],
'ttl': int(rrset['TTL'])
}
def _data_for_SRV(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
priority, weight, port, target = rr['Value'].split(' ')
values.append({
'priority': priority,
'weight': weight,
'port': port,
'target': target,
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _load_records(self, zone_id):
if zone_id not in self._r53_rrsets:
self.log.debug('_load_records: zone_id=%s loading', zone_id)
rrsets = []
more = True
start = {}
while more:
resp = \
self._conn.list_resource_record_sets(HostedZoneId=zone_id,
**start)
rrsets += resp['ResourceRecordSets']
more = resp['IsTruncated']
if more:
start = {
'StartRecordName': resp['NextRecordName'],
'StartRecordType': resp['NextRecordType'],
}
try:
start['StartRecordIdentifier'] = \
resp['NextRecordIdentifier']
except KeyError:
pass
self._r53_rrsets[zone_id] = rrsets
return self._r53_rrsets[zone_id]
def _data_for_dynamic(self, name, _type, rrsets):
# This converts a bunch of RRSets into their corresponding dynamic
# Record. It's used by populate.
pools = defaultdict(lambda: {'values': []})
# Data to build our rules will be collected here and "converted" into
# their final form below
rules = defaultdict(lambda: {'pool': None, 'geos': []})
# Base/empty data
data = {
'dynamic': {
'pools': pools,
'rules': [],
}
}
# For all the rrsets that comprise this dynamic record
for rrset in rrsets:
name = rrset['Name']
if '-pool.' in name:
# This is a pool rrset
pool_name = _parse_pool_name(name)
if pool_name == 'default':
# default becomes the base for the record and its
# value(s) will fill the non-dynamic values
data_for = getattr(self, '_data_for_{}'.format(_type))
data.update(data_for(rrset))
elif rrset['Failover'] == 'SECONDARY':
# This is a failover record, we'll ignore PRIMARY, but
# SECONDARY will tell us what the pool's fallback is
fallback_name = \
_parse_pool_name(rrset['AliasTarget']['DNSName'])
# Don't care about default fallbacks, anything else
# we'll record
if fallback_name != 'default':
pools[pool_name]['fallback'] = fallback_name
elif 'GeoLocation' in rrset:
# These are rules
_id = rrset['SetIdentifier']
# We record rule index as the first part of set-id, the 2nd
# part just ensures uniqueness across geos and is ignored
i = int(_id.split('-', 1)[0])
target_pool = _parse_pool_name(rrset['AliasTarget']['DNSName'])
# Record the pool
rules[i]['pool'] = target_pool
# Record geo if we have one
geo = self._parse_geo(rrset)
if geo:
rules[i]['geos'].append(geo)
else:
# These are the pool value(s)
# Grab the pool name out of the SetIdentifier, format looks
# like ...-000 where 000 is a zero-padded index for the value
# it's ignored only used to make sure the value is unique
pool_name = rrset['SetIdentifier'][:-4]
value = rrset['ResourceRecords'][0]['Value']
pools[pool_name]['values'].append({
'value': value,
'weight': rrset['Weight'],
})
# Convert our map of rules into an ordered list now that we have all
# the data
for _, rule in sorted(rules.items()):
r = {
'pool': rule['pool'],
}
geos = sorted(rule['geos'])
if geos:
r['geos'] = geos
data['dynamic']['rules'].append(r)
return data
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
before = len(zone.records)
exists = False
zone_id = self._get_zone_id(zone.name)
if zone_id:
exists = True
records = defaultdict(lambda: defaultdict(list))
dynamic = defaultdict(lambda: defaultdict(list))
for rrset in self._load_records(zone_id):
record_name = zone.hostname_from_fqdn(rrset['Name'])
record_name = _octal_replace(record_name)
record_type = rrset['Type']
if record_type not in self.SUPPORTS:
# Skip stuff we don't support
continue
if record_name.startswith('_octodns-'):
# Part of a dynamic record
try:
record_name = record_name.split('.', 1)[1]
except IndexError:
record_name = ''
dynamic[record_name][record_type].append(rrset)
continue
elif 'AliasTarget' in rrset:
if rrset['AliasTarget']['DNSName'].startswith('_octodns-'):
# Part of a dynamic record
dynamic[record_name][record_type].append(rrset)
else:
# Alias records are Route53 specific and are not
# portable, so we need to skip them
self.log.warning("%s is an Alias record. Skipping..."
% rrset['Name'])
continue
# A basic record (potentially including geo)
data = getattr(self, '_data_for_{}'.format(record_type))(rrset)
records[record_name][record_type].append(data)
# Convert the dynamic rrsets to Records
for name, types in dynamic.items():
for _type, rrsets in types.items():
data = self._data_for_dynamic(name, _type, rrsets)
record = Record.new(zone, name, data, source=self,
lenient=lenient)
zone.add_record(record, lenient=lenient)
# Convert the basic (potentially with geo) rrsets to records
for name, types in records.items():
for _type, data in types.items():
if len(data) > 1:
# Multiple data indicates a record with GeoDNS, convert
# them data into the format we need
geo = {}
for d in data:
try:
geo[d['geo']] = d['values']
except KeyError:
primary = d
data = primary
data['geo'] = geo
else:
data = data[0]
record = Record.new(zone, name, data, source=self,
lenient=lenient)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _gen_mods(self, action, records, existing_rrsets):
'''
Turns `_Route53*`s in to `change_resource_record_sets` `Changes`
'''
return [r.mod(action, existing_rrsets) for r in records]
@property
def health_checks(self):
if self._health_checks is None:
# need to do the first load
self.log.debug('health_checks: loading')
checks = {}
more = True
start = {}
while more:
resp = self._conn.list_health_checks(**start)
for health_check in resp['HealthChecks']:
# our format for CallerReference is dddd:hex-uuid
ref = health_check.get('CallerReference', 'xxxxx')
if len(ref) > 4 and ref[4] != ':':
# ignore anything else
continue
checks[health_check['Id']] = health_check
more = resp['IsTruncated']
start['Marker'] = resp.get('NextMarker', None)
self._health_checks = checks
# We've got a cached version use it
return self._health_checks
def _healthcheck_measure_latency(self, record):
return record._octodns.get('route53', {}) \
.get('healthcheck', {}) \
.get('measure_latency', True)
def _health_check_equivilent(self, host, path, protocol, port,
measure_latency, health_check, value=None):
config = health_check['HealthCheckConfig']
# So interestingly Route53 normalizes IPAddress which will cause us to
# fail to find see things as equivalent. To work around this we'll
# ip_address's returned object for equivalence
# E.g 2001:4860:4860::8842 -> 2001:4860:4860:0:0:0:0:8842
if value:
value = ip_address(unicode(value))
config_ip_address = ip_address(unicode(config['IPAddress']))
else:
# No value so give this a None to match value's
config_ip_address = None
return host == config['FullyQualifiedDomainName'] and \
path == config['ResourcePath'] and protocol == config['Type'] \
and port == config['Port'] and \
measure_latency == config['MeasureLatency'] and \
value == config_ip_address
def get_health_check_id(self, record, value, create):
# fqdn & the first value are special, we use them to match up health
# checks to their records. Route53 health checks check a single ip and
# we're going to assume that ips are interchangeable to avoid
# health-checking each one independently
fqdn = record.fqdn
self.log.debug('get_health_check_id: fqdn=%s, type=%s, value=%s',
fqdn, record._type, value)
try:
ip_address(unicode(value))
# We're working with an IP, host is the Host header
healthcheck_host = record.healthcheck_host
except (AddressValueError, ValueError):
# This isn't an IP, host is the value, value should be None
healthcheck_host = value
value = None
healthcheck_path = record.healthcheck_path
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port
healthcheck_latency = self._healthcheck_measure_latency(record)
# we're looking for a healthcheck with the current version & our record
# type, we'll ignore anything else
expected_ref = '{}:{}:{}:'.format(self.HEALTH_CHECK_VERSION,
record._type, record.fqdn)
for id, health_check in self.health_checks.items():
if not health_check['CallerReference'].startswith(expected_ref):
# not match, ignore
continue
if self._health_check_equivilent(healthcheck_host,
healthcheck_path,
healthcheck_protocol,
healthcheck_port,
healthcheck_latency,
health_check,
value=value):
# this is the health check we're looking for
self.log.debug('get_health_check_id: found match id=%s', id)
return id
if not create:
# no existing matches and not allowed to create, return none
self.log.debug('get_health_check_id: no matches, no create')
return
# no existing matches, we need to create a new health check
config = {
'EnableSNI': healthcheck_protocol == 'HTTPS',
'FailureThreshold': 6,
'FullyQualifiedDomainName': healthcheck_host,
'MeasureLatency': healthcheck_latency,
'Port': healthcheck_port,
'RequestInterval': 10,
'ResourcePath': healthcheck_path,
'Type': healthcheck_protocol,
}
if value:
config['IPAddress'] = value
ref = '{}:{}:{}:{}'.format(self.HEALTH_CHECK_VERSION, record._type,
record.fqdn, uuid4().hex[:12])
resp = self._conn.create_health_check(CallerReference=ref,
HealthCheckConfig=config)
health_check = resp['HealthCheck']
id = health_check['Id']
# Set a Name for the benefit of the UI
name = '{}:{} - {}'.format(record.fqdn, record._type,
value or healthcheck_host)
self._conn.change_tags_for_resource(ResourceType='healthcheck',
ResourceId=id,
AddTags=[{
'Key': 'Name',
'Value': name,
}])
# Manually add it to our cache
health_check['Tags'] = {
'Name': name
}
# store the new health check so that we'll be able to find it in the
# future
self._health_checks[id] = health_check
self.log.info('get_health_check_id: created id=%s, host=%s, '
'path=%s, protocol=%s, port=%d, measure_latency=%r, '
'value=%s', id, healthcheck_host, healthcheck_path,
healthcheck_protocol, healthcheck_port,
healthcheck_latency, value)
return id
def _gc_health_checks(self, record, new):
if record._type not in ('A', 'AAAA'):
return
self.log.debug('_gc_health_checks: record=%s', record)
# Find the health checks we're using for the new route53 records
in_use = set()
for r in new:
hc_id = getattr(r, 'health_check_id', False)
if hc_id:
in_use.add(hc_id)
self.log.debug('_gc_health_checks: in_use=%s', in_use)
# Now we need to run through ALL the health checks looking for those
# that apply to this record, deleting any that do and are no longer in
# use
expected_re = re.compile(r'^\d\d\d\d:{}:{}:'
.format(record._type, record.fqdn))
# UNITL 1.0: we'll clean out the previous version of Route53 health
# checks as best as we can.
expected_legacy_host = record.fqdn[:-1]
expected_legacy = '0000:{}:'.format(record._type)
for id, health_check in self.health_checks.items():
ref = health_check['CallerReference']
if expected_re.match(ref) and id not in in_use:
# this is a health check for this record, but not one we're
# planning to use going forward
self.log.info('_gc_health_checks: deleting id=%s', id)
self._conn.delete_health_check(HealthCheckId=id)
elif ref.startswith(expected_legacy):
config = health_check['HealthCheckConfig']
if expected_legacy_host == config['FullyQualifiedDomainName']:
self.log.info('_gc_health_checks: deleting legacy id=%s',
id)
self._conn.delete_health_check(HealthCheckId=id)
def _mod_Create(self, change, zone_id, existing_rrsets):
# New is the stuff that needs to be created
new_records = self._gen_records(change.new, zone_id, creating=True)
# Now is a good time to clear out any unused health checks since we
# know what we'll be using going forward
self._gc_health_checks(change.new, new_records)
return self._gen_mods('CREATE', new_records, existing_rrsets)
def _mod_Update(self, change, zone_id, existing_rrsets):
# See comments in _Route53Record for how the set math is made to do our
# bidding here.
existing_records = self._gen_records(change.existing, zone_id,
creating=False)
new_records = self._gen_records(change.new, zone_id, creating=True)
# Now is a good time to clear out any unused health checks since we
# know what we'll be using going forward
self._gc_health_checks(change.new, new_records)
# Things in existing, but not new are deletes
deletes = existing_records - new_records
# Things in new, but not existing are the creates
creates = new_records - existing_records
# Things in both need updating, we could optimize this and filter out
# things that haven't actually changed, but that's for another day.
# We can't use set math here b/c we won't be able to control which of
# the two objects will be in the result and we need to ensure it's the
# new one.
upserts = set()
for new_record in new_records:
if new_record in existing_records:
upserts.add(new_record)
return self._gen_mods('DELETE', deletes, existing_rrsets) + \
self._gen_mods('CREATE', creates, existing_rrsets) + \
self._gen_mods('UPSERT', upserts, existing_rrsets)
def _mod_Delete(self, change, zone_id, existing_rrsets):
# Existing is the thing that needs to be deleted
existing_records = self._gen_records(change.existing, zone_id,
creating=False)
# Now is a good time to clear out all the health checks since we know
# we're done with them
self._gc_health_checks(change.existing, [])
return self._gen_mods('DELETE', existing_records, existing_rrsets)
def _extra_changes_update_needed(self, record, rrset):
healthcheck_host = record.healthcheck_host
healthcheck_path = record.healthcheck_path
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port
healthcheck_latency = self._healthcheck_measure_latency(record)
try:
health_check_id = rrset['HealthCheckId']
health_check = self.health_checks[health_check_id]
caller_ref = health_check['CallerReference']
if caller_ref.startswith(self.HEALTH_CHECK_VERSION):
if self._health_check_equivilent(healthcheck_host,
healthcheck_path,
healthcheck_protocol,
healthcheck_port,
healthcheck_latency,
health_check):
# it has the right health check
return False
except (IndexError, KeyError):
# no health check id or one that isn't the right version
pass
# no good, doesn't have the right health check, needs an update
self.log.info('_extra_changes_update_needed: health-check caused '
'update of %s:%s', record.fqdn, record._type)
return True
def _extra_changes_geo_needs_update(self, zone_id, record):
# OK this is a record we don't have change for that does have geo
# information. We need to look and see if it needs to be updated b/c of
# a health check version bump or other mismatch
self.log.debug('_extra_changes_geo_needs_update: inspecting=%s, %s',
record.fqdn, record._type)
fqdn = record.fqdn
# loop through all the r53 rrsets
for rrset in self._load_records(zone_id):
if fqdn == rrset['Name'] and record._type == rrset['Type'] and \
rrset.get('GeoLocation', {}).get('CountryCode', False) != '*' \
and self._extra_changes_update_needed(record, rrset):
# no good, doesn't have the right health check, needs an update
self.log.info('_extra_changes_geo_needs_update: health-check '
'caused update of %s:%s', record.fqdn,
record._type)
return True
return False
def _extra_changes_dynamic_needs_update(self, zone_id, record):
# OK this is a record we don't have change for that does have dynamic
# information. We need to look and see if it needs to be updated b/c of
# a health check version bump or other mismatch
self.log.debug('_extra_changes_dynamic_needs_update: inspecting=%s, '
'%s', record.fqdn, record._type)
fqdn = record.fqdn
# loop through all the r53 rrsets
for rrset in self._load_records(zone_id):
name = rrset['Name']
if record._type == rrset['Type'] and name.endswith(fqdn) and \
name.startswith('_octodns-') and '-value.' in name and \
'-default-' not in name and \
self._extra_changes_update_needed(record, rrset):
# no good, doesn't have the right health check, needs an update
self.log.info('_extra_changes_dynamic_needs_update: '
'health-check caused update of %s:%s',
record.fqdn, record._type)
return True
return False
def _extra_changes(self, desired, changes, **kwargs):
self.log.debug('_extra_changes: desired=%s', desired.name)
zone_id = self._get_zone_id(desired.name)
if not zone_id:
# zone doesn't exist so no extras to worry about
return []
# we'll skip extra checking for anything we're already going to change
changed = set([c.record for c in changes])
# ok, now it's time for the reason we're here, we need to go over all
# the desired records
extras = []
for record in desired.records:
if record in changed:
# already have a change for it, skipping
continue
if getattr(record, 'geo', False):
if self._extra_changes_geo_needs_update(zone_id, record):
extras.append(Update(record, record))
elif getattr(record, 'dynamic', False):
if self._extra_changes_dynamic_needs_update(zone_id, record):
extras.append(Update(record, record))
return extras
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.info('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
batch = []
batch_rs_count = 0
zone_id = self._get_zone_id(desired.name, True)
existing_rrsets = self._load_records(zone_id)
for c in changes:
# Generate the mods for this change
mod_type = getattr(self, '_mod_{}'.format(c.__class__.__name__))
mods = mod_type(c, zone_id, existing_rrsets)
# Order our mods to make sure targets exist before alises point to
# them and we CRUD in the desired order
mods.sort(key=_mod_keyer)
mods_rs_count = sum(
[len(m['ResourceRecordSet'].get('ResourceRecords', ''))
for m in mods]
)
if mods_rs_count > self.max_changes:
# a single mod resulted in too many ResourceRecords changes
raise Exception('Too many modifications: {}'
.format(mods_rs_count))
# r53 limits changesets to 1000 entries
if (batch_rs_count + mods_rs_count) < self.max_changes:
# append to the batch
batch += mods
batch_rs_count += mods_rs_count
else:
self.log.info('_apply: sending change request for batch of '
'%d mods, %d ResourceRecords', len(batch),
batch_rs_count)
# send the batch
self._really_apply(batch, zone_id)
# start a new batch with the leftovers
batch = mods
batch_rs_count = mods_rs_count
# the way the above process works there will always be something left
# over in batch to process. In the case that we submit a batch up there
# it was always the case that there was something pushing us over
# max_changes and thus left over to submit.
self.log.info('_apply: sending change request for batch of %d mods,'
' %d ResourceRecords', len(batch),
batch_rs_count)
self._really_apply(batch, zone_id)
def _really_apply(self, batch, zone_id):
uuid = uuid4().hex
batch = {
'Comment': 'Change: {}'.format(uuid),
'Changes': batch,
}
self.log.debug('_really_apply: sending change request, comment=%s',
batch['Comment'])
resp = self._conn.change_resource_record_sets(
HostedZoneId=zone_id, ChangeBatch=batch)
self.log.debug('_really_apply: change info=%s', resp['ChangeInfo'])
|
github/octodns | octodns/provider/ovh.py | OvhProvider._is_valid_dkim | python | def _is_valid_dkim(self, value):
validator_dict = {'h': lambda val: val in ['sha1', 'sha256'],
's': lambda val: val in ['*', 'email'],
't': lambda val: val in ['y', 's'],
'v': lambda val: val == 'DKIM1',
'k': lambda val: val == 'rsa',
'n': lambda _: True,
'g': lambda _: True}
splitted = value.split('\\;')
found_key = False
for splitted_value in splitted:
sub_split = map(lambda x: x.strip(), splitted_value.split("=", 1))
if len(sub_split) < 2:
return False
key, value = sub_split[0], sub_split[1]
if key == "p":
is_valid_key = self._is_valid_dkim_key(value)
if not is_valid_key:
return False
found_key = True
else:
is_valid_key = validator_dict.get(key, lambda _: False)(value)
if not is_valid_key:
return False
return found_key | Check if value is a valid DKIM | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/ovh.py#L315-L341 | [
"def _is_valid_dkim_key(key):\n try:\n base64.decodestring(key)\n except binascii.Error:\n return False\n return True\n"
] | class OvhProvider(BaseProvider):
"""
OVH provider using API v6
ovh:
class: octodns.provider.ovh.OvhProvider
# OVH api v6 endpoint
endpoint: ovh-eu
# API application key
application_key: 1234
# API application secret
application_secret: 1234
# API consumer key
consumer_key: 1234
"""
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
ZONE_NOT_FOUND_MESSAGE = 'This service does not exist'
# This variable is also used in populate method to filter which OVH record
# types are supported by octodns
SUPPORTS = set(('A', 'AAAA', 'CNAME', 'DKIM', 'MX', 'NAPTR', 'NS', 'PTR',
'SPF', 'SRV', 'SSHFP', 'TXT'))
def __init__(self, id, endpoint, application_key, application_secret,
             consumer_key, *args, **kwargs):
    '''
    :param endpoint: OVH API v6 endpoint name, e.g. 'ovh-eu'
    :param application_key: API application key
    :param application_secret: API application secret
    :param consumer_key: API consumer key
    '''
    self.log = logging.getLogger('OvhProvider[{}]'.format(id))
    # never log the application secret
    self.log.debug('__init__: id=%s, endpoint=%s, application_key=%s, '
                   'application_secret=***, consumer_key=%s', id, endpoint,
                   application_key, consumer_key)
    super(OvhProvider, self).__init__(id, *args, **kwargs)
    self._client = ovh.Client(
        endpoint=endpoint,
        application_key=application_key,
        application_secret=application_secret,
        consumer_key=consumer_key,
    )
def populate(self, zone, target=False, lenient=False):
    '''
    Load the zone's records from the OVH API into `zone`.

    Returns True when the zone exists at OVH, False otherwise.
    '''
    self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
                   target, lenient)
    # OVH zone names have no trailing dot
    zone_name = zone.name[:-1]
    try:
        records = self.get_records(zone_name=zone_name)
        exists = True
    except ResourceNotFoundError as e:
        # anything other than "zone missing" is unexpected — propagate
        if e.message != self.ZONE_NOT_FOUND_MESSAGE:
            raise
        exists = False
        records = []
    # group raw records by (subdomain, fieldType)
    values = defaultdict(lambda: defaultdict(list))
    for record in records:
        values[record['subDomain']][record['fieldType']].append(record)
    before = len(zone.records)
    for name, types in values.items():
        for _type, records in types.items():
            if _type not in self.SUPPORTS:
                self.log.warning('Not managed record of type %s, skip',
                                 _type)
                continue
            data_for = getattr(self, '_data_for_{}'.format(_type))
            record = Record.new(zone, name, data_for(_type, records),
                                source=self, lenient=lenient)
            zone.add_record(record, lenient=lenient)
    self.log.info('populate: found %s records, exists=%s',
                  len(zone.records) - before, exists)
    return exists
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
zone_name = desired.name[:-1]
self.log.info('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name).lower())(zone_name,
change)
# We need to refresh the zone to really apply the changes
self._client.post('/domain/zone/{}/refresh'.format(zone_name))
def _apply_create(self, zone_name, change):
new = change.new
params_for = getattr(self, '_params_for_{}'.format(new._type))
for params in params_for(new):
self.create_record(zone_name, params)
def _apply_update(self, zone_name, change):
self._apply_delete(zone_name, change)
self._apply_create(zone_name, change)
def _apply_delete(self, zone_name, change):
existing = change.existing
record_type = existing._type
if record_type == "TXT":
if self._is_valid_dkim(existing.values[0]):
record_type = 'DKIM'
self.delete_records(zone_name, record_type, existing.name)
@staticmethod
def _data_for_multiple(_type, records):
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': [record['target'] for record in records]
}
@staticmethod
def _data_for_single(_type, records):
record = records[0]
return {
'ttl': record['ttl'],
'type': _type,
'value': record['target']
}
@staticmethod
def _data_for_MX(_type, records):
values = []
for record in records:
preference, exchange = record['target'].split(' ', 1)
values.append({
'preference': preference,
'exchange': exchange,
})
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values,
}
@staticmethod
def _data_for_NAPTR(_type, records):
values = []
for record in records:
order, preference, flags, service, regexp, replacement = record[
'target'].split(' ', 5)
values.append({
'flags': flags[1:-1],
'order': order,
'preference': preference,
'regexp': regexp[1:-1],
'replacement': replacement,
'service': service[1:-1],
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SRV(_type, records):
values = []
for record in records:
priority, weight, port, target = record['target'].split(' ', 3)
values.append({
'port': port,
'priority': priority,
'target': '{}.'.format(target),
'weight': weight
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SSHFP(_type, records):
values = []
for record in records:
algorithm, fingerprint_type, fingerprint = record['target'].split(
' ', 2)
values.append({
'algorithm': algorithm,
'fingerprint': fingerprint,
'fingerprint_type': fingerprint_type
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_DKIM(_type, records):
return {
'ttl': records[0]['ttl'],
'type': "TXT",
'values': [record['target'].replace(';', '\\;')
for record in records]
}
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
_data_for_NS = _data_for_multiple
_data_for_TXT = _data_for_multiple
_data_for_SPF = _data_for_multiple
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
@staticmethod
def _params_for_multiple(record):
for value in record.values:
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type,
}
@staticmethod
def _params_for_single(record):
yield {
'target': record.value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_MX(record):
for value in record.values:
yield {
'target': '%d %s' % (value.preference, value.exchange),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_NAPTR(record):
for value in record.values:
content = '{} {} "{}" "{}" "{}" {}' \
.format(value.order, value.preference, value.flags,
value.service, value.regexp, value.replacement)
yield {
'target': content,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SRV(record):
for value in record.values:
yield {
'target': '{} {} {} {}'.format(value.priority,
value.weight,
value.port,
value.target),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SSHFP(record):
for value in record.values:
yield {
'target': '{} {} {}'.format(value.algorithm,
value.fingerprint_type,
value.fingerprint),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
def _params_for_TXT(self, record):
for value in record.values:
field_type = 'TXT'
if self._is_valid_dkim(value):
field_type = 'DKIM'
value = value.replace("\\;", ";")
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': field_type
}
_params_for_A = _params_for_multiple
_params_for_AAAA = _params_for_multiple
_params_for_NS = _params_for_multiple
_params_for_SPF = _params_for_multiple
_params_for_CNAME = _params_for_single
_params_for_PTR = _params_for_single
@staticmethod
def _is_valid_dkim_key(key):
try:
base64.decodestring(key)
except binascii.Error:
return False
return True
def get_records(self, zone_name):
"""
List all records of a DNS zone
:param zone_name: Name of zone
:return: list of id's records
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name))
return [self.get_record(zone_name, record_id) for record_id in records]
def get_record(self, zone_name, record_id):
"""
Get record with given id
:param zone_name: Name of the zone
:param record_id: Id of the record
:return: Value of the record
"""
return self._client.get(
'/domain/zone/{}/record/{}'.format(zone_name, record_id))
def delete_records(self, zone_name, record_type, subdomain):
"""
Delete record from have fieldType=type and subDomain=subdomain
:param zone_name: Name of the zone
:param record_type: fieldType
:param subdomain: subDomain
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name),
fieldType=record_type, subDomain=subdomain)
for record in records:
self.delete_record(zone_name, record)
def delete_record(self, zone_name, record_id):
"""
Delete record with a given id
:param zone_name: Name of the zone
:param record_id: Id of the record
"""
self.log.debug('Delete record: zone: %s, id %s', zone_name,
record_id)
self._client.delete(
'/domain/zone/{}/record/{}'.format(zone_name, record_id))
def create_record(self, zone_name, params):
"""
Create a record
:param zone_name: Name of the zone
:param params: {'fieldType': 'A', 'ttl': 60, 'subDomain': 'www',
'target': '1.2.3.4'
"""
self.log.debug('Create record: zone: %s, id %s', zone_name,
params)
return self._client.post('/domain/zone/{}/record'.format(zone_name),
**params)
|
github/octodns | octodns/provider/ovh.py | OvhProvider.get_records | python | def get_records(self, zone_name):
records = self._client.get('/domain/zone/{}/record'.format(zone_name))
return [self.get_record(zone_name, record_id) for record_id in records] | List all records of a DNS zone
:param zone_name: Name of zone
:return: list of id's records | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/ovh.py#L351-L358 | null | class OvhProvider(BaseProvider):
"""
OVH provider using API v6
ovh:
class: octodns.provider.ovh.OvhProvider
# OVH api v6 endpoint
endpoint: ovh-eu
# API application key
application_key: 1234
# API application secret
application_secret: 1234
# API consumer key
consumer_key: 1234
"""
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
ZONE_NOT_FOUND_MESSAGE = 'This service does not exist'
# This variable is also used in populate method to filter which OVH record
# types are supported by octodns
SUPPORTS = set(('A', 'AAAA', 'CNAME', 'DKIM', 'MX', 'NAPTR', 'NS', 'PTR',
'SPF', 'SRV', 'SSHFP', 'TXT'))
def __init__(self, id, endpoint, application_key, application_secret,
consumer_key, *args, **kwargs):
self.log = logging.getLogger('OvhProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, endpoint=%s, application_key=%s, '
'application_secret=***, consumer_key=%s', id, endpoint,
application_key, consumer_key)
super(OvhProvider, self).__init__(id, *args, **kwargs)
self._client = ovh.Client(
endpoint=endpoint,
application_key=application_key,
application_secret=application_secret,
consumer_key=consumer_key,
)
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
zone_name = zone.name[:-1]
try:
records = self.get_records(zone_name=zone_name)
exists = True
except ResourceNotFoundError as e:
if e.message != self.ZONE_NOT_FOUND_MESSAGE:
raise
exists = False
records = []
values = defaultdict(lambda: defaultdict(list))
for record in records:
values[record['subDomain']][record['fieldType']].append(record)
before = len(zone.records)
for name, types in values.items():
for _type, records in types.items():
if _type not in self.SUPPORTS:
self.log.warning('Not managed record of type %s, skip',
_type)
continue
data_for = getattr(self, '_data_for_{}'.format(_type))
record = Record.new(zone, name, data_for(_type, records),
source=self, lenient=lenient)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
zone_name = desired.name[:-1]
self.log.info('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name).lower())(zone_name,
change)
# We need to refresh the zone to really apply the changes
self._client.post('/domain/zone/{}/refresh'.format(zone_name))
def _apply_create(self, zone_name, change):
new = change.new
params_for = getattr(self, '_params_for_{}'.format(new._type))
for params in params_for(new):
self.create_record(zone_name, params)
def _apply_update(self, zone_name, change):
self._apply_delete(zone_name, change)
self._apply_create(zone_name, change)
def _apply_delete(self, zone_name, change):
existing = change.existing
record_type = existing._type
if record_type == "TXT":
if self._is_valid_dkim(existing.values[0]):
record_type = 'DKIM'
self.delete_records(zone_name, record_type, existing.name)
@staticmethod
def _data_for_multiple(_type, records):
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': [record['target'] for record in records]
}
@staticmethod
def _data_for_single(_type, records):
record = records[0]
return {
'ttl': record['ttl'],
'type': _type,
'value': record['target']
}
@staticmethod
def _data_for_MX(_type, records):
values = []
for record in records:
preference, exchange = record['target'].split(' ', 1)
values.append({
'preference': preference,
'exchange': exchange,
})
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values,
}
@staticmethod
def _data_for_NAPTR(_type, records):
values = []
for record in records:
order, preference, flags, service, regexp, replacement = record[
'target'].split(' ', 5)
values.append({
'flags': flags[1:-1],
'order': order,
'preference': preference,
'regexp': regexp[1:-1],
'replacement': replacement,
'service': service[1:-1],
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SRV(_type, records):
values = []
for record in records:
priority, weight, port, target = record['target'].split(' ', 3)
values.append({
'port': port,
'priority': priority,
'target': '{}.'.format(target),
'weight': weight
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SSHFP(_type, records):
values = []
for record in records:
algorithm, fingerprint_type, fingerprint = record['target'].split(
' ', 2)
values.append({
'algorithm': algorithm,
'fingerprint': fingerprint,
'fingerprint_type': fingerprint_type
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_DKIM(_type, records):
return {
'ttl': records[0]['ttl'],
'type': "TXT",
'values': [record['target'].replace(';', '\\;')
for record in records]
}
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
_data_for_NS = _data_for_multiple
_data_for_TXT = _data_for_multiple
_data_for_SPF = _data_for_multiple
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
@staticmethod
def _params_for_multiple(record):
for value in record.values:
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type,
}
@staticmethod
def _params_for_single(record):
yield {
'target': record.value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_MX(record):
for value in record.values:
yield {
'target': '%d %s' % (value.preference, value.exchange),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_NAPTR(record):
for value in record.values:
content = '{} {} "{}" "{}" "{}" {}' \
.format(value.order, value.preference, value.flags,
value.service, value.regexp, value.replacement)
yield {
'target': content,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SRV(record):
for value in record.values:
yield {
'target': '{} {} {} {}'.format(value.priority,
value.weight,
value.port,
value.target),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SSHFP(record):
for value in record.values:
yield {
'target': '{} {} {}'.format(value.algorithm,
value.fingerprint_type,
value.fingerprint),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
def _params_for_TXT(self, record):
for value in record.values:
field_type = 'TXT'
if self._is_valid_dkim(value):
field_type = 'DKIM'
value = value.replace("\\;", ";")
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': field_type
}
_params_for_A = _params_for_multiple
_params_for_AAAA = _params_for_multiple
_params_for_NS = _params_for_multiple
_params_for_SPF = _params_for_multiple
_params_for_CNAME = _params_for_single
_params_for_PTR = _params_for_single
def _is_valid_dkim(self, value):
"""Check if value is a valid DKIM"""
validator_dict = {'h': lambda val: val in ['sha1', 'sha256'],
's': lambda val: val in ['*', 'email'],
't': lambda val: val in ['y', 's'],
'v': lambda val: val == 'DKIM1',
'k': lambda val: val == 'rsa',
'n': lambda _: True,
'g': lambda _: True}
splitted = value.split('\\;')
found_key = False
for splitted_value in splitted:
sub_split = map(lambda x: x.strip(), splitted_value.split("=", 1))
if len(sub_split) < 2:
return False
key, value = sub_split[0], sub_split[1]
if key == "p":
is_valid_key = self._is_valid_dkim_key(value)
if not is_valid_key:
return False
found_key = True
else:
is_valid_key = validator_dict.get(key, lambda _: False)(value)
if not is_valid_key:
return False
return found_key
@staticmethod
def _is_valid_dkim_key(key):
try:
base64.decodestring(key)
except binascii.Error:
return False
return True
def get_record(self, zone_name, record_id):
"""
Get record with given id
:param zone_name: Name of the zone
:param record_id: Id of the record
:return: Value of the record
"""
return self._client.get(
'/domain/zone/{}/record/{}'.format(zone_name, record_id))
def delete_records(self, zone_name, record_type, subdomain):
"""
Delete record from have fieldType=type and subDomain=subdomain
:param zone_name: Name of the zone
:param record_type: fieldType
:param subdomain: subDomain
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name),
fieldType=record_type, subDomain=subdomain)
for record in records:
self.delete_record(zone_name, record)
def delete_record(self, zone_name, record_id):
"""
Delete record with a given id
:param zone_name: Name of the zone
:param record_id: Id of the record
"""
self.log.debug('Delete record: zone: %s, id %s', zone_name,
record_id)
self._client.delete(
'/domain/zone/{}/record/{}'.format(zone_name, record_id))
def create_record(self, zone_name, params):
"""
Create a record
:param zone_name: Name of the zone
:param params: {'fieldType': 'A', 'ttl': 60, 'subDomain': 'www',
'target': '1.2.3.4'
"""
self.log.debug('Create record: zone: %s, id %s', zone_name,
params)
return self._client.post('/domain/zone/{}/record'.format(zone_name),
**params)
|
github/octodns | octodns/provider/ovh.py | OvhProvider.get_record | python | def get_record(self, zone_name, record_id):
return self._client.get(
'/domain/zone/{}/record/{}'.format(zone_name, record_id)) | Get record with given id
:param zone_name: Name of the zone
:param record_id: Id of the record
:return: Value of the record | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/ovh.py#L360-L368 | null | class OvhProvider(BaseProvider):
"""
OVH provider using API v6
ovh:
class: octodns.provider.ovh.OvhProvider
# OVH api v6 endpoint
endpoint: ovh-eu
# API application key
application_key: 1234
# API application secret
application_secret: 1234
# API consumer key
consumer_key: 1234
"""
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
ZONE_NOT_FOUND_MESSAGE = 'This service does not exist'
# This variable is also used in populate method to filter which OVH record
# types are supported by octodns
SUPPORTS = set(('A', 'AAAA', 'CNAME', 'DKIM', 'MX', 'NAPTR', 'NS', 'PTR',
'SPF', 'SRV', 'SSHFP', 'TXT'))
def __init__(self, id, endpoint, application_key, application_secret,
consumer_key, *args, **kwargs):
self.log = logging.getLogger('OvhProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, endpoint=%s, application_key=%s, '
'application_secret=***, consumer_key=%s', id, endpoint,
application_key, consumer_key)
super(OvhProvider, self).__init__(id, *args, **kwargs)
self._client = ovh.Client(
endpoint=endpoint,
application_key=application_key,
application_secret=application_secret,
consumer_key=consumer_key,
)
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
zone_name = zone.name[:-1]
try:
records = self.get_records(zone_name=zone_name)
exists = True
except ResourceNotFoundError as e:
if e.message != self.ZONE_NOT_FOUND_MESSAGE:
raise
exists = False
records = []
values = defaultdict(lambda: defaultdict(list))
for record in records:
values[record['subDomain']][record['fieldType']].append(record)
before = len(zone.records)
for name, types in values.items():
for _type, records in types.items():
if _type not in self.SUPPORTS:
self.log.warning('Not managed record of type %s, skip',
_type)
continue
data_for = getattr(self, '_data_for_{}'.format(_type))
record = Record.new(zone, name, data_for(_type, records),
source=self, lenient=lenient)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
zone_name = desired.name[:-1]
self.log.info('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name).lower())(zone_name,
change)
# We need to refresh the zone to really apply the changes
self._client.post('/domain/zone/{}/refresh'.format(zone_name))
def _apply_create(self, zone_name, change):
new = change.new
params_for = getattr(self, '_params_for_{}'.format(new._type))
for params in params_for(new):
self.create_record(zone_name, params)
def _apply_update(self, zone_name, change):
self._apply_delete(zone_name, change)
self._apply_create(zone_name, change)
def _apply_delete(self, zone_name, change):
existing = change.existing
record_type = existing._type
if record_type == "TXT":
if self._is_valid_dkim(existing.values[0]):
record_type = 'DKIM'
self.delete_records(zone_name, record_type, existing.name)
@staticmethod
def _data_for_multiple(_type, records):
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': [record['target'] for record in records]
}
@staticmethod
def _data_for_single(_type, records):
record = records[0]
return {
'ttl': record['ttl'],
'type': _type,
'value': record['target']
}
@staticmethod
def _data_for_MX(_type, records):
values = []
for record in records:
preference, exchange = record['target'].split(' ', 1)
values.append({
'preference': preference,
'exchange': exchange,
})
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values,
}
@staticmethod
def _data_for_NAPTR(_type, records):
values = []
for record in records:
order, preference, flags, service, regexp, replacement = record[
'target'].split(' ', 5)
values.append({
'flags': flags[1:-1],
'order': order,
'preference': preference,
'regexp': regexp[1:-1],
'replacement': replacement,
'service': service[1:-1],
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SRV(_type, records):
values = []
for record in records:
priority, weight, port, target = record['target'].split(' ', 3)
values.append({
'port': port,
'priority': priority,
'target': '{}.'.format(target),
'weight': weight
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SSHFP(_type, records):
values = []
for record in records:
algorithm, fingerprint_type, fingerprint = record['target'].split(
' ', 2)
values.append({
'algorithm': algorithm,
'fingerprint': fingerprint,
'fingerprint_type': fingerprint_type
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_DKIM(_type, records):
return {
'ttl': records[0]['ttl'],
'type': "TXT",
'values': [record['target'].replace(';', '\\;')
for record in records]
}
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
_data_for_NS = _data_for_multiple
_data_for_TXT = _data_for_multiple
_data_for_SPF = _data_for_multiple
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
@staticmethod
def _params_for_multiple(record):
for value in record.values:
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type,
}
@staticmethod
def _params_for_single(record):
yield {
'target': record.value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_MX(record):
for value in record.values:
yield {
'target': '%d %s' % (value.preference, value.exchange),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_NAPTR(record):
for value in record.values:
content = '{} {} "{}" "{}" "{}" {}' \
.format(value.order, value.preference, value.flags,
value.service, value.regexp, value.replacement)
yield {
'target': content,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SRV(record):
for value in record.values:
yield {
'target': '{} {} {} {}'.format(value.priority,
value.weight,
value.port,
value.target),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SSHFP(record):
for value in record.values:
yield {
'target': '{} {} {}'.format(value.algorithm,
value.fingerprint_type,
value.fingerprint),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
def _params_for_TXT(self, record):
for value in record.values:
field_type = 'TXT'
if self._is_valid_dkim(value):
field_type = 'DKIM'
value = value.replace("\\;", ";")
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': field_type
}
_params_for_A = _params_for_multiple
_params_for_AAAA = _params_for_multiple
_params_for_NS = _params_for_multiple
_params_for_SPF = _params_for_multiple
_params_for_CNAME = _params_for_single
_params_for_PTR = _params_for_single
def _is_valid_dkim(self, value):
"""Check if value is a valid DKIM"""
validator_dict = {'h': lambda val: val in ['sha1', 'sha256'],
's': lambda val: val in ['*', 'email'],
't': lambda val: val in ['y', 's'],
'v': lambda val: val == 'DKIM1',
'k': lambda val: val == 'rsa',
'n': lambda _: True,
'g': lambda _: True}
splitted = value.split('\\;')
found_key = False
for splitted_value in splitted:
sub_split = map(lambda x: x.strip(), splitted_value.split("=", 1))
if len(sub_split) < 2:
return False
key, value = sub_split[0], sub_split[1]
if key == "p":
is_valid_key = self._is_valid_dkim_key(value)
if not is_valid_key:
return False
found_key = True
else:
is_valid_key = validator_dict.get(key, lambda _: False)(value)
if not is_valid_key:
return False
return found_key
@staticmethod
def _is_valid_dkim_key(key):
try:
base64.decodestring(key)
except binascii.Error:
return False
return True
def get_records(self, zone_name):
"""
List all records of a DNS zone
:param zone_name: Name of zone
:return: list of id's records
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name))
return [self.get_record(zone_name, record_id) for record_id in records]
def delete_records(self, zone_name, record_type, subdomain):
"""
Delete record from have fieldType=type and subDomain=subdomain
:param zone_name: Name of the zone
:param record_type: fieldType
:param subdomain: subDomain
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name),
fieldType=record_type, subDomain=subdomain)
for record in records:
self.delete_record(zone_name, record)
def delete_record(self, zone_name, record_id):
"""
Delete record with a given id
:param zone_name: Name of the zone
:param record_id: Id of the record
"""
self.log.debug('Delete record: zone: %s, id %s', zone_name,
record_id)
self._client.delete(
'/domain/zone/{}/record/{}'.format(zone_name, record_id))
def create_record(self, zone_name, params):
"""
Create a record
:param zone_name: Name of the zone
:param params: {'fieldType': 'A', 'ttl': 60, 'subDomain': 'www',
'target': '1.2.3.4'
"""
self.log.debug('Create record: zone: %s, id %s', zone_name,
params)
return self._client.post('/domain/zone/{}/record'.format(zone_name),
**params)
|
github/octodns | octodns/provider/ovh.py | OvhProvider.delete_records | python | def delete_records(self, zone_name, record_type, subdomain):
records = self._client.get('/domain/zone/{}/record'.format(zone_name),
fieldType=record_type, subDomain=subdomain)
for record in records:
self.delete_record(zone_name, record) | Delete record from have fieldType=type and subDomain=subdomain
:param zone_name: Name of the zone
:param record_type: fieldType
:param subdomain: subDomain | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/ovh.py#L370-L380 | [
"def delete_record(self, zone_name, record_id):\n \"\"\"\n Delete record with a given id\n :param zone_name: Name of the zone\n :param record_id: Id of the record\n \"\"\"\n self.log.debug('Delete record: zone: %s, id %s', zone_name,\n record_id)\n self._client.delete(\n ... | class OvhProvider(BaseProvider):
"""
OVH provider using API v6
ovh:
class: octodns.provider.ovh.OvhProvider
# OVH api v6 endpoint
endpoint: ovh-eu
# API application key
application_key: 1234
# API application secret
application_secret: 1234
# API consumer key
consumer_key: 1234
"""
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
ZONE_NOT_FOUND_MESSAGE = 'This service does not exist'
# This variable is also used in populate method to filter which OVH record
# types are supported by octodns
SUPPORTS = set(('A', 'AAAA', 'CNAME', 'DKIM', 'MX', 'NAPTR', 'NS', 'PTR',
'SPF', 'SRV', 'SSHFP', 'TXT'))
def __init__(self, id, endpoint, application_key, application_secret,
consumer_key, *args, **kwargs):
self.log = logging.getLogger('OvhProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, endpoint=%s, application_key=%s, '
'application_secret=***, consumer_key=%s', id, endpoint,
application_key, consumer_key)
super(OvhProvider, self).__init__(id, *args, **kwargs)
self._client = ovh.Client(
endpoint=endpoint,
application_key=application_key,
application_secret=application_secret,
consumer_key=consumer_key,
)
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
zone_name = zone.name[:-1]
try:
records = self.get_records(zone_name=zone_name)
exists = True
except ResourceNotFoundError as e:
if e.message != self.ZONE_NOT_FOUND_MESSAGE:
raise
exists = False
records = []
values = defaultdict(lambda: defaultdict(list))
for record in records:
values[record['subDomain']][record['fieldType']].append(record)
before = len(zone.records)
for name, types in values.items():
for _type, records in types.items():
if _type not in self.SUPPORTS:
self.log.warning('Not managed record of type %s, skip',
_type)
continue
data_for = getattr(self, '_data_for_{}'.format(_type))
record = Record.new(zone, name, data_for(_type, records),
source=self, lenient=lenient)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
zone_name = desired.name[:-1]
self.log.info('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name).lower())(zone_name,
change)
# We need to refresh the zone to really apply the changes
self._client.post('/domain/zone/{}/refresh'.format(zone_name))
def _apply_create(self, zone_name, change):
new = change.new
params_for = getattr(self, '_params_for_{}'.format(new._type))
for params in params_for(new):
self.create_record(zone_name, params)
def _apply_update(self, zone_name, change):
self._apply_delete(zone_name, change)
self._apply_create(zone_name, change)
def _apply_delete(self, zone_name, change):
existing = change.existing
record_type = existing._type
if record_type == "TXT":
if self._is_valid_dkim(existing.values[0]):
record_type = 'DKIM'
self.delete_records(zone_name, record_type, existing.name)
@staticmethod
def _data_for_multiple(_type, records):
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': [record['target'] for record in records]
}
@staticmethod
def _data_for_single(_type, records):
record = records[0]
return {
'ttl': record['ttl'],
'type': _type,
'value': record['target']
}
@staticmethod
def _data_for_MX(_type, records):
values = []
for record in records:
preference, exchange = record['target'].split(' ', 1)
values.append({
'preference': preference,
'exchange': exchange,
})
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values,
}
@staticmethod
def _data_for_NAPTR(_type, records):
values = []
for record in records:
order, preference, flags, service, regexp, replacement = record[
'target'].split(' ', 5)
values.append({
'flags': flags[1:-1],
'order': order,
'preference': preference,
'regexp': regexp[1:-1],
'replacement': replacement,
'service': service[1:-1],
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SRV(_type, records):
values = []
for record in records:
priority, weight, port, target = record['target'].split(' ', 3)
values.append({
'port': port,
'priority': priority,
'target': '{}.'.format(target),
'weight': weight
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SSHFP(_type, records):
values = []
for record in records:
algorithm, fingerprint_type, fingerprint = record['target'].split(
' ', 2)
values.append({
'algorithm': algorithm,
'fingerprint': fingerprint,
'fingerprint_type': fingerprint_type
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_DKIM(_type, records):
return {
'ttl': records[0]['ttl'],
'type': "TXT",
'values': [record['target'].replace(';', '\\;')
for record in records]
}
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
_data_for_NS = _data_for_multiple
_data_for_TXT = _data_for_multiple
_data_for_SPF = _data_for_multiple
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
@staticmethod
def _params_for_multiple(record):
for value in record.values:
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type,
}
@staticmethod
def _params_for_single(record):
yield {
'target': record.value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_MX(record):
for value in record.values:
yield {
'target': '%d %s' % (value.preference, value.exchange),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_NAPTR(record):
for value in record.values:
content = '{} {} "{}" "{}" "{}" {}' \
.format(value.order, value.preference, value.flags,
value.service, value.regexp, value.replacement)
yield {
'target': content,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SRV(record):
for value in record.values:
yield {
'target': '{} {} {} {}'.format(value.priority,
value.weight,
value.port,
value.target),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SSHFP(record):
for value in record.values:
yield {
'target': '{} {} {}'.format(value.algorithm,
value.fingerprint_type,
value.fingerprint),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
def _params_for_TXT(self, record):
for value in record.values:
field_type = 'TXT'
if self._is_valid_dkim(value):
field_type = 'DKIM'
value = value.replace("\\;", ";")
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': field_type
}
_params_for_A = _params_for_multiple
_params_for_AAAA = _params_for_multiple
_params_for_NS = _params_for_multiple
_params_for_SPF = _params_for_multiple
_params_for_CNAME = _params_for_single
_params_for_PTR = _params_for_single
def _is_valid_dkim(self, value):
"""Check if value is a valid DKIM"""
validator_dict = {'h': lambda val: val in ['sha1', 'sha256'],
's': lambda val: val in ['*', 'email'],
't': lambda val: val in ['y', 's'],
'v': lambda val: val == 'DKIM1',
'k': lambda val: val == 'rsa',
'n': lambda _: True,
'g': lambda _: True}
splitted = value.split('\\;')
found_key = False
for splitted_value in splitted:
sub_split = map(lambda x: x.strip(), splitted_value.split("=", 1))
if len(sub_split) < 2:
return False
key, value = sub_split[0], sub_split[1]
if key == "p":
is_valid_key = self._is_valid_dkim_key(value)
if not is_valid_key:
return False
found_key = True
else:
is_valid_key = validator_dict.get(key, lambda _: False)(value)
if not is_valid_key:
return False
return found_key
@staticmethod
def _is_valid_dkim_key(key):
try:
base64.decodestring(key)
except binascii.Error:
return False
return True
def get_records(self, zone_name):
"""
List all records of a DNS zone
:param zone_name: Name of zone
:return: list of id's records
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name))
return [self.get_record(zone_name, record_id) for record_id in records]
def get_record(self, zone_name, record_id):
"""
Get record with given id
:param zone_name: Name of the zone
:param record_id: Id of the record
:return: Value of the record
"""
return self._client.get(
'/domain/zone/{}/record/{}'.format(zone_name, record_id))
def delete_record(self, zone_name, record_id):
"""
Delete record with a given id
:param zone_name: Name of the zone
:param record_id: Id of the record
"""
self.log.debug('Delete record: zone: %s, id %s', zone_name,
record_id)
self._client.delete(
'/domain/zone/{}/record/{}'.format(zone_name, record_id))
def create_record(self, zone_name, params):
"""
Create a record
:param zone_name: Name of the zone
:param params: {'fieldType': 'A', 'ttl': 60, 'subDomain': 'www',
'target': '1.2.3.4'
"""
self.log.debug('Create record: zone: %s, id %s', zone_name,
params)
return self._client.post('/domain/zone/{}/record'.format(zone_name),
**params)
|
github/octodns | octodns/provider/ovh.py | OvhProvider.delete_record | python | def delete_record(self, zone_name, record_id):
self.log.debug('Delete record: zone: %s, id %s', zone_name,
record_id)
self._client.delete(
'/domain/zone/{}/record/{}'.format(zone_name, record_id)) | Delete record with a given id
:param zone_name: Name of the zone
:param record_id: Id of the record | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/ovh.py#L382-L391 | null | class OvhProvider(BaseProvider):
"""
OVH provider using API v6
ovh:
class: octodns.provider.ovh.OvhProvider
# OVH api v6 endpoint
endpoint: ovh-eu
# API application key
application_key: 1234
# API application secret
application_secret: 1234
# API consumer key
consumer_key: 1234
"""
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
ZONE_NOT_FOUND_MESSAGE = 'This service does not exist'
# This variable is also used in populate method to filter which OVH record
# types are supported by octodns
SUPPORTS = set(('A', 'AAAA', 'CNAME', 'DKIM', 'MX', 'NAPTR', 'NS', 'PTR',
'SPF', 'SRV', 'SSHFP', 'TXT'))
def __init__(self, id, endpoint, application_key, application_secret,
consumer_key, *args, **kwargs):
self.log = logging.getLogger('OvhProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, endpoint=%s, application_key=%s, '
'application_secret=***, consumer_key=%s', id, endpoint,
application_key, consumer_key)
super(OvhProvider, self).__init__(id, *args, **kwargs)
self._client = ovh.Client(
endpoint=endpoint,
application_key=application_key,
application_secret=application_secret,
consumer_key=consumer_key,
)
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
zone_name = zone.name[:-1]
try:
records = self.get_records(zone_name=zone_name)
exists = True
except ResourceNotFoundError as e:
if e.message != self.ZONE_NOT_FOUND_MESSAGE:
raise
exists = False
records = []
values = defaultdict(lambda: defaultdict(list))
for record in records:
values[record['subDomain']][record['fieldType']].append(record)
before = len(zone.records)
for name, types in values.items():
for _type, records in types.items():
if _type not in self.SUPPORTS:
self.log.warning('Not managed record of type %s, skip',
_type)
continue
data_for = getattr(self, '_data_for_{}'.format(_type))
record = Record.new(zone, name, data_for(_type, records),
source=self, lenient=lenient)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
zone_name = desired.name[:-1]
self.log.info('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name).lower())(zone_name,
change)
# We need to refresh the zone to really apply the changes
self._client.post('/domain/zone/{}/refresh'.format(zone_name))
def _apply_create(self, zone_name, change):
new = change.new
params_for = getattr(self, '_params_for_{}'.format(new._type))
for params in params_for(new):
self.create_record(zone_name, params)
def _apply_update(self, zone_name, change):
self._apply_delete(zone_name, change)
self._apply_create(zone_name, change)
def _apply_delete(self, zone_name, change):
existing = change.existing
record_type = existing._type
if record_type == "TXT":
if self._is_valid_dkim(existing.values[0]):
record_type = 'DKIM'
self.delete_records(zone_name, record_type, existing.name)
@staticmethod
def _data_for_multiple(_type, records):
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': [record['target'] for record in records]
}
@staticmethod
def _data_for_single(_type, records):
record = records[0]
return {
'ttl': record['ttl'],
'type': _type,
'value': record['target']
}
@staticmethod
def _data_for_MX(_type, records):
values = []
for record in records:
preference, exchange = record['target'].split(' ', 1)
values.append({
'preference': preference,
'exchange': exchange,
})
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values,
}
@staticmethod
def _data_for_NAPTR(_type, records):
values = []
for record in records:
order, preference, flags, service, regexp, replacement = record[
'target'].split(' ', 5)
values.append({
'flags': flags[1:-1],
'order': order,
'preference': preference,
'regexp': regexp[1:-1],
'replacement': replacement,
'service': service[1:-1],
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SRV(_type, records):
values = []
for record in records:
priority, weight, port, target = record['target'].split(' ', 3)
values.append({
'port': port,
'priority': priority,
'target': '{}.'.format(target),
'weight': weight
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SSHFP(_type, records):
values = []
for record in records:
algorithm, fingerprint_type, fingerprint = record['target'].split(
' ', 2)
values.append({
'algorithm': algorithm,
'fingerprint': fingerprint,
'fingerprint_type': fingerprint_type
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_DKIM(_type, records):
return {
'ttl': records[0]['ttl'],
'type': "TXT",
'values': [record['target'].replace(';', '\\;')
for record in records]
}
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
_data_for_NS = _data_for_multiple
_data_for_TXT = _data_for_multiple
_data_for_SPF = _data_for_multiple
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
@staticmethod
def _params_for_multiple(record):
for value in record.values:
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type,
}
@staticmethod
def _params_for_single(record):
yield {
'target': record.value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_MX(record):
for value in record.values:
yield {
'target': '%d %s' % (value.preference, value.exchange),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_NAPTR(record):
for value in record.values:
content = '{} {} "{}" "{}" "{}" {}' \
.format(value.order, value.preference, value.flags,
value.service, value.regexp, value.replacement)
yield {
'target': content,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SRV(record):
for value in record.values:
yield {
'target': '{} {} {} {}'.format(value.priority,
value.weight,
value.port,
value.target),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SSHFP(record):
for value in record.values:
yield {
'target': '{} {} {}'.format(value.algorithm,
value.fingerprint_type,
value.fingerprint),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
def _params_for_TXT(self, record):
for value in record.values:
field_type = 'TXT'
if self._is_valid_dkim(value):
field_type = 'DKIM'
value = value.replace("\\;", ";")
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': field_type
}
_params_for_A = _params_for_multiple
_params_for_AAAA = _params_for_multiple
_params_for_NS = _params_for_multiple
_params_for_SPF = _params_for_multiple
_params_for_CNAME = _params_for_single
_params_for_PTR = _params_for_single
def _is_valid_dkim(self, value):
"""Check if value is a valid DKIM"""
validator_dict = {'h': lambda val: val in ['sha1', 'sha256'],
's': lambda val: val in ['*', 'email'],
't': lambda val: val in ['y', 's'],
'v': lambda val: val == 'DKIM1',
'k': lambda val: val == 'rsa',
'n': lambda _: True,
'g': lambda _: True}
splitted = value.split('\\;')
found_key = False
for splitted_value in splitted:
sub_split = map(lambda x: x.strip(), splitted_value.split("=", 1))
if len(sub_split) < 2:
return False
key, value = sub_split[0], sub_split[1]
if key == "p":
is_valid_key = self._is_valid_dkim_key(value)
if not is_valid_key:
return False
found_key = True
else:
is_valid_key = validator_dict.get(key, lambda _: False)(value)
if not is_valid_key:
return False
return found_key
@staticmethod
def _is_valid_dkim_key(key):
try:
base64.decodestring(key)
except binascii.Error:
return False
return True
def get_records(self, zone_name):
"""
List all records of a DNS zone
:param zone_name: Name of zone
:return: list of id's records
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name))
return [self.get_record(zone_name, record_id) for record_id in records]
def get_record(self, zone_name, record_id):
"""
Get record with given id
:param zone_name: Name of the zone
:param record_id: Id of the record
:return: Value of the record
"""
return self._client.get(
'/domain/zone/{}/record/{}'.format(zone_name, record_id))
def delete_records(self, zone_name, record_type, subdomain):
"""
Delete record from have fieldType=type and subDomain=subdomain
:param zone_name: Name of the zone
:param record_type: fieldType
:param subdomain: subDomain
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name),
fieldType=record_type, subDomain=subdomain)
for record in records:
self.delete_record(zone_name, record)
def create_record(self, zone_name, params):
"""
Create a record
:param zone_name: Name of the zone
:param params: {'fieldType': 'A', 'ttl': 60, 'subDomain': 'www',
'target': '1.2.3.4'
"""
self.log.debug('Create record: zone: %s, id %s', zone_name,
params)
return self._client.post('/domain/zone/{}/record'.format(zone_name),
**params)
|
github/octodns | octodns/provider/ovh.py | OvhProvider.create_record | python | def create_record(self, zone_name, params):
self.log.debug('Create record: zone: %s, id %s', zone_name,
params)
return self._client.post('/domain/zone/{}/record'.format(zone_name),
**params) | Create a record
:param zone_name: Name of the zone
:param params: {'fieldType': 'A', 'ttl': 60, 'subDomain': 'www',
'target': '1.2.3.4' | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/ovh.py#L393-L403 | null | class OvhProvider(BaseProvider):
"""
OVH provider using API v6
ovh:
class: octodns.provider.ovh.OvhProvider
# OVH api v6 endpoint
endpoint: ovh-eu
# API application key
application_key: 1234
# API application secret
application_secret: 1234
# API consumer key
consumer_key: 1234
"""
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
ZONE_NOT_FOUND_MESSAGE = 'This service does not exist'
# This variable is also used in populate method to filter which OVH record
# types are supported by octodns
SUPPORTS = set(('A', 'AAAA', 'CNAME', 'DKIM', 'MX', 'NAPTR', 'NS', 'PTR',
'SPF', 'SRV', 'SSHFP', 'TXT'))
def __init__(self, id, endpoint, application_key, application_secret,
consumer_key, *args, **kwargs):
self.log = logging.getLogger('OvhProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, endpoint=%s, application_key=%s, '
'application_secret=***, consumer_key=%s', id, endpoint,
application_key, consumer_key)
super(OvhProvider, self).__init__(id, *args, **kwargs)
self._client = ovh.Client(
endpoint=endpoint,
application_key=application_key,
application_secret=application_secret,
consumer_key=consumer_key,
)
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
zone_name = zone.name[:-1]
try:
records = self.get_records(zone_name=zone_name)
exists = True
except ResourceNotFoundError as e:
if e.message != self.ZONE_NOT_FOUND_MESSAGE:
raise
exists = False
records = []
values = defaultdict(lambda: defaultdict(list))
for record in records:
values[record['subDomain']][record['fieldType']].append(record)
before = len(zone.records)
for name, types in values.items():
for _type, records in types.items():
if _type not in self.SUPPORTS:
self.log.warning('Not managed record of type %s, skip',
_type)
continue
data_for = getattr(self, '_data_for_{}'.format(_type))
record = Record.new(zone, name, data_for(_type, records),
source=self, lenient=lenient)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
zone_name = desired.name[:-1]
self.log.info('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name).lower())(zone_name,
change)
# We need to refresh the zone to really apply the changes
self._client.post('/domain/zone/{}/refresh'.format(zone_name))
def _apply_create(self, zone_name, change):
new = change.new
params_for = getattr(self, '_params_for_{}'.format(new._type))
for params in params_for(new):
self.create_record(zone_name, params)
def _apply_update(self, zone_name, change):
self._apply_delete(zone_name, change)
self._apply_create(zone_name, change)
def _apply_delete(self, zone_name, change):
existing = change.existing
record_type = existing._type
if record_type == "TXT":
if self._is_valid_dkim(existing.values[0]):
record_type = 'DKIM'
self.delete_records(zone_name, record_type, existing.name)
@staticmethod
def _data_for_multiple(_type, records):
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': [record['target'] for record in records]
}
@staticmethod
def _data_for_single(_type, records):
record = records[0]
return {
'ttl': record['ttl'],
'type': _type,
'value': record['target']
}
@staticmethod
def _data_for_MX(_type, records):
values = []
for record in records:
preference, exchange = record['target'].split(' ', 1)
values.append({
'preference': preference,
'exchange': exchange,
})
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values,
}
@staticmethod
def _data_for_NAPTR(_type, records):
values = []
for record in records:
order, preference, flags, service, regexp, replacement = record[
'target'].split(' ', 5)
values.append({
'flags': flags[1:-1],
'order': order,
'preference': preference,
'regexp': regexp[1:-1],
'replacement': replacement,
'service': service[1:-1],
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SRV(_type, records):
values = []
for record in records:
priority, weight, port, target = record['target'].split(' ', 3)
values.append({
'port': port,
'priority': priority,
'target': '{}.'.format(target),
'weight': weight
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_SSHFP(_type, records):
values = []
for record in records:
algorithm, fingerprint_type, fingerprint = record['target'].split(
' ', 2)
values.append({
'algorithm': algorithm,
'fingerprint': fingerprint,
'fingerprint_type': fingerprint_type
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
@staticmethod
def _data_for_DKIM(_type, records):
return {
'ttl': records[0]['ttl'],
'type': "TXT",
'values': [record['target'].replace(';', '\\;')
for record in records]
}
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
_data_for_NS = _data_for_multiple
_data_for_TXT = _data_for_multiple
_data_for_SPF = _data_for_multiple
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
@staticmethod
def _params_for_multiple(record):
for value in record.values:
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type,
}
@staticmethod
def _params_for_single(record):
yield {
'target': record.value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_MX(record):
for value in record.values:
yield {
'target': '%d %s' % (value.preference, value.exchange),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_NAPTR(record):
for value in record.values:
content = '{} {} "{}" "{}" "{}" {}' \
.format(value.order, value.preference, value.flags,
value.service, value.regexp, value.replacement)
yield {
'target': content,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SRV(record):
for value in record.values:
yield {
'target': '{} {} {} {}'.format(value.priority,
value.weight,
value.port,
value.target),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
@staticmethod
def _params_for_SSHFP(record):
for value in record.values:
yield {
'target': '{} {} {}'.format(value.algorithm,
value.fingerprint_type,
value.fingerprint),
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': record._type
}
def _params_for_TXT(self, record):
for value in record.values:
field_type = 'TXT'
if self._is_valid_dkim(value):
field_type = 'DKIM'
value = value.replace("\\;", ";")
yield {
'target': value,
'subDomain': record.name,
'ttl': record.ttl,
'fieldType': field_type
}
_params_for_A = _params_for_multiple
_params_for_AAAA = _params_for_multiple
_params_for_NS = _params_for_multiple
_params_for_SPF = _params_for_multiple
_params_for_CNAME = _params_for_single
_params_for_PTR = _params_for_single
def _is_valid_dkim(self, value):
"""Check if value is a valid DKIM"""
validator_dict = {'h': lambda val: val in ['sha1', 'sha256'],
's': lambda val: val in ['*', 'email'],
't': lambda val: val in ['y', 's'],
'v': lambda val: val == 'DKIM1',
'k': lambda val: val == 'rsa',
'n': lambda _: True,
'g': lambda _: True}
splitted = value.split('\\;')
found_key = False
for splitted_value in splitted:
sub_split = map(lambda x: x.strip(), splitted_value.split("=", 1))
if len(sub_split) < 2:
return False
key, value = sub_split[0], sub_split[1]
if key == "p":
is_valid_key = self._is_valid_dkim_key(value)
if not is_valid_key:
return False
found_key = True
else:
is_valid_key = validator_dict.get(key, lambda _: False)(value)
if not is_valid_key:
return False
return found_key
@staticmethod
def _is_valid_dkim_key(key):
try:
base64.decodestring(key)
except binascii.Error:
return False
return True
def get_records(self, zone_name):
"""
List all records of a DNS zone
:param zone_name: Name of zone
:return: list of id's records
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name))
return [self.get_record(zone_name, record_id) for record_id in records]
def get_record(self, zone_name, record_id):
"""
Get record with given id
:param zone_name: Name of the zone
:param record_id: Id of the record
:return: Value of the record
"""
return self._client.get(
'/domain/zone/{}/record/{}'.format(zone_name, record_id))
def delete_records(self, zone_name, record_type, subdomain):
"""
Delete record from have fieldType=type and subDomain=subdomain
:param zone_name: Name of the zone
:param record_type: fieldType
:param subdomain: subDomain
"""
records = self._client.get('/domain/zone/{}/record'.format(zone_name),
fieldType=record_type, subDomain=subdomain)
for record in records:
self.delete_record(zone_name, record)
def delete_record(self, zone_name, record_id):
"""
Delete record with a given id
:param zone_name: Name of the zone
:param record_id: Id of the record
"""
self.log.debug('Delete record: zone: %s, id %s', zone_name,
record_id)
self._client.delete(
'/domain/zone/{}/record/{}'.format(zone_name, record_id))
|
github/octodns | octodns/provider/googlecloud.py | GoogleCloudProvider._apply | python | def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
# Get gcloud zone, or create one if none existed before.
if desired.name not in self.gcloud_zones:
gcloud_zone = self._create_gcloud_zone(desired.name)
else:
gcloud_zone = self.gcloud_zones.get(desired.name)
gcloud_changes = gcloud_zone.changes()
for change in changes:
class_name = change.__class__.__name__
_rrset_func = getattr(
self, '_rrset_for_{}'.format(change.record._type))
if class_name == 'Create':
gcloud_changes.add_record_set(
_rrset_func(gcloud_zone, change.record))
elif class_name == 'Delete':
gcloud_changes.delete_record_set(
_rrset_func(gcloud_zone, change.record))
elif class_name == 'Update':
gcloud_changes.delete_record_set(
_rrset_func(gcloud_zone, change.existing))
gcloud_changes.add_record_set(
_rrset_func(gcloud_zone, change.new))
else:
raise RuntimeError('Change type "{}" for change "{!s}" '
'is none of "Create", "Delete" or "Update'
.format(class_name, change))
gcloud_changes.create()
for i in range(120):
gcloud_changes.reload()
# https://cloud.google.com/dns/api/v1/changes#resource
# status can be one of either "pending" or "done"
if gcloud_changes.status != 'pending':
break
self.log.debug("Waiting for changes to complete")
time.sleep(self.CHANGE_LOOP_WAIT)
if gcloud_changes.status != 'done':
raise RuntimeError("Timeout reached after {} seconds".format(
i * self.CHANGE_LOOP_WAIT)) | Required function of manager.py to actually apply a record change.
:param plan: Contains the zones and changes to be made
:type plan: octodns.provider.base.Plan
:type return: void | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/googlecloud.py#L64-L120 | null | class GoogleCloudProvider(BaseProvider):
"""
Google Cloud DNS provider
google_cloud:
class: octodns.provider.googlecloud.GoogleCloudProvider
# Credentials file for a service_account or other account can be
# specified with the GOOGLE_APPLICATION_CREDENTIALS environment
# variable. (https://console.cloud.google.com/apis/credentials)
#
# The project to work on (not required)
# project: foobar
#
# The File with the google credentials (not required). If used, the
# "project" parameter needs to be set, else it will fall back to the
# "default credentials"
# credentials_file: ~/google_cloud_credentials_file.json
#
"""
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
CHANGE_LOOP_WAIT = 5
def __init__(self, id, project=None, credentials_file=None,
*args, **kwargs):
if credentials_file:
self.gcloud_client = dns.Client.from_service_account_json(
credentials_file, project=project)
else:
self.gcloud_client = dns.Client(project=project)
# Logger
self.log = getLogger('GoogleCloudProvider[{}]'.format(id))
self.id = id
self._gcloud_zones = {}
super(GoogleCloudProvider, self).__init__(id, *args, **kwargs)
def _create_gcloud_zone(self, dns_name):
"""Creates a google cloud ManagedZone with dns_name, and zone named
derived from it. calls .create() method and returns it.
:param dns_name: fqdn of zone to create
:type dns_name: str
:type return: new google.cloud.dns.ManagedZone
"""
# Zone name must begin with a letter, end with a letter or digit,
# and only contain lowercase letters, digits or dashes,
# and be 63 characters or less
zone_name = 'zone-{}-{}'.format(
dns_name.replace('.', '-'), uuid4().hex)[:63]
gcloud_zone = self.gcloud_client.zone(
name=zone_name,
dns_name=dns_name
)
gcloud_zone.create(client=self.gcloud_client)
# add this new zone to the list of zones.
self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
self.log.info("Created zone {}. Fqdn {}.".format(zone_name, dns_name))
return gcloud_zone
def _get_gcloud_records(self, gcloud_zone, page_token=None):
""" Generator function which yields ResourceRecordSet for the managed
gcloud zone, until there are no more records to pull.
:param gcloud_zone: zone to pull records from
:type gcloud_zone: google.cloud.dns.ManagedZone
:param page_token: page token for the page to get
:return: a resource record set
:type return: google.cloud.dns.ResourceRecordSet
"""
gcloud_iterator = gcloud_zone.list_resource_record_sets(
page_token=page_token)
for gcloud_record in gcloud_iterator:
yield gcloud_record
# This is to get results which may be on a "paged" page.
# (if more than max_results) entries.
if gcloud_iterator.next_page_token:
for gcloud_record in self._get_gcloud_records(
gcloud_zone, gcloud_iterator.next_page_token):
# yield from is in python 3 only.
yield gcloud_record
def _get_cloud_zones(self, page_token=None):
"""Load all ManagedZones into the self._gcloud_zones dict which is
mapped with the dns_name as key.
:return: void
"""
gcloud_zones = self.gcloud_client.list_zones(page_token=page_token)
for gcloud_zone in gcloud_zones:
self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
if gcloud_zones.next_page_token:
self._get_cloud_zones(gcloud_zones.next_page_token)
@property
def gcloud_zones(self):
if not self._gcloud_zones:
self._get_cloud_zones()
return self._gcloud_zones
def populate(self, zone, target=False, lenient=False):
"""Required function of manager.py to collect records from zone.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void
"""
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
exists = False
before = len(zone.records)
gcloud_zone = self.gcloud_zones.get(zone.name)
if gcloud_zone:
exists = True
for gcloud_record in self._get_gcloud_records(gcloud_zone):
if gcloud_record.record_type.upper() not in self.SUPPORTS:
continue
record_name = gcloud_record.name
if record_name.endswith(zone.name):
# google cloud always return fqdn. Make relative record
# here. "root" records will then get the '' record_name,
# which is also the way octodns likes it.
record_name = record_name[:-(len(zone.name) + 1)]
typ = gcloud_record.record_type.upper()
data = getattr(self, '_data_for_{}'.format(typ))
data = data(gcloud_record)
data['type'] = typ
data['ttl'] = gcloud_record.ttl
self.log.debug('populate: adding record {} records: {!s}'
.format(record_name, data))
record = Record.new(zone, record_name, data, source=self)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _data_for_A(self, gcloud_record):
return {
'values': gcloud_record.rrdatas
}
_data_for_AAAA = _data_for_A
def _data_for_CAA(self, gcloud_record):
return {
'values': [{
'flags': v[0],
'tag': v[1],
'value': v[2]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
def _data_for_CNAME(self, gcloud_record):
return {
'value': gcloud_record.rrdatas[0]
}
def _data_for_MX(self, gcloud_record):
return {'values': [{
"preference": v[0],
"exchange": v[1]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
def _data_for_NAPTR(self, gcloud_record):
return {'values': [{
'order': v[0],
'preference': v[1],
'flags': v[2],
'service': v[3],
'regexp': v[4],
'replacement': v[5]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
_data_for_NS = _data_for_A
_data_for_PTR = _data_for_CNAME
_fix_semicolons = re.compile(r'(?<!\\);')
def _data_for_SPF(self, gcloud_record):
if len(gcloud_record.rrdatas) > 1:
return {
'values': [self._fix_semicolons.sub('\\;', rr)
for rr in gcloud_record.rrdatas]}
return {
'value': self._fix_semicolons.sub('\\;', gcloud_record.rrdatas[0])}
def _data_for_SRV(self, gcloud_record):
return {'values': [{
'priority': v[0],
'weight': v[1],
'port': v[2],
'target': v[3]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
_data_for_TXT = _data_for_SPF
def _rrset_for_A(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, record.values)
_rrset_for_AAAA = _rrset_for_A
def _rrset_for_CAA(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} {}'.format(v.flags, v.tag, v.value)
for v in record.values])
def _rrset_for_CNAME(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [record.value])
def _rrset_for_MX(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {}'.format(v.preference, v.exchange)
for v in record.values])
def _rrset_for_NAPTR(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} "{}" "{}" "{}" {}'.format(
v.order, v.preference, v.flags, v.service,
v.regexp, v.replacement) for v in record.values])
_rrset_for_NS = _rrset_for_A
_rrset_for_PTR = _rrset_for_CNAME
def _rrset_for_SPF(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, record.chunked_values)
def _rrset_for_SRV(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} {} {}'
.format(v.priority, v.weight, v.port, v.target)
for v in record.values])
_rrset_for_TXT = _rrset_for_SPF
|
github/octodns | octodns/provider/googlecloud.py | GoogleCloudProvider._create_gcloud_zone | python | def _create_gcloud_zone(self, dns_name):
# Zone name must begin with a letter, end with a letter or digit,
# and only contain lowercase letters, digits or dashes,
# and be 63 characters or less
zone_name = 'zone-{}-{}'.format(
dns_name.replace('.', '-'), uuid4().hex)[:63]
gcloud_zone = self.gcloud_client.zone(
name=zone_name,
dns_name=dns_name
)
gcloud_zone.create(client=self.gcloud_client)
# add this new zone to the list of zones.
self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
self.log.info("Created zone {}. Fqdn {}.".format(zone_name, dns_name))
return gcloud_zone | Creates a google cloud ManagedZone with dns_name, and zone named
derived from it. calls .create() method and returns it.
:param dns_name: fqdn of zone to create
:type dns_name: str
:type return: new google.cloud.dns.ManagedZone | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/googlecloud.py#L122-L148 | null | class GoogleCloudProvider(BaseProvider):
"""
Google Cloud DNS provider
google_cloud:
class: octodns.provider.googlecloud.GoogleCloudProvider
# Credentials file for a service_account or other account can be
# specified with the GOOGLE_APPLICATION_CREDENTIALS environment
# variable. (https://console.cloud.google.com/apis/credentials)
#
# The project to work on (not required)
# project: foobar
#
# The File with the google credentials (not required). If used, the
# "project" parameter needs to be set, else it will fall back to the
# "default credentials"
# credentials_file: ~/google_cloud_credentials_file.json
#
"""
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
CHANGE_LOOP_WAIT = 5
def __init__(self, id, project=None, credentials_file=None,
*args, **kwargs):
if credentials_file:
self.gcloud_client = dns.Client.from_service_account_json(
credentials_file, project=project)
else:
self.gcloud_client = dns.Client(project=project)
# Logger
self.log = getLogger('GoogleCloudProvider[{}]'.format(id))
self.id = id
self._gcloud_zones = {}
super(GoogleCloudProvider, self).__init__(id, *args, **kwargs)
def _apply(self, plan):
"""Required function of manager.py to actually apply a record change.
:param plan: Contains the zones and changes to be made
:type plan: octodns.provider.base.Plan
:type return: void
"""
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
# Get gcloud zone, or create one if none existed before.
if desired.name not in self.gcloud_zones:
gcloud_zone = self._create_gcloud_zone(desired.name)
else:
gcloud_zone = self.gcloud_zones.get(desired.name)
gcloud_changes = gcloud_zone.changes()
for change in changes:
class_name = change.__class__.__name__
_rrset_func = getattr(
self, '_rrset_for_{}'.format(change.record._type))
if class_name == 'Create':
gcloud_changes.add_record_set(
_rrset_func(gcloud_zone, change.record))
elif class_name == 'Delete':
gcloud_changes.delete_record_set(
_rrset_func(gcloud_zone, change.record))
elif class_name == 'Update':
gcloud_changes.delete_record_set(
_rrset_func(gcloud_zone, change.existing))
gcloud_changes.add_record_set(
_rrset_func(gcloud_zone, change.new))
else:
raise RuntimeError('Change type "{}" for change "{!s}" '
'is none of "Create", "Delete" or "Update'
.format(class_name, change))
gcloud_changes.create()
for i in range(120):
gcloud_changes.reload()
# https://cloud.google.com/dns/api/v1/changes#resource
# status can be one of either "pending" or "done"
if gcloud_changes.status != 'pending':
break
self.log.debug("Waiting for changes to complete")
time.sleep(self.CHANGE_LOOP_WAIT)
if gcloud_changes.status != 'done':
raise RuntimeError("Timeout reached after {} seconds".format(
i * self.CHANGE_LOOP_WAIT))
def _get_gcloud_records(self, gcloud_zone, page_token=None):
""" Generator function which yields ResourceRecordSet for the managed
gcloud zone, until there are no more records to pull.
:param gcloud_zone: zone to pull records from
:type gcloud_zone: google.cloud.dns.ManagedZone
:param page_token: page token for the page to get
:return: a resource record set
:type return: google.cloud.dns.ResourceRecordSet
"""
gcloud_iterator = gcloud_zone.list_resource_record_sets(
page_token=page_token)
for gcloud_record in gcloud_iterator:
yield gcloud_record
# This is to get results which may be on a "paged" page.
# (if more than max_results) entries.
if gcloud_iterator.next_page_token:
for gcloud_record in self._get_gcloud_records(
gcloud_zone, gcloud_iterator.next_page_token):
# yield from is in python 3 only.
yield gcloud_record
def _get_cloud_zones(self, page_token=None):
"""Load all ManagedZones into the self._gcloud_zones dict which is
mapped with the dns_name as key.
:return: void
"""
gcloud_zones = self.gcloud_client.list_zones(page_token=page_token)
for gcloud_zone in gcloud_zones:
self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
if gcloud_zones.next_page_token:
self._get_cloud_zones(gcloud_zones.next_page_token)
@property
def gcloud_zones(self):
if not self._gcloud_zones:
self._get_cloud_zones()
return self._gcloud_zones
def populate(self, zone, target=False, lenient=False):
"""Required function of manager.py to collect records from zone.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void
"""
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
exists = False
before = len(zone.records)
gcloud_zone = self.gcloud_zones.get(zone.name)
if gcloud_zone:
exists = True
for gcloud_record in self._get_gcloud_records(gcloud_zone):
if gcloud_record.record_type.upper() not in self.SUPPORTS:
continue
record_name = gcloud_record.name
if record_name.endswith(zone.name):
# google cloud always return fqdn. Make relative record
# here. "root" records will then get the '' record_name,
# which is also the way octodns likes it.
record_name = record_name[:-(len(zone.name) + 1)]
typ = gcloud_record.record_type.upper()
data = getattr(self, '_data_for_{}'.format(typ))
data = data(gcloud_record)
data['type'] = typ
data['ttl'] = gcloud_record.ttl
self.log.debug('populate: adding record {} records: {!s}'
.format(record_name, data))
record = Record.new(zone, record_name, data, source=self)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _data_for_A(self, gcloud_record):
return {
'values': gcloud_record.rrdatas
}
_data_for_AAAA = _data_for_A
def _data_for_CAA(self, gcloud_record):
return {
'values': [{
'flags': v[0],
'tag': v[1],
'value': v[2]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
def _data_for_CNAME(self, gcloud_record):
return {
'value': gcloud_record.rrdatas[0]
}
def _data_for_MX(self, gcloud_record):
return {'values': [{
"preference": v[0],
"exchange": v[1]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
def _data_for_NAPTR(self, gcloud_record):
return {'values': [{
'order': v[0],
'preference': v[1],
'flags': v[2],
'service': v[3],
'regexp': v[4],
'replacement': v[5]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
_data_for_NS = _data_for_A
_data_for_PTR = _data_for_CNAME
_fix_semicolons = re.compile(r'(?<!\\);')
def _data_for_SPF(self, gcloud_record):
if len(gcloud_record.rrdatas) > 1:
return {
'values': [self._fix_semicolons.sub('\\;', rr)
for rr in gcloud_record.rrdatas]}
return {
'value': self._fix_semicolons.sub('\\;', gcloud_record.rrdatas[0])}
def _data_for_SRV(self, gcloud_record):
return {'values': [{
'priority': v[0],
'weight': v[1],
'port': v[2],
'target': v[3]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
_data_for_TXT = _data_for_SPF
def _rrset_for_A(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, record.values)
_rrset_for_AAAA = _rrset_for_A
def _rrset_for_CAA(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} {}'.format(v.flags, v.tag, v.value)
for v in record.values])
def _rrset_for_CNAME(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [record.value])
def _rrset_for_MX(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {}'.format(v.preference, v.exchange)
for v in record.values])
def _rrset_for_NAPTR(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} "{}" "{}" "{}" {}'.format(
v.order, v.preference, v.flags, v.service,
v.regexp, v.replacement) for v in record.values])
_rrset_for_NS = _rrset_for_A
_rrset_for_PTR = _rrset_for_CNAME
def _rrset_for_SPF(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, record.chunked_values)
def _rrset_for_SRV(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} {} {}'
.format(v.priority, v.weight, v.port, v.target)
for v in record.values])
_rrset_for_TXT = _rrset_for_SPF
|
github/octodns | octodns/provider/googlecloud.py | GoogleCloudProvider._get_gcloud_records | python | def _get_gcloud_records(self, gcloud_zone, page_token=None):
gcloud_iterator = gcloud_zone.list_resource_record_sets(
page_token=page_token)
for gcloud_record in gcloud_iterator:
yield gcloud_record
# This is to get results which may be on a "paged" page.
# (if more than max_results) entries.
if gcloud_iterator.next_page_token:
for gcloud_record in self._get_gcloud_records(
gcloud_zone, gcloud_iterator.next_page_token):
# yield from is in python 3 only.
yield gcloud_record | Generator function which yields ResourceRecordSet for the managed
gcloud zone, until there are no more records to pull.
:param gcloud_zone: zone to pull records from
:type gcloud_zone: google.cloud.dns.ManagedZone
:param page_token: page token for the page to get
:return: a resource record set
:type return: google.cloud.dns.ResourceRecordSet | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/googlecloud.py#L150-L171 | null | class GoogleCloudProvider(BaseProvider):
"""
Google Cloud DNS provider
google_cloud:
class: octodns.provider.googlecloud.GoogleCloudProvider
# Credentials file for a service_account or other account can be
# specified with the GOOGLE_APPLICATION_CREDENTIALS environment
# variable. (https://console.cloud.google.com/apis/credentials)
#
# The project to work on (not required)
# project: foobar
#
# The File with the google credentials (not required). If used, the
# "project" parameter needs to be set, else it will fall back to the
# "default credentials"
# credentials_file: ~/google_cloud_credentials_file.json
#
"""
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
CHANGE_LOOP_WAIT = 5
def __init__(self, id, project=None, credentials_file=None,
*args, **kwargs):
if credentials_file:
self.gcloud_client = dns.Client.from_service_account_json(
credentials_file, project=project)
else:
self.gcloud_client = dns.Client(project=project)
# Logger
self.log = getLogger('GoogleCloudProvider[{}]'.format(id))
self.id = id
self._gcloud_zones = {}
super(GoogleCloudProvider, self).__init__(id, *args, **kwargs)
def _apply(self, plan):
"""Required function of manager.py to actually apply a record change.
:param plan: Contains the zones and changes to be made
:type plan: octodns.provider.base.Plan
:type return: void
"""
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
# Get gcloud zone, or create one if none existed before.
if desired.name not in self.gcloud_zones:
gcloud_zone = self._create_gcloud_zone(desired.name)
else:
gcloud_zone = self.gcloud_zones.get(desired.name)
gcloud_changes = gcloud_zone.changes()
for change in changes:
class_name = change.__class__.__name__
_rrset_func = getattr(
self, '_rrset_for_{}'.format(change.record._type))
if class_name == 'Create':
gcloud_changes.add_record_set(
_rrset_func(gcloud_zone, change.record))
elif class_name == 'Delete':
gcloud_changes.delete_record_set(
_rrset_func(gcloud_zone, change.record))
elif class_name == 'Update':
gcloud_changes.delete_record_set(
_rrset_func(gcloud_zone, change.existing))
gcloud_changes.add_record_set(
_rrset_func(gcloud_zone, change.new))
else:
raise RuntimeError('Change type "{}" for change "{!s}" '
'is none of "Create", "Delete" or "Update'
.format(class_name, change))
gcloud_changes.create()
for i in range(120):
gcloud_changes.reload()
# https://cloud.google.com/dns/api/v1/changes#resource
# status can be one of either "pending" or "done"
if gcloud_changes.status != 'pending':
break
self.log.debug("Waiting for changes to complete")
time.sleep(self.CHANGE_LOOP_WAIT)
if gcloud_changes.status != 'done':
raise RuntimeError("Timeout reached after {} seconds".format(
i * self.CHANGE_LOOP_WAIT))
def _create_gcloud_zone(self, dns_name):
"""Creates a google cloud ManagedZone with dns_name, and zone named
derived from it. calls .create() method and returns it.
:param dns_name: fqdn of zone to create
:type dns_name: str
:type return: new google.cloud.dns.ManagedZone
"""
# Zone name must begin with a letter, end with a letter or digit,
# and only contain lowercase letters, digits or dashes,
# and be 63 characters or less
zone_name = 'zone-{}-{}'.format(
dns_name.replace('.', '-'), uuid4().hex)[:63]
gcloud_zone = self.gcloud_client.zone(
name=zone_name,
dns_name=dns_name
)
gcloud_zone.create(client=self.gcloud_client)
# add this new zone to the list of zones.
self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
self.log.info("Created zone {}. Fqdn {}.".format(zone_name, dns_name))
return gcloud_zone
def _get_cloud_zones(self, page_token=None):
"""Load all ManagedZones into the self._gcloud_zones dict which is
mapped with the dns_name as key.
:return: void
"""
gcloud_zones = self.gcloud_client.list_zones(page_token=page_token)
for gcloud_zone in gcloud_zones:
self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
if gcloud_zones.next_page_token:
self._get_cloud_zones(gcloud_zones.next_page_token)
@property
def gcloud_zones(self):
if not self._gcloud_zones:
self._get_cloud_zones()
return self._gcloud_zones
def populate(self, zone, target=False, lenient=False):
"""Required function of manager.py to collect records from zone.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void
"""
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
exists = False
before = len(zone.records)
gcloud_zone = self.gcloud_zones.get(zone.name)
if gcloud_zone:
exists = True
for gcloud_record in self._get_gcloud_records(gcloud_zone):
if gcloud_record.record_type.upper() not in self.SUPPORTS:
continue
record_name = gcloud_record.name
if record_name.endswith(zone.name):
# google cloud always return fqdn. Make relative record
# here. "root" records will then get the '' record_name,
# which is also the way octodns likes it.
record_name = record_name[:-(len(zone.name) + 1)]
typ = gcloud_record.record_type.upper()
data = getattr(self, '_data_for_{}'.format(typ))
data = data(gcloud_record)
data['type'] = typ
data['ttl'] = gcloud_record.ttl
self.log.debug('populate: adding record {} records: {!s}'
.format(record_name, data))
record = Record.new(zone, record_name, data, source=self)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _data_for_A(self, gcloud_record):
return {
'values': gcloud_record.rrdatas
}
_data_for_AAAA = _data_for_A
def _data_for_CAA(self, gcloud_record):
return {
'values': [{
'flags': v[0],
'tag': v[1],
'value': v[2]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
def _data_for_CNAME(self, gcloud_record):
return {
'value': gcloud_record.rrdatas[0]
}
def _data_for_MX(self, gcloud_record):
return {'values': [{
"preference": v[0],
"exchange": v[1]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
def _data_for_NAPTR(self, gcloud_record):
return {'values': [{
'order': v[0],
'preference': v[1],
'flags': v[2],
'service': v[3],
'regexp': v[4],
'replacement': v[5]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
_data_for_NS = _data_for_A
_data_for_PTR = _data_for_CNAME
_fix_semicolons = re.compile(r'(?<!\\);')
def _data_for_SPF(self, gcloud_record):
if len(gcloud_record.rrdatas) > 1:
return {
'values': [self._fix_semicolons.sub('\\;', rr)
for rr in gcloud_record.rrdatas]}
return {
'value': self._fix_semicolons.sub('\\;', gcloud_record.rrdatas[0])}
def _data_for_SRV(self, gcloud_record):
return {'values': [{
'priority': v[0],
'weight': v[1],
'port': v[2],
'target': v[3]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
_data_for_TXT = _data_for_SPF
def _rrset_for_A(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, record.values)
_rrset_for_AAAA = _rrset_for_A
def _rrset_for_CAA(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} {}'.format(v.flags, v.tag, v.value)
for v in record.values])
def _rrset_for_CNAME(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [record.value])
def _rrset_for_MX(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {}'.format(v.preference, v.exchange)
for v in record.values])
def _rrset_for_NAPTR(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} "{}" "{}" "{}" {}'.format(
v.order, v.preference, v.flags, v.service,
v.regexp, v.replacement) for v in record.values])
_rrset_for_NS = _rrset_for_A
_rrset_for_PTR = _rrset_for_CNAME
def _rrset_for_SPF(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, record.chunked_values)
def _rrset_for_SRV(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} {} {}'
.format(v.priority, v.weight, v.port, v.target)
for v in record.values])
_rrset_for_TXT = _rrset_for_SPF
|
github/octodns | octodns/provider/googlecloud.py | GoogleCloudProvider._get_cloud_zones | python | def _get_cloud_zones(self, page_token=None):
gcloud_zones = self.gcloud_client.list_zones(page_token=page_token)
for gcloud_zone in gcloud_zones:
self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
if gcloud_zones.next_page_token:
self._get_cloud_zones(gcloud_zones.next_page_token) | Load all ManagedZones into the self._gcloud_zones dict which is
mapped with the dns_name as key.
:return: void | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/googlecloud.py#L173-L185 | [
"def _get_cloud_zones(self, page_token=None):\n \"\"\"Load all ManagedZones into the self._gcloud_zones dict which is\n mapped with the dns_name as key.\n\n :return: void\n \"\"\"\n\n gcloud_zones = self.gcloud_client.list_zones(page_token=page_token)\n for gcloud_zone in gcloud_zones:\n se... | class GoogleCloudProvider(BaseProvider):
"""
Google Cloud DNS provider
google_cloud:
class: octodns.provider.googlecloud.GoogleCloudProvider
# Credentials file for a service_account or other account can be
# specified with the GOOGLE_APPLICATION_CREDENTIALS environment
# variable. (https://console.cloud.google.com/apis/credentials)
#
# The project to work on (not required)
# project: foobar
#
# The File with the google credentials (not required). If used, the
# "project" parameter needs to be set, else it will fall back to the
# "default credentials"
# credentials_file: ~/google_cloud_credentials_file.json
#
"""
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
CHANGE_LOOP_WAIT = 5
def __init__(self, id, project=None, credentials_file=None,
*args, **kwargs):
if credentials_file:
self.gcloud_client = dns.Client.from_service_account_json(
credentials_file, project=project)
else:
self.gcloud_client = dns.Client(project=project)
# Logger
self.log = getLogger('GoogleCloudProvider[{}]'.format(id))
self.id = id
self._gcloud_zones = {}
super(GoogleCloudProvider, self).__init__(id, *args, **kwargs)
def _apply(self, plan):
"""Required function of manager.py to actually apply a record change.
:param plan: Contains the zones and changes to be made
:type plan: octodns.provider.base.Plan
:type return: void
"""
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
# Get gcloud zone, or create one if none existed before.
if desired.name not in self.gcloud_zones:
gcloud_zone = self._create_gcloud_zone(desired.name)
else:
gcloud_zone = self.gcloud_zones.get(desired.name)
gcloud_changes = gcloud_zone.changes()
for change in changes:
class_name = change.__class__.__name__
_rrset_func = getattr(
self, '_rrset_for_{}'.format(change.record._type))
if class_name == 'Create':
gcloud_changes.add_record_set(
_rrset_func(gcloud_zone, change.record))
elif class_name == 'Delete':
gcloud_changes.delete_record_set(
_rrset_func(gcloud_zone, change.record))
elif class_name == 'Update':
gcloud_changes.delete_record_set(
_rrset_func(gcloud_zone, change.existing))
gcloud_changes.add_record_set(
_rrset_func(gcloud_zone, change.new))
else:
raise RuntimeError('Change type "{}" for change "{!s}" '
'is none of "Create", "Delete" or "Update'
.format(class_name, change))
gcloud_changes.create()
for i in range(120):
gcloud_changes.reload()
# https://cloud.google.com/dns/api/v1/changes#resource
# status can be one of either "pending" or "done"
if gcloud_changes.status != 'pending':
break
self.log.debug("Waiting for changes to complete")
time.sleep(self.CHANGE_LOOP_WAIT)
if gcloud_changes.status != 'done':
raise RuntimeError("Timeout reached after {} seconds".format(
i * self.CHANGE_LOOP_WAIT))
def _create_gcloud_zone(self, dns_name):
"""Creates a google cloud ManagedZone with dns_name, and zone named
derived from it. calls .create() method and returns it.
:param dns_name: fqdn of zone to create
:type dns_name: str
:type return: new google.cloud.dns.ManagedZone
"""
# Zone name must begin with a letter, end with a letter or digit,
# and only contain lowercase letters, digits or dashes,
# and be 63 characters or less
zone_name = 'zone-{}-{}'.format(
dns_name.replace('.', '-'), uuid4().hex)[:63]
gcloud_zone = self.gcloud_client.zone(
name=zone_name,
dns_name=dns_name
)
gcloud_zone.create(client=self.gcloud_client)
# add this new zone to the list of zones.
self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
self.log.info("Created zone {}. Fqdn {}.".format(zone_name, dns_name))
return gcloud_zone
def _get_gcloud_records(self, gcloud_zone, page_token=None):
""" Generator function which yields ResourceRecordSet for the managed
gcloud zone, until there are no more records to pull.
:param gcloud_zone: zone to pull records from
:type gcloud_zone: google.cloud.dns.ManagedZone
:param page_token: page token for the page to get
:return: a resource record set
:type return: google.cloud.dns.ResourceRecordSet
"""
gcloud_iterator = gcloud_zone.list_resource_record_sets(
page_token=page_token)
for gcloud_record in gcloud_iterator:
yield gcloud_record
# This is to get results which may be on a "paged" page.
# (if more than max_results) entries.
if gcloud_iterator.next_page_token:
for gcloud_record in self._get_gcloud_records(
gcloud_zone, gcloud_iterator.next_page_token):
# yield from is in python 3 only.
yield gcloud_record
@property
def gcloud_zones(self):
if not self._gcloud_zones:
self._get_cloud_zones()
return self._gcloud_zones
def populate(self, zone, target=False, lenient=False):
"""Required function of manager.py to collect records from zone.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void
"""
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
exists = False
before = len(zone.records)
gcloud_zone = self.gcloud_zones.get(zone.name)
if gcloud_zone:
exists = True
for gcloud_record in self._get_gcloud_records(gcloud_zone):
if gcloud_record.record_type.upper() not in self.SUPPORTS:
continue
record_name = gcloud_record.name
if record_name.endswith(zone.name):
# google cloud always return fqdn. Make relative record
# here. "root" records will then get the '' record_name,
# which is also the way octodns likes it.
record_name = record_name[:-(len(zone.name) + 1)]
typ = gcloud_record.record_type.upper()
data = getattr(self, '_data_for_{}'.format(typ))
data = data(gcloud_record)
data['type'] = typ
data['ttl'] = gcloud_record.ttl
self.log.debug('populate: adding record {} records: {!s}'
.format(record_name, data))
record = Record.new(zone, record_name, data, source=self)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _data_for_A(self, gcloud_record):
return {
'values': gcloud_record.rrdatas
}
_data_for_AAAA = _data_for_A
def _data_for_CAA(self, gcloud_record):
return {
'values': [{
'flags': v[0],
'tag': v[1],
'value': v[2]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
def _data_for_CNAME(self, gcloud_record):
return {
'value': gcloud_record.rrdatas[0]
}
def _data_for_MX(self, gcloud_record):
return {'values': [{
"preference": v[0],
"exchange": v[1]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
def _data_for_NAPTR(self, gcloud_record):
return {'values': [{
'order': v[0],
'preference': v[1],
'flags': v[2],
'service': v[3],
'regexp': v[4],
'replacement': v[5]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
_data_for_NS = _data_for_A
_data_for_PTR = _data_for_CNAME
_fix_semicolons = re.compile(r'(?<!\\);')
def _data_for_SPF(self, gcloud_record):
if len(gcloud_record.rrdatas) > 1:
return {
'values': [self._fix_semicolons.sub('\\;', rr)
for rr in gcloud_record.rrdatas]}
return {
'value': self._fix_semicolons.sub('\\;', gcloud_record.rrdatas[0])}
def _data_for_SRV(self, gcloud_record):
return {'values': [{
'priority': v[0],
'weight': v[1],
'port': v[2],
'target': v[3]}
for v in [shlex.split(g) for g in gcloud_record.rrdatas]]}
_data_for_TXT = _data_for_SPF
def _rrset_for_A(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, record.values)
_rrset_for_AAAA = _rrset_for_A
def _rrset_for_CAA(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} {}'.format(v.flags, v.tag, v.value)
for v in record.values])
def _rrset_for_CNAME(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [record.value])
def _rrset_for_MX(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {}'.format(v.preference, v.exchange)
for v in record.values])
def _rrset_for_NAPTR(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} "{}" "{}" "{}" {}'.format(
v.order, v.preference, v.flags, v.service,
v.regexp, v.replacement) for v in record.values])
_rrset_for_NS = _rrset_for_A
_rrset_for_PTR = _rrset_for_CNAME
def _rrset_for_SPF(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, record.chunked_values)
def _rrset_for_SRV(self, gcloud_zone, record):
return gcloud_zone.resource_record_set(
record.fqdn, record._type, record.ttl, [
'{} {} {} {}'
.format(v.priority, v.weight, v.port, v.target)
for v in record.values])
_rrset_for_TXT = _rrset_for_SPF
|
github/octodns | octodns/provider/googlecloud.py | GoogleCloudProvider.populate | python | def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
exists = False
before = len(zone.records)
gcloud_zone = self.gcloud_zones.get(zone.name)
if gcloud_zone:
exists = True
for gcloud_record in self._get_gcloud_records(gcloud_zone):
if gcloud_record.record_type.upper() not in self.SUPPORTS:
continue
record_name = gcloud_record.name
if record_name.endswith(zone.name):
# google cloud always return fqdn. Make relative record
# here. "root" records will then get the '' record_name,
# which is also the way octodns likes it.
record_name = record_name[:-(len(zone.name) + 1)]
typ = gcloud_record.record_type.upper()
data = getattr(self, '_data_for_{}'.format(typ))
data = data(gcloud_record)
data['type'] = typ
data['ttl'] = gcloud_record.ttl
self.log.debug('populate: adding record {} records: {!s}'
.format(record_name, data))
record = Record.new(zone, record_name, data, source=self)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists | Required function of manager.py to collect records from zone.
:param zone: A dns zone
:type zone: octodns.zone.Zone
:param target: Unused.
:type target: bool
:param lenient: Unused. Check octodns.manager for usage.
:type lenient: bool
:type return: void | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/provider/googlecloud.py#L193-L238 | [
"def new(cls, zone, name, data, source=None, lenient=False):\n fqdn = '{}.{}'.format(name, zone.name) if name else zone.name\n try:\n _type = data['type']\n except KeyError:\n raise Exception('Invalid record {}, missing type'.format(fqdn))\n try:\n _class = {\n 'A': AReco... | class GoogleCloudProvider(BaseProvider):
"""
Google Cloud DNS provider
google_cloud:
class: octodns.provider.googlecloud.GoogleCloudProvider
# Credentials file for a service_account or other account can be
# specified with the GOOGLE_APPLICATION_CREDENTIALS environment
# variable. (https://console.cloud.google.com/apis/credentials)
#
# The project to work on (not required)
# project: foobar
#
# The File with the google credentials (not required). If used, the
# "project" parameter needs to be set, else it will fall back to the
# "default credentials"
# credentials_file: ~/google_cloud_credentials_file.json
#
"""
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
CHANGE_LOOP_WAIT = 5
def __init__(self, id, project=None, credentials_file=None,
*args, **kwargs):
if credentials_file:
self.gcloud_client = dns.Client.from_service_account_json(
credentials_file, project=project)
else:
self.gcloud_client = dns.Client(project=project)
# Logger
self.log = getLogger('GoogleCloudProvider[{}]'.format(id))
self.id = id
self._gcloud_zones = {}
super(GoogleCloudProvider, self).__init__(id, *args, **kwargs)
def _apply(self, plan):
    """Required function of manager.py to actually apply a record change.

    :param plan: Contains the zones and changes to be made
    :type plan: octodns.provider.base.Plan

    :type return: void
    """
    desired = plan.desired
    changes = plan.changes

    self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
                   len(changes))

    # Get gcloud zone, or create one if none existed before.
    if desired.name not in self.gcloud_zones:
        gcloud_zone = self._create_gcloud_zone(desired.name)
    else:
        gcloud_zone = self.gcloud_zones.get(desired.name)

    gcloud_changes = gcloud_zone.changes()

    for change in changes:
        class_name = change.__class__.__name__
        # Look up the per-record-type rrset serializer, e.g.
        # _rrset_for_A for an A record.
        _rrset_func = getattr(
            self, '_rrset_for_{}'.format(change.record._type))
        if class_name == 'Create':
            gcloud_changes.add_record_set(
                _rrset_func(gcloud_zone, change.record))
        elif class_name == 'Delete':
            gcloud_changes.delete_record_set(
                _rrset_func(gcloud_zone, change.record))
        elif class_name == 'Update':
            # Cloud DNS models an update as delete-old plus add-new.
            gcloud_changes.delete_record_set(
                _rrset_func(gcloud_zone, change.existing))
            gcloud_changes.add_record_set(
                _rrset_func(gcloud_zone, change.new))
        else:
            # Fixed: the original message was missing the closing quote
            # around "Update".
            raise RuntimeError('Change type "{}" for change "{!s}" '
                               'is none of "Create", "Delete" or "Update"'
                               .format(class_name, change))

    gcloud_changes.create()

    # Poll until the change set is no longer "pending".
    for i in range(120):
        gcloud_changes.reload()
        # https://cloud.google.com/dns/api/v1/changes#resource
        # status can be one of either "pending" or "done"
        if gcloud_changes.status != 'pending':
            break
        self.log.debug("Waiting for changes to complete")
        time.sleep(self.CHANGE_LOOP_WAIT)

    if gcloud_changes.status != 'done':
        # (i + 1) sleeps have elapsed when the loop exhausts; the
        # original reported one interval too few (i * WAIT).
        raise RuntimeError("Timeout reached after {} seconds".format(
            (i + 1) * self.CHANGE_LOOP_WAIT))
def _create_gcloud_zone(self, dns_name):
    """Creates a google cloud ManagedZone with dns_name, and zone named
    derived from it. calls .create() method and returns it.

    :param dns_name: fqdn of zone to create
    :type dns_name: str

    :type return: new google.cloud.dns.ManagedZone
    """
    # Zone name must begin with a letter, end with a letter or digit,
    # and only contain lowercase letters, digits or dashes,
    # and be 63 characters or less
    zone_name = 'zone-{}-{}'.format(
        dns_name.replace('.', '-'), uuid4().hex)[:63]

    gcloud_zone = self.gcloud_client.zone(
        name=zone_name,
        dns_name=dns_name
    )
    gcloud_zone.create(client=self.gcloud_client)

    # add this new zone to the list of zones.
    self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone

    # Lazy %-style args match the logging style used everywhere else in
    # this provider and skip formatting when INFO is disabled.
    self.log.info('Created zone %s. Fqdn %s.', zone_name, dns_name)

    return gcloud_zone
def _get_gcloud_records(self, gcloud_zone, page_token=None):
    """ Generator function which yields ResourceRecordSet for the managed
    gcloud zone, until there are no more records to pull.

    :param gcloud_zone: zone to pull records from
    :type gcloud_zone: google.cloud.dns.ManagedZone
    :param page_token: page token for the page to get

    :return: a resource record set
    :type return: google.cloud.dns.ResourceRecordSet
    """
    # Follow pagination with a loop rather than one recursive call per
    # page; this avoids unbounded recursion depth on large zones and
    # needs no "yield from" (which is python 3 only).
    while True:
        gcloud_iterator = gcloud_zone.list_resource_record_sets(
            page_token=page_token)
        for gcloud_record in gcloud_iterator:
            yield gcloud_record
        # This is to get results which may be on a "paged" page.
        # (if more than max_results) entries.
        page_token = gcloud_iterator.next_page_token
        if not page_token:
            break
def _get_cloud_zones(self, page_token=None):
    """Load all ManagedZones into the self._gcloud_zones dict which is
    mapped with the dns_name as key.

    :return: void
    """
    # Walk result pages iteratively instead of recursing once per page,
    # which could exhaust the recursion limit on many-zone projects.
    while True:
        gcloud_zones = self.gcloud_client.list_zones(page_token=page_token)
        for gcloud_zone in gcloud_zones:
            self._gcloud_zones[gcloud_zone.dns_name] = gcloud_zone
        page_token = gcloud_zones.next_page_token
        if not page_token:
            break
@property
def gcloud_zones(self):
    """dns_name -> ManagedZone mapping, fetched from the API on first
    access and cached for the provider's lifetime."""
    if self._gcloud_zones:
        return self._gcloud_zones
    self._get_cloud_zones()
    return self._gcloud_zones
def _data_for_A(self, gcloud_record):
    """Build octoDNS data for an A rrset: the plain rrdata list."""
    rrdatas = gcloud_record.rrdatas
    return {'values': rrdatas}

# AAAA data has the same plain-list shape.
_data_for_AAAA = _data_for_A
def _data_for_CAA(self, gcloud_record):
    """Build octoDNS data for a CAA rrset.

    Each rrdata is 'flags tag value'; shlex honours the quoting Cloud
    DNS applies to the value field.
    """
    values = []
    for rrdata in gcloud_record.rrdatas:
        tokens = shlex.split(rrdata)
        values.append({
            'flags': tokens[0],
            'tag': tokens[1],
            'value': tokens[2],
        })
    return {'values': values}
def _data_for_CNAME(self, gcloud_record):
    """Build octoDNS data for a CNAME rrset: the single rrdata value."""
    target = gcloud_record.rrdatas[0]
    return {'value': target}
def _data_for_MX(self, gcloud_record):
    """Build octoDNS data for an MX rrset ('preference exchange')."""
    values = []
    for rrdata in gcloud_record.rrdatas:
        tokens = shlex.split(rrdata)
        values.append({
            'preference': tokens[0],
            'exchange': tokens[1],
        })
    return {'values': values}
def _data_for_NAPTR(self, gcloud_record):
    """Build octoDNS data for a NAPTR rrset.

    rrdata layout: order preference "flags" "service" "regexp"
    replacement — shlex strips the quoting.
    """
    values = []
    for rrdata in gcloud_record.rrdatas:
        tokens = shlex.split(rrdata)
        values.append({
            'order': tokens[0],
            'preference': tokens[1],
            'flags': tokens[2],
            'service': tokens[3],
            'regexp': tokens[4],
            'replacement': tokens[5],
        })
    return {'values': values}
# NS records reuse A's plain value-list shape; PTR reuses CNAME's
# single-value shape.
_data_for_NS = _data_for_A
_data_for_PTR = _data_for_CNAME
# Matches any ";" that is not already escaped with a backslash; used by
# the SPF/TXT conversions below.
_fix_semicolons = re.compile(r'(?<!\\);')
def _data_for_SPF(self, gcloud_record):
    """Build octoDNS data for an SPF/TXT rrset.

    octoDNS stores literal semicolons escaped, so every unescaped ";"
    is rewritten to "\\;". A single rrdata maps to 'value', multiple
    rrdatas map to 'values'.
    """
    escaped = [self._fix_semicolons.sub('\\;', rrdata)
               for rrdata in gcloud_record.rrdatas]
    if len(escaped) > 1:
        return {'values': escaped}
    return {'value': escaped[0]}
def _data_for_SRV(self, gcloud_record):
    """Build octoDNS data for an SRV rrset
    ('priority weight port target')."""
    values = []
    for rrdata in gcloud_record.rrdatas:
        tokens = shlex.split(rrdata)
        values.append({
            'priority': tokens[0],
            'weight': tokens[1],
            'port': tokens[2],
            'target': tokens[3],
        })
    return {'values': values}
# TXT data uses the same semicolon-escaping conversion as SPF.
_data_for_TXT = _data_for_SPF
def _rrset_for_A(self, gcloud_zone, record):
    """Serialize an A record into a gcloud ResourceRecordSet."""
    rrdatas = record.values
    return gcloud_zone.resource_record_set(record.fqdn, record._type,
                                           record.ttl, rrdatas)

# AAAA rrsets serialize identically.
_rrset_for_AAAA = _rrset_for_A
def _rrset_for_CAA(self, gcloud_zone, record):
    """Serialize a CAA record ('flags tag value' per rrdata) into a
    gcloud ResourceRecordSet."""
    rrdatas = []
    for value in record.values:
        rrdatas.append('{} {} {}'.format(value.flags, value.tag,
                                         value.value))
    return gcloud_zone.resource_record_set(record.fqdn, record._type,
                                           record.ttl, rrdatas)
def _rrset_for_CNAME(self, gcloud_zone, record):
    """Serialize a CNAME record (single target) into a gcloud
    ResourceRecordSet."""
    rrdatas = [record.value]
    return gcloud_zone.resource_record_set(record.fqdn, record._type,
                                           record.ttl, rrdatas)
def _rrset_for_MX(self, gcloud_zone, record):
    """Serialize an MX record ('preference exchange' per rrdata) into a
    gcloud ResourceRecordSet."""
    rrdatas = []
    for value in record.values:
        rrdatas.append('{} {}'.format(value.preference, value.exchange))
    return gcloud_zone.resource_record_set(record.fqdn, record._type,
                                           record.ttl, rrdatas)
def _rrset_for_NAPTR(self, gcloud_zone, record):
    """Serialize a NAPTR record into a gcloud ResourceRecordSet; the
    flags, service and regexp fields are quoted in the rrdata."""
    rrdatas = []
    for value in record.values:
        rrdatas.append('{} {} "{}" "{}" "{}" {}'.format(
            value.order, value.preference, value.flags, value.service,
            value.regexp, value.replacement))
    return gcloud_zone.resource_record_set(record.fqdn, record._type,
                                           record.ttl, rrdatas)
# NS rrsets serialize like A (plain value list); PTR like CNAME
# (single value).
_rrset_for_NS = _rrset_for_A
_rrset_for_PTR = _rrset_for_CNAME
def _rrset_for_SPF(self, gcloud_zone, record):
    """Serialize an SPF/TXT record into a gcloud ResourceRecordSet using
    the record's pre-chunked string values."""
    rrdatas = record.chunked_values
    return gcloud_zone.resource_record_set(record.fqdn, record._type,
                                           record.ttl, rrdatas)
def _rrset_for_SRV(self, gcloud_zone, record):
    """Serialize an SRV record ('priority weight port target' per
    rrdata) into a gcloud ResourceRecordSet."""
    rrdatas = []
    for value in record.values:
        rrdatas.append('{} {} {} {}'.format(
            value.priority, value.weight, value.port, value.target))
    return gcloud_zone.resource_record_set(record.fqdn, record._type,
                                           record.ttl, rrdatas)
# TXT rrsets use SPF's chunked-value serialization.
_rrset_for_TXT = _rrset_for_SPF
|
github/octodns | octodns/record/geo.py | GeoCodes.validate | python | def validate(cls, code, prefix):
'''
Validates an octoDNS geo code making sure that it is a valid and
corresponding:
* continent
* continent & country
* continent, country, & province
'''
reasons = []
pieces = code.split('-')
n = len(pieces)
if n > 3:
reasons.append('{}invalid geo code "{}"'.format(prefix, code))
elif n > 0 and pieces[0] not in geo_data:
reasons.append('{}unknown continent code "{}"'
.format(prefix, code))
elif n > 1 and pieces[1] not in geo_data[pieces[0]]:
reasons.append('{}unknown country code "{}"'.format(prefix, code))
elif n > 2 and \
pieces[2] not in geo_data[pieces[0]][pieces[1]]['provinces']:
reasons.append('{}unknown province code "{}"'.format(prefix, code))
return reasons | Validates an octoDNS geo code making sure that it is a valid and
corresponding:
* continent
* continent & country
* continent, country, & province | train | https://github.com/github/octodns/blob/65ee60491e22e6bb0a2aa08f7069c6ecf6c3fee6/octodns/record/geo.py#L14-L37 | null | class GeoCodes(object):
log = getLogger('GeoCodes')
@classmethod
@classmethod
def parse(cls, code):
pieces = code.split('-')
try:
country_code = pieces[1]
except IndexError:
country_code = None
try:
province_code = pieces[2]
except IndexError:
province_code = None
return {
'continent_code': pieces[0],
'country_code': country_code,
'province_code': province_code,
}
@classmethod
def country_to_code(cls, country):
for continent, countries in geo_data.items():
if country in countries:
return '{}-{}'.format(continent, country)
cls.log.warn('country_to_code: unrecognized country "%s"', country)
return
@classmethod
def province_to_code(cls, province):
# We get to cheat on this one since we only support provinces in NA-US
if province not in geo_data['NA']['US']['provinces']:
cls.log.warn('country_to_code: unrecognized province "%s"',
province)
return
return 'NA-US-{}'.format(province)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.